diff --git a/.gitignore b/.gitignore index d0f3c6cf9..2b48712a3 100644 --- a/.gitignore +++ b/.gitignore @@ -327,10 +327,38 @@ project/android/.idea/caches/build_file_checksums.ser ### Temps 3rd_party/flatbuffers/tmp -schema/current +# FIXME(haijing): Xcode pre-build stage breaks compilation of flatbuffers by setting envs that do cmake cross-compilation for iOS +# schema/current schema/private tools/converter/source/IR benchmark/benchmark.txt -### Models -*.mnn +### Python MNN +pymnn/android/build/ +pymnn/android/local.properties +pymnn/android/.idea +pymnn/android/.idea/.name +pymnn/android/.idea/gradle.xml +pymnn/android/.idea/misc.xml +pymnn/android/.idea/modules.xml +pymnn/android/.idea/runConfigurations.xml +pymnn/android/.idea/vcs.xml +pymnn/android/.idea/caches/build_file_checksums.ser + +buildios +build*/ +include/MNN/VCS.h +source/backend/opencl/execution/cl/codegen/opencl_program.cc +source/backend/opencl/execution/cl/opencl_program.cc +# FIXME(haijing): MTL issues..... +# source/backend/metal/MetalOPRegister.mm +source/backend/opengl/AllShader.cpp +include/MNN/backend/opengl/shaders/AllShader.h +source/backend/vulkan/compiler/AllShader.cpp +include/MNN/backend/vulkan/shaders/AllShader.h +.idea +project/ios/ios_64 +project/ios/ios_32 +project/ios/MNN.framework + +pymnn_build/ diff --git a/.travis.yml b/.travis.yml index e07fc1eca..3e40e1082 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,24 +8,29 @@ matrix: osx_image: xcode11.2 compiler: clang script: - - ./schema/generate.sh - - mkdir macosbuild - - cd macosbuild - - cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_METAL=ON -DMNN_BUILD_TEST=ON -DMNN_BUILD_BENCHMARK=ON - - make -j8 + - ./ciscripts/macOS/CPU_Metal.sh name: "macOS11.2 | CPU_Metal" - os: osx language: cpp osx_image: xcode11.2 compiler: clang script: - - ./schema/generate.sh - - xcodebuild -configuration 
Release -project project/ios/MNN.xcodeproj - - find . -name ".DS_Store" -delete - - cd project/ios/build/Release-iphoneos/ - - zip -r MNN.iOS.framework.zip ./ - - curl -T MNN.iOS.framework.zip -umnn:${BINTRAY_DEPLOY_TOKEN} https://api.bintray.com/content/mnnteam/Pods/Nightly/0.0.0/MNN-iOS-Nightly.zip - name: "iOS | CPU_Metal" + - ./ciscripts/macOS/CPU.sh + name: "macOS11.2 | CPU" + - os: osx + language: cpp + osx_image: xcode11.2 + compiler: clang + script: + - ./ciscripts/iOS/Xcode.sh + name: "iOS | CPU_Metal | Xcode" + - os: osx + language: cpp + osx_image: xcode11.2 + compiler: clang + script: + - ./ciscripts/iOS/CMake.sh + name: "iOS | CPU_Metal | CMake" - os: linux sudo: required dist: bionic @@ -34,12 +39,81 @@ matrix: - sudo apt-get install ant libprotobuf-dev libvulkan-dev libglew-dev freeglut3-dev protobuf-compiler ocl-icd-opencl-dev libglfw3-dev compiler: gcc script: - - ./schema/generate.sh - - mkdir linuxbuild - - cd linuxbuild - - cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_OPENCL=ON -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_BUILD_BENCHMARK=ON - - make -j8 - name: "Linux | CPU_CL_OpenMP_Vulkan" + - ./ciscripts/Linux/CL_ThreadPool_Vulkan.sh + name: "Linux | CPU_CL_ThreadPool_Vulkan" + - os: linux + sudo: required + dist: trusty + language: android + compiler: clang + android: + components: + - tools + - build-tools + - platform-tools + - android-21 + licenses: + - 'android-sdk-preview-license-.+' + - 'android-sdk-license-.+' + - 'google-gdk-license-.+' + before_script: + - sudo apt-get install ant libprotobuf-dev protobuf-compiler + - sudo apt-get remove cmake + - echo yes | sdkmanager "ndk-bundle" + - echo yes | sdkmanager "cmake;3.10.2.4988404" + - export ANDROID_NDK=$ANDROID_HOME/ndk-bundle + - export PATH=/usr/local/android-sdk/cmake/3.10.2.4988404/bin/:$PATH + script: + - 
./ciscripts/Android/32.sh + name: "Android | AArch32_ThreadPool_Vulkan" + - os: linux + sudo: required + dist: trusty + language: android + compiler: clang + android: + components: + - tools + - build-tools + - platform-tools + - android-21 + licenses: + - 'android-sdk-preview-license-.+' + - 'android-sdk-license-.+' + - 'google-gdk-license-.+' + before_script: + - sudo apt-get install ant libprotobuf-dev protobuf-compiler + - echo yes | sdkmanager "ndk-bundle" + - echo yes | sdkmanager "cmake;3.10.2.4988404" + - export ANDROID_NDK=$ANDROID_HOME/ndk-bundle + - export PATH=/usr/local/android-sdk/cmake/3.10.2.4988404/bin/:$PATH + script: + - ./ciscripts/Android/32OMP.sh + name: "Android | AArch32_OMP_Vulkan" + - os: linux + sudo: required + dist: trusty + language: android + compiler: clang + android: + components: + - tools + - build-tools + - platform-tools + - android-21 + licenses: + - 'android-sdk-preview-license-.+' + - 'android-sdk-license-.+' + - 'google-gdk-license-.+' + before_script: + - sudo apt-get install ant libprotobuf-dev protobuf-compiler + - echo yes | sdkmanager "ndk-bundle" + - echo yes | sdkmanager "cmake;3.10.2.4988404" + - export ANDROID_NDK=$ANDROID_HOME/ndk-bundle + - export PATH=/usr/local/android-sdk/cmake/3.10.2.4988404/bin/:$PATH + script: + - ./ciscripts/Android/64.sh + name: "Android | AArch64_ThreadPool_Vulkan" - os: linux sudo: required dist: trusty @@ -56,11 +130,11 @@ matrix: - 'android-sdk-license-.+' - 'google-gdk-license-.+' before_script: - - sudo apt-get install ant libprotobuf-dev protobuf-compiler tree + - sudo apt-get install ant libprotobuf-dev protobuf-compiler - echo yes | sdkmanager "ndk-bundle" - echo yes | sdkmanager "cmake;3.10.2.4988404" - export ANDROID_NDK=$ANDROID_HOME/ndk-bundle + - export PATH=/usr/local/android-sdk/cmake/3.10.2.4988404/bin/:$PATH script: - - cd project/android/ - - ./gradlew assembleRelease bintrayUpload -PbintrayKey=${BINTRAY_DEPLOY_TOKEN} - name: "Android | AArch32" + - 
./ciscripts/Android/64OMP.sh + name: "Android | AArch64_OMP_Vulkan" diff --git a/source/backend/opencl/3rdParty/CL/cl.h b/3rd_party/OpenCLHeaders/CL/cl.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl.h rename to 3rd_party/OpenCLHeaders/CL/cl.h diff --git a/source/backend/opencl/3rdParty/CL/cl.hpp b/3rd_party/OpenCLHeaders/CL/cl.hpp similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl.hpp rename to 3rd_party/OpenCLHeaders/CL/cl.hpp diff --git a/source/backend/opencl/3rdParty/CL/cl2.hpp b/3rd_party/OpenCLHeaders/CL/cl2.hpp similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl2.hpp rename to 3rd_party/OpenCLHeaders/CL/cl2.hpp diff --git a/source/backend/opencl/3rdParty/CL/cl_d3d10.h b/3rd_party/OpenCLHeaders/CL/cl_d3d10.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_d3d10.h rename to 3rd_party/OpenCLHeaders/CL/cl_d3d10.h diff --git a/source/backend/opencl/3rdParty/CL/cl_d3d11.h b/3rd_party/OpenCLHeaders/CL/cl_d3d11.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_d3d11.h rename to 3rd_party/OpenCLHeaders/CL/cl_d3d11.h diff --git a/source/backend/opencl/3rdParty/CL/cl_dx9_media_sharing.h b/3rd_party/OpenCLHeaders/CL/cl_dx9_media_sharing.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_dx9_media_sharing.h rename to 3rd_party/OpenCLHeaders/CL/cl_dx9_media_sharing.h diff --git a/source/backend/opencl/3rdParty/CL/cl_dx9_media_sharing_intel.h b/3rd_party/OpenCLHeaders/CL/cl_dx9_media_sharing_intel.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_dx9_media_sharing_intel.h rename to 3rd_party/OpenCLHeaders/CL/cl_dx9_media_sharing_intel.h diff --git a/source/backend/opencl/3rdParty/CL/cl_egl.h b/3rd_party/OpenCLHeaders/CL/cl_egl.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_egl.h rename to 3rd_party/OpenCLHeaders/CL/cl_egl.h diff --git a/source/backend/opencl/3rdParty/CL/cl_ext.h 
b/3rd_party/OpenCLHeaders/CL/cl_ext.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_ext.h rename to 3rd_party/OpenCLHeaders/CL/cl_ext.h diff --git a/source/backend/opencl/3rdParty/CL/cl_ext_intel.h b/3rd_party/OpenCLHeaders/CL/cl_ext_intel.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_ext_intel.h rename to 3rd_party/OpenCLHeaders/CL/cl_ext_intel.h diff --git a/source/backend/opencl/3rdParty/CL/cl_gl.h b/3rd_party/OpenCLHeaders/CL/cl_gl.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_gl.h rename to 3rd_party/OpenCLHeaders/CL/cl_gl.h diff --git a/source/backend/opencl/3rdParty/CL/cl_gl_ext.h b/3rd_party/OpenCLHeaders/CL/cl_gl_ext.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_gl_ext.h rename to 3rd_party/OpenCLHeaders/CL/cl_gl_ext.h diff --git a/source/backend/opencl/3rdParty/CL/cl_platform.h b/3rd_party/OpenCLHeaders/CL/cl_platform.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_platform.h rename to 3rd_party/OpenCLHeaders/CL/cl_platform.h diff --git a/source/backend/opencl/3rdParty/CL/cl_va_api_media_sharing_intel.h b/3rd_party/OpenCLHeaders/CL/cl_va_api_media_sharing_intel.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_va_api_media_sharing_intel.h rename to 3rd_party/OpenCLHeaders/CL/cl_va_api_media_sharing_intel.h diff --git a/source/backend/opencl/3rdParty/CL/cl_version.h b/3rd_party/OpenCLHeaders/CL/cl_version.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/cl_version.h rename to 3rd_party/OpenCLHeaders/CL/cl_version.h diff --git a/source/backend/opencl/3rdParty/CL/opencl.h b/3rd_party/OpenCLHeaders/CL/opencl.h similarity index 100% rename from source/backend/opencl/3rdParty/CL/opencl.h rename to 3rd_party/OpenCLHeaders/CL/opencl.h diff --git a/3rd_party/flatbuffers/docs/source/CONTRIBUTING.md b/3rd_party/flatbuffers/docs/source/CONTRIBUTING.md deleted file mode 100644 index 
17428add5..000000000 --- a/3rd_party/flatbuffers/docs/source/CONTRIBUTING.md +++ /dev/null @@ -1,42 +0,0 @@ -Contributing {#contributing} -============ - -Want to contribute? Great! First, read this page (including the small print at -the end). - -# Before you contribute -Before we can use your code, you must sign the -[Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1) -(CLA), which you can do online. The CLA is necessary mainly because you own the -copyright to your changes, even after your contribution becomes part of our -codebase, so we need your permission to use and distribute your code. We also -need to be sure of various other things—for instance that you'll tell us if you -know that your code infringes on other people's patents. You don't have to sign -the CLA until after you've submitted your code for review and a member has -approved it, but you must do it before we can put your code into our codebase. -Before you start working on a larger contribution, you should get in touch with -us first through the issue tracker with your idea so that we can help out and -possibly guide you. Coordinating up front makes it much easier to avoid -frustration later on. - -# Code reviews -All submissions, including submissions by project members, require review. We -use Github pull requests for this purpose. - -Some tips for good pull requests: -* Use our code - [style guide](https://google.github.io/styleguide/cppguide.html). - When in doubt, try to stay true to the existing code of the project. -* Write a descriptive commit message. What problem are you solving and what - are the consequences? Where and what did you test? Some good tips: - [here](http://robots.thoughtbot.com/5-useful-tips-for-a-better-commit-message) - and [here](https://www.kernel.org/doc/Documentation/SubmittingPatches). 
-* If your PR consists of multiple commits which are successive improvements / - fixes to your first commit, consider squashing them into a single commit - (`git rebase -i`) such that your PR is a single commit on top of the current - HEAD. This make reviewing the code so much easier, and our history more - readable. - -# The small print -Contributions made by corporations are covered by a different agreement than -the one above, the Software Grant and Corporate Contributor License Agreement. diff --git a/3rd_party/flatbuffers/docs/source/CONTRIBUTING.md b/3rd_party/flatbuffers/docs/source/CONTRIBUTING.md new file mode 120000 index 000000000..f939e75f2 --- /dev/null +++ b/3rd_party/flatbuffers/docs/source/CONTRIBUTING.md @@ -0,0 +1 @@ +../../CONTRIBUTING.md \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 6de37a670..9c97b4adb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,17 +1,59 @@ -cmake_minimum_required(VERSION 2.8) - -project(MNN) - +cmake_minimum_required(VERSION 3.0) +# Versioning stuff +if(NOT DEFINED MNN_VERSION_MAJOR) + set(MNN_VERSION_MAJOR 0) +endif() +if(NOT DEFINED MNN_VERSION_MINOR) + set(MNN_VERSION_MINOR 2) +endif() +if(NOT DEFINED MNN_VERSION_PATCH) + set(MNN_VERSION_PATCH 1) +endif() +if(NOT DEFINED MNN_VERSION_BUILD) + set(MNN_VERSION_BUILD 5) +endif() +if(NOT DEFINED MNN_VERSION_SUFFIX) + set(MNN_VERSION_SUFFIX git) +endif() +if (NOT PACKAGE_VERSION) + set(PACKAGE_VERSION + "${MNN_VERSION_MAJOR}.${MNN_VERSION_MINOR}.${MNN_VERSION_PATCH}.${MNN_VERSION_BUILD}${MNN_VERSION_SUFFIX}") +endif() +add_definitions("-DMNN_VERSION=\"${PACKAGE_VERSION}\"") +add_definitions("-DMNN_VERSION_MAJOR=${MNN_VERSION_MAJOR}") +add_definitions("-DMNN_VERSION_MINOR=${MNN_VERSION_MINOR}") +add_definitions("-DMNN_VERSION_PATCH=${MNN_VERSION_PATCH}") + +# CMP0048 is related to letting CMake managing the package version for us +# CMP0079 is required for OpenMP + +IF(POLICY CMP0048) + cmake_policy(SET CMP0048 NEW) +ENDIF() +IF(POLICY 
CMP0079) + cmake_policy(SET CMP0079 NEW) +ENDIF() +project(MNN VERSION ${MNN_VERSION_MAJOR}.${MNN_VERSION_MINOR}.${MNN_VERSION_PATCH}.${MNN_VERSION_BUILD} LANGUAGES C CXX ASM) +add_definitions(-fno-stack-check) # Workaround a Xcode 11.X bug # complier options set(CMAKE_C_STANDARD 99) set(CMAKE_CXX_STANDARD 11) -enable_language(ASM) -# set(CMAKE_C_COMPILER gcc) -# set(CMAKE_CXX_COMPILER g++) - - +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_MODULE_PATH + ${CMAKE_MODULE_PATH} + "${CMAKE_CURRENT_LIST_DIR}/cmake" +) +add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/include/MNN/VCS.h" + COMMAND ${CMAKE_COMMAND} "-DNAMES=MNN" + "-DMNN_SOURCE_DIR=${CMAKE_CURRENT_LIST_DIR}" + "-DHEADER_FILE=${CMAKE_CURRENT_LIST_DIR}/include/MNN/VCS.h" + -P "${CMAKE_CURRENT_LIST_DIR}/cmake/GenerateVersionFromVCS.cmake" + COMMENT "Generating Version Control Info" +) +add_custom_target (GenVCSHDR DEPENDS "${CMAKE_CURRENT_LIST_DIR}/include/MNN/VCS.h") +# Required for OpenCL/OpenGL/Vulkan CodeGen +include(FindPythonInterp REQUIRED) # build options -option(MNN_USE_CPP11 "Enable MNN use c++11" ON) option(MNN_BUILD_HARD "Build -mfloat-abi=hard or not" OFF) option(MNN_BUILD_SHARED_LIBS "MNN build shared or static lib" ON) option(MNN_FORBID_MULTI_THREAD "Disable Multi Thread" OFF) @@ -23,10 +65,25 @@ option(MNN_BUILD_QUANTOOLS "Build Quantized Tools or not" OFF) option(MNN_EVALUATION "Build Evaluation Tools or not" OFF) option(MNN_BUILD_CONVERTER "Build Converter" OFF) option(MNN_SUPPORT_TFLITE_QUAN "Enable MNN's tflite quantized op" ON) -include(cmake/macros.cmake) -if (MNN_BUILD_CONVERTER) - add_subdirectory(tools/converter) -endif() +option(MNN_DEBUG_MEMORY "MNN Debug Memory Access" OFF) +option(MNN_DEBUG_TENSOR_SIZE "Enable Tensor Size" OFF) +option(MNN_GPU_TRACE "Enable MNN Gpu Debug" OFF) +option(MNN_PORTABLE_BUILD "Link the static version of third party libraries where possible to improve the portability of built executables" OFF) +option(MNN_SEP_BUILD "Build MNN Backends and expression 
seperately. Only works with MNN_BUILD_SHARED_LIBS=ON" ON) +option(MNN_AAPL_FMWK "Build MNN.framework instead of traditional .a/.dylib" OFF) +option(MNN_USE_SSE "Enable SSE Optimizations" ON) +option(MNN_USE_AVX "Enable AVX Optimizations" ON) +option(NATIVE_LIBRARY_OUTPUT "Native Library Path" OFF) +option(NATIVE_INCLUDE_OUTPUT "Native Include Path" OFF) + + +set(MNN_SCHEMA_SUFFIX "default" CACHE STRING "MNN Schema Source Path Suffix") +IF(APPLE AND MNN_AAPL_FMWK AND MNN_SEP_BUILD) + message(WARNING "MNN_SEP_BUILD AND MNN_AAPL_FMWK can't coexist. Turning off MNN_SEP_BUILD") + SET(MNN_SEP_BUILD OFF) +ENDIF() + +include(${CMAKE_CURRENT_LIST_DIR}/cmake/macros.cmake) if (MNN_USE_THREAD_POOL) set(MNN_OPENMP OFF) @@ -40,25 +97,7 @@ if(MNN_SUPPORT_TFLITE_QUAN) add_definitions(-DMNN_SUPPORT_TFLITE_QUAN) endif() -if (NOT MSVC) - if(MNN_USE_CPP11) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") - else() - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") - endif() -endif() - # debug options -option(MNN_DEBUG "Enable MNN DEBUG" OFF) -if(CMAKE_BUILD_TYPE MATCHES "Debug") - set(MNN_DEBUG ON) -endif() -option(MNN_DEBUG_MEMORY "MNN Debug Memory Access" OFF) -option(MNN_DEBUG_TENSOR_SIZE "Enable Tensor Size" OFF ) -option(MNN_GPU_TRACE "Enable MNN Gpu Debug" OFF) - if(MNN_DEBUG_MEMORY) add_definitions(-DMNN_DEBUG_MEMORY) endif() @@ -69,6 +108,13 @@ if(MNN_GPU_TRACE) add_definitions(-DMNN_GPU_FORCE_FINISH) endif() +IF(MNN_USE_AVX) + add_definitions(-DMNN_USE_AVX) +ENDIF() +IF(MNN_USE_SSE) + add_definitions(-DMNN_USE_SSE) +ENDIF() + # backend options option(MNN_METAL "Enable Metal" OFF) option(MNN_OPENCL "Enable OpenCL" OFF) @@ -85,50 +131,29 @@ endif() option(MNN_BUILD_BENCHMARK "Build benchmark or not" OFF) option(MNN_BUILD_TEST "Build tests or not" OFF) option(MNN_BUILD_FOR_ANDROID_COMMAND "Build from command" OFF) -option(MNN_BUILD_FOR_IOS "Build for ios" OFF) set 
(MNN_HIDDEN FALSE) -if (NOT MNN_BUILD_TEST) - if (NOT MNN_DEBUG) - set (MNN_HIDDEN TRUE) - endif() -endif() +IF(CMAKE_BUILD_TYPE MATCHES Debug) +ELSE() + set(MNN_HIDDEN TRUE) +ENDIF(CMAKE_BUILD_TYPE MATCHES Debug) message(STATUS ">>>>>>>>>>>>>") message(STATUS "MNN BUILD INFO:") message(STATUS "\tSystem: ${CMAKE_SYSTEM_NAME}") message(STATUS "\tProcessor: ${CMAKE_SYSTEM_PROCESSOR}") -message(STATUS "\tDEBUG: ${MNN_DEBUG}") message(STATUS "\tMetal: ${MNN_METAL}") message(STATUS "\tOpenCL: ${MNN_OPENCL}") message(STATUS "\tOpenGL: ${MNN_OPENGL}") message(STATUS "\tVulkan: ${MNN_VULKAN}") message(STATUS "\tOpenMP: ${MNN_OPENMP}") message(STATUS "\tHidden: ${MNN_HIDDEN}") - -# flags -if(SYSTEM.Android) - add_definitions(-DMNN_BUILD_FOR_ANDROID) - if(PROCESSOR.arm) - add_definitions(-mfloat-abi=softfp -mfpu=neon) - endif() -endif() - -if(SYSTEM.Linux) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__STRICT_ANSI__") - if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - # This is to workaround libgcc.a - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") - endif() - if(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv7") - add_definitions(-mfpu=neon) #please define in project/cross-compile/arm.toolchain.cmake - endif() - if(MNN_BUILD_HARD) - add_definitions(-mfloat-abi=hard) #better define in project/cross-compile/arm.toolchain.cmake - endif() -endif() +message(STATUS "\tBuild Path: ${CMAKE_CURRENT_BINARY_DIR}") if(WIN32) + if(${CMAKE_VERSION} VERSION_LESS "3.14.0") + message(FATAL_ERROR "MNN requires CMake 3.14+ to build on Windows!") + endif() foreach(flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO @@ -144,14 +169,18 @@ if(WIN32) endif() endif () endforeach() - set(CMAKE_CXX_STANDARD 14) -elseif(SYSTEM.Android OR SYSTEM.Linux) - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fomit-frame-pointer 
-fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti -fno-exceptions") +elseif(CMAKE_SYSTEM_NAME MATCHES "^Android" OR CMAKE_SYSTEM_NAME MATCHES "^Linux") add_definitions(-fPIC) endif() +if(CMAKE_SYSTEM_NAME MATCHES "^Android") + add_definitions(-DMNN_BUILD_FOR_ANDROID) + if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") + add_definitions(-mfloat-abi=softfp -mfpu=neon) + endif() +endif() + -if(MNN_DEBUG) +IF(CMAKE_BUILD_TYPE MATCHES Debug) add_definitions(-DMNN_DEBUG -DDEBUG) if(MSVC) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /DEBUG") @@ -165,43 +194,27 @@ else() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /O2") else() - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3 -fPIC") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC") - if(MNN_BUILD_FOR_ANDROID_COMMAND) - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s -Wl,--gc-sections") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS}") + if(CMAKE_SYSTEM_NAME MATCHES "^Android") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie -fPIE") + if(MNN_BUILD_FOR_ANDROID_COMMAND) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections") + endif() + IF(NOT MNN_BUILD_FOR_ANDROID_COMMAND) + add_definitions(-DMNN_USE_LOGCAT) + ENDIF(NOT MNN_BUILD_FOR_ANDROID_COMMAND) + IF(NOT NATIVE_LIBRARY_OUTPUT) + set(NATIVE_LIBRARY_OUTPUT ".") + ENDIF() + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${NATIVE_LIBRARY_OUTPUT}/${ANDROID_ABI}) endif() endif() -endif() -if ((MNN_HIDDEN) AND (NOT MSVC)) - add_definitions(-fvisibility=hidden) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") -endif() - -# include files -set(MNN.Path source) -set(MNN.Source_DIR - ${MNN.Path} - ${MNN.Path}/core - ${MNN.Path}/backend/cpu - ${MNN.Path}/backend/cpu/arm - ${MNN.Path}/backend/cpu/compute - ${MNN.Path}/cv - ${MNN.Path}/math - ${MNN.Path}/shape -) 
-include_directories( - "include/" - "schema/current" - "3rd_party/flatbuffers/include" - "3rd_party/half" -) - -if(MNN_METAL) - set (MNN.Source_DIR ${MNN.Source_DIR} ${MNN.Path}/backend/metal) -endif() +ENDIF(CMAKE_BUILD_TYPE MATCHES Debug) if(${CMAKE_SYSTEM_NAME} MATCHES "^Linux") - if(PROCESSOR.arm OR PROCESSOR.aarch64) + if((CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") OR (CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64")) set(aarch64_linux_include #/usr/include/c++/4.9 #/usr/lib/gcc/x86_64-linux-gnu/4.9 @@ -211,141 +224,202 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "^Linux") include_directories(${aarch64_linux_include}) endif() endif() +include_directories(${CMAKE_CURRENT_LIST_DIR}/include/ + ${CMAKE_CURRENT_LIST_DIR}/source/ + ${CMAKE_CURRENT_LIST_DIR}/schema/current/ + ${CMAKE_CURRENT_LIST_DIR}/3rd_party/ + ${CMAKE_CURRENT_LIST_DIR}/3rd_party/flatbuffers/include + ${CMAKE_CURRENT_LIST_DIR}/3rd_party/half + ${CMAKE_CURRENT_LIST_DIR}/3rd_party/imageHelper + ${CMAKE_CURRENT_LIST_DIR}/3rd_party/OpenCLHeaders/ +) -if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(X86_64)|(x64)|(X64)|(amd64)|(AMD64)") - add_definitions(-DMNN_USE_SSE) - file(GLOB AVX_Source_Files ${MNN.Path}/backend/cpu/x86_x64/avx/*.cpp) - if(WIN32 OR MSVC) - set_source_files_properties(${AVX_Source_Files} PROPERTIES COMPILE_FLAGS /arch:AVX) - else() - set_source_files_properties(${AVX_Source_Files} PROPERTIES COMPILE_FLAGS -mavx) - endif() - set(MNN.Source_DIR ${MNN.Source_DIR} ${MNN.Path}/backend/cpu/x86_x64) - set(MNN.Source_DIR ${MNN.Source_DIR} ${MNN.Path}/backend/cpu/x86_x64/sse) - set(MNN.Source_DIR ${MNN.Source_DIR} ${MNN.Path}/backend/cpu/x86_x64/avx) -endif() -# *.c -foreach(file_path ${MNN.Source_DIR}) - file(GLOB file_source_c ${file_path}/*.c ${file_path}/*.h) - set(MNN.Source_C ${MNN.Source_C} ${file_source_c}) -endforeach() -message(STATUS "[*] searching *.c") - -# *.cc -foreach(file_path ${MNN.Source_DIR}) - file(GLOB file_source_cc ${file_path}/*.cc ${file_path}/*.hpp) - set(MNN.Source_CC ${MNN.Source_CC} 
${file_source_cc}) -endforeach() -message(STATUS "[*] searching *.cc") - -# *.cpp -foreach(file_path ${MNN.Source_DIR}) - file(GLOB file_source_cpp ${file_path}/*.cpp) - set(MNN.Source_CPP ${MNN.Source_CPP} ${file_source_cpp}) -endforeach() -message(STATUS "[*] searching *.cpp") - -if(MNN_METAL) - # *.m *.mm - foreach(file_path ${MNN.Source_DIR}) - file(GLOB file_source_m ${file_path}/*.mm ${file_path}/*.m ${file_path}/*.metal) - set(MNN.Source_M ${MNN.Source_M} ${file_source_m}) +# Import FlatBuffers and use standard way to generate schemas +IF(CMAKE_CROSSCOMPILING) + message(WARNING "Cross Compilation Detected. Third-Party tools like protobuf/flatbuffer are not built. You'll need to make sure they are available in your $PATH") +ELSE() + add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/3rd_party/flatbuffers ${CMAKE_CURRENT_BINARY_DIR}/flatbuffers-build EXCLUDE_FROM_ALL) +ENDIF() +FILE(GLOB MNN_SCHEMA_SRC ${CMAKE_CURRENT_LIST_DIR}/schema/${MNN_SCHEMA_SUFFIX}/*.fbs) +SET(SCHEMA_TARGETS "") +FOREACH(SCHEMA_SRC ${MNN_SCHEMA_SRC}) + file(MAKE_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}/schema/current/") + get_filename_component(SCHEMA_NAME "${SCHEMA_SRC}" NAME_WE) + add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h" COMMAND flatc -c -b --gen-object-api --reflect-names ${SCHEMA_SRC} COMMENT "Generating ${SCHEMA_NAME} Schema in ${CMAKE_CURRENT_LIST_DIR}/schema/${MNN_SCHEMA_SUFFIX}" WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/schema/current/ DEPENDS ${SCHEMA_SRC}) + ADD_CUSTOM_TARGET(MNN_SCHEMA_GEN_${SCHEMA_NAME} ALL DEPENDS "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h") + IF(NOT CMAKE_CROSSCOMPILING) + add_dependencies(MNN_SCHEMA_GEN_${SCHEMA_NAME} flatc) + ENDIF() + LIST(APPEND SCHEMA_TARGETS "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h") +ENDFOREACH() +add_custom_target(MNN_SCHEMA_GEN DEPENDS ${SCHEMA_TARGETS}) + +set(MNN_OBJECTS_TO_LINK "") +set(MNN_TARGETS "") + +# Core +FILE(GLOB 
MNN_Core_SRC ${CMAKE_CURRENT_LIST_DIR}/source/core/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/core/*.c) +add_library(MNNCore OBJECT ${MNN_Core_SRC} ${SCHEMA_TARGETS}) +list(APPEND MNN_OBJECTS_TO_LINK $) +list(APPEND MNN_TARGETS MNNCore) + +# CV +FILE(GLOB MNN_CV_SRC ${CMAKE_CURRENT_LIST_DIR}/source/cv/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/cv/*.c) +add_library(MNNCV OBJECT ${MNN_CV_SRC} ${SCHEMA_TARGETS}) +list(APPEND MNN_OBJECTS_TO_LINK $) +list(APPEND MNN_TARGETS MNNCV) + +# Math +FILE(GLOB MNN_Math_SRC ${CMAKE_CURRENT_LIST_DIR}/source/math/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/math/*.c) +add_library(MNNMath OBJECT ${MNN_Math_SRC} ${SCHEMA_TARGETS}) +list(APPEND MNN_OBJECTS_TO_LINK $) +list(APPEND MNN_TARGETS MNNMath) + +# Shape +FILE(GLOB MNN_Shape_SRC ${CMAKE_CURRENT_LIST_DIR}/source/shape/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/shape/*.c) +add_library(MNNShape OBJECT ${MNN_Shape_SRC} ${SCHEMA_TARGETS}) +list(APPEND MNN_OBJECTS_TO_LINK $) +list(APPEND MNN_TARGETS MNNShape) + +# CPU +FILE(GLOB MNN_CPU_SRC ${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/*.cpp) +add_library(MNNCPU OBJECT ${MNN_CPU_SRC} ${SCHEMA_TARGETS}) +list(APPEND MNN_OBJECTS_TO_LINK $) +list(APPEND MNN_TARGETS MNNCPU) + +# Compute +FILE(GLOB MNN_Compute_SRC ${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/compute/*.cpp) +add_library(MNNCompute OBJECT ${MNN_Compute_SRC} ${SCHEMA_TARGETS}) +list(APPEND MNN_OBJECTS_TO_LINK $) +list(APPEND MNN_TARGETS MNNCompute) + +# Include sub components +## add_subdirectory() avoids recompilation if an option was toggled +## However due to variable scope issues, add the following two lines to the end of sub-scope CMakeLists +### SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) +### SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) + +# X86_64 AVX/SSE +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/x86_x64/) + +# AArch32/64 Assemblies +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/arm/) + +# Metal 
+add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/metal/) + +# Vulkan +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/vulkan/) + +# OpenCL +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/opencl/) + +# OpenGL +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/opengl/) + +# ARM82 Assemblies +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/arm82/) + +# Express +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/express/) + +IF(NOT DEFINED IOS_ARCH) + set(IOS_ARCH "") +ENDIF() + +SET(MNN_PUB_HDRS "") +SET(MNN_EXPR_PUB_HDRS "") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/MNNDefine.h") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/Interpreter.hpp") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/HalideRuntime.h") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/Tensor.hpp") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/ErrorCode.hpp") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/ImageProcess.hpp") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/Matrix.h") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/Rect.h") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/MNNForwardType.h") +list(APPEND MNN_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/AutoTime.hpp") +list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/Expr.hpp") +list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/ExprCreator.hpp") +list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/MathOp.hpp") +list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/NeuralNetWorkOp.hpp") +list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/Optimizer.hpp") + +IF(MNN_BUILD_SHARED_LIBS) + IF(MNN_SEP_BUILD) + # TODO: Find better ways to do this + IF(MNN_OPENCL) + list(REMOVE_ITEM MNN_OBJECTS_TO_LINK 
$) + add_library(MNN_CL SHARED $ ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp) + ENDIF() + IF(MNN_OPENGL) + list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $) + add_library(MNN_GL SHARED $ ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp) + target_link_libraries(MNN_GL PUBLIC GLESv3 EGL) + ENDIF() + IF(MNN_VULKAN) + list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $) + add_library(MNN_Vulkan SHARED $ ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp) + ENDIF() + IF(MNN_ARM82) + list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $) + add_library(MNN_Arm82 SHARED $ ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp) + ENDIF() + list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $) + add_library(MNN_Express SHARED $ ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp) + ENDIF() + add_library(MNN SHARED ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS}) + if (WIN32) + foreach(TARGET ${MNN_TARGETS}) + target_compile_definitions(${TARGET} PRIVATE "-DBUILDING_MNN_DLL") + target_compile_definitions(${TARGET} INTERFACE "-DUSING_MNN_DLL") endforeach() - message(STATUS "[*] searching *.m *.mm") -endif() - -set(GLOBAL.SOURCE ${GLOBAL.SOURCE} ${MNN.Source_C} ${MNN.Source_CPP} ${MNN.Source_M} ${MNN.Source_CC}) -set(GLOBAL.SOURCE_INCLUDE ${GLOBAL.SOURCE_INCLUDE} ${MNN.Source_DIR}) - -# *.s *.S assembly -if(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv7") - set(MNN.Source_Assembly_DIR ${MNN.Path}/backend/cpu/arm/arm32) -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64") - set(MNN.Source_Assembly_DIR ${MNN.Path}/backend/cpu/arm/arm64) -elseif(ARCH_NAME STREQUAL "ios_fat_arm") - set(MNN.Source_Assembly_DIR ${MNN.Source_Assembly_DIR} ${MNN.Path}/backend/cpu/arm/arm64) - set(MNN.Source_Assembly_DIR ${MNN.Source_Assembly_DIR} ${MNN.Path}/backend/cpu/arm/arm32) -endif() -message(STATUS "[*] searching *.s *.S") - -foreach(file_path ${MNN.Source_Assembly_DIR}) - file(GLOB file_source_assembly ${file_path}/*.s ${file_path}/*.S) - set(MNN.Source_Assembly ${MNN.Source_Assembly} ${file_source_assembly}) -endforeach() - -# set assembly 
file use c compiler flags -set_property(SOURCE ${MNN.Source_Assembly} PROPERTY LANGUAGE C) - -include_directories(${MNN.Source_DIR}) -include_directories("./") - -if(SYSTEM.Android AND NOT MNN_BUILD_FOR_ANDROID_COMMAND) - if(NOT DEFINED NATIVE_LIBRARY_OUTPUT) - set(NATIVE_LIBRARY_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}) - message(STATUS "Using default Android Build Library Directory: ${NATIVE_LIBRARY_OUTPUT}/MNN") - else() - message(STATUS "Using Android Build Library Directory: ${NATIVE_LIBRARY_OUTPUT}") - endif() - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${NATIVE_LIBRARY_OUTPUT}/${ANDROID_ABI}) - add_definitions(-DMNN_USE_LOGCAT) + endif() +ELSE() + add_library(MNN STATIC ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS}) +ENDIF() +if(APPLE) + IF(MNN_AAPL_FMWK) + set_target_properties(MNN PROPERTIES FRAMEWORK TRUE) + set_target_properties(MNN PROPERTIES + MACOSX_FRAMEWORK_IDENTIFIER com.alibaba.MNN + MACOSX_FRAMEWORK_SHORT_VERSION_STRING ${PACKAGE_VERSION} + MACOSX_FRAMEWORK_BUNDLE_VERSION ${PACKAGE_VERSION} + XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer" + ) + set_target_properties(MNN PROPERTIES MACOSX_FRAMEWORK_INFO_PLIST ${CMAKE_CURRENT_SOURCE_DIR}/project/ios/MNN/Info.plist) + ENDIF() + find_library(FOUNDATION Foundation REQUIRED) + target_link_libraries(MNN PUBLIC ${FOUNDATION}) + IF(MNN_METAL) + find_library(METAL Metal REQUIRED) + target_link_libraries(MNN PUBLIC ${METAL}) + ENDIF() + IF(MNN_OPENCL) + IF(APPLE) + find_library(OPENCL OpenCL REQUIRED) + target_link_libraries(MNN PUBLIC ${OPENCL}) + ELSEIF(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Android") + find_package(OpenCL REQUIRED) + target_link_libraries(MNN PRIVATE ${OpenCL_LIBRARIES}) + ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Android") + add_definitions(-DMNN_USE_OPENCL_WRAPPER) + #add_definitions(-DENABLE_OPENCL_TURNING_PROFILER) + #add_definitions(-DLOG_VERBOSE) + ENDIF() + ENDIF() endif() -if(MNN_BUILD_FOR_IOS) - add_library(MNN ${GLOBAL.SOURCE} 
${MNN.Source_Assembly} ${Metal.Resource_Metal}) - set_target_properties(MNN PROPERTIES FRAMEWORK TRUE) - set_target_properties(MNN PROPERTIES - MACOSX_FRAMEWORK_IDENTIFIER com.alibaba.MNN - XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer" - ) - set_target_properties(MNN PROPERTIES MACOSX_FRAMEWORK_INFO_PLIST ${CMAKE_CURRENT_SOURCE_DIR}/project/ios/MNN/Info.plist) - # set_source_files_properties($Metal.Resource_Metal} PROPERTIES MACOSX_PACKAGE_LOCATION Resources) - # set_target_properties(MNN PROPERTIES RESOURCE ${Metal.Resource_Metal}) +if(CMAKE_SYSTEM_NAME MATCHES "^Linux") + target_link_libraries(MNN PUBLIC pthread) +elseif(CMAKE_SYSTEM_NAME MATCHES "^Android") + target_link_libraries(MNN PUBLIC log android m) else() - if(MNN_BUILD_SHARED_LIBS) - add_library(MNN SHARED ${GLOBAL.SOURCE} ${MNN.Source_Assembly}) - if (WIN32) - install(TARGETS MNN RUNTIME DESTINATION lib) - else() - install(TARGETS MNN LIBRARY DESTINATION lib) - endif() - else() - add_library(MNN STATIC ${GLOBAL.SOURCE} ${MNN.Source_Assembly}) - install(TARGETS MNN ARCHIVE DESTINATION lib) - endif() -endif() - -if(${CMAKE_SYSTEM_NAME} MATCHES "Android") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie -fPIE -s") -endif() - - -# pthread lib -if(SYSTEM.Linux) - target_link_libraries(MNN pthread) -endif() -set(MNN_DEPEND MNN) -if(MNN_VULKAN) - list(APPEND MNN_DEPEND MNN_Vulkan) - add_subdirectory(${MNN.Path}/backend/vulkan) - message(STATUS "[*] linking MNN with Vulkan done") endif() if(MNN_OPENGL) - list(APPEND MNN_DEPEND MNN_GL) - add_subdirectory(${MNN.Path}/backend/opengl) - message(STATUS "[*] linking MNN with OpenGL done") -endif() -if(MNN_OPENCL) - list(APPEND MNN_DEPEND MNN_CL) - add_subdirectory(${MNN.Path}/backend/opencl) - message(STATUS "[*] linking MNN with OpenCL done") -endif() -if (MNN_ARM82) - list(APPEND MNN_DEPEND MNN_Arm82) - add_subdirectory(${MNN.Path}/backend/arm82) - message(STATUS "[*] linking MNN with ARM 82 done") + target_link_libraries(MNN PUBLIC GLESv3 EGL) 
endif() - if (MSVC OR WIN32) target_link_options(MNN PRIVATE "/IGNORE:4049,4217") foreach(DEPEND ${MNN_DEPEND}) @@ -356,146 +430,149 @@ if (MSVC OR WIN32) target_link_libraries(MNN PRIVATE ${DEPEND}) endforeach() endif() +set(MNN_DEPS "") +list(APPEND MNN_DEPS MNN) +IF(MNN_BUILD_SHARED_LIBS) + IF(MNN_SEP_BUILD) + # TODO: Find better ways to do this + IF(MNN_OPENCL) + target_link_libraries(MNN_CL PRIVATE MNN) + list(APPEND MNN_DEPS MNN_CL) + ENDIF() + IF(MNN_OPENGL) + target_link_libraries(MNN_GL PRIVATE MNN) + list(APPEND MNN_DEPS MNN_GL) + ENDIF() + IF(MNN_VULKAN) + target_link_libraries(MNN_Vulkan PRIVATE MNN) + list(APPEND MNN_DEPS MNN_Vulkan) + ENDIF() + IF(MNN_ARM82) + target_link_libraries(MNN_Arm82 PRIVATE MNN) + list(APPEND MNN_DEPS MNN_Arm82) + ENDIF() + target_link_libraries(MNN_Express PRIVATE MNN) + list(APPEND MNN_DEPS MNN_Express) + ENDIF() +ENDIF() if (NOT MNN_BUILD_SHARED_LIBS) - if (BUILD_IOS OR APPLE) + if(APPLE) set(MNN_DEPEND -Wl,-all_load ${MNN_DEPEND} -Wl,-noall_load) elseif (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") set(MNN_DEPEND -Wl,--whole-archive ${MNN_DEPEND} -Wl,--no-whole-archive) endif() endif() - -if (BUILD_IOS OR APPLE) -else() - if(MNN_OPENMP) - message(STATUS "[*] Checking OpenMP") - find_package(OpenMP) - # For CMake < 3.9, we need to make the target ourselves - if(NOT TARGET OpenMP::OpenMP_CXX) - find_package(Threads REQUIRED) - add_library(OpenMP::OpenMP_CXX IMPORTED INTERFACE) - set_property(TARGET OpenMP::OpenMP_CXX - PROPERTY INTERFACE_COMPILE_OPTIONS ${OpenMP_CXX_FLAGS}) - # Only works if the same flag is passed to the linker; use CMake 3.9+ otherwise (Intel, AppleClang) - set_property(TARGET OpenMP::OpenMP_CXX - PROPERTY INTERFACE_LINK_LIBRARIES ${OpenMP_CXX_FLAGS} Threads::Threads) - endif() - if (WIN32) - set(OpenMP_C_FLAGS "/openmp ${OpenMP_C_FLAGS}") - set(OpenMP_CXX_FLAGS "/openmp ${OpenMP_CXX_FLAGS}") - endif() - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") - 
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${OpenMP_SHARED_LINKER_FLAGS}") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}") +if (NOT APPLE) + if(MNN_OPENMP) + message(STATUS "[*] Checking OpenMP") + find_package(OpenMP) + # For CMake < 3.9, we need to make the target ourselves + if(NOT TARGET OpenMP::OpenMP_CXX) + find_package(Threads REQUIRED) + add_library(OpenMP::OpenMP_CXX IMPORTED INTERFACE) + set_property(TARGET OpenMP::OpenMP_CXX + PROPERTY INTERFACE_COMPILE_OPTIONS ${OpenMP_CXX_FLAGS}) + # Only works if the same flag is passed to the linker; use CMake 3.9+ otherwise (Intel, AppleClang) + set_property(TARGET OpenMP::OpenMP_CXX + PROPERTY INTERFACE_LINK_LIBRARIES ${OpenMP_CXX_FLAGS} Threads::Threads) + endif() + # TODO: Don't pollute global CFLAGS + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${OpenMP_SHARED_LINKER_FLAGS}") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}") + if (WIN32) + set(OpenMP_C_FLAGS "/openmp ${OpenMP_C_FLAGS}") + set(OpenMP_CXX_FLAGS "/openmp ${OpenMP_CXX_FLAGS}") + endif() + FOREACH(TARGET ${MNN_TARGETS}) + target_link_libraries(${TARGET} PUBLIC ${OpenMP_CXX_LIBRARIES}) + IF(WIN32) + target_compile_options(${TARGET} PUBLIC /openmp ${OpenMP_CXX_FLAGS} ${OpenMP_C_FLAGS}) + ELSE() + target_compile_options(${TARGET} PUBLIC ${OpenMP_CXX_FLAGS} ${OpenMP_C_FLAGS}) + ENDIF() + ENDFOREACH() endif() endif() - -# Android thrid party -if(CMAKE_SYSTEM_NAME MATCHES "^Android") - target_link_libraries(MNN log) -endif() - -# Linux library -if(CMAKE_SYSTEM_NAME MATCHES "^Linux") - set (CMAKE_FIND_ROOT_PATH C:/MinGW) -endif() - -# Darwin third party -if(MNN_BUILD_FOR_IOS OR MNN_METAL) - target_link_libraries(MNN "-framework Foundation") - target_link_libraries(MNN 
"-framework Metal") -endif() - -if(WIN32) - if (MNN_BUILD_SHARED_LIBS) - target_compile_definitions(MNN PRIVATE "-DBUILDING_MNN_DLL") - target_compile_definitions(MNN INTERFACE "-DUSING_MNN_DLL") - endif() - target_compile_definitions(MNN PUBLIC "-D_CRT_SECURE_NO_WARNINGS") - target_compile_options(MNN PUBLIC "/wd4244" "/wd4146" "/wd4018" "/wd4267" "/wd4996" "/wd4081" "/wd4251") -endif() - -if(SYSTEM.Android AND NOT MNN_BUILD_FOR_ANDROID_COMMAND) - if(NOT DEFINED NATIVE_INCLUDE_OUTPUT) - set(NATIVE_INCLUDE_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}) - message(STATUS "Using default Android Build Directory: ${NATIVE_INCLUDE_OUTPUT}/MNN") +list(APPEND MNN_TARGETS MNN) + FOREACH(TARGET ${MNN_TARGETS}) + add_dependencies(${TARGET} MNN_SCHEMA_GEN) + IF((NOT MSVC) AND (NOT WIN32)) + target_compile_options(${TARGET} PUBLIC -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti) + target_compile_options(${TARGET} PRIVATE -fno-exceptions) + if(MNN_HIDDEN) + target_compile_options(${TARGET} PRIVATE -fvisibility-inlines-hidden -fvisibility=hidden) + endif() else() - message(STATUS "Using Android Build Directory: ${NATIVE_INCLUDE_OUTPUT}") + add_compile_definitions("-D_CRT_SECURE_NO_WARNINGS") + add_compile_options("/wd4267" "/wd4018" "/wd4251" "/wd4996" "/wd4244" "/wd4146" "/wd4129" "/wd4305") endif() + ENDFOREACH() +list(REMOVE_ITEM MNN_TARGETS MNN) +include(${CMAKE_CURRENT_LIST_DIR}/demo/exec/CMakeLists.txt) +include(${CMAKE_CURRENT_LIST_DIR}/tools/cpp/CMakeLists.txt) +include(${CMAKE_CURRENT_LIST_DIR}/tools/train/CMakeLists.txt) +include(${CMAKE_CURRENT_LIST_DIR}/test/CMakeLists.txt) +include(${CMAKE_CURRENT_LIST_DIR}/benchmark/CMakeLists.txt) +include(${CMAKE_CURRENT_LIST_DIR}/tools/quantization/CMakeLists.txt) +include(${CMAKE_CURRENT_LIST_DIR}/tools/evaluation/CMakeLists.txt) +add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/tools/converter) + +# Install headers +IF(CMAKE_SYSTEM_NAME MATCHES "^Android" AND NOT MNN_BUILD_FOR_ANDROID_COMMAND) + 
IF(NOT NATIVE_INCLUDE_OUTPUT) + set(NATIVE_INCLUDE_OUTPUT ".") + ENDIF() set(MNN_INCLUDE_OUTPUT ${NATIVE_INCLUDE_OUTPUT}/MNN) add_custom_command( - TARGET MNN - POST_BUILD - COMMAND mkdir -p ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/MNNDefine.h ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/Interpreter.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/MNNForwardType.h ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/HalideRuntime.h ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/Tensor.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/AutoTime.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/ErrorCode.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/ImageProcess.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/Matrix.h ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/include/Rect.h ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/express/include/Expr.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/express/include/ExprCreator.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/express/include/NeuralNetWorkOp.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/express/include/MathOp.hpp ${MNN_INCLUDE_OUTPUT} - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/${ROOT_SRC}/express/include/Optimizer.hpp ${MNN_INCLUDE_OUTPUT} - + TARGET MNN + POST_BUILD + COMMAND ${CMAKE_COMMAND} + -E make_directory "${MNN_INCLUDE_OUTPUT}/" ) -else() - if (MNN_BUILD_DEMO) - add_subdirectory(demo/exec) - endif() - add_subdirectory(tools/cpp) -endif() - -add_subdirectory(express) - -if (MNN_BUILD_TRAIN) - add_subdirectory(tools/train) 
-endif() - -if(MNN_BUILD_TEST) - add_subdirectory(test) -endif() - -if(MNN_BUILD_BENCHMARK) - add_subdirectory(benchmark) -endif() -if(MNN_BUILD_QUANTOOLS) - add_subdirectory(tools/quantization) -endif() - -if(MNN_EVALUATION) - add_subdirectory(tools/evaluation) -endif() - -# schema generator -if(WIN32) - add_custom_target( MNN_SCHEMA ALL - COMMAND powershell ${CMAKE_CURRENT_SOURCE_DIR}/schema/generate.ps1 -lazy - ) -else() - add_custom_target( MNN_SCHEMA ALL - COMMAND bash ${CMAKE_CURRENT_SOURCE_DIR}/schema/generate.sh -lazy + add_custom_command( + TARGET MNN + POST_BUILD + COMMAND ${CMAKE_COMMAND} + -E make_directory "${MNN_INCLUDE_OUTPUT}/expr/" ) -endif() -add_dependencies(MNN MNN_SCHEMA) - -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/MNNDefine.h DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/Interpreter.hpp DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/HalideRuntime.h DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/Tensor.hpp DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ErrorCode.hpp DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ImageProcess.hpp DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/Matrix.h DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/Rect.h DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/MNNForwardType.h DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/AutoTime.hpp DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/express/include/Expr.hpp DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/express/include/ExprCreator.hpp DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/express/include/MathOp.hpp DESTINATION include) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/express/include/NeuralNetWorkOp.hpp DESTINATION include) -install(FILES 
${CMAKE_CURRENT_SOURCE_DIR}/express/include/Optimizer.hpp DESTINATION include) + FOREACH(header ${MNN_PUB_HDRS}) + add_custom_command( + TARGET MNN + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy ${header} "${MNN_INCLUDE_OUTPUT}/" + ) + ENDFOREACH() + FOREACH(header ${MNN_EXPR_PUB_HDRS}) + add_custom_command( + TARGET MNN + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy ${header} "${MNN_INCLUDE_OUTPUT}/expr/" + ) + ENDFOREACH() +ELSEIF(NOT APPLE) + INSTALL(FILES ${MNN_PUB_HDRS} DESTINATION include/MNN/) + INSTALL(FILES ${MNN_EXPR_PUB_HDRS} DESTINATION include/MNN/expr/) + install(TARGETS MNN + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib + ) +ELSE() + install(TARGETS MNN + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib + FRAMEWORK DESTINATION /Library/Frameworks/ + ) + FOREACH(HDR ${MNN_EXPR_PUB_HDRS}) + SET_SOURCE_FILES_PROPERTIES(${HDR} PROPERTIES MACOSX_PACKAGE_LOCATION Headers/expr/ ) + ENDFOREACH() + FOREACH(HDR ${MNN_PUB_HDRS}) + SET_SOURCE_FILES_PROPERTIES(${HDR} PROPERTIES MACOSX_PACKAGE_LOCATION Headers/ ) + ENDFOREACH() + IF(MNN_METAL) + SET_SOURCE_FILES_PROPERTIES(${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib PROPERTIES MACOSX_PACKAGE_LOCATION Resources/) + ENDIF() +ENDIF() diff --git a/MNN.podspec b/MNN.podspec index 25e9aed43..a7539154a 100644 --- a/MNN.podspec +++ b/MNN.podspec @@ -32,38 +32,30 @@ Pod::Spec.new do |s| s.ios.deployment_target = '8.0' s.requires_arc = true - s.source = { :git => "https://github.com/alibaba/MNN.git", :branch => 'master' } + #s.source = { :git => "git@github.com:alibaba/MNN.git", :branch => 'master' } + s.prepare_command = <<-CMD + schema/generate.sh + python source/backend/metal/MetalCodeGen.py source/backend/metal/ source/backend/metal/MetalOPRegister.mm + CMD + s.source = {:git => "/Users/zhang/Development/AliNNPrivate/",:branch=> 'head'} s.frameworks = 'Metal', 'Accelerate' s.library = 'c++' - s.prepare_command = 'schema/generate.sh' - - s.subspec 'core' do |a| - a.source_files = \ - 
'include/*.{h,hpp}',\ - 'schema/current/*.{h}',\ - '3rd_party/flatbuffers/include/flatbuffers/*.{h}',\ - 'source/core/**/*.{h,c,m,mm,cc,hpp,cpp}',\ - 'source/cv/**/*.{h,c,m,mm,cc,hpp,cpp}',\ - 'source/math/**/*.{h,c,m,mm,cc,hpp,cpp,metal}',\ - 'source/shape/*.{h,c,m,mm,cc,hpp,cpp}',\ - 'source/backend/cpu/*.{h,c,m,mm,cc,S,hpp,cpp}',\ - 'source/backend/cpu/arm/*.{h,c,m,mm,cc,S,hpp,cpp}',\ - 'source/backend/cpu/compute/*.{h,c,m,mm,cc,S,hpp,cpp}',\ - 'express/**/*.{hpp,cpp}' - end - s.subspec 'armv7' do |a| - a.source_files = 'source/backend/cpu/arm/arm32/*.{h,c,m,mm,cc,S,hpp,cpp}' - a.pod_target_xcconfig = {'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/source/backend/cpu/arm/"'} - end - s.subspec 'aarch64' do |a| - a.source_files = 'source/backend/cpu/arm/arm64/*.{h,c,m,mm,cc,S,hpp,cpp}' - a.pod_target_xcconfig = {'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/source/backend/cpu/arm/"'} - end - s.subspec 'metal' do |a| - a.source_files = 'source/backend/metal/**/*.{h,c,m,mm,cc,hpp,cpp,metal}' - end - - s.default_subspecs = 'core', 'armv7', 'aarch64', 'metal' - s.pod_target_xcconfig = {'METAL_LIBRARY_FILE_BASE' => 'mnn', 'HEADER_SEARCH_PATHS' => '$(PODS_TARGET_SRCROOT)/include $(PODS_TARGET_SRCROOT)/3rd_party/flatbuffers/include $(PODS_TARGET_SRCROOT)/schema/current $(PODS_TARGET_SRCROOT)/source/core/ $(PODS_TARGET_SRCROOT)/source/backend/cpu/ $(PODS_TARGET_SRCROOT)/source/backend/cpu/compute/ $(PODS_TARGET_SRCROOT)/source/math/ $(PODS_TARGET_SRCROOT)/3rd_party/half', 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) MNN_CODEGEN_REGISTER=1 MNN_SUPPORT_TFLITE_QUAN=1'} - s.user_target_xcconfig = {'OTHER_LDFLAGS' => '-force_load $(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)/MNN/libMNN.a'} + s.source_files = \ + 'include/MNN/*.{h,hpp}',\ + 'include/MNN/expr/*.{h,hpp}',\ + 'schema/current/*.{h}',\ + '3rd_party/flatbuffers/include/flatbuffers/*.{h}',\ + 'source/core/**/*.{h,c,m,mm,cc,hpp,cpp}',\ + 'source/cv/**/*.{h,c,m,mm,cc,hpp,cpp}',\ + 
'source/math/**/*.{h,c,m,mm,cc,hpp,cpp,metal}',\ + 'source/shape/*.{h,c,m,mm,cc,hpp,cpp}',\ + 'source/backend/cpu/*.{h,c,m,mm,cc,S,hpp,cpp}',\ + 'source/backend/cpu/arm/**/*.{h,c,m,mm,cc,S,hpp,cpp}',\ + 'source/backend/cpu/compute/*.{h,c,m,mm,cc,S,hpp,cpp}',\ + 'source/backend/metal/*.{h,c,m,mm,cc,hpp,cpp,metal}',\ + 'express/**/*.{hpp,cpp}' + s.header_mappings_dir = 'include' + + s.pod_target_xcconfig = {'METAL_LIBRARY_FILE_BASE' => 'mnn', 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/include" "$(PODS_TARGET_SRCROOT)/3rd_party/flatbuffers/include" "$(PODS_TARGET_SRCROOT)/source" "$(PODS_TARGET_SRCROOT)/3rd_party/half"', 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) MNN_CODEGEN_REGISTER=1 MNN_SUPPORT_TFLITE_QUAN=1'} + s.user_target_xcconfig = { 'OTHER_LDFLAGS' => '-force_load $(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)/MNN/libMNN.a', 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/include"' } end diff --git a/README.md b/README.md index fc1469a33..4bb349418 100644 --- a/README.md +++ b/README.md @@ -88,4 +88,3 @@ MNN refers to the following projects: - [paddle-mobile](https://github.com/PaddlePaddle/paddle-mobile) - [stb](https://github.com/nothings/stb) - [rapidjson](https://github.com/Tencent/rapidjson) - diff --git a/benchmark/.gitignore b/benchmark/.gitignore new file mode 100644 index 000000000..8cef4e6b8 --- /dev/null +++ b/benchmark/.gitignore @@ -0,0 +1 @@ +!models/ diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt index 31fab6dd2..25c2e91cf 100644 --- a/benchmark/CMakeLists.txt +++ b/benchmark/CMakeLists.txt @@ -1,12 +1,10 @@ -# put output to build dir -SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../) -message(STATUS ${CMAKE_CURRENT_BINARY_DIR}) +IF(MNN_BUILD_BENCHMARK) + add_executable(benchmark.out ${CMAKE_CURRENT_LIST_DIR}/benchmark.cpp ${CMAKE_CURRENT_SOURCE_DIR}/tools/cpp/revertMNNModel.cpp) + target_include_directories(benchmark.out PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/tools/cpp/ 
${CMAKE_CURRENT_SOURCE_DIR}/tools/) + target_link_libraries(benchmark.out ${MNN_DEPS}) -add_executable(benchmark.out benchmark.cpp ../tools/cpp/revertMNNModel.cpp) -target_include_directories(benchmark.out PRIVATE "../tools/cpp" "../tools") -target_link_libraries(benchmark.out ${MNN_DEPEND}) - -file(GLOB_RECURSE SRC_FILES ./exprModels/*.cpp) -add_executable(benchmarkExprModels.out benchmarkExprModels.cpp ${SRC_FILES}) -target_include_directories(benchmarkExprModels.out PRIVATE "./exprModels" "../" "../include") -target_link_libraries(benchmarkExprModels.out ${MNN_DEPEND} MNN_Express) + file(GLOB_RECURSE SRC_FILES ${CMAKE_CURRENT_LIST_DIR}/exprModels/*.cpp) + add_executable(benchmarkExprModels.out ${CMAKE_CURRENT_LIST_DIR}/benchmarkExprModels.cpp ${SRC_FILES}) + target_include_directories(benchmarkExprModels.out PRIVATE "${CMAKE_CURRENT_LIST_DIR}/exprModels" ${CMAKE_CURRENT_SOURCE_DIR}/) + target_link_libraries(benchmarkExprModels.out ${MNN_DEPS}) +ENDIF() diff --git a/benchmark/benchmark.cpp b/benchmark/benchmark.cpp index 820223238..5646df591 100644 --- a/benchmark/benchmark.cpp +++ b/benchmark/benchmark.cpp @@ -27,10 +27,10 @@ #include #endif -#include "Backend.hpp" -#include "Interpreter.hpp" -#include "MNNDefine.h" -#include "Tensor.hpp" +#include "core/Backend.hpp" +#include +#include +#include #include "revertMNNModel.hpp" /** TODOs: diff --git a/benchmark/benchmarkExprModels.cpp b/benchmark/benchmarkExprModels.cpp index 5ca23bb47..07d3a22d4 100644 --- a/benchmark/benchmarkExprModels.cpp +++ b/benchmark/benchmarkExprModels.cpp @@ -17,9 +17,9 @@ #endif #include "MNN_generated.h" -#include "MNNForwardType.h" -#include "Interpreter.hpp" -#include "Expr.hpp" +#include +#include +#include #include "ExprModels.hpp" using namespace MNN; @@ -100,24 +100,24 @@ static std::vector runNet(VARP netOutput, const ScheduleConfig& config, i } auto outputTensor = net->getSessionOutput(session, NULL); std::shared_ptr 
outputTensorHost(Tensor::createHostTensorFromDevice(outputTensor, false)); - + // Warming up... for (int i = 0; i < 3; ++i) { inputTensor->copyFromHostTensor(inputTensorHost.get()); net->runSession(session); outputTensor->copyToHostTensor(outputTensorHost.get()); } - + std::vector costs; - + // start run for (int i = 0; i < loop; ++i) { auto timeBegin = getTimeInUs(); - + inputTensor->copyFromHostTensor(inputTensorHost.get()); net->runSession(session); outputTensor->copyToHostTensor(outputTensorHost.get()); - + auto timeEnd = getTimeInUs(); costs.push_back((timeEnd - timeBegin) / 1000.0); } @@ -184,7 +184,7 @@ int main(int argc, const char* argv[]) { config.backendConfig = &bnConfig; std::vector costs; - + // ResNet18 benchmark for (auto model : models) { auto modelArgs = splitArgs(model.c_str(), "_"); @@ -229,4 +229,3 @@ int main(int argc, const char* argv[]) { } return 0; } - diff --git a/benchmark/exprModels/ExprModels.hpp b/benchmark/exprModels/ExprModels.hpp index 2b5b7b3b1..e4f3aa40b 100644 --- a/benchmark/exprModels/ExprModels.hpp +++ b/benchmark/exprModels/ExprModels.hpp @@ -1,8 +1,8 @@ #ifndef ExprModels_hpp #define ExprModels_hpp -#include "Expr.hpp" -#include "MNNDefine.h" +#include +#include #include "MobileNetExpr.hpp" #include "ResNetExpr.hpp" diff --git a/benchmark/exprModels/GoogLeNetExpr.cpp b/benchmark/exprModels/GoogLeNetExpr.cpp index 9a28be9bb..814a41177 100644 --- a/benchmark/exprModels/GoogLeNetExpr.cpp +++ b/benchmark/exprModels/GoogLeNetExpr.cpp @@ -8,7 +8,7 @@ // #include "GoogLeNetExpr.hpp" -#include "ExprCreator.hpp" +#include using namespace MNN::Express; diff --git a/benchmark/exprModels/GoogLeNetExpr.hpp b/benchmark/exprModels/GoogLeNetExpr.hpp index ce060732c..f7d57a64a 100644 --- a/benchmark/exprModels/GoogLeNetExpr.hpp +++ b/benchmark/exprModels/GoogLeNetExpr.hpp @@ -10,7 +10,7 @@ #ifndef GoogLeNetExpr_hpp #define GoogLeNetExpr_hpp -#include "Expr.hpp" +#include MNN::Express::VARP googLeNetExpr(int numClass); diff --git 
a/benchmark/exprModels/MobileNetExpr.cpp b/benchmark/exprModels/MobileNetExpr.cpp index a5c3ddeea..619c844bb 100644 --- a/benchmark/exprModels/MobileNetExpr.cpp +++ b/benchmark/exprModels/MobileNetExpr.cpp @@ -9,7 +9,7 @@ #include #include "MobileNetExpr.hpp" -#include "ExprCreator.hpp" +#include using namespace MNN::Express; @@ -40,7 +40,7 @@ VARP mobileNetV1Expr(MobileNetWidthType alpha, MobileNetResolutionType beta, int inputSize = inputSizeMap[beta]; poolSize = inputSize / 32; } - + int channels[6]; // MobileNet_100, MobileNet_075, MobileNet_050, MobileNet_025 { auto channelsMap = std::map({ @@ -55,11 +55,11 @@ VARP mobileNetV1Expr(MobileNetWidthType alpha, MobileNetResolutionType beta, int } channels[0] = channelsMap[alpha]; } - + for (int i = 1; i < 6; ++i) { channels[i] = channels[0] * (1 << i); } - + auto x = _Input({1, 3, inputSize, inputSize}, NC4HW4); x = _Conv(0.0f, 0.0f, x, {3, channels[0]}, {3, 3}, SAME, {2, 2}, {1, 1}, 1); x = convBlock(x, {channels[0], channels[1]}, 1); diff --git a/benchmark/exprModels/MobileNetExpr.hpp b/benchmark/exprModels/MobileNetExpr.hpp index dcf68454f..5a46c44c2 100644 --- a/benchmark/exprModels/MobileNetExpr.hpp +++ b/benchmark/exprModels/MobileNetExpr.hpp @@ -12,7 +12,7 @@ #include #include -#include "Expr.hpp" +#include enum MobileNetWidthType { MobileNet_100, MobileNet_075, MobileNet_050, MobileNet_025 diff --git a/benchmark/exprModels/ResNetExpr.cpp b/benchmark/exprModels/ResNetExpr.cpp index a8dd87d4b..c43c51c79 100644 --- a/benchmark/exprModels/ResNetExpr.cpp +++ b/benchmark/exprModels/ResNetExpr.cpp @@ -10,7 +10,7 @@ #include #include #include "ResNetExpr.hpp" -#include "ExprCreator.hpp" +#include using namespace MNN::Express; diff --git a/benchmark/exprModels/ResNetExpr.hpp b/benchmark/exprModels/ResNetExpr.hpp index 71e68d662..668c9c840 100644 --- a/benchmark/exprModels/ResNetExpr.hpp +++ b/benchmark/exprModels/ResNetExpr.hpp @@ -12,7 +12,7 @@ #include #include -#include "Expr.hpp" +#include enum ResNetType { 
ResNet18, ResNet34, ResNet50, ResNet101, ResNet152 @@ -33,5 +33,5 @@ static inline ResNetType EnumResNetTypeByString(const std::string& key) { } MNN::Express::VARP resNetExpr(ResNetType resNetType, int numClass); - + #endif //ResNetExpr_hpp diff --git a/benchmark/exprModels/ShuffleNetExpr.cpp b/benchmark/exprModels/ShuffleNetExpr.cpp index a18d621b9..3887b74b6 100644 --- a/benchmark/exprModels/ShuffleNetExpr.cpp +++ b/benchmark/exprModels/ShuffleNetExpr.cpp @@ -9,7 +9,7 @@ #include #include "ShuffleNetExpr.hpp" -#include "ExprCreator.hpp" +#include using namespace MNN::Express; diff --git a/benchmark/exprModels/ShuffleNetExpr.hpp b/benchmark/exprModels/ShuffleNetExpr.hpp index d7d09b016..2df986a6f 100644 --- a/benchmark/exprModels/ShuffleNetExpr.hpp +++ b/benchmark/exprModels/ShuffleNetExpr.hpp @@ -10,7 +10,7 @@ #ifndef ShuffleNetExpr_hpp #define ShuffleNetExpr_hpp -#include "Expr.hpp" +#include MNN::Express::VARP shuffleNetExpr(int group, int numClass); diff --git a/benchmark/exprModels/SqueezeNetExpr.cpp b/benchmark/exprModels/SqueezeNetExpr.cpp index 769564065..d5f7f5672 100644 --- a/benchmark/exprModels/SqueezeNetExpr.cpp +++ b/benchmark/exprModels/SqueezeNetExpr.cpp @@ -8,7 +8,7 @@ // #include "SqueezeNetExpr.hpp" -#include "ExprCreator.hpp" +#include using namespace MNN::Express; diff --git a/benchmark/exprModels/SqueezeNetExpr.hpp b/benchmark/exprModels/SqueezeNetExpr.hpp index 03a117e61..58128a9e2 100644 --- a/benchmark/exprModels/SqueezeNetExpr.hpp +++ b/benchmark/exprModels/SqueezeNetExpr.hpp @@ -10,7 +10,7 @@ #ifndef SqueezeNetExpr_hpp #define SqueezeNetExpr_hpp -#include "Expr.hpp" +#include MNN::Express::VARP squeezeNetExpr(int numClass); diff --git a/ciscripts/Android/32.sh b/ciscripts/Android/32.sh new file mode 100755 index 000000000..81bd14827 --- /dev/null +++ b/ciscripts/Android/32.sh @@ -0,0 +1,6 @@ +set -e +schema/generate.sh +cd project/android +mkdir build_32 +cd build_32 +../build_32.sh -DMNN_VULKAN=ON -DMNN_OPENMP=OFF 
-DMNN_USE_THREAD_POOL=ON
diff --git a/ciscripts/Android/32OMP.sh b/ciscripts/Android/32OMP.sh
new file mode 100755
index 000000000..6aa6d6999
--- /dev/null
+++ b/ciscripts/Android/32OMP.sh
@@ -0,0 +1,6 @@
+set -e
+schema/generate.sh
+cd project/android
+mkdir build_32
+cd build_32
+../build_32.sh -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF
diff --git a/ciscripts/Android/64.sh b/ciscripts/Android/64.sh
new file mode 100755
index 000000000..23f4d02f8
--- /dev/null
+++ b/ciscripts/Android/64.sh
@@ -0,0 +1,6 @@
+set -e
+schema/generate.sh
+cd project/android
+mkdir build_64
+cd build_64
+../build_64.sh -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON
diff --git a/ciscripts/Android/64OMP.sh b/ciscripts/Android/64OMP.sh
new file mode 100755
index 000000000..3259dd695
--- /dev/null
+++ b/ciscripts/Android/64OMP.sh
@@ -0,0 +1,6 @@
+set -e
+schema/generate.sh
+cd project/android
+mkdir build_64
+cd build_64
+../build_64.sh -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF
diff --git a/ciscripts/Android/Gradle.sh b/ciscripts/Android/Gradle.sh
new file mode 100755
index 000000000..7e369e0ff
--- /dev/null
+++ b/ciscripts/Android/Gradle.sh
@@ -0,0 +1,9 @@
+set -e
+schema/generate.sh
+cd project/android/
+./gradlew assembleRelease
+if [[ -z "${DEPLOY_ENV}" ]]; then
+  echo "Android Bintray upload skipped due to untrusted CI environment"
+else
+  ./gradlew bintrayUpload -PbintrayKey=${BINTRAY_DEPLOY_TOKEN}
+fi
diff --git a/ciscripts/Linux/CL_OMP_Vulkan.sh b/ciscripts/Linux/CL_OMP_Vulkan.sh
new file mode 100755
index 000000000..0268c1e4d
--- /dev/null
+++ b/ciscripts/Linux/CL_OMP_Vulkan.sh
@@ -0,0 +1,5 @@
+schema/generate.sh
+mkdir linuxbuild
+cd linuxbuild
+cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_OPENCL=ON -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF -DMNN_BUILD_BENCHMARK=ON
+make -j8
diff --git a/ciscripts/Linux/CL_ThreadPool_Vulkan.sh b/ciscripts/Linux/CL_ThreadPool_Vulkan.sh new file mode 100755 index 000000000..99252abf1 --- /dev/null +++ b/ciscripts/Linux/CL_ThreadPool_Vulkan.sh @@ -0,0 +1,5 @@ +schema/generate.sh +mkdir linuxbuild +cd linuxbuild +cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_OPENCL=ON -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON -DMNN_BUILD_BENCHMARK=ON +make -j8 diff --git a/ciscripts/Windows/X64Windows.bat b/ciscripts/Windows/X64Windows.bat new file mode 100644 index 000000000..923a93dbd --- /dev/null +++ b/ciscripts/Windows/X64Windows.bat @@ -0,0 +1,3 @@ +call "C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvars64.bat" +cmake -G "Ninja" -DCMAKE_BUILD_TYPE=Release .. +ninja \ No newline at end of file diff --git a/ciscripts/Windows/X86Windows.bat b/ciscripts/Windows/X86Windows.bat new file mode 100644 index 000000000..8173044e2 --- /dev/null +++ b/ciscripts/Windows/X86Windows.bat @@ -0,0 +1,3 @@ +call "C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvars32.bat" +cmake -G "Ninja" -DCMAKE_BUILD_TYPE=Release .. +ninja \ No newline at end of file diff --git a/ciscripts/iOS/CMake.sh b/ciscripts/iOS/CMake.sh new file mode 100755 index 000000000..bc1e80dc1 --- /dev/null +++ b/ciscripts/iOS/CMake.sh @@ -0,0 +1,3 @@ +set -e +schema/generate.sh +project/ios/buildiOS.sh diff --git a/ciscripts/iOS/Xcode.sh b/ciscripts/iOS/Xcode.sh new file mode 100755 index 000000000..efe7e8ed7 --- /dev/null +++ b/ciscripts/iOS/Xcode.sh @@ -0,0 +1,11 @@ +set -e +schema/generate.sh +xcodebuild CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO ONLY_ACTIVE_ARCH=NO -configuration Release -project project/ios/MNN.xcodeproj +find . 
-name ".DS_Store" -delete
+cd project/ios/build/Release-iphoneos/
+zip -r MNN.iOS.framework.zip ./
+if [[ -z "${DEPLOY_ENV}" ]]; then
+  echo "iOS Bintray upload skipped due to untrusted CI environment"
+else
+  curl -T MNN.iOS.framework.zip -umnn:${BINTRAY_DEPLOY_TOKEN} https://api.bintray.com/content/mnnteam/Pods/Nightly/0.0.0/MNN-iOS-Nightly.zip
+fi
diff --git a/ciscripts/macOS/CPU.sh b/ciscripts/macOS/CPU.sh
new file mode 100755
index 000000000..a41dfe719
--- /dev/null
+++ b/ciscripts/macOS/CPU.sh
@@ -0,0 +1,6 @@
+set -e
+schema/generate.sh
+mkdir macosbuild
+cd macosbuild
+cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_BUILD_BENCHMARK=ON
+make -j8
diff --git a/ciscripts/macOS/CPU_Metal.sh b/ciscripts/macOS/CPU_Metal.sh
new file mode 100755
index 000000000..7735915d0
--- /dev/null
+++ b/ciscripts/macOS/CPU_Metal.sh
@@ -0,0 +1,6 @@
+set -e
+schema/generate.sh
+mkdir macosbuild
+cd macosbuild
+cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_METAL=ON -DMNN_BUILD_TEST=ON -DMNN_BUILD_BENCHMARK=ON
+make -j8
diff --git a/cmake/GenerateVersionFromVCS.cmake b/cmake/GenerateVersionFromVCS.cmake
new file mode 100644
index 000000000..d8ec54df4
--- /dev/null
+++ b/cmake/GenerateVersionFromVCS.cmake
@@ -0,0 +1,51 @@
+# CMake script that writes version control information to a header.
+#
+# Input variables:
+# NAMES - A list of names for each of the source directories.
+# _SOURCE_DIR - A path to source directory for each name in NAMES.
+# HEADER_FILE - The header file to write
+#
+# The output header will contain macros _REPOSITORY and _REVISION,
+# where "" is substituted with the names specified in the input variables,
+# for each of the _SOURCE_DIR given.
+ +get_filename_component(LLVM_CMAKE_DIR "${CMAKE_SCRIPT_MODE_FILE}" PATH) + +list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}") + +include(VersionFromVCS) + +# Handle strange terminals +set(ENV{TERM} "dumb") + +function(append_info name path) + if(path) + get_source_info("${path}" revision repository) + endif() + if(revision) + file(APPEND "${HEADER_FILE}.tmp" + "#define ${name}_REVISION \"${revision}\"\n") + else() + file(APPEND "${HEADER_FILE}.tmp" + "#undef ${name}_REVISION\n") + endif() + if(repository) + file(APPEND "${HEADER_FILE}.tmp" + "#define ${name}_REPOSITORY \"${repository}\"\n") + else() + file(APPEND "${HEADER_FILE}.tmp" + "#undef ${name}_REPOSITORY\n") + endif() +endfunction() + +foreach(name IN LISTS NAMES) + if(NOT DEFINED ${name}_SOURCE_DIR) + message(FATAL_ERROR "${name}_SOURCE_DIR is not defined") + endif() + append_info(${name} "${${name}_SOURCE_DIR}") +endforeach() + +# Copy the file only if it has changed. +execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different + "${HEADER_FILE}.tmp" "${HEADER_FILE}") +file(REMOVE "${HEADER_FILE}.tmp") diff --git a/cmake/VersionFromVCS.cmake b/cmake/VersionFromVCS.cmake new file mode 100644 index 000000000..1b6519b4b --- /dev/null +++ b/cmake/VersionFromVCS.cmake @@ -0,0 +1,49 @@ +# Adds version control information to the variable VERS. For +# determining the Version Control System used (if any) it inspects the +# existence of certain subdirectories under SOURCE_DIR (if provided as an +# extra argument, otherwise uses CMAKE_CURRENT_SOURCE_DIR). 
+ +function(get_source_info path revision repository) + find_package(Git) + if(GIT_FOUND) + execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse --git-dir + WORKING_DIRECTORY ${path} + RESULT_VARIABLE git_result + OUTPUT_VARIABLE git_output + ERROR_QUIET) + if(git_result EQUAL 0) + string(STRIP "${git_output}" git_output) + get_filename_component(git_dir ${git_output} ABSOLUTE BASE_DIR ${path}) + execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse HEAD + WORKING_DIRECTORY ${path} + RESULT_VARIABLE git_result + OUTPUT_VARIABLE git_output) + if(git_result EQUAL 0) + string(STRIP "${git_output}" git_output) + set(${revision} ${git_output} PARENT_SCOPE) + endif() + execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref --symbolic-full-name @{upstream} + WORKING_DIRECTORY ${path} + RESULT_VARIABLE git_result + OUTPUT_VARIABLE git_output + ERROR_QUIET) + if(git_result EQUAL 0) + string(REPLACE "/" ";" branch ${git_output}) + list(GET branch 0 remote) + else() + set(remote "origin") + endif() + execute_process(COMMAND ${GIT_EXECUTABLE} remote get-url ${remote} + WORKING_DIRECTORY ${path} + RESULT_VARIABLE git_result + OUTPUT_VARIABLE git_output + ERROR_QUIET) + if(git_result EQUAL 0) + string(STRIP "${git_output}" git_output) + set(${repository} ${git_output} PARENT_SCOPE) + else() + set(${repository} ${path} PARENT_SCOPE) + endif() + endif() + endif() +endfunction() diff --git a/cmake/dummy.cpp b/cmake/dummy.cpp new file mode 100644 index 000000000..aaf82ad23 --- /dev/null +++ b/cmake/dummy.cpp @@ -0,0 +1,4 @@ +// A few build systems refuses to build a target from pure object library +// without at least one source file +// This is a dummy cpp that is used to work around this limitation +static int dummy=0x1337; diff --git a/cmake/ios.toolchain.cmake b/cmake/ios.toolchain.cmake index 7f740cca8..41bac17b4 100644 --- a/cmake/ios.toolchain.cmake +++ b/cmake/ios.toolchain.cmake @@ -44,41 +44,61 @@ # 
***************************************************************************** # Now maintained by Alexander Widerberg (widerbergaren [at] gmail.com) # under the BSD-3-Clause license +# https://github.com/leetal/ios-cmake # ***************************************************************************** # # INFORMATION / HELP # -# The following variables control the behaviour of this toolchain: +# The following arguments control the behaviour of this toolchain: # -# IOS_PLATFORM: OS (default) or SIMULATOR or SIMULATOR64 or TVOS or SIMULATOR_TVOS +# PLATFORM: (default "OS") # OS = Build for iPhoneOS. -# SIMULATOR = Build for x86 i386 iPhone Simulator. -# SIMULATOR64 = Build for x86_64 iPhone Simulator. -# TVOS = Build for AppleTVOS. -# SIMULATOR_TVOS = Build for x86_64 AppleTV Simulator. -# CMAKE_OSX_SYSROOT: Path to the iOS SDK to use. By default this is -# automatically determined from IOS_PLATFORM and xcodebuild, but +# OS64 = Build for arm64 iphoneOS. +# OS64COMBINED = Build for arm64 x86_64 iphoneOS. Combined into FAT STATIC lib (supported on 3.14+ of CMakewith "-G Xcode" argument ONLY) +# SIMULATOR = Build for x86 i386 iphoneOS Simulator. +# SIMULATOR64 = Build for x86_64 iphoneOS Simulator. +# TVOS = Build for arm64 tvOS. +# TVOSCOMBINED = Build for arm64 x86_64 tvOS. Combined into FAT STATIC lib (supported on 3.14+ of CMake with "-G Xcode" argument ONLY) +# SIMULATOR_TVOS = Build for x86_64 tvOS Simulator. +# WATCHOS = Build for armv7k arm64_32 for watchOS. +# WATCHOSCOMBINED = Build for armv7k arm64_32 x86_64 watchOS. Combined into FAT STATIC lib (supported on 3.14+ of CMake with "-G Xcode" argument ONLY) +# SIMULATOR_WATCHOS = Build for x86_64 for watchOS Simulator. +# +# CMAKE_OSX_SYSROOT: Path to the SDK to use. By default this is +# automatically determined from PLATFORM and xcodebuild, but # can also be manually specified (although this should not be required). 
-# CMAKE_IOS_DEVELOPER_ROOT: Path to the Developer directory for the iOS platform +# +# CMAKE_DEVELOPER_ROOT: Path to the Developer directory for the platform # being compiled for. By default this is automatically determined from # CMAKE_OSX_SYSROOT, but can also be manually specified (although this should # not be required). +# +# DEPLOYMENT_TARGET: Minimum SDK version to target. Default 2.0 on watchOS and 9.0 on tvOS+iOS +# # ENABLE_BITCODE: (1|0) Enables or disables bitcode support. Default 1 (true) +# # ENABLE_ARC: (1|0) Enables or disables ARC support. Default 1 (true, ARC enabled by default) +# # ENABLE_VISIBILITY: (1|0) Enables or disables symbol visibility support. Default 0 (false, visibility hidden by default) -# IOS_ARCH: (armv7 armv7s arm64 i386 x86_64) If specified, will override the default architectures for the given IOS_PLATFORM -# OS = armv7 armv7s arm64 +# +# ENABLE_STRICT_TRY_COMPILE: (1|0) Enables or disables strict try_compile() on all Check* directives (will run linker +# to actually check if linking is possible). Default 0 (false, will set CMAKE_TRY_COMPILE_TARGET_TYPE to STATIC_LIBRARY) +# +# ARCHS: (armv7 armv7s armv7k arm64 arm64_32 i386 x86_64) If specified, will override the default architectures for the given PLATFORM +# OS = armv7 armv7s arm64 (if applicable) +# OS64 = arm64 (if applicable) # SIMULATOR = i386 # SIMULATOR64 = x86_64 # TVOS = arm64 -# SIMULATOR_TVOS = x86_64 +# SIMULATOR_TVOS = x86_64 (i386 has since long been deprecated) +# WATCHOS = armv7k arm64_32 (if applicable) +# SIMULATOR_WATCHOS = x86_64 (i386 has since long been deprecated) # # This toolchain defines the following variables for use externally: # # XCODE_VERSION: Version number (not including Build version) of Xcode detected. -# IOS_SDK_VERSION: Version of iOS SDK being used. -# CMAKE_OSX_ARCHITECTURES: Architectures being compiled for (generated from -# IOS_PLATFORM). +# SDK_VERSION: Version of SDK being used. 
+# CMAKE_OSX_ARCHITECTURES: Architectures being compiled for (generated from PLATFORM). # # This toolchain defines the following macros for use externally: # @@ -89,8 +109,18 @@ # # find_host_package (PROGRAM ARGS) # A macro used to find executable programs on the host system, not within the -# iOS environment. Thanks to the android-cmake project for providing the +# environment. Thanks to the android-cmake project for providing the # command. +# +# ******************************** DEPRECATIONS ******************************* +# +# IOS_DEPLOYMENT_TARGET: (Deprecated) Alias to DEPLOYMENT_TARGET +# CMAKE_IOS_DEVELOPER_ROOT: (Deprecated) Alias to CMAKE_DEVELOPER_ROOT +# IOS_PLATFORM: (Deprecated) Alias to PLATFORM +# IOS_ARCH: (Deprecated) Alias to ARCHS +# +# ***************************************************************************** +# # Fix for PThread library not in path set(CMAKE_THREAD_LIBS_INIT "-lpthread") @@ -98,6 +128,14 @@ set(CMAKE_HAVE_THREADS_LIBRARY 1) set(CMAKE_USE_WIN32_THREADS_INIT 0) set(CMAKE_USE_PTHREADS_INIT 1) +# Cache what generator is used +set(USED_CMAKE_GENERATOR "${CMAKE_GENERATOR}" CACHE STRING "Expose CMAKE_GENERATOR" FORCE) + +if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.14") + set(MODERN_CMAKE YES) + message(STATUS "Merging integrated CMake 3.14+ iOS,tvOS,watchOS,macOS toolchain(s) with this toolchain!") +endif() + # Get the Xcode version being used. execute_process(COMMAND xcodebuild -version OUTPUT_VARIABLE XCODE_VERSION @@ -106,125 +144,268 @@ execute_process(COMMAND xcodebuild -version string(REGEX MATCH "Xcode [0-9\\.]+" XCODE_VERSION "${XCODE_VERSION}") string(REGEX REPLACE "Xcode ([0-9\\.]+)" "\\1" XCODE_VERSION "${XCODE_VERSION}") message(STATUS "Building with Xcode version: ${XCODE_VERSION}") + +######## ALIASES (DEPRECATION WARNINGS) + +if(DEFINED IOS_PLATFORM) + set(PLATFORM ${IOS_PLATFORM}) + message(DEPRECATION "IOS_PLATFORM argument is DEPRECATED. 
Consider using the new PLATFORM argument instead.") +endif() + +if(DEFINED IOS_DEPLOYMENT_TARGET) + set(DEPLOYMENT_TARGET ${IOS_DEPLOYMENT_TARGET}) + message(DEPRECATION "IOS_DEPLOYMENT_TARGET argument is DEPRECATED. Consider using the new DEPLOYMENT_TARGET argument instead.") +endif() + +if(DEFINED CMAKE_IOS_DEVELOPER_ROOT) + set(CMAKE_DEVELOPER_ROOT ${CMAKE_IOS_DEVELOPER_ROOT}) + message(DEPRECATION "CMAKE_IOS_DEVELOPER_ROOT argument is DEPRECATED. Consider using the new CMAKE_DEVELOPER_ROOT argument instead.") +endif() + +if(DEFINED IOS_ARCH) + set(ARCHS ${IOS_ARCH}) + message(DEPRECATION "IOS_ARCH argument is DEPRECATED. Consider using the new ARCHS argument instead.") +endif() + +######## END ALIASES + +# Unset the FORCE on cache variables if in try_compile() +set(FORCE_CACHE FORCE) +get_property(_CMAKE_IN_TRY_COMPILE GLOBAL PROPERTY IN_TRY_COMPILE) +if(_CMAKE_IN_TRY_COMPILE) + unset(FORCE_CACHE) +endif() + # Default to building for iPhoneOS if not specified otherwise, and we cannot # determine the platform from the CMAKE_OSX_ARCHITECTURES variable. The use # of CMAKE_OSX_ARCHITECTURES is such that try_compile() projects can correctly -# determine the value of IOS_PLATFORM from the root project, as +# determine the value of PLATFORM from the root project, as # CMAKE_OSX_ARCHITECTURES is propagated to them by CMake. 
-if (NOT DEFINED IOS_PLATFORM) +if(NOT DEFINED PLATFORM) if (CMAKE_OSX_ARCHITECTURES) - if (CMAKE_OSX_ARCHITECTURES MATCHES ".*arm.*") - set(IOS_PLATFORM "OS") - elseif (CMAKE_OSX_ARCHITECTURES MATCHES "i386") - set(IOS_PLATFORM "SIMULATOR") - elseif (CMAKE_OSX_ARCHITECTURES MATCHES "x86_64") - set(IOS_PLATFORM "SIMULATOR64") + if(CMAKE_OSX_ARCHITECTURES MATCHES ".*arm.*" AND CMAKE_OSX_SYSROOT MATCHES ".*iphoneos.*") + set(PLATFORM "OS") + elseif(CMAKE_OSX_ARCHITECTURES MATCHES "i386" AND CMAKE_OSX_SYSROOT MATCHES ".*iphonesimulator.*") + set(PLATFORM "SIMULATOR") + elseif(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" AND CMAKE_OSX_SYSROOT MATCHES ".*iphonesimulator.*") + set(PLATFORM "SIMULATOR64") + elseif(CMAKE_OSX_ARCHITECTURES MATCHES "arm64" AND CMAKE_OSX_SYSROOT MATCHES ".*appletvos.*") + set(PLATFORM "TVOS") + elseif(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64" AND CMAKE_OSX_SYSROOT MATCHES ".*appletvsimulator.*") + set(PLATFORM "SIMULATOR_TVOS") + elseif(CMAKE_OSX_ARCHITECTURES MATCHES ".*armv7k.*" AND CMAKE_OSX_SYSROOT MATCHES ".*watchos.*") + set(PLATFORM "WATCHOS") + elseif(CMAKE_OSX_ARCHITECTURES MATCHES "i386" AND CMAKE_OSX_SYSROOT MATCHES ".*watchsimulator.*") + set(PLATFORM "SIMULATOR_WATCHOS") endif() endif() - if (NOT IOS_PLATFORM) - set(IOS_PLATFORM "OS") + if (NOT PLATFORM) + set(PLATFORM "OS") endif() endif() -set(IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING - "Type of iOS platform for which to build.") + +set(PLATFORM_INT "${PLATFORM}" CACHE STRING "Type of platform for which the build targets.") + +# Handle the case where we are targeting iOS and a version above 10.3.4 (32-bit support dropped officially) +if(PLATFORM_INT STREQUAL "OS" AND DEPLOYMENT_TARGET VERSION_GREATER_EQUAL 10.3.4) + set(PLATFORM_INT "OS64") + message(STATUS "Targeting minimum SDK version ${DEPLOYMENT_TARGET}. 
Dropping 32-bit support.") +elseif(PLATFORM_INT STREQUAL "SIMULATOR" AND DEPLOYMENT_TARGET VERSION_GREATER_EQUAL 10.3.4) + set(PLATFORM_INT "SIMULATOR64") + message(STATUS "Targeting minimum SDK version ${DEPLOYMENT_TARGET}. Dropping 32-bit support.") +endif() + # Determine the platform name and architectures for use in xcodebuild commands -# from the specified IOS_PLATFORM name. -if (IOS_PLATFORM STREQUAL "OS") - set(XCODE_IOS_PLATFORM iphoneos) - if(NOT IOS_ARCH) - set(IOS_ARCH armv7 armv7s arm64) +# from the specified PLATFORM name. +if(PLATFORM_INT STREQUAL "OS") + set(SDK_NAME iphoneos) + if(NOT ARCHS) + set(ARCHS armv7 armv7s arm64) + endif() +elseif(PLATFORM_INT STREQUAL "OS64") + set(SDK_NAME iphoneos) + if(NOT ARCHS) + if (XCODE_VERSION VERSION_GREATER 10.0) + set(ARCHS arm64) # Add arm64e when Apple have fixed the integration issues with it, libarclite_iphoneos.a is currently missung bitcode markers for example + else() + set(ARCHS arm64) + endif() + endif() +elseif(PLATFORM_INT STREQUAL "OS64COMBINED") + set(SDK_NAME iphoneos) + if(MODERN_CMAKE) + if(NOT ARCHS) + if (XCODE_VERSION VERSION_GREATER 10.0) + set(ARCHS arm64 x86_64) # Add arm64e when Apple have fixed the integration issues with it, libarclite_iphoneos.a is currently missung bitcode markers for example + else() + set(ARCHS arm64 x86_64) + endif() + endif() + else() + message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the OS64COMBINED setting work") + endif() +elseif(PLATFORM_INT STREQUAL "SIMULATOR") + set(SDK_NAME iphonesimulator) + if(NOT ARCHS) + set(ARCHS i386) + endif() + message(DEPRECATION "SIMULATOR IS DEPRECATED. 
Consider using SIMULATOR64 instead.") +elseif(PLATFORM_INT STREQUAL "SIMULATOR64") + set(SDK_NAME iphonesimulator) + if(NOT ARCHS) + set(ARCHS x86_64) + endif() +elseif(PLATFORM_INT STREQUAL "TVOS") + set(SDK_NAME appletvos) + if(NOT ARCHS) + set(ARCHS arm64) + endif() +elseif (PLATFORM_INT STREQUAL "TVOSCOMBINED") + set(SDK_NAME appletvos) + if(MODERN_CMAKE) + if(NOT ARCHS) + set(ARCHS arm64 x86_64) + endif() + else() + message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the TVOSCOMBINED setting work") endif() -elseif (IOS_PLATFORM STREQUAL "SIMULATOR") - set(XCODE_IOS_PLATFORM iphonesimulator) - if(NOT IOS_ARCH) - set(IOS_ARCH i386) +elseif(PLATFORM_INT STREQUAL "SIMULATOR_TVOS") + set(SDK_NAME appletvsimulator) + if(NOT ARCHS) + set(ARCHS x86_64) endif() -elseif(IOS_PLATFORM STREQUAL "SIMULATOR64") - set(XCODE_IOS_PLATFORM iphonesimulator) - if(NOT IOS_ARCH) - set(IOS_ARCH x86_64) +elseif(PLATFORM_INT STREQUAL "WATCHOS") + set(SDK_NAME watchos) + if(NOT ARCHS) + if (XCODE_VERSION VERSION_GREATER 10.0) + set(ARCHS armv7k arm64_32) + else() + set(ARCHS armv7k) + endif() endif() -elseif (IOS_PLATFORM STREQUAL "TVOS") - set(XCODE_IOS_PLATFORM appletvos) - if(NOT IOS_ARCH) - set(IOS_ARCH arm64) +elseif(PLATFORM_INT STREQUAL "WATCHOSCOMBINED") + set(SDK_NAME watchos) + if(MODERN_CMAKE) + if(NOT ARCHS) + if (XCODE_VERSION VERSION_GREATER 10.0) + set(ARCHS armv7k arm64_32 i386) + else() + set(ARCHS armv7k i386) + endif() + endif() + else() + message(FATAL_ERROR "Please make sure that you are running CMake 3.14+ to make the WATCHOSCOMBINED setting work") endif() -elseif (IOS_PLATFORM STREQUAL "SIMULATOR_TVOS") - set(XCODE_IOS_PLATFORM appletvsimulator) - if(NOT IOS_ARCH) - set(IOS_ARCH x86_64) +elseif(PLATFORM_INT STREQUAL "SIMULATOR_WATCHOS") + set(SDK_NAME watchsimulator) + if(NOT ARCHS) + set(ARCHS i386) endif() else() - message(FATAL_ERROR "Invalid IOS_PLATFORM: ${IOS_PLATFORM}") + message(FATAL_ERROR "Invalid PLATFORM: ${PLATFORM_INT}") 
endif() -message(STATUS "Configuring iOS build for platform: ${IOS_PLATFORM}, " - "architecture(s): ${IOS_ARCH}") +message(STATUS "Configuring ${SDK_NAME} build for platform: ${PLATFORM_INT}, architecture(s): ${ARCHS}") + +if(MODERN_CMAKE AND PLATFORM_INT MATCHES ".*COMBINED" AND NOT USED_CMAKE_GENERATOR MATCHES "Xcode") + message(FATAL_ERROR "The COMBINED options only work with Xcode generator, -G Xcode") +endif() + # If user did not specify the SDK root to use, then query xcodebuild for it. -if (NOT CMAKE_OSX_SYSROOT) - execute_process(COMMAND xcodebuild -version -sdk ${XCODE_IOS_PLATFORM} Path - OUTPUT_VARIABLE CMAKE_OSX_SYSROOT +execute_process(COMMAND xcodebuild -version -sdk ${SDK_NAME} Path + OUTPUT_VARIABLE CMAKE_OSX_SYSROOT_INT ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) - message(STATUS "Using SDK: ${CMAKE_OSX_SYSROOT} for platform: ${IOS_PLATFORM}") -endif() -if (NOT EXISTS ${CMAKE_OSX_SYSROOT}) +if (NOT DEFINED CMAKE_OSX_SYSROOT_INT AND NOT DEFINED CMAKE_OSX_SYSROOT) + message(SEND_ERROR "Please make sure that Xcode is installed and that the toolchain" + "is pointing to the correct path. Please run:" + "sudo xcode-select -s /Applications/Xcode.app/Contents/Developer" + "and see if that fixes the problem for you.") message(FATAL_ERROR "Invalid CMAKE_OSX_SYSROOT: ${CMAKE_OSX_SYSROOT} " - "does not exist.") + "does not exist.") +elseif(DEFINED CMAKE_OSX_SYSROOT) + message(STATUS "Using SDK: ${CMAKE_OSX_SYSROOT} for platform: ${PLATFORM_INT} when checking compatibility") +elseif(DEFINED CMAKE_OSX_SYSROOT_INT) + message(STATUS "Using SDK: ${CMAKE_OSX_SYSROOT_INT} for platform: ${PLATFORM_INT}") + set(CMAKE_OSX_SYSROOT "${CMAKE_OSX_SYSROOT_INT}" CACHE INTERNAL "") +endif() + +# Set Xcode property for SDKROOT as well if Xcode generator is used +if(USED_CMAKE_GENERATOR MATCHES "Xcode") + set(CMAKE_OSX_SYSROOT "${SDK_NAME}" CACHE INTERNAL "") endif() + # Specify minimum version of deployment target. 
-if (NOT DEFINED IOS_DEPLOYMENT_TARGET) - # Unless specified, SDK version 8.0 is used by default as minimum target version. - set(IOS_DEPLOYMENT_TARGET "8.0" - CACHE STRING "Minimum iOS version to build for." ) - message(STATUS "Using the default min-version since IOS_DEPLOYMENT_TARGET not provided!") +if(NOT DEFINED DEPLOYMENT_TARGET) + if (PLATFORM_INT STREQUAL "WATCHOS" OR PLATFORM_INT STREQUAL "SIMULATOR_WATCHOS") + # Unless specified, SDK version 2.0 is used by default as minimum target version (watchOS). + set(DEPLOYMENT_TARGET "2.0" + CACHE STRING "Minimum SDK version to build for." ) + else() + # Unless specified, SDK version 9.0 is used by default as minimum target version (iOS, tvOS). + set(DEPLOYMENT_TARGET "9.0" + CACHE STRING "Minimum SDK version to build for." ) + endif() + message(STATUS "Using the default min-version since DEPLOYMENT_TARGET not provided!") endif() # Use bitcode or not -if (NOT DEFINED ENABLE_BITCODE AND NOT IOS_ARCH MATCHES "((^|, )(i386|x86_64))+") +if(NOT DEFINED ENABLE_BITCODE AND NOT ARCHS MATCHES "((^|;|, )(i386|x86_64))+") # Unless specified, enable bitcode support by default - set(ENABLE_BITCODE TRUE CACHE BOOL "Whether or not to enable bitcode") message(STATUS "Enabling bitcode support by default. ENABLE_BITCODE not provided!") -endif() -if (NOT DEFINED ENABLE_BITCODE) + set(ENABLE_BITCODE TRUE) +elseif(NOT DEFINED ENABLE_BITCODE) message(STATUS "Disabling bitcode support by default on simulators. ENABLE_BITCODE not provided for override!") + set(ENABLE_BITCODE FALSE) endif() +set(ENABLE_BITCODE_INT ${ENABLE_BITCODE} CACHE BOOL "Whether or not to enable bitcode" ${FORCE_CACHE}) # Use ARC or not -if (NOT DEFINED ENABLE_ARC) +if(NOT DEFINED ENABLE_ARC) # Unless specified, enable ARC support by default - set(ENABLE_ARC TRUE CACHE BOOL "Whether or not to enable ARC") + set(ENABLE_ARC TRUE) message(STATUS "Enabling ARC support by default. 
ENABLE_ARC not provided!") endif() +set(ENABLE_ARC_INT ${ENABLE_ARC} CACHE BOOL "Whether or not to enable ARC" ${FORCE_CACHE}) # Use hidden visibility or not -if (NOT DEFINED ENABLE_VISIBILITY) +if(NOT DEFINED ENABLE_VISIBILITY) # Unless specified, disable symbols visibility by default - set(ENABLE_VISIBILITY FALSE CACHE BOOL "Whether or not to hide symbols (-fvisibility=hidden)") + set(ENABLE_VISIBILITY FALSE) message(STATUS "Hiding symbols visibility by default. ENABLE_VISIBILITY not provided!") endif() +set(ENABLE_VISIBILITY_INT ${ENABLE_VISIBILITY} CACHE BOOL "Whether or not to hide symbols (-fvisibility=hidden)" ${FORCE_CACHE}) +# Set strict compiler checks or not +if(NOT DEFINED ENABLE_STRICT_TRY_COMPILE) + # Unless specified, disable strict try_compile() + set(ENABLE_STRICT_TRY_COMPILE FALSE) + message(STATUS "Using NON-strict compiler checks by default. ENABLE_STRICT_TRY_COMPILE not provided!") +endif() +set(ENABLE_STRICT_TRY_COMPILE_INT ${ENABLE_STRICT_TRY_COMPILE} CACHE BOOL "Whether or not to use strict compiler checks" ${FORCE_CACHE}) # Get the SDK version information. execute_process(COMMAND xcodebuild -sdk ${CMAKE_OSX_SYSROOT} -version SDKVersion - OUTPUT_VARIABLE IOS_SDK_VERSION + OUTPUT_VARIABLE SDK_VERSION ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + # Find the Developer root for the specific iOS platform being compiled for # from CMAKE_OSX_SYSROOT. Should be ../../ from SDK specified in -# CMAKE_OSX_SYSROOT. There does not appear to be a direct way to obtain +# CMAKE_OSX_SYSROOT. There does not appear to be a direct way to obtain # this information from xcrun or xcodebuild. 
-if (NOT CMAKE_IOS_DEVELOPER_ROOT) - get_filename_component(IOS_PLATFORM_SDK_DIR ${CMAKE_OSX_SYSROOT} PATH) - get_filename_component(CMAKE_IOS_DEVELOPER_ROOT ${IOS_PLATFORM_SDK_DIR} PATH) -endif() -if (NOT EXISTS ${CMAKE_IOS_DEVELOPER_ROOT}) - message(FATAL_ERROR "Invalid CMAKE_IOS_DEVELOPER_ROOT: " - "${CMAKE_IOS_DEVELOPER_ROOT} does not exist.") +if (NOT DEFINED CMAKE_DEVELOPER_ROOT AND NOT USED_CMAKE_GENERATOR MATCHES "Xcode") + get_filename_component(PLATFORM_SDK_DIR ${CMAKE_OSX_SYSROOT} PATH) + get_filename_component(CMAKE_DEVELOPER_ROOT ${PLATFORM_SDK_DIR} PATH) + + if (NOT DEFINED CMAKE_DEVELOPER_ROOT) + message(FATAL_ERROR "Invalid CMAKE_DEVELOPER_ROOT: " + "${CMAKE_DEVELOPER_ROOT} does not exist.") + endif() endif() # Find the C & C++ compilers for the specified SDK. -if (NOT CMAKE_C_COMPILER) +if(NOT CMAKE_C_COMPILER) execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang OUTPUT_VARIABLE CMAKE_C_COMPILER ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) message(STATUS "Using C compiler: ${CMAKE_C_COMPILER}") endif() -if (NOT CMAKE_CXX_COMPILER) +if(NOT CMAKE_CXX_COMPILER) execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang++ OUTPUT_VARIABLE CMAKE_CXX_COMPILER ERROR_QUIET @@ -233,41 +414,67 @@ if (NOT CMAKE_CXX_COMPILER) endif() # Find (Apple's) libtool. execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find libtool - OUTPUT_VARIABLE IOS_LIBTOOL + OUTPUT_VARIABLE BUILD_LIBTOOL ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) -message(STATUS "Using libtool: ${IOS_LIBTOOL}") +message(STATUS "Using libtool: ${BUILD_LIBTOOL}") # Configure libtool to be used instead of ar + ranlib to build static libraries. # This is required on Xcode 7+, but should also work on previous versions of # Xcode. 
set(CMAKE_C_CREATE_STATIC_LIBRARY - "${IOS_LIBTOOL} -static -o ") + "${BUILD_LIBTOOL} -static -o ") set(CMAKE_CXX_CREATE_STATIC_LIBRARY - "${IOS_LIBTOOL} -static -o ") + "${BUILD_LIBTOOL} -static -o ") +# Find the toolchain's provided install_name_tool if none is found on the host +if(NOT CMAKE_INSTALL_NAME_TOOL) + execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find install_name_tool + OUTPUT_VARIABLE CMAKE_INSTALL_NAME_TOOL_INT + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE) + set(CMAKE_INSTALL_NAME_TOOL ${CMAKE_INSTALL_NAME_TOOL_INT} CACHE STRING "" ${FORCE_CACHE}) + message(STATUS "Using install_name_tool: ${CMAKE_INSTALL_NAME_TOOL}") +endif() # Get the version of Darwin (OS X) of the host. execute_process(COMMAND uname -r OUTPUT_VARIABLE CMAKE_HOST_SYSTEM_VERSION ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) +# CMake 3.14+ support building for iOS, watchOS and tvOS out of the box. +if(MODERN_CMAKE) + if(SDK_NAME MATCHES "iphone") + set(CMAKE_SYSTEM_NAME iOS CACHE INTERNAL "" ${FORCE_CACHE}) + elseif(SDK_NAME MATCHES "appletv") + set(CMAKE_SYSTEM_NAME tvOS CACHE INTERNAL "" ${FORCE_CACHE}) + elseif(SDK_NAME MATCHES "watch") + set(CMAKE_SYSTEM_NAME watchOS CACHE INTERNAL "" ${FORCE_CACHE}) + endif() + + # Provide flags for a combined FAT library build on newer CMake versions + if(PLATFORM_INT MATCHES ".*COMBINED") + set(CMAKE_XCODE_ATTRIBUTE_ONLY_ACTIVE_ARCH NO CACHE INTERNAL "" ${FORCE_CACHE}) + set(CMAKE_IOS_INSTALL_COMBINED YES CACHE INTERNAL "" ${FORCE_CACHE}) + message(STATUS "Will combine built (static) artifacts into FAT lib...") + endif() +else() + # Legacy code path prior to CMake 3.14 + set(CMAKE_SYSTEM_NAME Darwin CACHE INTERNAL "" ${FORCE_CACHE}) +endif() # Standard settings. 
-set(CMAKE_SYSTEM_NAME Darwin CACHE INTERNAL "") -set(CMAKE_SYSTEM_VERSION ${IOS_SDK_VERSION} CACHE INTERNAL "") +set(CMAKE_SYSTEM_VERSION ${SDK_VERSION} CACHE INTERNAL "") set(UNIX TRUE CACHE BOOL "") set(APPLE TRUE CACHE BOOL "") set(IOS TRUE CACHE BOOL "") set(CMAKE_AR ar CACHE FILEPATH "" FORCE) set(CMAKE_RANLIB ranlib CACHE FILEPATH "" FORCE) -# Force unset of OS X-specific deployment target (otherwise autopopulated), -# required as of cmake 2.8.10. -set(CMAKE_OSX_DEPLOYMENT_TARGET "" CACHE STRING - "Must be empty for iOS builds." FORCE) +set(CMAKE_STRIP strip CACHE FILEPATH "" FORCE) # Set the architectures for which to build. -set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE STRING "Build architecture for iOS") -# Skip the platform compiler checks for cross compiling. -set(CMAKE_CXX_COMPILER_FORCED TRUE) -set(CMAKE_CXX_COMPILER_WORKS TRUE) -set(CMAKE_C_COMPILER_FORCED TRUE) -set(CMAKE_C_COMPILER_WORKS TRUE) +set(CMAKE_OSX_ARCHITECTURES ${ARCHS} CACHE STRING "Build architecture for iOS") +# Change the type of target generated for try_compile() so it'll work when cross-compiling, weak compiler checks +if(ENABLE_STRICT_TRY_COMPILE_INT) + message(STATUS "Using strict compiler checks (default in CMake).") +else() + set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) +endif() # All iOS/Darwin specific settings - some may be redundant. 
set(CMAKE_SHARED_LIBRARY_PREFIX "lib") set(CMAKE_SHARED_LIBRARY_SUFFIX ".dylib") @@ -284,130 +491,161 @@ set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ") set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}") set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}") -if(IOS_ARCH MATCHES "((^|, )(arm64|x86_64))+") +if(ARCHS MATCHES "((^|;|, )(arm64|arm64e|x86_64))+") set(CMAKE_C_SIZEOF_DATA_PTR 8) set(CMAKE_CXX_SIZEOF_DATA_PTR 8) + if(ARCHS MATCHES "((^|;|, )(arm64|arm64e))+") + set(CMAKE_SYSTEM_PROCESSOR "aarch64") + else() + set(CMAKE_SYSTEM_PROCESSOR "x86_64") + endif() message(STATUS "Using a data_ptr size of 8") else() set(CMAKE_C_SIZEOF_DATA_PTR 4) set(CMAKE_CXX_SIZEOF_DATA_PTR 4) + set(CMAKE_SYSTEM_PROCESSOR "arm") message(STATUS "Using a data_ptr size of 4") endif() -message(STATUS "Building for minimum iOS version: ${IOS_DEPLOYMENT_TARGET}" - " (SDK version: ${IOS_SDK_VERSION})") +message(STATUS "Building for minimum ${SDK_NAME} version: ${DEPLOYMENT_TARGET}" + " (SDK version: ${SDK_VERSION})") # Note that only Xcode 7+ supports the newer more specific: -# -m${XCODE_IOS_PLATFORM}-version-min flags, older versions of Xcode use: +# -m${SDK_NAME}-version-min flags, older versions of Xcode use: # -m(ios/ios-simulator)-version-min instead. -if (IOS_PLATFORM STREQUAL "OS") - if (XCODE_VERSION VERSION_LESS 7.0) - set(XCODE_IOS_PLATFORM_VERSION_FLAGS - "-mios-version-min=${IOS_DEPLOYMENT_TARGET}") +if(PLATFORM_INT STREQUAL "OS" OR PLATFORM_INT STREQUAL "OS64") + if(XCODE_VERSION VERSION_LESS 7.0) + set(SDK_NAME_VERSION_FLAGS + "-mios-version-min=${DEPLOYMENT_TARGET}") else() - # Xcode 7.0+ uses flags we can build directly from XCODE_IOS_PLATFORM. - set(XCODE_IOS_PLATFORM_VERSION_FLAGS - "-m${XCODE_IOS_PLATFORM}-version-min=${IOS_DEPLOYMENT_TARGET}") + # Xcode 7.0+ uses flags we can build directly from SDK_NAME. 
+ set(SDK_NAME_VERSION_FLAGS + "-m${SDK_NAME}-version-min=${DEPLOYMENT_TARGET}") endif() -elseif (IOS_PLATFORM STREQUAL "TVOS") - set(XCODE_IOS_PLATFORM_VERSION_FLAGS - "-mtvos-version-min=${IOS_DEPLOYMENT_TARGET}") -elseif (IOS_PLATFORM STREQUAL "SIMULATOR_TVOS") - set(XCODE_IOS_PLATFORM_VERSION_FLAGS - "-mtvos-simulator-version-min=${IOS_DEPLOYMENT_TARGET}") +elseif(PLATFORM_INT STREQUAL "TVOS") + set(SDK_NAME_VERSION_FLAGS + "-mtvos-version-min=${DEPLOYMENT_TARGET}") +elseif(PLATFORM_INT STREQUAL "SIMULATOR_TVOS") + set(SDK_NAME_VERSION_FLAGS + "-mtvos-simulator-version-min=${DEPLOYMENT_TARGET}") +elseif(PLATFORM_INT STREQUAL "WATCHOS") + set(SDK_NAME_VERSION_FLAGS + "-mwatchos-version-min=${DEPLOYMENT_TARGET}") +elseif(PLATFORM_INT STREQUAL "SIMULATOR_WATCHOS") + set(SDK_NAME_VERSION_FLAGS + "-mwatchos-simulator-version-min=${DEPLOYMENT_TARGET}") else() # SIMULATOR or SIMULATOR64 both use -mios-simulator-version-min. - set(XCODE_IOS_PLATFORM_VERSION_FLAGS - "-mios-simulator-version-min=${IOS_DEPLOYMENT_TARGET}") + set(SDK_NAME_VERSION_FLAGS + "-mios-simulator-version-min=${DEPLOYMENT_TARGET}") endif() -message(STATUS "Version flags set to: ${XCODE_IOS_PLATFORM_VERSION_FLAGS}") +message(STATUS "Version flags set to: ${SDK_NAME_VERSION_FLAGS}") +set(CMAKE_OSX_DEPLOYMENT_TARGET ${DEPLOYMENT_TARGET} CACHE STRING + "Set CMake deployment target" ${FORCE_CACHE}) -if (ENABLE_BITCODE) +if(ENABLE_BITCODE_INT) set(BITCODE "-fembed-bitcode") - set(HEADER_PAD "") + set(CMAKE_XCODE_ATTRIBUTE_BITCODE_GENERATION_MODE bitcode CACHE INTERNAL "") message(STATUS "Enabling bitcode support.") else() set(BITCODE "") - set(HEADER_PAD "-headerpad_max_install_names") + set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE NO CACHE INTERNAL "") message(STATUS "Disabling bitcode support.") endif() -if (ENABLE_ARC) +if(ENABLE_ARC_INT) set(FOBJC_ARC "-fobjc-arc") + set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC YES CACHE INTERNAL "") message(STATUS "Enabling ARC support.") else() set(FOBJC_ARC 
"-fno-objc-arc") + set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC NO CACHE INTERNAL "") message(STATUS "Disabling ARC support.") endif() -if (NOT ENABLE_VISIBILITY) +if(NOT ENABLE_VISIBILITY_INT) set(VISIBILITY "-fvisibility=hidden") + set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN YES CACHE INTERNAL "") message(STATUS "Hiding symbols (-fvisibility=hidden).") else() set(VISIBILITY "") + set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN NO CACHE INTERNAL "") endif() -set(CMAKE_C_FLAGS -"${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${BITCODE} -fobjc-abi-version=2 ${FOBJC_ARC} ${C_FLAGS}") -# Hidden visibilty is required for C++ on iOS. -set(CMAKE_CXX_FLAGS -"${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${BITCODE} ${VISIBILITY} -fvisibility-inlines-hidden -fobjc-abi-version=2 ${FOBJC_ARC} ${CXX_FLAGS}") -set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS} -DNDEBUG -Os -fomit-frame-pointer -ffast-math ${BITCODE} ${CXX_FLAGS_MINSIZEREL}") -set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS} -DNDEBUG -O2 -g -fomit-frame-pointer -ffast-math ${BITCODE} ${CXX_FLAGS_RELWITHDEBINFO}") -set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -DNDEBUG -O3 -fomit-frame-pointer -ffast-math ${BITCODE} ${CXX_FLAGS_RELEASE}") -set(CMAKE_C_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first ${C_LINK_FLAGS}") -set(CMAKE_CXX_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first ${CXX_LINK_FLAGS}") - -# In order to ensure that the updated compiler flags are used in try_compile() -# tests, we have to forcibly set them in the CMake cache, not merely set them -# in the local scope. 
-list(APPEND VARS_TO_FORCE_IN_CACHE - CMAKE_C_FLAGS - CMAKE_CXX_FLAGS - CMAKE_CXX_FLAGS_RELWITHDEBINFO - CMAKE_CXX_FLAGS_MINSIZEREL - CMAKE_CXX_FLAGS_RELEASE - CMAKE_C_LINK_FLAGS - CMAKE_CXX_LINK_FLAGS) -foreach(VAR_TO_FORCE ${VARS_TO_FORCE_IN_CACHE}) - set(${VAR_TO_FORCE} "${${VAR_TO_FORCE}}" CACHE STRING "" FORCE) -endforeach() +#Check if Xcode generator is used, since that will handle these flags automagically +if(USED_CMAKE_GENERATOR MATCHES "Xcode") + message(STATUS "Not setting any manual command-line buildflags, since Xcode is selected as generator.") +else() + set(CMAKE_C_FLAGS + "${SDK_NAME_VERSION_FLAGS} ${BITCODE} -fobjc-abi-version=2 ${FOBJC_ARC} ${CMAKE_C_FLAGS}") + # Hidden visibilty is required for C++ on iOS. + set(CMAKE_CXX_FLAGS + "${SDK_NAME_VERSION_FLAGS} ${BITCODE} ${VISIBILITY} -fvisibility-inlines-hidden -fobjc-abi-version=2 ${FOBJC_ARC} ${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} -O0 -g ${CMAKE_CXX_FLAGS_DEBUG}") + set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS} -DNDEBUG -Os -ffast-math ${CMAKE_CXX_FLAGS_MINSIZEREL}") + set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS} -DNDEBUG -O2 -g -ffast-math ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -DNDEBUG -O3 -ffast-math ${CMAKE_CXX_FLAGS_RELEASE}") + set(CMAKE_C_LINK_FLAGS "${SDK_NAME_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_C_LINK_FLAGS}") + set(CMAKE_CXX_LINK_FLAGS "${SDK_NAME_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_CXX_LINK_FLAGS}") + + # In order to ensure that the updated compiler flags are used in try_compile() + # tests, we have to forcibly set them in the CMake cache, not merely set them + # in the local scope. 
+ list(APPEND VARS_TO_FORCE_IN_CACHE + CMAKE_C_FLAGS + CMAKE_CXX_FLAGS + CMAKE_CXX_FLAGS_DEBUG + CMAKE_CXX_FLAGS_RELWITHDEBINFO + CMAKE_CXX_FLAGS_MINSIZEREL + CMAKE_CXX_FLAGS_RELEASE + CMAKE_C_LINK_FLAGS + CMAKE_CXX_LINK_FLAGS) + foreach(VAR_TO_FORCE ${VARS_TO_FORCE_IN_CACHE}) + set(${VAR_TO_FORCE} "${${VAR_TO_FORCE}}" CACHE STRING "") + endforeach() +endif() set(CMAKE_PLATFORM_HAS_INSTALLNAME 1) -set (CMAKE_SHARED_LINKER_FLAGS "-rpath @executable_path/Frameworks -rpath @loader_path/Frameworks") -set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib ${HEADER_PAD}") -set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle ${HEADER_PAD}") +set(CMAKE_SHARED_LINKER_FLAGS "-rpath @executable_path/Frameworks -rpath @loader_path/Frameworks") +set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -Wl,-headerpad_max_install_names") +set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -Wl,-headerpad_max_install_names") set(CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,") set(CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,") -set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".so" ".a") - -# Hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old -# build tree (where install_name_tool was hardcoded) and where -# CMAKE_INSTALL_NAME_TOOL isn't in the cache and still cmake didn't fail in -# CMakeFindBinUtils.cmake (because it isn't rerun) hardcode -# CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did -# before, Alex. -if (NOT DEFINED CMAKE_INSTALL_NAME_TOOL) - find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool) -endif (NOT DEFINED CMAKE_INSTALL_NAME_TOOL) +set(CMAKE_FIND_LIBRARY_SUFFIXES ".tbd" ".dylib" ".so" ".a") +set(CMAKE_SHARED_LIBRARY_SONAME_C_FLAG "-install_name") # Set the find root to the iOS developer roots and to user defined paths. 
-set(CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_OSX_SYSROOT} - ${CMAKE_PREFIX_PATH} CACHE STRING "iOS find search path root" FORCE) +set(CMAKE_FIND_ROOT_PATH ${CMAKE_OSX_SYSROOT_INT} ${CMAKE_PREFIX_PATH} CACHE STRING "Root path that will be prepended + to all search paths") # Default to searching for frameworks first. set(CMAKE_FIND_FRAMEWORK FIRST) # Set up the default search directories for frameworks. -set(CMAKE_SYSTEM_FRAMEWORK_PATH - ${CMAKE_OSX_SYSROOT}/System/Library/Frameworks - ${CMAKE_OSX_SYSROOT}/System/Library/PrivateFrameworks - ${CMAKE_OSX_SYSROOT}/Developer/Library/Frameworks) -# Only search the specified iOS SDK, not the remainder of the host filesystem. -set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY) -set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) -set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) -# This little macro lets you set any XCode specific property. +set(CMAKE_FRAMEWORK_PATH + ${CMAKE_DEVELOPER_ROOT}/Library/PrivateFrameworks + ${CMAKE_OSX_SYSROOT_INT}/System/Library/Frameworks + ${CMAKE_FRAMEWORK_PATH} CACHE STRING "Frameworks search paths" ${FORCE_CACHE}) + +# By default, search both the specified iOS SDK and the remainder of the host filesystem. +if(NOT CMAKE_FIND_ROOT_PATH_MODE_PROGRAM) + set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH CACHE STRING "" ${FORCE_CACHE}) +endif() +if(NOT CMAKE_FIND_ROOT_PATH_MODE_LIBRARY) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY CACHE STRING "" ${FORCE_CACHE}) +endif() +if(NOT CMAKE_FIND_ROOT_PATH_MODE_INCLUDE) + set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY CACHE STRING "" ${FORCE_CACHE}) +endif() +if(NOT CMAKE_FIND_ROOT_PATH_MODE_PACKAGE) + set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY CACHE STRING "" ${FORCE_CACHE}) +endif() + +# +# Some helper-macros below to simplify and beautify the CMakeFile +# + +# This little macro lets you set any Xcode specific property. 
macro(set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE XCODE_RELVERSION) set(XCODE_RELVERSION_I "${XCODE_RELVERSION}") - if (XCODE_RELVERSION_I STREQUAL "All") + if(XCODE_RELVERSION_I STREQUAL "All") set_property(TARGET ${TARGET} PROPERTY XCODE_ATTRIBUTE_${XCODE_PROPERTY} "${XCODE_VALUE}") else() @@ -420,10 +658,12 @@ macro(find_host_package) set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER) set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER) + set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE NEVER) set(IOS FALSE) find_package(${ARGN}) set(IOS TRUE) - set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY) - set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) - set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) + set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH) + set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH) + set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE BOTH) endmacro(find_host_package) diff --git a/cmake/macros.cmake b/cmake/macros.cmake index 08287e4d6..d0b17d748 100644 --- a/cmake/macros.cmake +++ b/cmake/macros.cmake @@ -10,28 +10,3 @@ else (UNIX) set(LIB_PFX "") set(LIB_EXT ".dll") endif (UNIX) - -if(CMAKE_SYSTEM_NAME MATCHES "^Android") - set(SYSTEM.Android 1) -elseif(CMAKE_SYSTEM_NAME MATCHES "^Linux") - set(SYSTEM.Linux 1) -endif() - -if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") - set(PROCESSOR.arm 1) -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64") - set(PROCESSOR.aarch64 1) -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^x86") - set(PROCESSOR.x86 1) -endif() - -# The Compiler ID -if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") - set(COMPILER_NAME "CLANG") -elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - set(COMPILER_NAME "GCC") -elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") - # using Intel C++ -elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - set(COMPILER_NAME "MSVC") -endif() diff --git a/demo/android/app/includes/MNN/AutoTime.hpp b/demo/android/app/includes/MNN/AutoTime.hpp index 1575f9bbd..2d14eb166 100644 --- 
a/demo/android/app/includes/MNN/AutoTime.hpp +++ b/demo/android/app/includes/MNN/AutoTime.hpp @@ -11,7 +11,7 @@ #include #include -#include "MNNDefine.h" +#include namespace MNN { diff --git a/demo/android/app/includes/MNN/ImageProcess.hpp b/demo/android/app/includes/MNN/ImageProcess.hpp index 3ffc05002..c511550cd 100644 --- a/demo/android/app/includes/MNN/ImageProcess.hpp +++ b/demo/android/app/includes/MNN/ImageProcess.hpp @@ -9,9 +9,9 @@ #ifndef ImageProcess_hpp #define ImageProcess_hpp -#include "ErrorCode.hpp" +#include #include "Matrix.h" -#include "Tensor.hpp" +#include namespace MNN { namespace CV { diff --git a/demo/android/app/includes/MNN/Interpreter.hpp b/demo/android/app/includes/MNN/Interpreter.hpp index ccc116710..b5eae7985 100644 --- a/demo/android/app/includes/MNN/Interpreter.hpp +++ b/demo/android/app/includes/MNN/Interpreter.hpp @@ -12,9 +12,9 @@ #include #include #include -#include "ErrorCode.hpp" -#include "MNNForwardType.h" -#include "Tensor.hpp" +#include +#include +#include namespace MNN { diff --git a/demo/android/app/includes/MNN/Rect.h b/demo/android/app/includes/MNN/Rect.h index 598b64ce9..91c4950f4 100644 --- a/demo/android/app/includes/MNN/Rect.h +++ b/demo/android/app/includes/MNN/Rect.h @@ -29,7 +29,7 @@ #include #include #include -#include "MNNDefine.h" +#include namespace MNN { namespace CV { diff --git a/demo/android/app/includes/MNN/Tensor.hpp b/demo/android/app/includes/MNN/Tensor.hpp index 973d2b032..33f8d88b4 100644 --- a/demo/android/app/includes/MNN/Tensor.hpp +++ b/demo/android/app/includes/MNN/Tensor.hpp @@ -11,7 +11,7 @@ #include #include "HalideRuntime.h" -#include "MNNDefine.h" +#include namespace MNN { diff --git a/demo/android/app/src/main/jni/mnnportraitnative.cpp b/demo/android/app/src/main/jni/mnnportraitnative.cpp index c021f7d53..eddbeae02 100644 --- a/demo/android/app/src/main/jni/mnnportraitnative.cpp +++ b/demo/android/app/src/main/jni/mnnportraitnative.cpp @@ -75,4 +75,4 @@ 
Java_com_taobao_android_mnn_MNNPortraitNative_nativeConvertMaskToPixelsMultiChan env->ReleaseFloatArrayElements(jmaskarray, scores, 0); return arr; -} \ No newline at end of file +} diff --git a/demo/exec/CMakeLists.txt b/demo/exec/CMakeLists.txt index 785685d9f..a38b8111e 100644 --- a/demo/exec/CMakeLists.txt +++ b/demo/exec/CMakeLists.txt @@ -1,16 +1,17 @@ -# put output to build dir -SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../../) -include_directories(../../3rd_party/imageHelper/) -add_executable(pictureRecognition.out pictureRecognition.cpp) -target_link_libraries(pictureRecognition.out ${MNN_DEPEND}) -add_executable(pictureRotate.out pictureRotate.cpp) -target_link_libraries(pictureRotate.out MNN) +IF(MNN_BUILD_DEMO) + message(STATUS "MNN Depends:" ${MNN_DEPS}) + add_executable(pictureRecognition.out ${CMAKE_CURRENT_LIST_DIR}/pictureRecognition.cpp) + target_link_libraries(pictureRecognition.out ${MNN_DEPS}) + add_executable(pictureRotate.out ${CMAKE_CURRENT_LIST_DIR}/pictureRotate.cpp) + target_link_libraries(pictureRotate.out ${MNN_DEPS}) -add_executable(multiPose.out multiPose.cpp) -target_link_libraries(multiPose.out ${MNN_DEPEND}) + add_executable(multiPose.out ${CMAKE_CURRENT_LIST_DIR}/multiPose.cpp) + target_link_libraries(multiPose.out ${MNN_DEPS}) -add_executable(segment.out segment.cpp) -target_link_libraries(segment.out MNN_Express ${MNN_DEPEND}) + add_executable(segment.out ${CMAKE_CURRENT_LIST_DIR}/segment.cpp) + target_link_libraries(segment.out ${MNN_DEPS}) -add_executable(expressDemo.out expressDemo.cpp) -target_link_libraries(expressDemo.out MNN_Express ${MNN_DEPEND}) + add_executable(expressDemo.out ${CMAKE_CURRENT_LIST_DIR}/expressDemo.cpp) + target_link_libraries(expressDemo.out ${MNN_DEPS}) + +ENDIF() diff --git a/demo/exec/expressDemo.cpp b/demo/exec/expressDemo.cpp index 4f31c74f2..8ab5153af 100644 --- a/demo/exec/expressDemo.cpp +++ b/demo/exec/expressDemo.cpp @@ -1,13 +1,12 @@ - -#include "Expr.hpp" -#include 
"ExprCreator.hpp" -#include "Optimizer.hpp" +#include +#include +#include #include #include #include #include #define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include using namespace MNN::Express; #define UP_DIV(x) (((x)+3)/4) @@ -26,7 +25,7 @@ static std::pair _makeGEMMByMatMul(int e, int l, int h) { static std::pair _makeGEMMByConvolution(int e, int l, int h) { auto icC4 = UP_DIV(l); auto ocC4 = UP_DIV(h); - + auto input = _Input({1, icC4*4, 1, e}); return std::make_pair(input, _Conv(0.0f, 0.0f, input, {icC4*4, ocC4*4}, {1, 1})); } @@ -94,7 +93,7 @@ static void _testGEMM() { auto flops = (float)x * (float)y * (float)z / 1024.0f / 1024.0f; FUNC_PRINT_ALL(flops, f); } - + auto conv = _makeGEMMByConvolution(1024, 1024, 1024); for (int v=0; v<10; ++v) { conv.first->writeMap(); @@ -137,7 +136,9 @@ int main(int argc, const char* argv[]) { } auto model = Variable::loadMap(modelFileName); auto inputOutput = Variable::getInputAndOutput(model); - auto optimizer = Optimizer::create(device); + Optimizer::Config config; + config.device = device; + auto optimizer = Optimizer::create(config); auto inputs = inputOutput.first; auto outputs = inputOutput.second; if (nullptr == optimizer) { @@ -197,7 +198,7 @@ int main(int argc, const char* argv[]) { } input->unMap(); } - + { auto outputPtr = output->readMap(); if (nullptr == outputPtr) { diff --git a/demo/exec/multiPose.cpp b/demo/exec/multiPose.cpp index 142466576..48e8a4b4d 100644 --- a/demo/exec/multiPose.cpp +++ b/demo/exec/multiPose.cpp @@ -15,12 +15,12 @@ #include "stb_image.h" #include "stb_image_write.h" -#include "ImageProcess.hpp" -#include "Interpreter.hpp" +#include +#include #include "PoseNames.hpp" #define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include using namespace MNN; #define MODEL_IMAGE_SIZE 513 @@ -361,7 +361,7 @@ int main(int argc, char* argv[]) { const auto rgbaPtr = reinterpret_cast(inputImage); pretreat->convert(rgbaPtr, originalWidth, originalHeight, 0, input); } - + // read image data from 
txt // { // MNN::Tensor givenTensor(input, Tensor::CAFFE); @@ -392,7 +392,7 @@ int main(int argc, char* argv[]) { Tensor displacementFwdHost(displacementFwd, Tensor::CAFFE); Tensor displacementBwdHost(displacementBwd, Tensor::CAFFE); Tensor heatmapsHost(heatmaps, Tensor::CAFFE); - + offsets->copyToHostTensor(&offsetsHost); displacementFwd->copyToHostTensor(&displacementFwdHost); displacementBwd->copyToHostTensor(&displacementBwdHost); diff --git a/demo/exec/pictureRecognition.cpp b/demo/exec/pictureRecognition.cpp index 26b1dbf9b..4284d5758 100644 --- a/demo/exec/pictureRecognition.cpp +++ b/demo/exec/pictureRecognition.cpp @@ -7,8 +7,8 @@ // #include -#include "ImageProcess.hpp" -#include "Interpreter.hpp" +#include +#include #define MNN_OPEN_TIME_TRACE #include #include @@ -16,7 +16,7 @@ #include #include #include -#include "AutoTime.hpp" +#include #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #include "stb_image_write.h" diff --git a/demo/exec/pictureRotate.cpp b/demo/exec/pictureRotate.cpp index 750e7a472..7964646d1 100644 --- a/demo/exec/pictureRotate.cpp +++ b/demo/exec/pictureRotate.cpp @@ -7,14 +7,14 @@ // #include -#include "ImageProcess.hpp" +#include #define MNN_OPEN_TIME_TRACE #include #include #include #include #include -#include "AutoTime.hpp" +#include #define STB_IMAGE_IMPLEMENTATION #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image.h" diff --git a/demo/exec/segment.cpp b/demo/exec/segment.cpp index d1a2744ba..0b1015b51 100644 --- a/demo/exec/segment.cpp +++ b/demo/exec/segment.cpp @@ -7,7 +7,7 @@ // #include -#include "ImageProcess.hpp" +#include #define MNN_OPEN_TIME_TRACE #include #include @@ -15,10 +15,10 @@ #include #include #include -#include "Expr.hpp" -#include "ExprCreator.hpp" -#include "AutoTime.hpp" -#include "Optimizer.hpp" +#include +#include +#include +#include #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION @@ -38,9 +38,11 @@ int main(int argc, const char* argv[]) { 
MNN_ERROR("Invalid Model\n"); return 0; } - auto optimizer = Optimizer::create(Optimizer::CPU); + Optimizer::Config config; + config.device = Optimizer::CPU; + auto optimizer = Optimizer::create(config); optimizer->onExecute(Variable::mapToSequence(net.second)); - + auto input = net.first.begin()->second; auto info = input->getInfo(); if (nullptr == info) { @@ -55,7 +57,7 @@ int main(int argc, const char* argv[]) { MNN_ERROR("Alloc memory or compute size error\n"); return 0; } - + { int size_w = 0; int size_h = 0; @@ -76,7 +78,7 @@ int main(int argc, const char* argv[]) { if (size_w == 0) size_w = 1; MNN_PRINT("input: w:%d , h:%d, bpp: %d\n", size_w, size_h, bpp); - + auto inputPatch = argv[2]; int width, height, channel; auto inputImage = stbi_load(inputPatch, &width, &height, &channel, 4); @@ -98,7 +100,7 @@ int main(int argc, const char* argv[]) { ::memcpy(config.normal, normals, sizeof(normals)); config.sourceFormat = RGBA; config.destFormat = RGB; - + std::shared_ptr pretreat(ImageProcess::create(config)); pretreat->setMatrix(trans); pretreat->convert((uint8_t*)inputImage, width, height, 0, input->writeMap(), size_w, size_h, 4, 0, halide_type_of()); diff --git a/express/CMakeLists.txt b/express/CMakeLists.txt index 8be35569c..d0d2b156c 100644 --- a/express/CMakeLists.txt +++ b/express/CMakeLists.txt @@ -1,27 +1,6 @@ -# put output to build dir - -set(CMAKE_CXX_STANDARD 11) -if (NOT (WIN32 OR MSVC)) - if(CMAKE_BUILD_TYPE MATCHES "Release") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti -fno-exceptions") - add_definitions(-fvisibility=hidden) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") - endif() -endif() -file(GLOB_RECURSE Files "*.cpp" "*.hpp") -if(MNN_BUILD_SHARED_LIBS) - add_library(MNN_Express SHARED ${Files}) - if (WIN32) - install(TARGETS MNN_Express RUNTIME DESTINATION lib) - target_compile_definitions(MNN_Express PRIVATE 
"-DBUILDING_MNN_EXPRESS_DLL") - target_compile_definitions(MNN_Express INTERFACE "-DUSING_MNN_EXPRESS_DLL") - else() - install(TARGETS MNN_Express LIBRARY DESTINATION lib) - endif() -else() - add_library(MNN_Express STATIC ${Files}) - install(TARGETS MNN_Express ARCHIVE DESTINATION lib) -endif() -target_include_directories(MNN_Express PUBLIC "include" "../") - -target_link_libraries(MNN_Express ${MNN_DEPEND}) +file(GLOB_RECURSE MNN_EXPR_SRCS "${CMAKE_CURRENT_LIST_DIR}/*.cpp") +add_library(MNNExpress OBJECT ${MNN_EXPR_SRCS}) +list(APPEND MNN_OBJECTS_TO_LINK $) +list(APPEND MNN_TARGETS MNNExpress) +SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) +SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) diff --git a/express/Executor.cpp b/express/Executor.cpp new file mode 100644 index 000000000..8e64322ec --- /dev/null +++ b/express/Executor.cpp @@ -0,0 +1,424 @@ +// +// Executor.cpp +// MNN +// +// Created by MNN on 2019/07/26. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" +#include "Utils.hpp" +#include "core/Backend.hpp" +#include +#include "BasicOptimizer_generated.h" +namespace MNN { +namespace Express { +void Executor::setGlobalExecutorConfig(MNNForwardType type, const BackendConfig& config, int numberThread) { + std::lock_guard _l(mMutex); + auto creator = MNNGetExtraBackendCreator(type); + if (nullptr == creator) { + MNN_ERROR("Error to find creator of %d\n", type); + return; + } + mSolutions.clear(); + Backend::Info info; + info.type = type; + info.numThread = numberThread; + std::shared_ptr bn(creator->onCreate(info)); + mBackend = bn; +} +void Executor::gc(GCFlag flag) { + std::lock_guard _l(mMutex); + mSolutions.clear(); + mBackend->onClearBuffer(); +} + +std::shared_ptr Executor::getGlobalExecutor() { + static std::once_flag of; + static std::shared_ptr gExecutor; + std::call_once(of, [&]() { + auto creator = MNNGetExtraBackendCreator(MNN_FORWARD_CPU); + 
SizeComputerSuite::init(); + Backend::Info info; + info.type = MNN_FORWARD_CPU; + info.numThread = 1; + std::shared_ptr bn(creator->onCreate(info)); + gExecutor.reset(new Executor(bn)); + }); + return gExecutor; +} + +class Solution { +public: + Solution(){} + virtual ~ Solution(){} + virtual ErrorCode computeInfo(Expr* expr) = 0; + virtual ErrorCode compute(Expr* expr) = 0; +}; +class UnitSolution : public Solution { +public: + UnitSolution(Expr* expr, std::shared_ptr bn) { + mOutputs.resize(expr->outputSize()); + mContent.resize(expr->outputSize()); + for (int i=0; ibuffer().host = nullptr; + } + mInputs.resize(expr->inputs().size()); + mInputContent.resize(expr->inputs().size()); + for (int i=0; ibuffer().host = nullptr; + } + mBackend = bn; + mExpr = expr; + } + ~ UnitSolution() { + for (auto t : mOutputs) { + if (nullptr != t->host()) { + mBackend->onReleaseBuffer(t, Backend::DYNAMIC); + } + } + mExpr->setInfoDirty(); + } + virtual ErrorCode computeInfo(Expr* expr) override { + auto op = expr->get(); + for (int i = 0; i < expr->inputs().size(); ++i) { + auto inputExpr = expr->inputs()[i]->expr(); + Utils::copyInfoToTensor(mInputContent[i].get(), inputExpr.first->outputInfo(inputExpr.second)); + } + bool res = SizeComputer::computeOutputSize(op, mInputs, mOutputs); + if (!res) { + // Compute Error + #ifdef MNN_EXPRESS_ERROR_REPORT + FUNC_PRINT(op->type()); + #endif + return COMPUTE_SIZE_ERROR; + } + for (int i = 0; i < mOutputs.size(); ++i) { + auto tensor = mOutputs[i]; + for (int j = 0; j < tensor->dimensions(); ++j) { + if (tensor->length(j) <= 0) { + #ifdef MNN_EXPRESS_ERROR_REPORT + if (nullptr != op->name()) { + auto name = op->name()->str(); + MNN_ERROR("Error to compute shape for %s\n", op->name()->c_str()); + } + #endif + return COMPUTE_SIZE_ERROR; + } + } + auto shape = expr->outputInfo(i); + Utils::copyTensorToInfo(shape, tensor); + } + mNeedResize = true; + return NO_ERROR; + } + ErrorCode prepare(Expr* expr) { + for (int i = 0; i < 
expr->inputs().size(); ++i) { + auto inputExpr = expr->inputs()[i]->expr(); + mInputContent[i]->buffer().host = (uint8_t*)inputExpr.first->outputInfo(inputExpr.second)->ptr; + } + if (nullptr == mExecution) { + mExecution.reset(mBackend->onCreate(mInputs, mOutputs, expr->get())); + } + for (auto& output : mOutputs) { + if (output->host() != nullptr) { + mBackend->onReleaseBuffer(output, Backend::DYNAMIC); + output->buffer().host = nullptr; + } + TensorUtils::setLinearLayout(output); + auto res = mBackend->onAcquireBuffer(output, Backend::DYNAMIC); + if (!res) { + return OUT_OF_MEMORY; + } + } + for (int i = 0; i < mOutputs.size(); ++i) { + expr->outputInfo(i)->ptr = mOutputs[i]->host(); + } + return mExecution->onResize(mInputs, mOutputs); + } + virtual ErrorCode compute(Expr* expr) override { + if (mNeedResize) { + auto code = prepare(expr); + if (NO_ERROR != code) { + return code; + } + mNeedResize = false; + } + mBackend->onExecuteBegin(); + auto code = mExecution->onExecute(mInputs, mOutputs); + mBackend->onExecuteEnd(); + return code; + } +private: + std::shared_ptr mExecution; + std::vector mInputs; + std::vector mOutputs; + std::vector> mContent; + std::vector> mInputContent; + std::shared_ptr mBackend; + bool mNeedResize = false; + Expr* mExpr; +}; +static Tensor::DimensionType getDimType(const Tensor* origin) { + auto dimformat = TensorUtils::getDescribe(origin)->dimensionFormat; + switch (dimformat) { + case MNN_DATA_FORMAT_NHWC: + return Tensor::TENSORFLOW; + case MNN_DATA_FORMAT_NCHW: + return Tensor::CAFFE; + case MNN_DATA_FORMAT_NC4HW4: + return Tensor::CAFFE_C4; + default: + break; + } + return Tensor::CAFFE; +} +class MergeExpr : public Solution{ +public: + MergeExpr(const Optimizer::Merge* merge, int inputSize, int outputSize) { + MNN_ASSERT(nullptr != merge); + MNN_ASSERT(nullptr != merge->backend()); + MNN_ASSERT(nullptr != merge->oplists()); + MNN_ASSERT(nullptr != merge->outputIndexes()); + + //Create tensors + Schedule::ScheduleInfo schedule; 
+ std::vector pipelineInfos; + schedule.allTensors.resize(merge->tensorNumber()); + for (int i=0; itensorNumber(); ++i) { + schedule.allTensors[i].second.reset(new Tensor); + } + pipelineInfos.resize(merge->oplists()->size()); + for (int i = 0; i < merge->oplists()->size(); ++i) { + auto& pipelineInfo = pipelineInfos[i]; + auto op = merge->oplists()->GetAs(i); + if (nullptr != op->inputIndexes()) { + auto data = op->inputIndexes()->data(); + pipelineInfo.inputs.resize(op->inputIndexes()->size()); + for (int j = 0; j < op->inputIndexes()->size(); ++j) { + auto index = data[j]; + schedule.allTensors[index].first += 1; + pipelineInfo.inputs[j] = schedule.allTensors[index].second.get(); + } + } + if (nullptr != op->outputIndexes()) { + auto data = op->outputIndexes()->data(); + pipelineInfo.outputs.resize(op->outputIndexes()->size()); + for (int j = 0; j < op->outputIndexes()->size(); ++j) { + auto index = data[j]; + pipelineInfo.outputs[j] = schedule.allTensors[index].second.get(); + } + } + pipelineInfo.op = op; + } + mOutputs.resize(merge->outputIndexes()->size()); + for (int i=0; ioutputIndexes()->size(); ++i) { + schedule.allTensors[merge->outputIndexes()->data()[i]].first += 1; + mOutputs[i].first = schedule.allTensors[merge->outputIndexes()->data()[i]].second.get(); + } + if (nullptr != merge->inputIndexes()) { + mInputs.resize(merge->inputIndexes()->size()); + for (int i=0; iinputIndexes()->size(); ++i) { + mInputs[i].first = schedule.allTensors[merge->inputIndexes()->data()[i]].second.get(); + mInputs[i].second.reset(new Tensor); + } + } + //Create Backend + auto backendInfo = merge->backend(); + auto creator = MNNGetExtraBackendCreator((MNNForwardType)backendInfo->type()); + if (nullptr == creator) { + mValid = false; + MNN_ERROR("Get Backend Creator Error\n"); + return; + } + Backend::Info info; + info.type = (MNNForwardType)backendInfo->type(); + info.numThread = backendInfo->numberThread(); + info.mode = Backend::Info::INDIRECT; + BackendConfig 
backendConfig; + backendConfig.memory = (BackendConfig::MemoryMode)backendInfo->memroy(); + backendConfig.power = (BackendConfig::PowerMode)backendInfo->power(); + backendConfig.precision = (BackendConfig::PrecisionMode)backendInfo->precision(); + info.user = &backendConfig; + creator->onValid(info); + mDirect = info.mode == Backend::Info::DIRECT; + schedule.pipelineInfo.emplace_back(std::make_pair(info, pipelineInfos)); + mSession.reset(new Session(schedule)); + } + + ~ MergeExpr () { + //Do nothing + } + virtual ErrorCode computeInfo(Expr* expr) override { + MNN_ASSERT(expr->outputSize() == mOutputs.size()); + MNN_ASSERT(expr->inputs().size() == mInputs.size()); + bool needResize = mSession->getNeedResize(); + auto& inputs = expr->inputs(); + if (!needResize) { + for (int i=0; igetInfo(); + auto check = mInputs[i].first; + if (src->dim.size() != check->dimensions()) { + needResize = true; + break; + } + for (int d=0; ddim.size(); ++d) { + if (src->dim[d] != check->length(d)) { + needResize = true; + break; + } + } + if (needResize) { + break; + } + } + } + if (needResize) { + for (int i=0; igetInfo(); + auto dst = mInputs[i].first; + Utils::copyInfoToTensor(dst, src); + } + mSession->setNeedResize(); + auto code = mSession->resize(); + if (NO_ERROR != code) { + return code; + } + } + for (int i=0; ioutputInfo(i), mOutputs[i].second.get()); + } + mResized = false; + return NO_ERROR; + } + ErrorCode prepare(Expr* expr) { + auto inputs = expr->inputs(); + for (int i=0; igetInfo(); + TensorUtils::copyShape(mInputs[i].first, mInputs[i].second.get(), true); + mInputs[i].second->buffer().host = (uint8_t*)src->ptr; + } + for (int i=0; ioutputSize(); ++i) { + expr->outputInfo(i)->ptr = mOutputs[i].second->host(); + } + return NO_ERROR; + } + virtual ErrorCode compute(Expr* expr) override { + if (!mResized) { + auto code = prepare(expr); + if (NO_ERROR != code) { + return code; + } + mResized = true; + } + for (auto& input : mInputs) { + 
input.first->copyFromHostTensor(input.second.get()); + } + auto code = mSession->run(); + if (NO_ERROR != code) { + return code; + } + for (auto& tensor : mOutputs) { + tensor.first->copyToHostTensor(tensor.second.get()); + } + return NO_ERROR; + } + bool valid() const {return mValid;} +private: + std::shared_ptr mSession; + std::vector>> mInputs; + std::vector>> mOutputs; + bool mValid = true; + bool mDirect = true; + bool mResized = false; +}; + +Executor::Executor(std::shared_ptr bn) { + mBackend = bn; +} +Executor:: ~Executor() { + for (auto iter : mSolutions) { + iter.first->setInfoDirty(); + } +} + +Executor::Requirement Executor::onGetRequirement(Expr* expr) const { + Executor::Requirement req; + auto op = expr->get(); + auto inputSize = expr->inputs().size(); + req.contentNeedContent.resize(inputSize); + req.shapeNeedContent.resize(inputSize); + req.supportError.resize(inputSize); + if (op->type() == OpType_Extra) { + for (int i = 0; i < inputSize; ++i) { + req.contentNeedContent[i] = true; + req.shapeNeedContent[i] = false; + req.supportError[i] = false; + } + return req; + } + for (int i = 0; i < inputSize; ++i) { + req.contentNeedContent[i] = SizeComputer::opNeedContent(op->type(), i); + req.shapeNeedContent[i] = false; + if (op->type() != OpType_Concat) { + req.supportError[i] = false; + } else { + req.supportError[i] = true; + } + } + auto needIndexId = SizeComputer::needInputContent(op); + for (auto index : needIndexId) { + if (index < req.shapeNeedContent.size()) { + req.shapeNeedContent[index] = true; + } + } + return req; +} + +ErrorCode Executor::onComputeInfo(Expr* expr) { + if (expr->get()->type() == OpType_Extra) { + auto param = expr->get()->main_as_Extra(); + if (nullptr == param || "MNN" != param->engine()->str()) { + FUNC_PRINT(1); + return NOT_SUPPORT; + } + } + std::lock_guard _l(mMutex); + auto iter = mSolutions.find(expr); + std::shared_ptr solution; + if (iter == mSolutions.end()) { + if (expr->get()->type() != OpType_Extra) { + 
solution.reset(new UnitSolution(expr, mBackend)); + } else { + auto param = expr->get()->main_as_Extra(); + auto blob = param->info(); + auto merge = flatbuffers::GetRoot(blob->data()); + solution.reset(new MergeExpr(merge, expr->inputs().size(), expr->outputSize())); + } + mSolutions[expr] = solution; + } else { + solution = iter->second; + } + return solution->computeInfo(expr); +} +ErrorCode Executor::onComputeContent(Expr* expr) { + std::lock_guard _l(mMutex); + //MNN_PRINT("Compute for %s \n", EnumNameOpType(expr->get()->type())); + auto code = mSolutions[expr]->compute(expr); + return code; +} +void Executor::recycle(Expr* expr) { + std::lock_guard _l(mMutex); + mSolutions.erase(expr); + return; +} + +} // namespace Express +} // namespace MNN diff --git a/express/Expr.cpp b/express/Expr.cpp new file mode 100644 index 000000000..56baacbef --- /dev/null +++ b/express/Expr.cpp @@ -0,0 +1,876 @@ +// +// Expr.cpp +// MNN +// +// Created by MNN on 2019/06/10. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#define FLATBUFFERS_PREFER_PRINTF +#include +#include +#include +#include "core/MNNMemoryUtils.h" +#include "Utils.hpp" +#include +#include "core/FileLoader.hpp" +#include +#include "flatbuffers/util.h" +#include "MNN_generated.h" +#define MNN_OPEN_TIME_TRACE +#include "MNN/AutoTime.hpp" + +//#define MNN_EXPRESS_ERROR_REPORT +static inline std::string numberToString(int index) { + return flatbuffers::NumToString(index); +} + +namespace MNN { +namespace Express { +void Variable::Info::syncSize() { + size = 1; + for (int i=0; iexpr().first->get()) { + mContent->expr().first->mType = type; + return true; + } + auto info = mContent->getInfo(); + if (nullptr == info) { + return false; + } + VARP newVar; + switch (type) { + case INPUT: { + newVar = _Input(info->dim, info->order, info->type); + auto ptr = mContent->readMap(); + if (nullptr != ptr) { + auto dstPtr = newVar->writeMap(); + ::memcpy(dstPtr, ptr, info->size * info->type.bytes()); + } + break; + } 
+ case CONST: { + auto ptr = mContent->readMap(); + if (nullptr == ptr) { + return false; + } + newVar = _Const(ptr, info->dim, info->order, info->type); + break; + } + case TRAINABLE: { + auto ptr = mContent->readMap(); + if (nullptr == ptr) { + return false; + } + newVar = _TrainableParam(ptr, info->dim, info->order, info->type); + break; + } + default: + return false; + } + auto temp = VARP(mContent); + Variable::replace(temp, newVar); + return true; +} + +struct Expr::Inside { + std::vector mInputInfos; + std::vector mOutputInfos; + Executor::Requirement mReq; +}; +Expr::Expr(int outputSize) { + mInside.reset(new Inside); + mInside->mOutputInfos.resize(outputSize); + mOutputNames.resize(outputSize); +} + +Expr::~Expr() { + Executor::getGlobalExecutor()->recycle(this); + mInside.reset(); +} +void Expr::set(const OpT* op) { + MNN_ASSERT(nullptr != op); + flatbuffers::FlatBufferBuilder builder; + auto offset = Op::Pack(builder, op); + builder.Finish(offset); + mExtraBuffer.reset(new char[builder.GetSize()]); + ::memcpy(mExtraBuffer.get(), builder.GetBufferPointer(), builder.GetSize()); + mOp = flatbuffers::GetMutableRoot(mExtraBuffer.get()); + mOpBufferSize = builder.GetSize(); + mContentDirty = true; + mInfoDirty = true; +} +Variable::Info* Expr::outputInfo(int index) { + return mInside->mOutputInfos.data() + index; +} + +void Expr::_addLinkForInputs(EXPRP expr) { + auto inputs = expr->inputs(); + for (int i=0; imFrom; + for (int j=0; jmTo.size(); ++j) { + auto ref = inputExpr->mTo[j].lock(); + if (nullptr == ref) { + inputExpr->mTo[j] = WeakEXPRP(expr); + findEmpty = true; + break; + } + } + if (!findEmpty) { + inputExpr->mTo.emplace_back(WeakEXPRP(expr)); + } + } +} +EXPRP Expr::create(Variable::Info&& info) { + EXPRP expr(new Expr(1)); + expr->mOp = nullptr; + auto originPtr = info.ptr; + expr->mInside->mOutputInfos[0] = std::move(info); + auto& dstInfo = expr->mInside->mOutputInfos[0]; + dstInfo.syncSize(); + if (dstInfo.size > 0) { + 
expr->mExtraBuffer.reset(new char[dstInfo.size * dstInfo.type.bytes()]); + expr->mInside->mOutputInfos[0].ptr = expr->mExtraBuffer.get(); + expr->mInfoDirty = false; + } else { + expr->mInside->mOutputInfos[0].ptr = nullptr; + expr->mInfoDirty = true; + } + if (nullptr == originPtr) { + expr->mType = VARP::INPUT; + expr->mContentDirty = true; + return expr; + } + expr->mType = VARP::CONST; + expr->mContentDirty = false; + ::memcpy(expr->mInside->mOutputInfos[0].ptr, originPtr, dstInfo.size * dstInfo.type.bytes()); + return expr; +} + +EXPRP Expr::create(const OpT* op, std::vector inputs, int outputSize) { + EXPRP expr(new Expr(outputSize)); + if (OpType_Input == op->type) { + Variable::Info info; + info.dim = op->main.AsInput()->dims; + if (info.dim.size() >= 1 && -1 == info.dim[0]) { + info.dim[0] = 1; + } + info.order = Utils::revertFormat(op->main.AsInput()->dformat); + info.ptr = nullptr; + info.type = Utils::revertDataType(op->main.AsInput()->dtype); + return create(std::move(info)); + } + if (OpType_Const == op->type || OpType_TrainableParam == op->type) { + Variable::Info info; + info.dim = op->main.AsBlob()->dims; + info.order = Utils::revertFormat(op->main.AsBlob()->dataFormat); + info.ptr = nullptr; + info.type = Utils::revertDataType(op->main.AsBlob()->dataType); + switch (op->main.AsBlob()->dataType) { + case DataType_DT_INT8: + info.ptr = (void*)op->main.AsBlob()->int8s.data(); + break; + case DataType_DT_INT32: + info.ptr = (void*)op->main.AsBlob()->int32s.data(); + break; + case DataType_DT_UINT8: + info.ptr = (void*)op->main.AsBlob()->uint8s.data(); + break; + case DataType_DT_FLOAT: + info.ptr = (void*)op->main.AsBlob()->float32s.data(); + break; + default: + break; + } + auto expr = create(std::move(info)); + if (OpType_TrainableParam == op->type) { + expr->mType = VARP::TRAINABLE; + } + return expr; + } + expr->set(op); + expr->mInputs = std::move(inputs); + _addLinkForInputs(expr); + return expr; +} +void Expr::setName(const std::string& name) { 
+ mName = name; +} +bool Expr::requireInfo() { + if (nullptr == mOp) { + return true; + } + if (!mInfoDirty) { + return true; + } + if (!mValid) { + return false; + } + bool ready = true; + mInside->mInputInfos.resize(mInputs.size()); + if (mInside->mReq.shapeNeedContent.empty()) { + mInside->mReq = Executor::getGlobalExecutor()->onGetRequirement(this); + } + for (int i = 0; i < mInputs.size(); ++i) { + if (nullptr == mInputs[i] || nullptr == mInputs[i]->mFrom) { + // The Variable is set nullptr by api + return false; + } + mInside->mInputInfos[i] = mInputs[i]->getInfo(); + if (nullptr == mInside->mInputInfos[i] && (!mInside->mReq.supportError[i])) { +#ifdef MNN_EXPRESS_ERROR_REPORT + MNN_ERROR("%s, %d input not ready\n", mName.c_str(), i); +#endif + mValid = false; + return false; + } + } + for (int i = 0; i < mInputs.size(); ++i) { + auto& v = mInputs[i]; + if (mInside->mReq.shapeNeedContent[i]) { + auto res = v->expr().first->requireCompute(); + if (!res) { +#ifdef MNN_EXPRESS_ERROR_REPORT + MNN_ERROR("%s, Error for compute shape %d\n", mName.c_str(), i); +#endif + ready = false; + mValid = false; + break; + } + } + } + if (!ready) { + return false; + } + //MNN_PRINT("Info %s, %p Start\n", mName.c_str(), this); + auto res = Executor::getGlobalExecutor()->onComputeInfo(this); + //MNN_PRINT("Info Compute %s\n", mName.c_str()); + + if (NO_ERROR == res) { + mInfoDirty = false; + } else { + mValid = false; + } + return NO_ERROR == res; +} + +bool Expr::requireCompute() { + if (nullptr == mOp) { + if (mType == VARP::INPUT) { + return !mContentDirty; + } + return true; + } + if ((!mContentDirty) && mValid) { + return true; + } + if (!mValid) { + return false; + } +#ifdef DEBUG_OVERFLOW + if (mTo.size() > 1) { + if (mName.size() > 0) { + MNN_PRINT("output: %d, type:%s, name: %s\n", mTo.size(), EnumNameOpType(mOp->type()), mName.c_str()); + } else { + MNN_PRINT("output: %d, type:%s\n", mTo.size(), EnumNameOpType(mOp->type())); + } + for (auto t : mTo) { + auto tp = 
t.lock(); + if (nullptr == tp) { + MNN_PRINT("nullptr\t"); + } else { + MNN_PRINT("%s\n", EnumNameOpType(tp->get()->type())); + } + } + MNN_PRINT("\n"); + //FUNC_PRINT(mTo.size()); + } +#endif + bool res = requireInfo(); + if (!res) { + return false; + } + for (int i = 0; i < mInputs.size(); ++i) { + if (mInside->mReq.contentNeedContent[i]) { + auto& input = mInputs[i]; + auto expr = input->expr().first; + res = expr->requireCompute(); + if (!res) { +#ifdef MNN_EXPRESS_ERROR_REPORT + MNN_ERROR("%s compute input %d error , \n", mName.c_str(), i); +#endif + if (!mInside->mReq.supportError[i]) { + mValid = false; + return false; + } + } + } + } + auto code = Executor::getGlobalExecutor()->onComputeContent(this); + //MNN_PRINT("Compute %s, %p End\n", mName.c_str(), this); + res = code == NO_ERROR; + if (!res) { +#ifdef MNN_EXPRESS_ERROR_REPORT + MNN_ERROR("Error for compute %s\n", mName.c_str()); +#endif + mValid = false; + return false; + } + mContentDirty = false; + return true; +} + +size_t Variable::linkNumber() const { + return mFrom->outputs().size(); +} +const std::vector& Variable::toExprs() const { + return mFrom->outputs(); +} + +VARP Variable::create(EXPRP expr, int index) { + VARP res(new Variable(expr, index)); + return res; +} +void Expr::replace(EXPRP old, EXPRP from) { + if (old.get() == from.get()) { + return; + } + for (auto input : old->inputs()) { + for (int j=0; jmFrom->mTo.size(); ++j) { + auto ref = input->mFrom->mTo[j].lock(); + if (ref.get() == old.get()) { + input->mFrom->mTo[j].reset(); + } + } + } + for (auto input : from->inputs()) { + bool hasSet = false; + for (int j=0; jmFrom->mTo.size(); ++j) { + auto ref = input->mFrom->mTo[j].lock(); + if (ref.get() == old.get()) { + hasSet = true; + break; + } + } + if (!hasSet) { + for (int j=0; jmFrom->mTo.size(); ++j) { + auto ref = input->mFrom->mTo[j].lock(); + if (nullptr == ref) { + input->mFrom->mTo[j] = WeakEXPRP(old); + hasSet = true; + break; + } + } + } + if (!hasSet) { + 
input->mFrom->mTo.emplace_back(WeakEXPRP(old)); + } + } + Executor::getGlobalExecutor()->recycle(old.get()); + old->mOp = from->mOp; + old->mName = from->mName; + old->mOutputNames = from->mOutputNames; + old->mExtraBuffer = from->mExtraBuffer; + old->mOpBufferSize = from->mOpBufferSize; + old->mType = from->mType; + old->mInside = from->mInside; + old->mContentDirty = from->mContentDirty; + old->mInfoDirty = true; + old->mInputs = from->mInputs; + old->visitOutputs([&](EXPRP expr, int index) { + if (expr->mInfoDirty) { + return false; + } + expr->mContentDirty = true; + expr->mInfoDirty = true; + return true; + }); +} + +void Variable::setName(const std::string& name) { + mFrom->mOutputNames[mFromIndex] = name; + if (mFrom->name().empty()) { + mFrom->setName(name); + } +} +const std::string& Variable::name() const { + return mFrom->outputName(mFromIndex); +} +bool Variable::input(VARP src) { + if (nullptr != mFrom->get() && VARP::INPUT != mFrom->mType) { + MNN_ERROR("Can't input to no-input op\n"); + return false; + } + if (nullptr == src) { + /*Close the Input*/ + mFrom->visitOutputs([](EXPRP expr, int index) { + auto recurse = expr->mValid; expr->mValid = false; + return recurse; + }); + mFrom->mValid = false; + return false; + } + auto info = src->getInfo(); + std::shared_ptr tempInfo; + bool needCopy = true; + if (nullptr == info || 0 == info->size) { + tempInfo.reset(new Variable::Info); + tempInfo->type = halide_type_of(); + info = tempInfo.get(); + needCopy = false; + } + auto dstInfo = getInfo(); + bool needChange = nullptr == dstInfo || info->order != dstInfo->order || info->dim.size() != dstInfo->dim.size(); + if (!needChange) { + for (int i=0; idim.size(); ++i) { + if (dstInfo->dim[i] != info->dim[i]) { + needChange = true; + break; + } + } + } + if (needChange) { + bool needAlloc = info->size * info->type.bytes() > mFrom->mInside->mOutputInfos[0].size * mFrom->mInside->mOutputInfos[0].type.bytes(); + mFrom->mInside->mOutputInfos[0] = *info; + if 
(needAlloc) { + mFrom->mExtraBuffer.reset(new char[info->size * info->type.bytes()]); + } + mFrom->mInside->mOutputInfos[0].ptr = mFrom->mExtraBuffer.get(); + } + if (needCopy) { + auto dstPtr = writeInternal(false); + auto srcPtr = src->readMap(); + if (nullptr == dstPtr || nullptr == srcPtr) { + MNN_ERROR("Alloc memory error or compute src error in Variable::Input\n"); + return false; + } + ::memcpy(dstPtr, srcPtr, info->size * info->type.bytes()); + } + if (needChange) { + mFrom->visitOutputs([](EXPRP expr, int index) { return expr->setInfoDirty(); }); + } else { + informDirty(); + } + return true; +} + +void Variable::replace(VARP dst, VARP src) { + if (nullptr == src) { + dst->setExpr(nullptr, 0); + return; + } + Expr::replace(dst->mFrom, src->mFrom); + dst->mFromIndex = src->mFromIndex; +} + +const Variable::Info* Variable::getInfo() { + if (nullptr == mFrom) { + return nullptr; + } + auto res = mFrom->requireInfo(); + if (!res) { + return nullptr; + } + return mFrom->mInside->mOutputInfos.data() + mFromIndex; +} + +bool Variable::resize(INTS dims) { + if (nullptr != mFrom->get() && VARP::INPUT != mFrom->mType) { + MNN_ERROR("Can't resize variable not from input\n"); + return false; + } + auto& info = mFrom->mInside->mOutputInfos[0]; + if (dims.size() == info.dim.size()) { + bool theSame = true; + for (int i=0; imExtraBuffer.reset(new char[info.size * info.type.bytes()]); + info.ptr = mFrom->mExtraBuffer.get(); + + mFrom->mContentDirty = true; + mFrom->mValid = true; + mFrom->mInside->mInputInfos.clear(); + + mFrom->visitOutputs([](EXPRP expr, int index) { return expr->setInfoDirty(); }); + return true; +} +void Expr::visit(EXPRP expr, const std::function& before, const std::function& after) { + bool next = before(expr); + if (!next) { + return; + } + for (int i = 0; i < expr->inputs().size(); ++i) { + visit(expr->inputs()[i]->mFrom, before, after); + } + after(expr); +} + +void* Variable::readInternal() { + if (nullptr == mFrom->get()) { + if 
(mFrom->mContentDirty) { + return nullptr; + } + return mFrom->outputInfo(mFromIndex)->ptr; + } + auto res = mFrom->requireCompute(); + if (!res) { + return nullptr; + } + return mFrom->outputInfo(mFromIndex)->ptr; +} + +void Variable::informDirty() { + mFrom->visitOutputs([](EXPRP expr, int index) { + auto needRecurse = expr->setContentDirty(index); + return needRecurse; + }); +} + +void* Variable::writeInternal(bool inform) { + if (inform) { + informDirty(); + } + mFrom->mContentDirty = false; + return mFrom->mInside->mOutputInfos[0].ptr; +} + +void Variable::unMap() { + //mFrom->inside()->onUnMapContent(mFromIndex); +} + +void Expr::visitOutputs(const std::function& visit) { + for (auto iter = mTo.begin(); iter != mTo.end();) { + auto expr = iter->lock(); + if (nullptr == expr) { + iter = mTo.erase(iter); + continue; + } + bool recurse = false; + auto inputs = expr->inputs(); + for (int i=0; imFrom.get() == this) { + recurse = recurse || visit(expr, i); + } + } + if (recurse) { + expr->visitOutputs(visit); + } + iter++; + } +} +bool Expr::setContentDirty(int inputIndex) { + if (mContentDirty) { + return false; + } + if (nullptr != mInside) { + if (mInside->mReq.shapeNeedContent[inputIndex]) { + visitOutputs([](EXPRP expr, int index) { return expr->setInfoDirty(); }); + return setInfoDirty(); + } + if (!mInside->mReq.contentNeedContent[inputIndex]) { + return false; + } + } + mContentDirty = true; + return true; +} +bool Expr::setInfoDirty() { + if (mInfoDirty && mValid) { + //MNN_PRINT("End Info Dirty for %s\n", mName.c_str()); + return false; + } + //MNN_PRINT("Set Info Dirty for %s\n", mName.c_str()); + mInfoDirty = true; + mContentDirty = true; + mValid = true; + return true; +} + +std::vector Variable::load(const char* fileName) { + AUTOTIME; + FileLoader loader(fileName); + if (!loader.valid()) { + MNN_ERROR("Error for open %s\n", fileName); + return {}; + } + loader.read(); + if (!loader.valid()) { + return {}; + } + AutoStorage buffer; + 
loader.merge(buffer); + if (buffer.get() == nullptr) { + return {}; + } + flatbuffers::Verifier verify((const uint8_t*)(buffer.get()), buffer.size()); + if (false == VerifyNetBuffer(verify)) { + MNN_PRINT("Invalidate buffer to create variable\n"); + return {}; + } + std::unique_ptr source(UnPackNet(buffer.get())); + if (nullptr == source) { + return {}; + } + if (source->oplists.empty()) { + MNN_ERROR("Invalid net\n"); + return {}; + } + // FUNC_PRINT(source->oplists.size()); + + auto opSize = source->oplists.size(); + auto tensorCount = source->tensorName.size(); + if (tensorCount == 0) { + tensorCount = source->tensorNumber; + } + std::vector variable; + variable.reserve(tensorCount); + std::map variableMap; + + // Generate All Exprs by order of net + for (int i = 0; i < opSize; ++i) { + std::vector inputs; + auto op = source->oplists[i].get(); + for (int index = 0; index < op->inputIndexes.size(); ++index) { + auto inputIndex = op->inputIndexes[index]; + if (variableMap.find(inputIndex) == variableMap.end()) { + MNN_ERROR("Can't find variable for %s, the graph is error\n", op->name.c_str()); + break; + } + inputs.emplace_back(variableMap[inputIndex]); + } + EXPRP expr = Expr::create(source->oplists[i].get(), inputs, (int)op->outputIndexes.size()); + expr->setName(source->oplists[i]->name); + + for (int index = 0; index < op->outputIndexes.size(); ++index) { + auto outputIndex = op->outputIndexes[index]; + if (variableMap.find(outputIndex) == variableMap.end()) { + auto newVariable = Variable::create(expr, index); + if (source->tensorName.size() > outputIndex) { + newVariable->setName(source->tensorName[outputIndex]); + } + variableMap[outputIndex] = newVariable; + variable.emplace_back(newVariable); + } + } + } + return variable; +} +std::map Variable::loadMap(const char* fileName) { + AUTOTIME; + auto variables = load(fileName); + std::map varMap; + for (auto v : variables) { + varMap[v->name()] = v; + } + return varMap; +} +std::vector 
Variable::mapToSequence(const std::map& source) { + std::vector outputs; + outputs.reserve(source.size()); + for (auto& iter : source) { + outputs.emplace_back(iter.second); + } + return outputs; +} +void Variable::save(const std::vector& vars, NetT* dest) { + auto executeOrder = getExecuteOrder(vars); + + // Get Expr - TensorOffset Map + std::map varIndexInfo; + { + int tensorOffset = 0; + for (int i=0; ioutputSize(); + varIndexInfo[expr] = tensorOffset; + tensorOffset += outputSize; + } + dest->tensorName.resize(tensorOffset); + } + + // Create All Op + for (int index = 0; index < executeOrder.size(); ++index) { + auto expr = executeOrder[index]; + auto mOp = expr->get(); + std::unique_ptr op; + if (nullptr != mOp) { + op.reset(mOp->UnPack()); + } else { + MNN_ASSERT(1 == expr->outputSize()); + auto& info = expr->mInside->mOutputInfos[0]; + op.reset(new OpT); + if (expr->mType != VARP::INPUT) { + auto blob = new BlobT; + blob->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(info.order); + blob->dims = info.dim; + if (info.type.code == halide_type_float) { + blob->dataType = DataType_DT_FLOAT; + blob->float32s.resize(info.size); + ::memcpy(blob->float32s.data(), info.ptr, info.size * sizeof(float)); + } else if (info.type.code == halide_type_int) { + blob->dataType = DataType_DT_INT32; + blob->int32s.resize(info.size); + ::memcpy(blob->int32s.data(), info.ptr, info.size * sizeof(int)); + } + else if (info.type.code == halide_type_uint && info.type.bits == 8) { + blob->dataType = DataType_DT_UINT8; + blob->uint8s.resize(info.size); + ::memcpy(blob->uint8s.data(), info.ptr, info.size * sizeof(uint8_t)); + } + op->type = OpType_Const; + if (expr->mType == VARP::TRAINABLE) { + op->type = OpType_TrainableParam; + } + op->main.type = OpParameter_Blob; + op->main.value = blob; + } else { + op->type = OpType_Input; + op->main.type = OpParameter_Input; + op->main.value = new InputT; + op->main.AsInput()->dtype = (MNN::DataType)Utils::convertDataType(info.type); + 
MNN_ASSERT(op->main.AsInput()->dtype != DataType_DT_INVALID); + op->main.AsInput()->dims = info.dim; + op->main.AsInput()->dformat = (MNN_DATA_FORMAT)Utils::convertFormat(info.order); + } + } + op->name = expr->name(); + op->inputIndexes.resize(expr->inputs().size()); + for (int i = 0; i < op->inputIndexes.size(); ++i) { + auto inputExpr = expr->inputs()[i]->expr(); + op->inputIndexes[i] = varIndexInfo[inputExpr.first] + inputExpr.second; + } + if (op->name.empty()) { + op->name = EnumNameOpType(op->type) + numberToString(index+1); + } + op->outputIndexes.resize(expr->outputSize()); + auto tensorIndexOffset = varIndexInfo[expr]; + for (int v=0; voutputSize(); ++v) { + op->outputIndexes[v] = tensorIndexOffset + v; + dest->tensorName[tensorIndexOffset+v] = expr->outputName(v); + } + dest->oplists.emplace_back(std::move(op)); + } + + // Fill Empty Tensor Name With Default Op Name + for (int index = 0; index < executeOrder.size(); ++index) { + auto expr = executeOrder[index]; + auto op = dest->oplists[index].get(); + auto tensorIndexOffset = varIndexInfo[expr]; + for (int v=0; voutputSize(); ++v) { + auto index = tensorIndexOffset + v; + if (dest->tensorName[index].empty()) { + if (v == 0) { + dest->tensorName[index] = op->name; + } else { + dest->tensorName[index] = op->name + numberToString(v); + } + } + } + } +} +void Variable::save(const std::vector& vars, const char* fileName) { + std::unique_ptr net(new NetT); + save(vars, net.get()); + // FUNC_PRINT(net->oplists.size()); + flatbuffers::FlatBufferBuilder builder(1024); + auto offset = Net::Pack(builder, net.get()); + builder.Finish(offset); + // TODO, use FileWriter instead + FILE* f = fopen(fileName, "wb"); + if (nullptr == f) { + MNN_ERROR("Open %s error\n", fileName); + return; + } + static const size_t block = 4096; + size_t totalSize = builder.GetSize(); + size_t blockSize = UP_DIV(totalSize, block); + for (size_t i = 0; i < blockSize; ++i) { + size_t sta = block * i; + size_t fin = std::min(sta + block, 
totalSize); + if (fin > sta) { + auto realSize = fwrite((const char*)builder.GetBufferPointer() + sta, 1, fin - sta, f); + if (realSize != fin - sta) { + MNN_ERROR("Write %s error\n", fileName); + } + } + } + fclose(f); +} +std::pair, std::map> Variable::getInputAndOutput(const std::map& allVariable) { + std::pair, std::map> res; + for (auto& iter : allVariable) { + auto var = iter.second; + if (var->expr().first->get() == nullptr && var->expr().first->mType == VARP::INPUT) { + res.first[var->name()] = var; + } + if (var->linkNumber() == 0) { + res.second[var->name()] = var; + } + } + return res; +} + +std::vector Variable::getExecuteOrder(const std::vector& outputs) { + std::vector sequence; + for (auto output : outputs) { + Expr::visit( + output->mFrom, [](EXPRP expr) { return !expr->visited(); }, + [&sequence](EXPRP expr) { + //FUNC_PRINT_ALL(var->name().c_str(), s); + if (!expr->visited()) { + sequence.emplace_back(expr); + expr->setVisited(true); + } + return true; + }); + } + for (auto expr : sequence) { + expr->setVisited(false); + } + return sequence; +} + +VARP VARP::operator+(VARP var) const { + return _Add(VARP(mContent), var); +} +VARP VARP::operator-(VARP var) const { + return _Subtract(VARP(mContent), var); +} +VARP VARP::operator*(VARP var) const { + return _Multiply(VARP(mContent), var); +} +VARP VARP::operator/(VARP var) const { + return _Divide(VARP(mContent), var); +} +VARP VARP::mean(INTS dims) const { + return _ReduceMean(VARP(mContent), dims); +} +VARP VARP::sum(INTS dims) const { + return _ReduceSum(VARP(mContent), dims); +} + +} // namespace Express +} // namespace MNN diff --git a/express/MathOp.cpp b/express/MathOp.cpp new file mode 100644 index 000000000..8b10bd237 --- /dev/null +++ b/express/MathOp.cpp @@ -0,0 +1,822 @@ +// +// MathOp.cpp +// MNN +// +// Created by MNN on 2019/06/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include +#include "MNN_generated.h" + +namespace MNN { +namespace Express { +static DataType _convertDataType(halide_type_t type) { + if (type.code == halide_type_float) { + return DataType_DT_FLOAT; + } + if (type.code == halide_type_uint && type.bits == 8) { + return DataType_DT_UINT8; + } + if (type.code == halide_type_int && type.bits == 8) { + return DataType_DT_INT8; + } + if (type.code == halide_type_int && type.bits == 32) { + return DataType_DT_INT32; + } + return DataType_DT_INVALID; +} +static VARP _Binary(VARP x, VARP y, BinaryOpOperation operation) { + std::unique_ptr op(new OpT); + op->main.type = OpParameter_BinaryOp; + op->type = OpType_BinaryOp; + op->main.value = new BinaryOpT; + op->main.AsBinaryOp()->opType = operation; + op->main.AsBinaryOp()->T = DataType_DT_FLOAT; + return (Variable::create(Expr::create(op.get(), {x, y}))); +} +static VARP _Unary(VARP x, UnaryOpOperation operation) { + std::unique_ptr op(new OpT); + op->main.type = OpParameter_UnaryOp; + op->type = OpType_UnaryOp; + op->main.value = new UnaryOpT; + op->main.AsUnaryOp()->opType = operation; + op->main.AsUnaryOp()->T = DataType_DT_FLOAT; + return (Variable::create(Expr::create(op.get(), {x}))); +} +static VARP _Reduce(VARP x, INTS dim, ReductionType type, bool keepDim) { + std::unique_ptr op(new OpT); + op->main.type = OpParameter_ReductionParam; + op->type = OpType_Reduction; + op->main.value = new ReductionParamT; + op->main.AsReductionParam()->dType = DataType_DT_FLOAT; + op->main.AsReductionParam()->operation= type; + op->main.AsReductionParam()->dim = dim; + op->main.AsReductionParam()->keepDims = keepDim; + return (Variable::create(Expr::create(op.get(), {x}))); +} +static VARP _Eltwise(VARP a, VARP b, EltwiseType type, std::vector coeff) { + std::unique_ptr op(new OpT); + op->main.type = OpParameter_Eltwise; + op->type = OpType_Eltwise; + op->main.value = new EltwiseT; + 
op->main.AsEltwise()->type = type; + op->main.AsEltwise()->coeff = coeff; + return (Variable::create(Expr::create(std::move(op), {a, b}))); +} +/*Casts a variable to a new type. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64, Halide_Type_Uint8 +dtype: The destination type. The list of supported dtypes is the same as x. +Returns: +A variable with same shape as x and same type as dtype. +*/ +VARP _Cast(VARP x, halide_type_t dtype) { + std::unique_ptr op(new OpT); + op->main.type = OpParameter_CastParam; + op->type = OpType_Cast; + op->main.value = new CastParamT; + op->main.AsCastParam()->dstT = _convertDataType(dtype); + return (Variable::create(Expr::create(std::move(op), {x}))); +} + +/*Computes the absolute value of a variable. +Given a variable of integer or floating-point values, this operation returns a variable of the same type, +where each element contains the absolute value of the corresponding element in the input. +x = MNN.const((-1.0, -2.0, 3.0), (3, )) +x = MNN.abs(x) # (1.0, 2.0, 3.0) +Args: +x: A variable of type Halide_Type_Int or Halide_Type_Float +Returns: +A variable the same size, type as x with absolute values. +*/ +VARP _Abs(VARP x) +{ + return _Unary(x, UnaryOpOperation_ABS); +} +/*Computes numerical negative value element-wise. +x = MNN.const((-1.0, -2.0, 3.0), (3, )) +x = MNN.negative(x) #(1.0, 2.0, -3.0) +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Negative(VARP x) +{ + return _Unary(x, UnaryOpOperation_NEG); +} +/*Returns element-wise largest integer not greater than x. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Floor(VARP x) +{ + return _Unary(x, UnaryOpOperation_FLOOR); +} +/*Returns element-wise smallest integer not less than x. +Args: +x: A variable. 
Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Ceil(VARP x) +{ + return _Unary(x, UnaryOpOperation_CEIL); +} + +/*Computes square of x element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Square(VARP x) +{ + return _Unary(x, UnaryOpOperation_SQUARE); +} + +/*Computes square root of x element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Sqrt(VARP x) +{ + return _Unary(x, UnaryOpOperation_SQRT); +} + +/*Computes reciprocal of square root of x element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Rsqrt(VARP x) +{ + return _Unary(x, UnaryOpOperation_RSQRT); +} + +/*Computes exponential of x element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Exp(VARP x) +{ + return _Unary(x, UnaryOpOperation_EXP); +} + +/*Computes natural logarithm of x element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Log(VARP x) +{ + return _Unary(x, UnaryOpOperation_LOG); +} + +/*Computes sine of x element-wise. +Given an input variable, this function computes sine of every element in the variable. +Input range is (-inf, inf) and output range is [-1,1]. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Sin(VARP x) +{ + return _Unary(x, UnaryOpOperation_SIN); +} + +/*Computes cos of x element-wise. 
+Given an input variable, this function computes cosine of every element in the variable. +Input range is (-inf, inf) and output range is [-1,1]. If input lies outside the boundary, nan is returned. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Cos(VARP x) +{ + return _Unary(x, UnaryOpOperation_COS); +} + +/*Computes tan of x element-wise. +Given an input variable, this function computes tangent of every element in the variable. +Input range is (-inf, inf) and output range is (-inf, inf). If input lies outside the boundary, nan is returned. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Tan(VARP x) +{ + return _Unary(x, UnaryOpOperation_TAN); +} + +/*Computes the trignometric inverse sine of x element-wise. +The asin operation returns the inverse of sin, such that if y = sin(x) then, x = asin(y). +Note: The output of asin will lie within the invertible range of sine, i.e [-pi/2, pi/2]. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Asin(VARP x) +{ + return _Unary(x, UnaryOpOperation_ASIN); +} +/*Computes acos of x element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Note: The output of atan will lie within the invertible range of tan, i.e (0.0, pi). +Returns: +A variable. Has the same type as x. +*/ +VARP _Acos(VARP x) +{ + return _Unary(x, UnaryOpOperation_ACOS); +} + + +/*Computes sign of x eltment-wise + sign(x) = 0 if x=0 + sign(x) =-1 if x<0 + sign(x) = 1 if x>0 + */ +VARP _Sign(VARP x) { + return _Unary(x, UnaryOpOperation_SIGN); +} + +/*Computes the trignometric inverse tangent of x element-wise. +The atan operation returns the inverse of tan, such that if y = tan(x) then, x = atan(y). 
+Note: The output of atan will lie within the invertible range of tan, i.e (-pi/2, pi/2). +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Atan(VARP x) +{ + return _Unary(x, UnaryOpOperation_ATAN); +} + +/*Computes the reciprocal of x element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Reciprocal(VARP x) +{ + return _Unary(x, UnaryOpOperation_RECIPROCAL); +} + +/*Computes natural logarithm of (1 + x) element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Int or Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Log1p(VARP x) +{ + return _Unary(x, UnaryOpOperation_LOG1P); +} + +/*Computes hyperbolic tangent of x element-wise. +Given an input variable, this function computes hyperbolic tangent of every element in the variable. +Input range is [-inf, inf] and output range is [-1,1]. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Tanh(VARP x) { + std::unique_ptr op(new OpT); + op->type = OpType_TanH; + return (Variable::create(Expr::create(op.get(), {x}))); +} +/*Computes sigmoid of x element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float +Returns: +A variable. Has the same type as x. +*/ +VARP _Sigmoid(VARP x) { + std::unique_ptr op(new OpT); + op->type = OpType_Sigmoid; + return (Variable::create(Expr::create(op.get(), {x}))); +} + + +/*Returns x + y element-wise. +Args: +x: A variable. Must be one of the following types: +Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64, Halide_Type_Uint8. +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. 
+*/ +VARP _Add(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_ADD); +} + +/*Returns x - y element-wise. +Args: +x: A variable. Must be one of the following types: +Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64, Halide_Type_Uint8. +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. +*/ +VARP _Subtract(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_SUB); +} + +/*Returns x * y element-wise. +Args: +x: A variable. Must be one of the following types: +Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64, Halide_Type_Uint8. +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. +*/ +VARP _Multiply(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_MUL); +} + +/*Computes Python style division of x by y. +Args: +x: A variable. Must be one of the following types: +Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64, Halide_Type_Uint8. +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. +*/ +VARP _Divide(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_REALDIV); +} + +/*Computes the power of one value to another. +Args: +x: A variable. Must be one of the following types: +Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64 +y: A variable. Must be one of the following types: +Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64 +Returns: +A variable. Has the same type as x. +*/ +VARP _Pow(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_POW); +} + +/*Returns the min of x and y (i.e. x < y ? x : y) element-wise. +Args: +x: A variable. Must be one of the following types: +Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64 +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. +*/ +VARP _Minimum(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_MINIMUM); +} +/*Returns the max of x and y (i.e. x > y ? x : y) element-wise. 
+Args: +x: A variable. Must be one of the following types: +Halide_Type_Int or Halide_Type_Float, Halide_Type_Int64 +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. +*/ +VARP _Maximum(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_MAXIMUM); +} + +/*Adds bias to value. +This is (mostly) a special case of add where bias is restricted to 1-D. +Broadcasting is supported, so value may have any number of dimensions. +Unlike add, the type of bias is allowed to differ from value in the case where both types are quantized. +Args: +value: A variable with type Halide_Type_Float, Halide_Type_Int +bias: A 1-D variable with size matching the channel dimension of value. +Must be the same type as value unless value is a quantized type, in which case a different quantized type may be used. +Returns: +A variable with the same type as value. +*/ +VARP _BiasAdd(VARP value, VARP bias) { + return _Add(value, bias); +} + +/*Returns the truth value of (x > y) element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int +y: A variable. Must have the same type as x. +Returns: +A variable of type bool. +*/ + +VARP _Greater(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_GREATER); +} + +/*Returns the truth value of (x >= y) element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int +y: A variable. Must have the same type as x. +Returns: +A variable of type bool. +*/ + +VARP _GreaterEqual(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_GREATER_EQUAL); +} + +/*Returns the truth value of (x < y) element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int +y: A variable. Must have the same type as x. +Returns: +A variable of type bool. +*/ + +VARP _Less(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_LESS); +} + +/*Returns the value of (x // y) element-wise. 
+Args: +x: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. +*/ + +VARP _FloorDiv(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_FLOORDIV); +} + +/*Returns the value of (x - y)(x - y) element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. +*/ + +VARP _SquaredDifference(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_SquaredDifference); +} + +/*Returns the truth value of (x == y) element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int +y: A variable. Must have the same type as x. +Returns: +A variable of type bool. +*/ + +VARP _Equal(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_EQUAL); +} + +/*Returns the truth value of (x <= y) element-wise. +Args: +x: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int +y: A variable. Must have the same type as x. +Returns: +A variable of type bool. +*/ + +VARP _LessEqual(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_LESS_EQUAL); +} + +/*Returns element-wise remainder of division +Args: +x: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int +y: A variable. Must have the same type as x. +Returns: +A variable. Has the same type as x. +*/ + +VARP _FloorMod(VARP x, VARP y) { + return _Binary(x, y, BinaryOpOperation_FLOORMOD); +} + +/*Computes the sum of elements across dimensions of a variable +Reduces input_variable along the dimensions given in axis. +Unless keepdims is true, the rank of the variable is reduced by 1 for each entry in axis. +If keepdims is true, the reduced dimensions are retained with length 1. 
+If axis is empty, all dimensions are reduced, and a variable with a single element is returned. +Args: +input_variable: The variable to reduce. Should have numeric type. +axis: The dimensions to reduce. If empty(the default), reduces all dimensions. + Must be in the range [-rank(input_variable), rank(input_variable)). +keepdims: If true, retains reduced dimensions with length 1. +Returns: +The reduced variable, of the same dtype as the input_variable. +*/ +VARP _ReduceSum(VARP input_variable, INTS axis, bool keepdims) { + return _Reduce(input_variable, axis, ReductionType_SUM, keepdims); +} +//ruhuan:TODO: ReductionType_ASUM and ReductionType_SUMSQ + + + +/*Computes the mean of elements across dimensions of a variable. +Reduces input_variable along the dimensions given in axis. +Unless keepdims is true, the rank of the variable is reduced by 1 for each entry in axis. +If keepdims is true, the reduced dimensions are retained with length 1. +If axis is empty, all dimensions are reduced, and a variable with a single element is returned. +Args: +input_variable: The variable to reduce. Should have numeric type. +axis: The dimensions to reduce. If empty(the default), reduces all dimensions. + Must be in the range [-rank(input_variable), rank(input_variable)). +keepdims: If true, retains reduced dimensions with length 1. +Returns: +The reduced variable, of the same dtype as the input_variable. +*/ +VARP _ReduceMean(VARP input_variable, INTS axis, bool keepdims) { + return _Reduce(input_variable, axis, ReductionType_MEAN, keepdims); +} + +/*Computes the variance of elements across dimensions of a variable. +Reduces input_variable along the dimensions given in axis. +Unless keepdims is true, the rank of the variable is reduced by 1 for each entry in axis. +If keepdims is true, the reduced dimensions are retained with length 1. +If axis is empty, all dimensions are reduced, and a variable with a single element is returned. +Args: +input_variable: The variable to reduce. 
Should have numeric type. +axis: The dimensions to reduce. If empty(the default), reduces all dimensions. + Must be in the range [-rank(input_variable), rank(input_variable)). +keepdims: If true, retains reduced dimensions with length 1. +Returns: +The reduced variable, of the same dtype as the input_variable. +*/ +VARP _ReduceVariance(VARP input_variable, INTS axis, bool keepdims) { + auto mean = _ReduceMean(input_variable, axis, true); // to use broadcast of subtract + auto variance = _ReduceMean(_Square(_Subtract(input_variable, mean)), axis, keepdims); + return variance; +} + +/*Computes the maximum of elements across dimensions of a variable. +Reduces input_variable along the dimensions given in axis. +Unless keepdims is true, the rank of the variable is reduced by 1 for each entry in axis. +If keepdims is true, the reduced dimensions are retained with length 1. +If axis is empty, all dimensions are reduced, and a variable with a single element is returned. +Args: +input_variable: The variable to reduce. Should have numeric type. +axis: The dimensions to reduce. If empty(the default), reduces all dimensions. + Must be in the range [-rank(input_variable), rank(input_variable)). +keepdims: If true, retains reduced dimensions with length 1. +Returns: +The reduced variable, of the same dtype as the input_variable. +*/ +VARP _ReduceMax(VARP input_variable, INTS axis, bool keepdims) { + return _Reduce(input_variable, axis, ReductionType_MAXIMUM, keepdims); +} + +/*Computes the minimum of elements across dimensions of a variable. +Reduces input_variable along the dimensions given in axis. +Unless keepdims is true, the rank of the variable is reduced by 1 for each entry in axis. +If keepdims is true, the reduced dimensions are retained with length 1. +If axis is empty, all dimensions are reduced, and a variable with a single element is returned. +Args: +input_variable: The variable to reduce. Should have numeric type. +axis: The dimensions to reduce. 
If empty(the default), reduces all dimensions. + Must be in the range [-rank(input_variable), rank(input_variable)). +keepdims: If true, retains reduced dimensions with length 1. +Returns: +The reduced variable, of the same dtype as the input_variable. +*/ +VARP _ReduceMin(VARP input_variable, INTS axis, bool keepdims) { + return _Reduce(input_variable, axis, ReductionType_MINIMUM, keepdims); +} + +/*Computes the product of elements across dimensions of a variable. +Reduces input_variable along the dimensions given in axis. +Unless keepdims is true, the rank of the variable is reduced by 1 for each entry in axis. +If keepdims is true, the reduced dimensions are retained with length 1. +If axis is empty, all dimensions are reduced, and a variable with a single element is returned. +Args: +input_variable: The variable to reduce. Should have numeric type. +axis: The dimensions to reduce. If empty(the default), reduces all dimensions. + Must be in the range [-rank(input_variable), rank(input_variable)). +keepdims: If true, retains reduced dimensions with length 1. +Returns: +The reduced variable, of the same dtype as the input_variable. +*/ +VARP _ReduceProd(VARP input_variable, INTS axis, bool keepdims) { + return _Reduce(input_variable, axis, ReductionType_PROD, keepdims); +} +/*Computes the "logical or" of elements across dimensions of a variable. +Reduces input_variable along the dimensions given in axis. +Unless keepdims is true, the rank of the variable is reduced by 1 for each entry in axis. +If keepdims is true, the reduced dimensions are retained with length 1. +If axis is empty, all dimensions are reduced, and a variable with a single element is returned. +Args: +input_variable: The variable to reduce. Should have booling type. +axis: The dimensions to reduce. If empty(the default), reduces all dimensions. + Must be in the range [-rank(input_variable), rank(input_variable)). +keepdims: If true, retains reduced dimensions with length 1. 
+Returns: +The reduced variable, of the same dtype as the input_variable. +*/ +VARP _ReduceAny(VARP input_variable, INTS axis, bool keepdims) { + return _Reduce(input_variable, axis, ReductionType_ANY, keepdims); +} +/*Computes the "logical and" of elements across dimensions of a variable. +Reduces input_variable along the dimensions given in axis. +Unless keepdims is true, the rank of the variable is reduced by 1 for each entry in axis. +If keepdims is true, the reduced dimensions are retained with length 1. +If axis is empty, all dimensions are reduced, and a variable with a single element is returned. +Args: +input_variable: The variable to reduce. Should have booling type. +axis: The dimensions to reduce. If empty(the default), reduces all dimensions. + Must be in the range [-rank(input_variable), rank(input_variable)). +keepdims: If true, retains reduced dimensions with length 1. +Returns: +The reduced variable, of the same dtype as the input_variable. +*/ +VARP _ReduceAll(VARP input_variable, INTS axis, bool keepdims) { + return _Reduce(input_variable, axis, ReductionType_ALL, keepdims); +} + +/*Multiply the matrix "a" by the matrix "b". +The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) +must match the outer dimension of "b" (after being transposed if transposed_b is true). +Arguments: +a: a variable representing a matrix "a" +b: a variable representing a matrix "b" +tranposeA: If true, "a" is transposed before multiplication. +tranposeB: If true, "b" is transposed before multiplication. +Returns: +The product variable. 
+*/
+VARP _MatMul(VARP a, VARP b, bool tranposeA, bool tranposeB) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->main.type = OpParameter_MatMul;
+ op->type = OpType_MatMul;
+ op->main.value = new MatMulT;
+ op->main.AsMatMul()->transposeA = tranposeA;
+ op->main.AsMatMul()->transposeB = tranposeB;
+ return (Variable::create(Expr::create(op.get(), {a, b})));
+}
+VARP _Normalize(VARP x, int32_t acrossSpatial, int32_t channelShared, float eps, std::vector<float> scale) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->main.type = OpParameter_Normalize;
+ op->type = OpType_Normalize;
+ op->main.value = new NormalizeT;
+ op->main.AsNormalize()->acrossSpatial = acrossSpatial;
+ op->main.AsNormalize()->channelShared = channelShared;
+ op->main.AsNormalize()->eps = eps;
+ op->main.AsNormalize()->scale = scale;
+ return (Variable::create(Expr::create(std::move(op), {x})));
+}
+/* Compute the element-wise prod
+Args:
+a: A variable. Must be one of the following types: Halide_Type_Float
+b: A variable. Must be one of the following types: Halide_Type_Float
+coeff: blob-wise coefficients
+Returns:
+The prod variable.
+*/
+VARP _Prod(VARP a, VARP b, std::vector<float> coeff) {
+ return _Eltwise(a, b, EltwiseType_PROD, coeff);
+}
+/* Compute the element-wise sum
+Args:
+a: A variable. Must be one of the following types: Halide_Type_Float
+b: A variable. Must be one of the following types: Halide_Type_Float
+coeff: blob-wise coefficients
+Returns:
+The sum variable.
+*/
+VARP _Sum(VARP a, VARP b, std::vector<float> coeff) {
+ return _Eltwise(a, b, EltwiseType_SUM, coeff);
+}
+/* Compute the element-wise max
+Args:
+a: A variable. Must be one of the following types: Halide_Type_Float
+b: A variable. Must be one of the following types: Halide_Type_Float
+coeff: blob-wise coefficients
+Returns:
+The max variable.
+*/
+VARP _Max(VARP a, VARP b, std::vector<float> coeff) {
+ return _Eltwise(a, b, EltwiseType_MAXIMUM, coeff);
+}
+/* Compute the element-wise sub
+Args:
+a: A variable. 
Must be one of the following types: Halide_Type_Float
+b: A variable. Must be one of the following types: Halide_Type_Float
+coeff: blob-wise coefficients
+Returns:
+The sub variable.
+*/
+VARP _Sub(VARP a, VARP b, std::vector<float> coeff) {
+ return _Eltwise(a, b, EltwiseType_SUB, coeff);
+}
+
+
+/*Returns the index with the largest value across axes of a tensor.
+Args: input: A variable. Must be one of the following types: Halide_Type_Float, Halide_Type_Int
+ axis: A int.
+ must be in the range [-rank(input), rank(input)). Describes which axis of the input variable to reduce across.
+ For vectors, use axis = 0.
+Returns:
+A variable of type int.
+*/
+VARP _ArgMax(VARP input, int axis) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->main.type = OpParameter_ArgMax;
+ op->type = OpType_ArgMax;
+ op->main.value = new ArgMaxT;
+ op->main.AsArgMax()->axis = axis;
+ op->main.AsArgMax()->outMaxVal = 0;
+ op->main.AsArgMax()->topK = 0;
+ op->main.AsArgMax()->softmaxThreshold = 0;
+ return (Variable::create(Expr::create(std::move(op), {input})));
+
+}
+
+/*Multiplies slices of two variable in batches
+Multiplies all slices of variable x and y (each slice can be viewed as an element of a batch),
+and arranges the individual results in a single output variable of the same batch size.
+Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it)
+before multiplication by setting the adj_x or adj_y flag to True, which are by default False.
+The input variable x and y are 2-D or higher with shape [..., r_x, c_x] and [..., r_y, c_y].
+The output variable is 2-D or higher with shape [..., r_o, c_o], where:
+r_o = c_x if adj_x else r_x
+c_o = r_y if adj_y else c_y
+It is computed as:
+output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
+Arguments:
+x: 2-D or higher with shape [..., r_x, c_x].
+y: 2-D or higher with shape [..., r_y, c_y].
+Optional:
+adj_x: If True, adjoint the slices of x. Defaults to False. 
+adj_y: If True, adjoint the slices of y. Defaults to False.
+Returns:
+Output: 3-D or higher with shape [..., r_o, c_o]
+*/
+VARP _BatchMatMul(VARP x, VARP y, bool adj_x, bool adj_y) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->main.type = OpParameter_BatchMatMulParam;
+ op->type = OpType_BatchMatMul;
+ op->main.value = new BatchMatMulParamT;
+ op->main.AsBatchMatMulParam()->adjX = adj_x;
+ op->main.AsBatchMatMulParam()->adjY = adj_y;
+
+ return (Variable::create(Expr::create(std::move(op), {x, y})));
+}
+
+
+VARP _UnravelIndex(VARP indices, VARP dims) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->main.type = OpParameter_NONE;
+ op->type = OpType_UnravelIndex;
+ op->main.value = nullptr;
+
+ return (Variable::create(Expr::create(std::move(op), {indices, dims})));
+}
+
+VARP _ScatterNd(VARP indices, VARP updates, VARP shape) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->main.type = OpParameter_NONE;
+ op->type = OpType_ScatterNd;
+ op->main.value = nullptr;
+ return (Variable::create(Expr::create(std::move(op), {indices, updates, shape})));
+}
+
+VARP _OneHot(VARP indices, VARP depth, VARP onValue, VARP offValue, int axis) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->type = OpType_OneHot;
+ op->main.type = OpParameter_OneHotParam;
+ op->main.value = new OneHotParamT;
+ op->main.AsOneHotParam()->axis = axis;
+
+ return (Variable::create(Expr::create(std::move(op), {indices, depth, onValue, offValue})));
+}
+
+VARP _BroadcastTo(VARP a, VARP shape) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->type = OpType_BroadcastTo;
+ op->main.type = OpParameter_NONE;
+ op->main.value = nullptr;
+ return (Variable::create(Expr::create(std::move(op), {a, shape})));
+}
+} // namespace Express
+} // namespace MNN
diff --git a/express/MergeOptimizer.cpp b/express/MergeOptimizer.cpp
new file mode 100644
index 000000000..f676b3b3e
--- /dev/null
+++ b/express/MergeOptimizer.cpp
@@ -0,0 +1,140 @@
+//
+// MergeOptimizer.cpp
+// MNN
+//
+// Created by MNN on 2019/08/20. 
+// Copyright © 2018, Alibaba Group Holding Limited
+//
+
+#include "MergeOptimizer.hpp"
+#include <map>
+#include "Utils.hpp"
+#include "BasicOptimizer_generated.h"
+#define FLATBUFFERS_PREFER_PRINTF
+#include "flatbuffers/util.h"
+
+namespace MNN {
+namespace Express {
+
+MergeOptimizer::MergeOptimizer(MNNForwardType type, int numberThread, BackendConfig* config) {
+ if (nullptr != config) {
+ mConfig = *config;
+ }
+ mType = type;
+ mNumberThread = numberThread;
+}
+
+Optimizer::Cost MergeOptimizer::onMeasure(const std::vector<VARP>& outputs, std::shared_ptr<Parameters> parameters) {
+ Cost cost;
+ cost.compute = 0.0f;
+ cost.memory = 0.0f;
+ return cost;
+}
+bool MergeOptimizer::onExecute(const std::vector<VARP>& outputs, std::shared_ptr<Parameters> parameters) {
+ auto sequence = Variable::getExecuteOrder(outputs);
+ if (1 == sequence.size()) {
+ return true;
+ }
+ std::map<EXPRP, int> varIndexOffset;
+ std::vector<VARP> inputs;
+ std::unique_ptr<MNN::Optimizer::MergeT> merge(new MNN::Optimizer::MergeT);
+ merge->backend.reset(new MNN::Optimizer::BackendConfigT);
+ merge->backend->numberThread = mNumberThread;
+ merge->backend->type = (MNN::ForwardType)mType;
+ merge->backend->power = (int)mConfig.power;
+ merge->backend->precision = (int)mConfig.precision;
+ merge->backend->memroy = (int)mConfig.memory;
+
+ int tensorOffset = 0;
+ for (int i = 0; i < sequence.size(); ++i) {
+ auto expr = sequence[i];
+ if (nullptr != expr->get() && OpType_Extra == expr->get()->type()) {
+ return true;
+ }
+ varIndexOffset[expr] = tensorOffset;
+ tensorOffset += expr->outputSize();
+ if (nullptr == expr->get()) {
+ if (expr->inputType() == VARP::INPUT) {
+ inputs.emplace_back(Variable::create(expr));
+ merge->inputIndexes.emplace_back(varIndexOffset[expr]);
+ } else {
+ std::unique_ptr<OpT> op;
+ VARP var = Variable::create(expr);
+ auto& info = *(var->getInfo());
+ auto blob = new BlobT;
+ blob->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(info.order);
+ blob->dims = info.dim;
+ if (info.type.code == halide_type_float) {
+ blob->dataType = 
DataType_DT_FLOAT;
+ blob->float32s.resize(info.size);
+ ::memcpy(blob->float32s.data(), info.ptr, info.size * sizeof(float));
+ } else if (info.type.code == halide_type_int) {
+ blob->dataType = DataType_DT_INT32;
+ blob->int32s.resize(info.size);
+ ::memcpy(blob->int32s.data(), info.ptr, info.size * sizeof(int));
+ }
+ else if (info.type.code == halide_type_uint && info.type.bits == 8) {
+ blob->dataType = DataType_DT_UINT8;
+ blob->uint8s.resize(info.size);
+ ::memcpy(blob->uint8s.data(), info.ptr, info.size * sizeof(uint8_t));
+ }
+ op.reset(new OpT);
+ op->type = OpType_Const;
+ op->main.type = OpParameter_Blob;
+ op->main.value = blob;
+ op->outputIndexes = {varIndexOffset[expr]};
+ merge->oplists.emplace_back(std::move(op));
+ }
+ }
+ }
+ merge->tensorNumber = tensorOffset;
+ for (auto expr : sequence) {
+ if (nullptr == expr->get()) {
+ continue;
+ }
+ std::unique_ptr<OpT> op(expr->get()->UnPack());
+ auto outputIndexStart = varIndexOffset[expr];
+ op->name = EnumNameOpType(op->type) + flatbuffers::NumToString(outputIndexStart+1);
+ op->outputIndexes.resize(expr->outputSize());
+ for (int i=0; i<expr->outputSize(); ++i) {
+ op->outputIndexes[i] = outputIndexStart + i;
+ }
+ auto exprinputs = expr->inputs();
+ op->inputIndexes.resize(exprinputs.size());
+ for (int i = 0; i < exprinputs.size(); ++i) {
+ auto inputExpr = exprinputs[i]->expr();
+ op->inputIndexes[i] = varIndexOffset[inputExpr.first] + inputExpr.second;
+ }
+ merge->oplists.emplace_back(std::move(op));
+ }
+ for (auto var : outputs) {
+ auto expr = var->expr();
+ merge->outputIndexes.emplace_back(varIndexOffset[expr.first] + expr.second);
+ }
+
+ std::unique_ptr<OpT> mergeOp(new OpT);
+ mergeOp->type = OpType_Extra;
+ mergeOp->name = outputs[0]->name();
+ mergeOp->main.type = OpParameter_Extra;
+ mergeOp->main.value = new ExtraT;
+ auto plugin = mergeOp->main.AsExtra();
+ plugin->type = "Session";
+ plugin->engine = "MNN";
+
+ flatbuffers::FlatBufferBuilder builder;
+ auto offset = 
MNN::Optimizer::Merge::Pack(builder, merge.get());
+ builder.Finish(offset);
+ plugin->info.resize(builder.GetSize());
+ ::memcpy(plugin->info.data(), builder.GetBufferPointer(), builder.GetSize());
+
+ auto mergeExpr = Expr::create(mergeOp.get(), inputs, (int)outputs.size());
+ mergeExpr->setName(outputs[0]->name());
+ for (int i = 0; i < outputs.size(); ++i) {
+ auto name = outputs[i]->name();
+ outputs[i]->setExpr(mergeExpr, i);
+ outputs[i]->setName(name); // merge expr does not copy mOutputNames, so copy to prevent var's name to be erased
+ }
+ return true;
+}
+} // namespace Express
+} // namespace MNN
diff --git a/express/source/optimizer/MergeOptimizer.hpp b/express/MergeOptimizer.hpp
similarity index 92%
rename from express/source/optimizer/MergeOptimizer.hpp
rename to express/MergeOptimizer.hpp
index 01e5fbb37..9875eb75b 100644
--- a/express/source/optimizer/MergeOptimizer.hpp
+++ b/express/MergeOptimizer.hpp
@@ -8,8 +8,8 @@
 #ifndef MergeOptimizer_hpp
 #define MergeOptimizer_hpp
 
-#include "Optimizer.hpp"
-#include "MNNForwardType.h"
+#include <MNN/expr/Optimizer.hpp>
+#include <MNN/MNNForwardType.h>
 
 namespace MNN {
 namespace Express {
 class MergeOptimizer : public Optimizer {
diff --git a/express/NeuralNetWorkOp.cpp b/express/NeuralNetWorkOp.cpp
new file mode 100644
index 000000000..529e85f89
--- /dev/null
+++ b/express/NeuralNetWorkOp.cpp
@@ -0,0 +1,1009 @@
+//
+// NeuralNetWorkOp.cpp
+// MNN
+//
+// Created by MNN on 2019/06/27. 
+// Copyright © 2018, Alibaba Group Holding Limited
+//
+
+#include <algorithm>
+#include <map>
+#include <numeric>
+#include <MNN/expr/ExprCreator.hpp>
+#include <MNN/MNNDefine.h>
+#include "MNN_generated.h"
+#include "Utils.hpp"
+namespace MNN {
+namespace Express {
+static PadMode _convertPadMode(PaddingMode mode) {
+ switch (mode) {
+ case CAFFE:
+ return PadMode_CAFFE;
+ case VALID:
+ return PadMode_VALID;
+ case SAME:
+ return PadMode_SAME;
+ default:
+ break;
+ }
+ return PadMode_CAFFE;
+}
+static PoolPadType _convertPoollingPadMode(PaddingMode mode) {
+ switch (mode) {
+ case CAFFE:
+ return PoolPadType_CAFFE;
+ case VALID:
+ return PoolPadType_VALID;
+ case SAME:
+ return PoolPadType_SAME;
+ default:
+ break;
+ }
+ return PoolPadType_CAFFE;
+}
+
+VARP _Input(INTS dims, Dimensionformat format, halide_type_t type) {
+ Variable::Info info;
+ info.dim = std::move(dims);
+ info.order = format;
+ info.type = type;
+ info.ptr = nullptr;
+ return (Variable::create(Expr::create(std::move(info))));
+}
+VARP _Scalar(const void* ptr, halide_type_t type) {
+ Variable::Info info;
+ info.dim = {};
+ info.order = NHWC;
+ info.type = type;
+ info.ptr = (void*)ptr;
+ return (Variable::create(Expr::create(std::move(info))));
+}
+VARP _Const(const void* ptr, INTS dims, Dimensionformat format, halide_type_t type) {
+ Variable::Info info;
+ info.dim = std::move(dims);
+ info.order = format;
+ info.type = type;
+ info.ptr = (void*)ptr;
+ return (Variable::create(Expr::create(std::move(info))));
+}
+
+VARP _Const(float value, INTS dims, Dimensionformat format) {
+ auto size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
+ std::vector<float> values;
+ values.resize(size);
+ for (int i = 0; i < size; ++i) {
+ values[i] = value;
+ }
+ Variable::Info info;
+ info.dim = std::move(dims);
+ info.order = format;
+ info.type = halide_type_of<float>();
+ info.ptr = (void*)values.data();
+ return (Variable::create(Expr::create(std::move(info))));
+}
+
+VARP _TrainableParam(const void* ptr, INTS dims, Dimensionformat format, halide_type_t type) {
+ auto v = 
_Const(ptr, dims, format, type);
+ v.fix(VARP::TRAINABLE);
+ return v;
+}
+VARP _TrainableParam(float value, INTS dims, Dimensionformat format) {
+ auto v = _Const(value, dims, format);
+ v.fix(VARP::TRAINABLE);
+ return v;
+}
+
+VARP _Conv(VARP weight, VARP bias, VARP x, PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads) {
+ std::unique_ptr<OpT> convOp(new OpT);
+ convOp->type = OpType_Convolution;
+ auto shape = weight->getInfo();
+ if (NHWC == shape->order) {
+ weight = _Transpose(weight, {0, 3, 1, 2});
+ shape = weight->getInfo();
+ }
+ auto channel = std::vector<int>{shape->dim[1], shape->dim[0]};
+ auto kernelSize = std::vector<int>{shape->dim[3], shape->dim[2]};
+ if (1 == channel[1] && channel[0] == group) {
+ convOp->type = OpType_ConvolutionDepthwise;
+ channel[1] = group;
+ }
+ convOp->main.type = OpParameter_Convolution2D;
+ convOp->main.value = new Convolution2DT;
+ auto conv2D = convOp->main.AsConvolution2D();
+ conv2D->common.reset(new Convolution2DCommonT);
+ conv2D->common->padX = pads[0];
+ conv2D->common->padY = pads[1];
+ conv2D->common->padMode = _convertPadMode(pad);
+ conv2D->common->strideX = stride[0];
+ conv2D->common->strideY = stride[1];
+ conv2D->common->group = group;
+ conv2D->common->outputCount = channel[1];
+ conv2D->common->inputCount = channel[0];
+ conv2D->common->dilateX = dilate[0];
+ conv2D->common->dilateY = dilate[1];
+ conv2D->common->kernelX = kernelSize[0];
+ conv2D->common->kernelY = kernelSize[1];
+ return (Variable::create(Expr::create(convOp.get(), {x, weight, bias})));
+}
+VARP _Conv(std::vector<float>&& weight, std::vector<float>&& bias, VARP x, INTS channel, INTS kernelSize,
+ PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads) {
+ std::unique_ptr<OpT> convOp(new OpT);
+ convOp->type = OpType_Convolution;
+ if (channel[0] == channel[1] && channel[0] == group) {
+ convOp->type = OpType_ConvolutionDepthwise;
+ }
+ convOp->main.type = OpParameter_Convolution2D;
+ convOp->main.value = new Convolution2DT;
+ auto conv2D = 
convOp->main.AsConvolution2D();
+ conv2D->common.reset(new Convolution2DCommonT);
+ conv2D->common->padMode = _convertPadMode(pad);
+ conv2D->common->padX = pads[0];
+ conv2D->common->padY = pads[1];
+ conv2D->common->strideX = stride[0];
+ conv2D->common->strideY = stride[1];
+ conv2D->common->group = group;
+ conv2D->common->outputCount = channel[1];
+ conv2D->common->inputCount = channel[0];
+ conv2D->common->dilateX = dilate[0];
+ conv2D->common->dilateY = dilate[1];
+ conv2D->common->kernelX = kernelSize[0];
+ conv2D->common->kernelY = kernelSize[1];
+ MNN_ASSERT(weight.size() == channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]);
+ conv2D->weight = std::move(weight);
+ MNN_ASSERT(bias.size() == channel[1]);
+ conv2D->bias = std::move(bias);
+ return (Variable::create(Expr::create(convOp.get(), {x})));
+}
+
+VARP _Conv(float weight, float bias, VARP x, INTS channel, INTS kernelSize, PaddingMode pad, INTS stride, INTS dilate,
+ int group) {
+ std::unique_ptr<OpT> convOp(new OpT);
+ convOp->type = OpType_Convolution;
+ if (channel[0] == channel[1] && channel[0] == group) {
+ convOp->type = OpType_ConvolutionDepthwise;
+ }
+ convOp->main.type = OpParameter_Convolution2D;
+ convOp->main.value = new Convolution2DT;
+ auto conv2D = convOp->main.AsConvolution2D();
+ conv2D->common.reset(new Convolution2DCommonT);
+ conv2D->common->padMode = _convertPadMode(pad);
+ conv2D->common->strideX = stride[0];
+ conv2D->common->strideY = stride[1];
+ conv2D->common->group = group;
+ conv2D->common->outputCount = channel[1];
+ conv2D->common->inputCount = channel[0];
+ conv2D->common->dilateX = dilate[0];
+ conv2D->common->dilateY = dilate[1];
+ conv2D->common->kernelX = kernelSize[0];
+ conv2D->common->kernelY = kernelSize[1];
+ conv2D->weight.resize(channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]);
+ std::fill(conv2D->weight.begin(), conv2D->weight.end(), weight);
+ conv2D->bias.resize(channel[1]);
+ std::fill(conv2D->bias.begin(), 
conv2D->bias.end(), bias);
+ return (Variable::create(Expr::create(convOp.get(), {x})));
+}
+
+VARP _Deconv(VARP weight, VARP bias, VARP x, PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads) {
+ std::unique_ptr<OpT> convOp(new OpT);
+ convOp->type = OpType_Deconvolution;
+ auto shape = weight->getInfo();
+ auto channel = std::vector<int>{shape->dim[1], shape->dim[0]};
+ auto kernelSize = std::vector<int>{shape->dim[3], shape->dim[2]};
+ if (1 == channel[1] && channel[0] == group) {
+ convOp->type = OpType_DeconvolutionDepthwise;
+ channel[1] = group;
+ }
+ convOp->main.type = OpParameter_Convolution2D;
+ convOp->main.value = new Convolution2DT;
+ auto conv2D = convOp->main.AsConvolution2D();
+ conv2D->common.reset(new Convolution2DCommonT);
+ static std::map<PaddingMode, PadMode> padmap{
+ {CAFFE, PadMode_CAFFE},
+ {VALID, PadMode_VALID},
+ {SAME, PadMode_SAME},
+ };
+ conv2D->common->padX = pads[0];
+ conv2D->common->padY = pads[1];
+ conv2D->common->padMode = padmap[pad];
+ conv2D->common->strideX = stride[0];
+ conv2D->common->strideY = stride[1];
+ conv2D->common->group = group;
+ conv2D->common->outputCount = channel[0];
+ conv2D->common->inputCount = channel[1];
+ conv2D->common->dilateX = dilate[0];
+ conv2D->common->dilateY = dilate[1];
+ conv2D->common->kernelX = kernelSize[0];
+ conv2D->common->kernelY = kernelSize[1];
+ if (nullptr != bias) {
+ return (Variable::create(Expr::create(std::move(convOp), {x, weight, bias})));
+ }
+ return (Variable::create(Expr::create(std::move(convOp), {x, weight})));
+}
+
+static VARP _Pool(VARP x, INTS kernel, INTS stride, PoolType type, PaddingMode pad, INTS pads) {
+ std::unique_ptr<OpT> pool(new OpT);
+ pool->type = OpType_Pooling;
+ pool->main.type = OpParameter_Pool;
+ pool->main.value = new PoolT;
+ if (kernel[0] == -1 && kernel[1] == -1) {
+ pool->main.AsPool()->isGlobal = true;
+ }
+ pool->main.AsPool()->padX = 0;
+ pool->main.AsPool()->padY = 0;
+ if (pads.size() >= 2) {
+ pool->main.AsPool()->padX = pads[0];
+ pool->main.AsPool()->padY = 
pads[1];
+ }
+ pool->main.AsPool()->padType = _convertPoollingPadMode(pad);
+ pool->main.AsPool()->kernelX = kernel[0];
+ pool->main.AsPool()->kernelY = kernel[1];
+ pool->main.AsPool()->strideX = stride[0];
+ pool->main.AsPool()->strideY = stride[1];
+ pool->main.AsPool()->type = type;
+ return (Variable::create(Expr::create(pool.get(), {x})));
+}
+
+VARP _AvePool(VARP x, INTS kernel, INTS stride, PaddingMode pad, INTS pads) {
+ return _Pool(x, kernel, stride, PoolType_AVEPOOL, pad, pads);
+}
+
+VARP _MaxPool(VARP x, INTS kernel, INTS stride, PaddingMode pad, INTS pads) {
+ return _Pool(x, kernel, stride, PoolType_MAXPOOL, pad, pads);
+}
+VARP _Reshape(VARP x, INTS dim, Dimensionformat format) {
+ std::unique_ptr<OpT> reshape(new OpT);
+ reshape->type = OpType_Reshape;
+ reshape->main.type = OpParameter_Reshape;
+ reshape->main.value = new ReshapeT;
+ reshape->main.AsReshape()->dims = dim;
+ reshape->main.AsReshape()->dimType = (MNN_DATA_FORMAT)Utils::convertFormat(format);
+ return (Variable::create(Expr::create(reshape.get(), {x})));
+}
+VARP _Reshape(VARP x, VARP shape) {
+ std::unique_ptr<OpT> reshape(new OpT);
+ reshape->type = OpType_Reshape;
+ reshape->main.type = OpParameter_Reshape;
+ reshape->main.value = new ReshapeT;
+ reshape->main.AsReshape()->dimType = MNN_DATA_FORMAT_NCHW;
+ return (Variable::create(Expr::create(reshape.get(), {x, shape})));
+}
+VARP _Scale(VARP x, int channels, std::vector<float>&& scales, std::vector<float>&& bias) {
+ std::unique_ptr<OpT> scale(new OpT);
+ scale->type = OpType_Scale;
+ scale->main.type = OpParameter_Scale;
+ scale->main.value = new ScaleT;
+ scale->main.AsScale()->channels = channels;
+ scale->main.AsScale()->scaleData = std::move(scales);
+ scale->main.AsScale()->biasData = std::move(bias);
+ return (Variable::create(Expr::create(std::move(scale), {x})));
+}
+VARP _Relu(VARP x, float slope) {
+ std::unique_ptr<OpT> relu(new OpT);
+ relu->type = OpType_ReLU;
+ relu->main.type = OpParameter_Relu;
+ relu->main.value = new ReluT;
+ 
relu->main.AsRelu()->slope = slope;
+ return (Variable::create(Expr::create(relu.get(), {x})));
+}
+VARP _Relu6(VARP x) {
+ std::unique_ptr<OpT> relu(new OpT);
+ relu->type = OpType_ReLU6;
+ return (Variable::create(Expr::create(relu.get(), {x})));
+}
+VARP _PRelu(VARP x, std::vector<float>&& slopes) {
+ std::unique_ptr<OpT> prelu(new OpT);
+ prelu->type = OpType_PReLU;
+ prelu->main.type = OpParameter_PRelu;
+ prelu->main.value = new PReluT;
+ prelu->main.AsPRelu()->slope = slopes;
+ prelu->main.AsPRelu()->slopeCount = slopes.size();
+ return (Variable::create(Expr::create(prelu.get(), {x})));
+}
+
+VARP _Softmax(VARP x, int axis) {
+ std::unique_ptr<OpT> softmax(new OpT);
+ softmax->type = OpType_Softmax;
+ softmax->main.type = OpParameter_Axis;
+ softmax->main.value = new AxisT;
+ softmax->main.AsAxis()->axis = axis;
+ return (Variable::create(Expr::create(softmax.get(), {x})));
+}
+
+VARP _Softplus(VARP x) {
+ return _Log(_Add(_Exp(x), _Const(1)));
+}
+
+VARP _Softsign(VARP x) {
+ return _Divide(x, _Add(_Abs(x), _Const(1)));
+}
+
+VARP _Concat(VARPS xs, int axis) {
+ std::unique_ptr<OpT> concat(new OpT);
+ concat->type = OpType_Concat;
+ concat->main.type = OpParameter_Axis;
+ concat->main.value = new AxisT;
+ concat->main.AsAxis()->axis = axis;
+ return (Variable::create(Expr::create(concat.get(), xs)));
+}
+
+VARP _Convert(VARP x, Dimensionformat dest) {
+ std::unique_ptr<OpT> convert(new OpT);
+ if (nullptr != x->getInfo()) {
+ auto source = x->getInfo()->order;
+ if (source == dest) {
+ return x;
+ }
+ }
+ convert->type = OpType_ConvertTensor;
+ convert->main.type = OpParameter_TensorConvertInfo;
+ convert->main.value = new TensorConvertInfoT;
+ convert->main.AsTensorConvertInfo()->dest = (MNN_DATA_FORMAT)Utils::convertFormat(dest);
+ return (Variable::create(Expr::create(convert.get(), {x})));
+}
+
+std::vector<VARP> _Split(VARP x, INTS points, int axis) {
+ MNN_ASSERT(points.size() >= 1);
+ std::unique_ptr<OpT> op(new OpT);
+ op->type = OpType_Slice;
+ op->main.type = OpParameter_Slice;
+ 
op->main.value = new SliceT;
+ op->main.AsSlice()->axis = axis;
+ op->main.AsSlice()->sourceType = NetSource_TENSORFLOW;
+ op->main.AsSlice()->slicePoints = points;
+
+ int slices = points.size() == 1 ? points[0] : (int)points.size();
+ EXPRP expr = Expr::create(std::move(op), {x}, slices);
+ std::vector<VARP> res;
+ for (int i = 0; i < slices; ++i) {
+ res.emplace_back(Variable::create(expr, i));
+ }
+ return res;
+}
+
+VARP _Slice(VARP x, VARP starts, VARP sizes) {
+ std::unique_ptr<OpT> slice(new OpT);
+ slice->type = OpType_SliceTf;
+ return (Variable::create(Expr::create(slice.get(), {x, starts, sizes})));
+}
+
+VARP _StridedSlice(VARP x, VARP begin, VARP end, VARP strided, halide_type_t type, int32_t beginMask,
+ int32_t endMask, int32_t ellipsisMask, int32_t newAxisMask, int32_t shrinkAxisMask) {
+ std::unique_ptr<OpT> op(new OpT);
+ op->type = OpType_StridedSlice;
+ op->main.type = OpParameter_StridedSliceParam;
+ op->main.value = new StridedSliceParamT;
+
+ op->main.AsStridedSliceParam()->T = (MNN::DataType)Utils::convertDataType(type);
+ op->main.AsStridedSliceParam()->beginMask = beginMask;
+ op->main.AsStridedSliceParam()->endMask = endMask;
+ op->main.AsStridedSliceParam()->ellipsisMask = ellipsisMask;
+ op->main.AsStridedSliceParam()->newAxisMask = newAxisMask;
+ op->main.AsStridedSliceParam()->shrinkAxisMask = shrinkAxisMask;
+ return (Variable::create(Expr::create(op.get(), {x, begin, end, strided})));
+}
+
+VARP _Transpose(VARP x, INTS perm) {
+ auto permVar = _Const((const void*)perm.data(), {static_cast<int>(perm.size())}, NHWC, halide_type_of<int32_t>());
+ return _Transpose(x, permVar);
+}
+VARP _Transpose(VARP x, VARP perm) {
+ std::unique_ptr<OpT> transpose(new OpT);
+ transpose->type = OpType_Transpose;
+ transpose->main.type = OpParameter_Transpose;
+ transpose->main.value = new TransposeT;
+ transpose->main.AsTranspose()->Tperm = DataType_DT_INT32;
+ return (Variable::create(Expr::create(std::move(transpose), {x, perm})));
+}
+
+VARP _ChannelShuffle(VARP x, int group) {
+ 
x = _Convert(x, NHWC); + x = _Reshape(x, {0, 0, 0, group, -1}, NHWC); + x = _Transpose(x, {0, 1, 2, 4, 3}); + x = _Reshape(x, {0, 0, 0, -1}, NHWC); + x = _Convert(x, NC4HW4); + return x; +} +VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim) { + std::unique_ptr op(new OpT); + op->type = OpType_ReverseSequence; + op->main.type = OpParameter_ReverseSequenceParam; + op->main.value = new ReverseSequenceParamT; + op->main.AsReverseSequenceParam()->batchDim = batchDim; + op->main.AsReverseSequenceParam()->seqDim = seqDim; + return (Variable::create(Expr::create(op.get(), {x, y}))); +} +VARP _ChangeInputFormat(VARP x, Dimensionformat requireInput) { + if (nullptr == x || nullptr == x->getInfo()) { + return nullptr; + } + if (x->getInfo()->order == requireInput) { + return x; + } + auto input = _Input(x->getInfo()->dim, requireInput, x->getInfo()->type); + auto convert = _Convert(input, x->getInfo()->order); + Variable::replace(x, convert); + return input; +} + +VARP _Clone(VARP source, bool deepCopy) { + if (nullptr == source || nullptr == source->expr().first) { + return nullptr; + } + if (!deepCopy) { + return Variable::create(source->expr().first, source->expr().second); + } + auto info = source->getInfo(); + auto sourcePtr = source->readMap(); + if (nullptr == info || nullptr == sourcePtr) { + MNN_ERROR("Source Buffer Not Available\n"); + return nullptr; + } + auto inputVar = _Input(info->dim, info->order, info->type); + auto destPtr = inputVar->writeMap(); + if (nullptr == destPtr) { + MNN_ERROR("Alloc Buffer Error\n"); + return nullptr; + } + ::memcpy(destPtr, sourcePtr, info->size * info->type.bytes()); + return inputVar; +} +VARP _Conv2DBackPropFilter(VARP weight, VARP input, VARP inputGrad, PaddingMode pad, INTS stride, INTS dilate, + int group, INTS pads) { + std::unique_ptr convOp(new OpT); + convOp->type = OpType_Conv2DBackPropFilter; + auto shape = weight->getInfo(); + auto channel = std::vector{shape->dim[1], shape->dim[0]}; + auto kernelSize = 
std::vector{shape->dim[3], shape->dim[2]}; + convOp->main.type = OpParameter_Convolution2D; + convOp->main.value = new Convolution2DT; + auto conv2D = convOp->main.AsConvolution2D(); + conv2D->common.reset(new Convolution2DCommonT); + conv2D->common->padX = pads[0]; + conv2D->common->padY = pads[1]; + conv2D->common->padMode = _convertPadMode(pad); + conv2D->common->strideX = stride[0]; + conv2D->common->strideY = stride[1]; + conv2D->common->group = group; + conv2D->common->outputCount = channel[1]; + conv2D->common->inputCount = channel[0]; + conv2D->common->dilateX = dilate[0]; + conv2D->common->dilateY = dilate[1]; + conv2D->common->kernelX = kernelSize[0]; + conv2D->common->kernelY = kernelSize[1]; + INTS weightDims = {channel[1], channel[0] / group, kernelSize[1], kernelSize[0]}; + + return Variable::create(Expr::create(std::move(convOp), {weight, input, inputGrad})); +} + +VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, INTS stride, PoolingMode type, + PaddingMode pad, INTS pads) { + std::unique_ptr pool(new OpT); + pool->type = OpType_PoolGrad; + pool->main.type = OpParameter_Pool; + pool->main.value = new PoolT; + if (kernel[0] == -1 && kernel[1] == -1) { + pool->main.AsPool()->isGlobal = true; + } + pool->main.AsPool()->padX = 0; + pool->main.AsPool()->padY = 0; + if (pads.size() >= 2) { + pool->main.AsPool()->padX = pads[0]; + pool->main.AsPool()->padY = pads[1]; + } + pool->main.AsPool()->padType = _convertPoollingPadMode(pad); + pool->main.AsPool()->kernelX = kernel[0]; + pool->main.AsPool()->kernelY = kernel[1]; + pool->main.AsPool()->strideX = stride[0]; + pool->main.AsPool()->strideY = stride[1]; + pool->main.AsPool()->type = (PoolType)type; + return (Variable::create(Expr::create(std::move(pool), {originInput, originOutput, inputGrad}))); +} + +VARP _Crop(VARP x, VARP s, int axis, INTS offset) { + std::unique_ptr crop(new OpT); + crop->type = OpType_Crop; + crop->main.type = OpParameter_Crop; + crop->main.value = 
new CropT; + crop->main.AsCrop()->axis = axis; + crop->main.AsCrop()->offset = offset; + return (Variable::create(Expr::create(std::move(crop), {x, s}))); +} +VARP _Resize(VARP x, float xScale, float yScale) { + std::unique_ptr resize(new OpT); + resize->type = OpType_Resize; + resize->main.type = OpParameter_Resize; + resize->main.value = new ResizeT; + resize->main.AsResize()->xScale = xScale; + resize->main.AsResize()->yScale = yScale; + return (Variable::create(Expr::create(std::move(resize), {x}))); +} +VARP _Pad(VARP x, VARP pads, PadValueMode mode) { + std::unique_ptr pad(new OpT); + pad->type = OpType_Padding; + pad->main.type = OpParameter_PadParam; + pad->main.value = new PadParamT; + switch (mode) { + case CONSTANT: + pad->main.AsPadParam()->mode = MNN::PadValueMode_CONSTANT; + break; + case SYMMETRIC: + pad->main.AsPadParam()->mode = MNN::PadValueMode_SYMMETRIC; + break; + case REFLECT: + pad->main.AsPadParam()->mode = MNN::PadValueMode_REFLECT; + break; + default: + pad->main.AsPadParam()->mode = MNN::PadValueMode_CONSTANT; + break; + } + return (Variable::create(Expr::create(std::move(pad), {x, pads}))); +} +VARP _ExpandDims(VARP x, int axis) { + std::unique_ptr expand(new OpT); + expand->type = OpType_ExpandDims; + expand->main.type = OpParameter_ExpandDims; + expand->main.value = new ExpandDimsT; + expand->main.AsExpandDims()->axis = axis; + return (Variable::create(Expr::create(std::move(expand), {x}))); +} +VARP _ExpandDims(VARP x, VARP axis) { + std::unique_ptr expand(new OpT); + expand->type = OpType_ExpandDims; + expand->main.type = OpParameter_ExpandDims; + expand->main.value = new ExpandDimsT; + return (Variable::create(Expr::create(std::move(expand), {x, axis}))); +} + +VARP _Shape(VARP x) { + std::unique_ptr shape(new OpT); + shape->type = OpType_Shape; + return (Variable::create(Expr::create(std::move(shape), {x}))); +} +/*Stacks a list of rank-R variables into one rank-(R+1) variable. 
+Packs the list of variables in `values` into a ariable with rank one higher than each variable in values, +by packing them along the axis dimension. +Given a list of length N of variables of shape (A, B, C); +if axis == 0 then the output variable will have the shape (N, A, B, C). +if axis == 1 then the output variable will have the shape (A, N, B, C). Etc. +Args: +values: A list of variable objects with the same shape and type. +axis: An int. The axis to stack along. Defaults to the first dimension. Negative values wrap around, +so the valid range is [-(R+1), R+1). +Returns: +output: A stacked variable with the same type as `values`. +*/ +VARP _Stack(VARPS values, int axis) { + std::unique_ptr pack(new OpT); + pack->type = OpType_Pack; + MNN_ASSERT(values.size()>0); + auto info_first = values[0]->getInfo(); + MNN_ASSERT(nullptr != info_first); + pack->main.type = OpParameter_PackParam; + pack->main.value = new PackParamT; + pack->main.AsPackParam()->dataType = (MNN::DataType)Utils::convertDataType(info_first->type); + pack->main.AsPackParam()->axis = axis; + return (Variable::create(Expr::create(std::move(pack), values))); +} +VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extrapolation, InterpolationMethod method) { + std::unique_ptr car(new OpT); + car->type = OpType_CropAndResize; + car->main.type = OpParameter_CropAndResize; + car->main.value = new CropAndResizeT; + car->main.AsCropAndResize()->extrapolationValue = extrapolation; + switch (method) { + case NEAREST: + car->main.AsCropAndResize()->method = CropAndResizeMethod_NEAREST; + break; + case BILINEAR: + default: + car->main.AsCropAndResize()->method = CropAndResizeMethod_BILINEAR; + break; + } + return (Variable::create(Expr::create(std::move(car), {image, boxes, indexes, sizes}))); +} +VARP _Fill(VARP s, VARP v) { + std::unique_ptr fill(new OpT); + fill->type = OpType_Fill; + fill->main.type = OpParameter_Fill; + fill->main.value = new FillT; + return 
(Variable::create(Expr::create(std::move(fill), {s, v}))); +} +VARP _Tile(VARP x, VARP mul) { + std::unique_ptr tile(new OpT); + tile->type = OpType_Tile; + return (Variable::create(Expr::create(std::move(tile), {x, mul}))); +} +VARP _Gather(VARP embedding, VARP indices) { + std::unique_ptr gather(new OpT); + gather->type = OpType_Gather; + gather->main.value = new GatherT; + return (Variable::create(Expr::create(std::move(gather), {embedding, indices}))); +} +VARP _GatherV2(VARP params, VARP indices, VARP axis) { + std::unique_ptr gather(new OpT); + gather->type = OpType_GatherV2; + gather->main.value = new GatherV2T; + if (axis.get()) { + return (Variable::create(Expr::create(std::move(gather), {params, indices, axis}))); + } else { + return (Variable::create(Expr::create(std::move(gather), {params, indices}))); + } +} + +VARP _Squeeze(VARP x, INTS axes) { + std::unique_ptr squeeze(new OpT); + squeeze->type = OpType_Squeeze; + auto squeezeParam = new SqueezeParamT; + squeezeParam->squeezeDims = axes; + squeeze->main.type = OpParameter_SqueezeParam; + squeeze->main.value = squeezeParam; + return Variable::create(Expr::create(std::move(squeeze), {x})); +} + +VARP _Unsqueeze(VARP x, INTS axes) { + std::unique_ptr squeeze(new OpT); + squeeze->type = OpType_Unsqueeze; + auto squeezeParam = new SqueezeParamT; + squeezeParam->squeezeDims = axes; + squeeze->main.type = OpParameter_SqueezeParam; + squeeze->main.value = squeezeParam; + return Variable::create(Expr::create(std::move(squeeze), {x})); +} +/*Computes exponential linear: alpha * (exp(features) - 1) if < 0, features otherwise. +features: A variable of type Halide_Type_Float +alpha: Alpha factor (positive float) +Returns: +A variable. Has the same type as features. 
+*/ +VARP _Elu(VARP features, float alpha) { + std::unique_ptr op(new OpT); + op->type = OpType_ELU; + auto eluParam = new ELUT; + op->main.type = OpParameter_ELU; + eluParam->alpha = alpha; + op->main.value = eluParam; + return (Variable::create(Expr::create(std::move(op), {features}))); +} +/*Computes the size of the variable +Args: +input: A variable of type Halide_Type_Float or Halide_Type_Int +Returns: +A variable. The shape is (), and type is Halide_Type_Int +*/ +VARP _Size(VARP input) { + std::unique_ptr op(new OpT); + op->type = OpType_Size; + return (Variable::create(Expr::create(std::move(op), {input}))); +} + +/*Computes scaled exponential linear: scale * alpha * (exp(features) - 1) if < 0, scale * features otherwise. +Args: +features: A variable of type Halide_Type_Float +scale: Scaling factor (positive float) +alpha: Alpha factor (positive float) +Returns: +A variable. Has the same type as features. +*/ +VARP _Selu(VARP features, float scale, float alpha) { + std::unique_ptr op(new OpT); + op->type = OpType_Selu; + auto seluParam = new SeluT; + op->main.type = OpParameter_Selu; + seluParam->scale = scale; + seluParam->alpha = alpha; + op->main.value = seluParam; + return (Variable::create(Expr::create(std::move(op), {features}))); + +} +/*Gather slices from params into a variable with shape specified by indices. +Args: +params: A variable. The variables from which to gather values. +indices: A variable. Must be one of the following types: Halide_Type_Int. +Returns: +A variable. Has the same type as params. 
+*/ +VARP _GatherND(VARP params, VARP indices) { + std::unique_ptr op(new OpT); + op->type = OpType_GatherND; + return (Variable::create(Expr::create(std::move(op), {params, indices}))); +} + +/*BatchToSpace for N-D variables +This operation reshapes the "batch" dimension 0 into M + 1 dimensions of shape block_shape + [batch], +interleaves these blocks back into the grid defined by the spatial dimensions [1, ..., M], +to obtain a result with the same rank as the input. +The spatial dimensions of this intermediate result are then optionally cropped according to crops to +produce the output. This is the reverse of SpaceToBatch. See below for a precise description. +Arguments: +input: must be 4-D with NC4HW4 format. N-D with shape input_shape = [batch] + spatial_shape + remaining_shape, where spatial_shape has M dimensions. +block_shape: 1-D with shape [M], all values must be >= 1. +crops: 2-D with shape [M, 2], all values must be >= 0. crops[i] = [crop_start, crop_end] specifies the amount to crop from input dimension i + 1, +which corresponds to spatial dimension i. It is required that crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]. 
+This operation is equivalent to the following steps: +Reshape input to reshaped of shape: [block_shape[0], ..., block_shape[M-1], batch / prod(block_shape), +input_shape[1], ..., input_shape[N-1]] +Permute dimensions of reshaped to produce permuted of shape +[batch / prod(block_shape),input_shape[1], block_shape[0], ..., input_shape[M], block_shape[M-1],input_shape[M+1], ..., input_shape[N-1]] +Reshape permuted to produce reshaped_permuted of shape +[batch / prod(block_shape),input_shape[1] * block_shape[0], ..., input_shape[M] * block_shape[M-1],input_shape[M+1], ..., input_shape[N-1]] +Crop the start and end of dimensions [1, ..., M] of reshaped_permuted according to crops to produce the output of shape: +[batch / prod(block_shape),input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],input_shape[M+1], ..., input_shape[N-1]] +Some examples: +for the following input of shape [4, 1, 1, 3], block_shape = [2, 2], and crops = [[0, 0], [0, 0]]: +[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] +The output variable has shape [1, 2, 2, 3] and value: +x = [[[[1, 2, 3], [4, 5, 6]], + [[7, 8, 9], [10, 11, 12]]]] +Returns: +Output: The output variable +*/ + +VARP _BatchToSpaceND(VARP input, VARP block_shape, VARP crops) { + std::unique_ptr op(new OpT); + std::unique_ptr blob_blockShape(new BlobT); + std::unique_ptr blob_paddings(new BlobT); + + auto info_block_shape = block_shape->getInfo(); + auto info_crops = crops->getInfo(); + MNN_ASSERT(info_block_shape != nullptr); + MNN_ASSERT(info_crops != nullptr); + MNN_ASSERT(halide_type_int == info_block_shape->type.code); + MNN_ASSERT(halide_type_int == info_crops->type.code); + + blob_blockShape->dims = info_block_shape->dim; + blob_blockShape->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(info_block_shape->order); + blob_blockShape->dataType = (MNN::DataType)Utils::convertDataType(info_block_shape->type); + auto data_block_shape = 
block_shape->readMap(); + for (int i=0; isize; i++) + { + blob_blockShape->int32s.emplace_back(data_block_shape[i]); + } + blob_paddings->dims = info_crops->dim; + blob_paddings->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(info_crops->order); + blob_paddings->dataType = (MNN::DataType)Utils::convertDataType(info_crops->type); + auto data_crop = crops->readMap(); + for (int i=0; isize; i++) + { + blob_paddings->int32s.emplace_back(data_crop[i]); + } + op->main.type = OpParameter_SpaceBatch; + op->type = OpType_BatchToSpaceND; + op->main.value = new SpaceBatchT; + op->main.AsSpaceBatch()->blockShape = std::move(blob_blockShape); + op->main.AsSpaceBatch()->padding = std::move(blob_paddings); + return Variable::create(Expr::create(std::move(op), {input})); +} +/*Copies a variable setting everything outside a central band in each innermost matrix. +Arguments: +input: Rank k variable. +num_lower: Number of subdiagonals to keep. If negative, keep entire lower triangle. +num_upper: Number of superdiagonals to keep. If negative, keep entire upper triangle. +Returns: +Output: Rank k variable of the same shape as input. The extracted banded tensor. +*/ +VARP _MatrixBandPart(VARP input, VARP num_lower, VARP num_upper) { + std::unique_ptr op(new OpT); + op->type = OpType_MatrixBandPart; + auto lrnParam = new LRNT; + op->main.type = OpParameter_NONE; + return (Variable::create(Expr::create(std::move(op), {input, num_lower, num_upper}))); +} +/*Calculates the mean and variance of x. +Args: +x: A variable. must be 4-D with NC4HW4 format. +axes: Array of ints. Axes along which to compute mean and variance. Ignored for this implementation: must be {2, 3} +shift: Not used in the current implementation. +keepdims: produce moments with the same dimensionality as the input. Ignored for this implementation: must be true. +Returns: +Two variable objects: mean and variance. 
+*/ +std::vector _Moments(VARP x, INTS axis, VARP shift, bool keepDims) { + std::unique_ptr op(new OpT); + axis = {2, 3}; + keepDims = true; + // if axis != {2,3} or keepDims != true, print warning. + // ignore shift. + op->type = OpType_Moments; + auto momentsParam = new MomentsParamT; + op->main.type = OpParameter_MomentsParam; + momentsParam->dim = axis; + momentsParam->keepDims = keepDims; + momentsParam->dType = (MNN::DataType)Utils::convertDataType(x->getInfo()->type); + op->main.value = momentsParam; + EXPRP expr = Expr::create(std::move(op), {x}, 2); + std::vector res; + res.emplace_back(Variable::create(expr, 0)); + res.emplace_back(Variable::create(expr, 1)); + return res; +} +/*Computes the difference between two lists of numbers or strings. +Given a list x and a list y, this operation returns a list out that represents all values that are in x but not in y. +The returned list out is sorted in the same order that the numbers appear in x (duplicates are preserved). +This operation also returns a list idx that represents the position of each out element in x. +Arguments: +x: 1-D variable of type Halide_Type_Int. Values to keep. +y: 1-D variable of type Halide_Type_Int. Values to remove. +Returns: +Output out: 1-D variable of type Halide_Type_Int. Values present in x but not in y. +*/ +VARP _SetDiff1D(VARP x, VARP y) { + std::unique_ptr op(new OpT); + op->type = OpType_SetDiff1D; + op->main.type = OpParameter_NONE; + op->main.value = nullptr; + return Variable::create(Expr::create(std::move(op), {x, y})); +} +/*Rearranges blocks of spatial data, into depth. +More specifically, it outputs a copy of the input variable where values from the height and width dimensions are moved to the depth dimension. +The block_size indicates the input block size. +Non-overlapping blocks of size block_size x block_size are rearranged into depth at each location. +The depth of the output variable is block_size * block_size * input_depth. 
+The Y, X coordinates within each block of the input become the high order component of the output channel index. +The input variable's height and width must be divisible by block_size +Args: +input: A variable. +block_size: An int that is >= 2. The size of the spatial block. +Returns: +A variable. Has the same type as input. +*/ +VARP _SpaceToDepth(VARP input, int block_size) { + std::unique_ptr op(new OpT); + op->type = OpType_SpaceToDepth; + auto param = new DepthSpaceParamT; + param->blockSize = block_size; + op->main.type = OpParameter_DepthSpaceParam; + op->main.value = param; + return Variable::create(Expr::create(std::move(op), {input})); +} + +/*This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks of shape block_shape, +and interleaves these blocks with the "batch" dimension +such that in the output, the spatial dimensions [1, ..., M] correspond to the position within the grid, +and the batch dimension combines both the position within a spatial block and the original batch position. +Prior to division into blocks, the spatial dimensions of the input are optionally zero padded according to paddings. +See below for a precise description. +Args: +input: A variable. must be 4-D with NC4HW4 format. N-D with shape input_shape = [batch] + spatial_shape + remaining_shape, where spatial_shape has M dimensions. +block_shape: A variable. Must be one of the following types: int32, int64. 1-D with shape [M], all values must be >= 1. +paddings: A variable. Must be one of the following types: int32, int64. 2-D with shape [M, 2], all values must be >= 0. paddings[i] = [pad_start, pad_end] specifies the padding for input dimension i + 1, which corresponds to spatial dimension i. It is required that block_shape[i] divides input_shape[i + 1] + pad_start + pad_end. +Returns: +A variable. Has the same type as input. 
+*/ +VARP _SpaceToBatchND(VARP input, VARP block_shape, VARP paddings) { + std::unique_ptr op(new OpT); + std::unique_ptr blob_blockShape(new BlobT); + std::unique_ptr blob_paddings(new BlobT); + op->type = OpType_SpaceToBatchND; + auto param = new SpaceBatchT; + auto info_block_shape = block_shape->getInfo(); + auto info_paddings = paddings->getInfo(); + MNN_ASSERT(info_block_shape != nullptr); + MNN_ASSERT(info_paddings != nullptr); + MNN_ASSERT(halide_type_int == info_block_shape->type.code); + MNN_ASSERT(halide_type_int == info_paddings->type.code); + + blob_blockShape->dims = info_block_shape->dim; + blob_blockShape->dataFormat = (MNN::MNN_DATA_FORMAT)Utils::convertFormat(info_block_shape->order); + blob_blockShape->dataType = (MNN::DataType)Utils::convertDataType(info_block_shape->type); + auto data_block_shape = block_shape->readMap(); + for (int i=0; isize; i++) + { + blob_blockShape->int32s.emplace_back(data_block_shape[i]); + } + blob_paddings->dims = info_paddings->dim; + blob_paddings->dataFormat = (MNN::MNN_DATA_FORMAT)Utils::convertFormat(info_paddings->order); + blob_paddings->dataType = (MNN::DataType)Utils::convertDataType(info_paddings->type); + auto data_paddings = paddings->readMap(); + for (int i=0; isize; i++) + { + blob_paddings->int32s.emplace_back(data_paddings[i]); + } + param->blockShape = std::move(blob_blockShape); + param->padding = std::move(blob_paddings); + op->main.type = OpParameter_SpaceBatch; + op->main.value = param; + return Variable::create(Expr::create(std::move(op), {input})); +} +/*Creates a variable with all elements set to zero. +Args: +input: A variable. +Returns: +A variable with all elements set to zero. +*/ + +VARP _ZerosLike(VARP input) { + std::unique_ptr op(new OpT); + op->type = OpType_ZerosLike; + op->main.type = OpParameter_NONE; + op->main.value = nullptr; + return Variable::create(Expr::create(std::move(op), {input})); +} +/*Unpacks the given dimension of a rank-R tensor into rank-(R-1) variable. 
+For example, given a variable of shape (A, B, C, D); +If axis == 0 then the i'th variable in output is the slice value[i, :, :, :] and each variable in output will have shape (B, C, D). +(Note that the dimension unpacked along is gone, unlike split). +If axis == 1 then the i'th variable in output is the slice value[:, i, :, :] and each variable in output will have shape (A, C, D). +Args: +value: A rank R > 0 variable to be unstacked. +num: An int. The length of the dimension axis. Automatically inferred if None (the default). +axis: An int. The axis to unstack along. Defaults to the first dimension. Negative values wrap around, so the valid range is [-R, R). +Returns: +The list of variable objects unstacked from value. +*/ +std::vector _Unstack(VARP value, int axis) { + std::unique_ptr op(new OpT); + op->type = OpType_Unpack; + auto info_value = value->getInfo(); + MNN_ASSERT(info_value != nullptr); + auto dims = info_value->dim; + auto dimsize = dims.size(); + MNN_ASSERT(dimsize > 1); + axis = axis % dimsize; + if(axis < 0) { + axis += dimsize; + } + auto size = dims[axis]; + MNN_ASSERT(size > 0); + auto axisParam = new AxisT; + axisParam->axis = axis; + op->main.type = OpParameter_Axis; + op->main.value = axisParam; + EXPRP expr = Expr::create(std::move(op), {value}, size); + std::vector res; + for (int i = 0; i < size; ++i) { + res.emplace_back(Variable::create(expr, i)); + } + return res; +} + +/*Returns the rank of a variable. +Returns a 0-D int32 variable representing the rank of input. +Note: The rank of a variable is not the same as the rank of a matrix. +It's the number of indices required to uniquely select each element of the variable. +It's also known as "order", "degree", or "ndims." +Args: +input: A variable. 
+Returns: +A 0-D variable of type Halide_Type_Int +*/ +VARP _Rank(VARP input) { + std::unique_ptr op(new OpT); + op->type = OpType_Rank; + op->main.type = OpParameter_NONE; + op->main.value = nullptr; + return Variable::create(Expr::create(std::move(op), {input})); +} +/*Creates a sequence of numbers. +Args: +start: A 0-D variable (scalar). +limit: A 0-D variable (scalar). +delta: A 0-D variable (scalar). +*/ +VARP _Range(VARP start, VARP limit, VARP delta) { + std::unique_ptr op(new OpT); + op->type = OpType_Range; + auto rangeParam = new RangeT; + rangeParam->Tidx = (MNN::DataType)Utils::convertDataType(start->getInfo()->type); + op->main.type = OpParameter_Range; + op->main.value = rangeParam; + return Variable::create(Expr::create(std::move(op), {start, limit, delta})); +} + + +VARP _Interp(VARPS xs, float widthScale, float heightScale, int outputWidth, int outputHeight, int resizeType, bool alignCorners) { + std::unique_ptr interp(new OpT); + interp->type = OpType_Interp; + auto param = new InterpT; + param->widthScale = widthScale; + param->heightScale = heightScale; + param->outputWidth = outputWidth; + param->outputHeight = outputHeight; + param->resizeType = resizeType; + param->alignCorners = alignCorners; + interp->main.value = param; + interp->main.type = OpParameter_Interp; + return Variable::create(Expr::create(std::move(interp), xs)); +} + +} // namespace Express +} // namespace MNN diff --git a/express/source/Optimizer.cpp b/express/Optimizer.cpp similarity index 60% rename from express/source/Optimizer.cpp rename to express/Optimizer.cpp index 3269b7da9..455b678e7 100644 --- a/express/source/Optimizer.cpp +++ b/express/Optimizer.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Optimizer.hpp" -#include "optimizer/MergeOptimizer.hpp" -#include "Backend.hpp" +#include +#include "MergeOptimizer.hpp" +#include "core/Backend.hpp" namespace MNN { namespace Express { Optimizer::Parameters::Parameters(int n) { @@ -21,16 
+21,26 @@ Optimizer::Parameters::~Parameters() { delete[] mValue; } } -std::shared_ptr Optimizer::create(Device device) { +std::shared_ptr Optimizer::create(Config config) { + const int numThread = config.numThread; + auto forwardType = config.forwardType; + if (forwardType != MNN_FORWARD_ALL) { + if (MNNGetExtraBackendCreator(forwardType) == nullptr) { + return nullptr; + } + return std::shared_ptr(new MergeOptimizer(config.forwardType, numThread, nullptr)); + } + + auto device = config.device; if (CPU == device) { - return std::shared_ptr(new MergeOptimizer(MNN_FORWARD_CPU, 4, nullptr)); + return std::shared_ptr(new MergeOptimizer(MNN_FORWARD_CPU, numThread, nullptr)); } if (GPU == device) { std::vector types {MNN_FORWARD_METAL, MNN_FORWARD_OPENCL, MNN_FORWARD_VULKAN, MNN_FORWARD_OPENGL}; for (auto type : types) { auto creator = MNNGetExtraBackendCreator(type); if (nullptr != creator) { - return std::shared_ptr(new MergeOptimizer(type, 4, nullptr)); + return std::shared_ptr(new MergeOptimizer(type, numThread, nullptr)); } } } diff --git a/express/source/Utils.cpp b/express/Utils.cpp similarity index 57% rename from express/source/Utils.cpp rename to express/Utils.cpp index e55dbe263..ab4833ffb 100644 --- a/express/source/Utils.cpp +++ b/express/Utils.cpp @@ -9,13 +9,17 @@ #include "Utils.hpp" #include #include "MNN_generated.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" namespace MNN { namespace Express { +#define CONVERT(src, dst, f)\ +if (f == src) return dst; + int Utils::convertFormat(Dimensionformat format) { - static std::map gMap = { - {NCHW, MNN_DATA_FORMAT_NCHW}, {NHWC, MNN_DATA_FORMAT_NHWC}, {NC4HW4, MNN_DATA_FORMAT_NC4HW4}}; - return gMap[format]; + CONVERT(NCHW, MNN_DATA_FORMAT_NCHW, format); + CONVERT(NHWC, MNN_DATA_FORMAT_NHWC, format); + CONVERT(NC4HW4, MNN_DATA_FORMAT_NC4HW4, format); + return MNN_DATA_FORMAT_UNKNOWN; } int Utils::convertDataType(halide_type_t type) { @@ -33,23 +37,20 @@ int Utils::convertDataType(halide_type_t 
type) { } return DataType_DT_INVALID; } -static Express::Dimensionformat _convertFormat(MNN_DATA_FORMAT format) { - static std::map gMap = { - {MNN_DATA_FORMAT_NCHW, Express::NCHW}, - {MNN_DATA_FORMAT_NHWC, Express::NHWC}, - {MNN_DATA_FORMAT_NC4HW4, Express::NC4HW4}, - }; - return gMap[format]; +halide_type_t Utils::revertDataType(int dataType) { + CONVERT(DataType_DT_FLOAT, halide_type_of(), dataType); + CONVERT(DataType_DT_INT32, halide_type_of(), dataType); + CONVERT(DataType_DT_INT64, halide_type_of(), dataType); + CONVERT(DataType_DT_UINT8, halide_type_of(), dataType); + CONVERT(DataType_DT_INT8, halide_type_of(), dataType); + return halide_type_of(); } -static MNN_DATA_FORMAT _revertFormat(Express::Dimensionformat format) { - static std::map gRevertMap = { - {Express::NCHW, MNN_DATA_FORMAT_NCHW}, - {Express::NHWC, MNN_DATA_FORMAT_NHWC}, - {Express::NC4HW4, MNN_DATA_FORMAT_NC4HW4}, - }; - return gRevertMap[format]; +Express::Dimensionformat Utils::revertFormat(int format) { + CONVERT(MNN_DATA_FORMAT_NCHW, Express::NCHW, format); + CONVERT(MNN_DATA_FORMAT_NHWC, Express::NHWC, format); + CONVERT(MNN_DATA_FORMAT_NC4HW4, Express::NC4HW4, format); + return NCHW; } - void Utils::copyInfoToTensor(Tensor* dest, const Variable::Info* source) { if (nullptr == source) { dest->buffer().dimensions = 0; @@ -61,15 +62,16 @@ void Utils::copyInfoToTensor(Tensor* dest, const Variable::Info* source) { dest->buffer().dimensions = (int)source->dim.size(); dest->buffer().type = source->type; dest->buffer().host = (uint8_t*)source->ptr; - TensorUtils::getDescribe(dest)->dimensionFormat = _revertFormat(source->order); + TensorUtils::getDescribe(dest)->dimensionFormat = (MNN_DATA_FORMAT)Utils::convertFormat(source->order); TensorUtils::setLinearLayout(dest); } void Utils::copyTensorToInfo(Variable::Info* shape, const Tensor* tensor) { shape->type = tensor->getType(); shape->dim = tensor->shape(); shape->size = tensor->elementSize(); - shape->order = 
_convertFormat(TensorUtils::getDescribe(tensor)->dimensionFormat); + shape->order = Utils::revertFormat(TensorUtils::getDescribe(tensor)->dimensionFormat); shape->ptr = tensor->host(); } + } // namespace Express } // namespace MNN diff --git a/express/source/Utils.hpp b/express/Utils.hpp similarity index 73% rename from express/source/Utils.hpp rename to express/Utils.hpp index dee5bd268..110dc3a09 100644 --- a/express/source/Utils.hpp +++ b/express/Utils.hpp @@ -6,8 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Expr.hpp" -#include "Tensor.hpp" +#include +#include + namespace MNN { namespace Express { class Utils { @@ -16,6 +17,8 @@ class Utils { static void copyTensorToInfo(Variable::Info* dest, const Tensor* source); static int convertDataType(halide_type_t type); static int convertFormat(Dimensionformat format); + static Express::Dimensionformat revertFormat(int format); + static halide_type_t revertDataType(int dataType); }; } // namespace Express } // namespace MNN diff --git a/express/include/MathOp.hpp b/express/include/MathOp.hpp deleted file mode 100644 index 7ee9a6d93..000000000 --- a/express/include/MathOp.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// -// MathOp.hpp -// MNN -// -// Created by MNN on 2019/06/27. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -namespace MNN { -namespace Express { -MNN_EXPRESS_PUBLIC VARP _Cast(VARP a, halide_type_t srcType, halide_type_t dstType); - -MNN_EXPRESS_PUBLIC VARP _Mul(VARP x, VARP y); -MNN_EXPRESS_PUBLIC VARP _Sub(VARP x, VARP y); -MNN_EXPRESS_PUBLIC VARP _Add(VARP x, VARP y); -MNN_EXPRESS_PUBLIC VARP _Div(VARP x, VARP y); -MNN_EXPRESS_PUBLIC VARP _Min(VARP x, VARP y); -MNN_EXPRESS_PUBLIC VARP _Max(VARP x, VARP y); -MNN_EXPRESS_PUBLIC VARP _Log(VARP x); -MNN_EXPRESS_PUBLIC VARP _Neg(VARP x); -MNN_EXPRESS_PUBLIC VARP _Rsqrt(VARP x); -MNN_EXPRESS_PUBLIC VARP _Tanh(VARP x); -MNN_EXPRESS_PUBLIC VARP _Exp(VARP x); -MNN_EXPRESS_PUBLIC VARP _Square(VARP x); -MNN_EXPRESS_PUBLIC VARP _Sigmoid(VARP x); - -MNN_EXPRESS_PUBLIC VARP _ReduceMin(VARP x, INTS dim, bool keepDim = false); -MNN_EXPRESS_PUBLIC VARP _ReduceMax(VARP x, INTS dim, bool keepDim = false); -MNN_EXPRESS_PUBLIC VARP _Sum(VARP x, INTS dim, bool keepDim = false); -MNN_EXPRESS_PUBLIC VARP _Mean(VARP x, INTS dim, bool keepDim = false); -MNN_EXPRESS_PUBLIC VARP _Prod(VARP x, INTS dim, bool keepDim = false); -MNN_EXPRESS_PUBLIC VARP _Any(VARP x, INTS dim, bool keepDim = false); -MNN_EXPRESS_PUBLIC VARP _All(VARP x, INTS dim, bool keepDim = false); -MNN_EXPRESS_PUBLIC VARP _MatMul(VARP a, VARP b, bool tranposeA = false, bool tranposeB = false); -MNN_EXPRESS_PUBLIC VARP _Normalize(VARP x, int32_t acrossSpatial, int32_t channelShared, float eps, std::vector scale); - -MNN_EXPRESS_PUBLIC VARP _Prod(VARP a, VARP b, std::vector coeff); -MNN_EXPRESS_PUBLIC VARP _Sum(VARP a, VARP b, std::vector coeff); -MNN_EXPRESS_PUBLIC VARP _Max(VARP a, VARP b, std::vector coeff); -MNN_EXPRESS_PUBLIC VARP _Sub(VARP a, VARP b, std::vector coeff); - -}; // namespace Express -}; // namespace MNN diff --git a/express/include/NeuralNetWorkOp.hpp b/express/include/NeuralNetWorkOp.hpp deleted file mode 100644 index 9a3bf21d8..000000000 --- a/express/include/NeuralNetWorkOp.hpp +++ /dev/null @@ -1,69 
+0,0 @@ -// -// NeuralNetWorkOp.hpp -// MNN -// -// Created by MNN on 2019/06/27. -// Copyright © 2018, Alibaba Group Holding Limited -// - -namespace MNN { -namespace Express { -enum PaddingMode {CAFFE, VALID, SAME}; -enum PoolingMode {MAXPOOL, AVEPOOL}; -MNN_EXPRESS_PUBLIC VARP _Input(INTS dims = {}, Dimensionformat format = NC4HW4, halide_type_t type = halide_type_of()); -MNN_EXPRESS_PUBLIC VARP _Clone(VARP source, bool deepCopy=false); - -MNN_EXPRESS_PUBLIC VARP _Const(float value, INTS dims = {}, Dimensionformat format = NHWC); -MNN_EXPRESS_PUBLIC VARP _Const(const void* ptr, INTS dims = {}, Dimensionformat format = NHWC, - halide_type_t type = halide_type_of()); -MNN_EXPRESS_PUBLIC VARP _Conv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1}, - INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); - -MNN_EXPRESS_PUBLIC VARP _Conv(float weight, float bias, VARP x, INTS channel, INTS kernelSize, PaddingMode pad = VALID, - INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1); -MNN_EXPRESS_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, VARP x, INTS channel, INTS kernelSize, - PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); -MNN_EXPRESS_PUBLIC VARP _Deconv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1}, - INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); -MNN_EXPRESS_PUBLIC VARP _MaxPool(VARP x, INTS kernel, INTS stride, PaddingMode pad = VALID, INTS pads= {0, 0}); -MNN_EXPRESS_PUBLIC VARP _AvePool(VARP x, INTS kernel, INTS stride, PaddingMode pad = VALID, INTS pads= {0, 0}); -MNN_EXPRESS_PUBLIC VARP _Reshape(VARP x, INTS dim, Dimensionformat format); -MNN_EXPRESS_PUBLIC VARP _Reshape(VARP x, VARP shape); -MNN_EXPRESS_PUBLIC VARP _Scale(VARP x, int channels, std::vector&& scales, std::vector&& bias); - -MNN_EXPRESS_PUBLIC VARP _Relu(VARP x, float slope = 0.0f); -MNN_EXPRESS_PUBLIC VARP _Relu6(VARP x); 
-MNN_EXPRESS_PUBLIC VARP _PRelu(VARP x, std::vector &&slopes); -MNN_EXPRESS_PUBLIC VARP _Softmax(VARP x, int axis); -MNN_EXPRESS_PUBLIC std::vector _Split(VARP x, INTS points, int axis); -MNN_EXPRESS_PUBLIC VARP _Slice(VARP x, VARP starts, VARP sizes); -MNN_EXPRESS_PUBLIC VARP _Concat(VARPS xs, int axis); -MNN_EXPRESS_PUBLIC VARP _Convert(VARP x, Dimensionformat dest); -MNN_EXPRESS_PUBLIC VARP _Transpose(VARP x, INTS perm); -MNN_EXPRESS_PUBLIC VARP _Transpose(VARP x, VARP perm); -MNN_EXPRESS_PUBLIC VARP _ChannelShuffle(VARP x, int group); -MNN_EXPRESS_PUBLIC VARP _ChangeInputFormat(VARP x, Dimensionformat requireInput); -MNN_EXPRESS_PUBLIC VARP _Conv2DBackPropFilter(VARP weight, VARP input, VARP inputGrad, PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); -MNN_EXPRESS_PUBLIC VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, INTS stride, PoolingMode type, PaddingMode pad = VALID, INTS pads= {0, 0}); -// FIXME: move the api to Array Ops -MNN_EXPRESS_PUBLIC VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim); -// FIXME: move the api to Image Ops -MNN_EXPRESS_PUBLIC VARP _Crop(VARP x, VARP s, int axis, INTS offset); -MNN_EXPRESS_PUBLIC VARP _Resize(VARP x, float xScale, float yScale); -MNN_EXPRESS_PUBLIC VARP _Pad(VARP x, VARP pads); -MNN_EXPRESS_PUBLIC VARP _ExpandDims(VARP x, int axis); -MNN_EXPRESS_PUBLIC VARP _ExpandDims(VARP x, VARP axis); - -MNN_EXPRESS_PUBLIC VARP _Shape(VARP x); -MNN_EXPRESS_PUBLIC VARP _Pack(VARPS xs, halide_type_t dtype, int axis); -enum InterpolationMethod {BILINEAR, NEAREST}; -MNN_EXPRESS_PUBLIC VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extrapolation, InterpolationMethod method); -MNN_EXPRESS_PUBLIC VARP _Fill(VARP s, VARP v); -MNN_EXPRESS_PUBLIC VARP _Tile(VARP x, VARP mul); -MNN_EXPRESS_PUBLIC VARP _Gather(VARP embedding, VARP indices); -MNN_EXPRESS_PUBLIC VARP _GatherV2(VARP params, VARP indices, VARP axis = 
nullptr); -MNN_EXPRESS_PUBLIC VARP _Squeeze(VARP x, INTS axes = {}); -MNN_EXPRESS_PUBLIC VARP _Unsqueeze(VARP x, INTS axes = {}); - -} // namespace Express -} // namespace MNN diff --git a/express/source/Expr.cpp b/express/source/Expr.cpp deleted file mode 100644 index be8f45c01..000000000 --- a/express/source/Expr.cpp +++ /dev/null @@ -1,725 +0,0 @@ -// -// Expr.cpp -// MNN -// -// Created by MNN on 2019/06/10. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#define FLATBUFFERS_PREFER_PRINTF -#include "Expr.hpp" -#include -#include "FileLoader.hpp" -#include "InsideExpr.hpp" -#include "Utils.hpp" -#include "flatbuffers/util.h" -#include "MNN_generated.h" -#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" - -//#define MNN_EXPRESS_ERROR_REPORT -static inline std::string numberToString(int index) { - return flatbuffers::NumToString(index); -} - -namespace MNN { -namespace Express { - -struct Expr::Inside { - std::vector> mOutputInfosContent; - std::vector mInputInfos; - std::vector mOutputInfos; - - std::shared_ptr mSolution; - Solution::Requirement mReq; -}; -Expr::Expr(int outputSize) : mOutputSize(outputSize) { - mInside.reset(new Inside); - mInside->mOutputInfos.resize(outputSize); - mInside->mOutputInfosContent.resize(outputSize); - for (int i = 0; i < mInside->mOutputInfosContent.size(); ++i) { - mInside->mOutputInfosContent[i].reset(new Variable::Info); - mInside->mOutputInfos[i] = mInside->mOutputInfosContent[i].get(); - } -} - -Expr::~Expr() { - if (nullptr != mExtraBuffer) { - delete mExtraBuffer; - } - mInside.reset(); -} -void Expr::set(const OpT* op) { - MNN_ASSERT(nullptr != op); - if (nullptr != mExtraBuffer) { - delete mExtraBuffer; - mExtraBuffer = nullptr; - } - flatbuffers::FlatBufferBuilder builder; - auto offset = Op::Pack(builder, op); - builder.Finish(offset); - mExtraBuffer = new char[builder.GetSize()]; - ::memcpy(mExtraBuffer, builder.GetBufferPointer(), builder.GetSize()); - mOp = flatbuffers::GetMutableRoot(mExtraBuffer); - 
mInside->mSolution.reset(); -} - -EXPRP Expr::create(const OpT* op, std::vector inputs, int outputSize, std::shared_ptr exe) { - if (exe == nullptr && inputs.size() > 0) { - exe = inputs[0]->expr().first->mExecutor; - } - if (exe == nullptr) { - exe = std::shared_ptr(new DefaultSolutionCreator); - } - EXPRP expr(new Expr(outputSize)); - expr->set(op); - expr->mExecutor = exe; - expr->mInputs = inputs; - for (int i=0; imTo.emplace_back(std::make_pair(i, WeakEXPRP(expr))); - } - return expr; -} -void Expr::setName(const std::string& name) { - mName = name; -} -Solution* Expr::inside() { - if (mInside->mSolution == nullptr) { - mInside->mSolution.reset(mExecutor->onCreate(mOp, (int)mInputs.size(), mOutputSize)); - if (nullptr != mInside->mSolution) { - mInside->mReq = mInside->mSolution->onGetRequirement(); - } - } - return mInside->mSolution.get(); -} -const Variable::Info* Expr::outputInfo(int index) const { - return mInside->mOutputInfos[index]; -} - -bool Expr::requireInfo() { - if (!mInfoDirty) { - return true; - } - if (!mValid) { - return false; - } - bool ready = true; - auto insidePtr = inside(); - if (nullptr == insidePtr) { - mValid = false; - return false; - } - mInside->mInputInfos.resize(mInputs.size()); - for (int i = 0; i < mInputs.size(); ++i) { - if (nullptr == mInputs[i] || nullptr == mInputs[i]->mFrom) { - // The Variable is set nullptr by api - return false; - } - mInside->mInputInfos[i] = mInputs[i]->getInfo(); - if (nullptr == mInside->mInputInfos[i] && (!mInside->mReq.supportError[i])) { -#ifdef MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("%s, %d input not ready\n", mName.c_str(), i); -#endif - mValid = false; - return false; - } - } - for (int i = 0; i < mInputs.size(); ++i) { - auto& v = mInputs[i]; - if (mInside->mReq.shapeNeedContent[i]) { - auto res = v->expr().first->requireCompute(); - if (!res) { -#ifdef MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("%s, Error for compute shape %d\n", mName.c_str(), i); -#endif - ready = false; - mValid = false; - 
break; - } - } - } - if (!ready) { - return false; - } - //MNN_PRINT("Info %s, %p Start\n", mName.c_str(), this); - auto res = insidePtr->onComputeInfo(mInside->mInputInfos, mInside->mOutputInfos); - //MNN_PRINT("Info Compute %s\n", mName.c_str()); - - if (NO_ERROR == res) { - mInfoDirty = false; - } else { - mValid = false; - } - return NO_ERROR == res; -} - -bool Expr::requireCompute() { - if ((!mContentDirty) && mValid) { - return true; - } - if (!mValid) { - return false; - } - //MNN_PRINT("Compute %s, %p Start\n", mName.c_str(), this); - bool res = requireAlloc(); - if (!res) { -#ifdef MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("%s Alloc Error \n", mName.c_str()); -#endif - return false; - } - auto solution = inside(); - - for (int i = 0; i < mInputs.size(); ++i) { - if (mInside->mReq.contentNeedContent[i]) { - auto& input = mInputs[i]; - auto expr = input->expr().first; - res = expr->requireCompute(); - if (!res) { -#ifdef MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("%s compute input %d error , \n", mName.c_str(), i); -#endif - if (!mInside->mReq.supportError[i]) { - mValid = false; - return false; - } - } - } - } - auto code = solution->onComputeContent(mInside->mInputInfos, mInside->mOutputInfos); - //MNN_PRINT("Compute %s, %p End\n", mName.c_str(), this); - res = code == NO_ERROR; - if (!res) { -#ifdef MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("Error for compute %s\n", mName.c_str()); -#endif - mValid = false; - return false; - } - mContentDirty = false; - return true; -} - -bool Expr::requireAlloc() { - if (mAllocated) { - return true; - } - if (!requireInfo()) { - return false; - } - for (int i = 0; i < mInputs.size(); ++i) { - if (mInside->mReq.contentNeedContent[i]) { - auto& input = mInputs[i]; - auto expr = input->expr().first; - auto res = expr->requireAlloc(); - if ((!res) && (!mInside->mReq.supportError[i])) { - mValid = false; - return false; - } - } - } - auto code = inside()->onAlloc(mInside->mInputInfos, mInside->mOutputInfos); - if (NO_ERROR != code) { -#ifdef 
MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("Error for alloc, code = %d \n", code); -#endif - return false; - } - mAllocated = true; - return true; -} - -VARP Variable::create(EXPRP expr, int index) { - VARP res(new Variable(expr, index)); - expr->mOutputs.emplace_back(WeakVARP(res)); - return res; -} -void Variable::setExpr(VARP dst, EXPRP from, int index) { - if (from.get() == dst->mFrom.get() && index == dst->mFromIndex) { - return; - } - if (from.get() != dst->mFrom.get()) { - for (auto iter = dst->mFrom->mOutputs.begin(); iter != dst->mFrom->mOutputs.end(); iter++) { - auto v = iter->lock(); - if (nullptr != v && v.get() == dst.get()) { - dst->mFrom->mOutputs.erase(iter); - break; - } - } - dst->mFrom = from; - if (nullptr != from) { - from->mOutputs.emplace_back(WeakVARP(dst)); - } - } - dst->mFromIndex = index; - std::set worked; - dst->visitOutputs([&](VARP var, int index) { - if (worked.find(var.get()) != worked.end()) { - return false; - } - auto expr = var->mFrom; - worked.insert(var.get()); - expr->mInside->mSolution.reset(); - expr->mInside->mInputInfos.clear(); - expr->mContentDirty = true; - expr->mInfoDirty = true; - expr->mAllocated = false; - return true; - }); -} -void Expr::setInput(EXPRP expr, VARP src, int index) { - MNN_ASSERT(expr->mInputs.size() > index && index >= 0); - if (expr->mInputs[index].get() == src.get()) { - return; - } - auto originVar = expr->mInputs[index]; - for (auto iter = originVar->mTo.begin(); iter != originVar->mTo.end(); iter++) { - auto v = iter->second.lock(); - if (nullptr != v && v.get() == expr.get()) { - originVar->mTo.erase(iter); - break; - } - } - expr->mInputs[index] = src; - if (nullptr != src) { - src->mTo.emplace_back(std::make_pair(index, WeakEXPRP(expr))); - } - expr->mInside->mSolution.reset(); - expr->mInside->mInputInfos.clear(); - expr->mContentDirty = true; - expr->mInfoDirty = true; - expr->mAllocated = false; -} - -void Variable::setName(const std::string& name) { - mName = name; - if 
(mFrom->name().empty()) { - mFrom->setName(name); - } -} - -bool Variable::input(VARP src) { - if (mFrom->get()->type() != OpType_Input) { - MNN_ERROR("Can't input to no-input op\n"); - return false; - } - if (nullptr == src) { - /*Close the Input*/ - visitOutputs([](VARP var, int index) { - auto recurse = var->mFrom->mValid; var->mFrom->mValid = false; - return recurse; - }); - mFrom->mValid = false; - return false; - } - auto info = src->getInfo(); - std::shared_ptr tempInfo; - bool needCopy = true; - if (nullptr == info || 0 == info->size) { - tempInfo.reset(new Variable::Info); - tempInfo->type = halide_type_of(); - info = tempInfo.get(); - needCopy = false; - } - auto dstInfo = getInfo(); - bool needChange = nullptr == dstInfo || info->order != dstInfo->order || info->dim.size() != dstInfo->dim.size(); - if (!needChange) { - for (int i=0; idim.size(); ++i) { - if (dstInfo->dim[i] != info->dim[i]) { - needChange = true; - break; - } - } - } - if (needChange) { - std::unique_ptr inputOp(mFrom->get()->UnPack()); - inputOp->main.AsInput()->dims = info->dim; - inputOp->main.AsInput()->dtype = (MNN::DataType)Utils::convertDataType(info->type); - inputOp->main.AsInput()->dformat = (MNN::MNN_DATA_FORMAT)Utils::convertFormat(info->order); - mFrom->set(inputOp.get()); - mFrom->mAllocated = false; - mFrom->mContentDirty = true; - mFrom->mInfoDirty = true; - mFrom->mValid = true; - mFrom->mInside->mSolution.reset(); - mFrom->mInside->mInputInfos.clear(); - } - if (needCopy) { - auto dstPtr = writeInternal(false); - auto srcPtr = src->readMap(); - if (nullptr == dstPtr || nullptr == srcPtr) { - MNN_ERROR("Alloc memory error or compute src error in Variable::Input\n"); - return false; - } - ::memcpy(dstPtr, srcPtr, info->size * info->type.bytes()); - } - if (needChange) { - visitOutputs([](VARP var, int index) { return var->mFrom->setInfoDirty(); }); - } else { - informDirty(); - } - return true; -} - -void Variable::replace(VARP dst, VARP src) { - if (nullptr == src) { - 
Variable::setExpr(dst, nullptr, 0); - return; - } - Variable::setExpr(dst, src->mFrom, src->mFromIndex); -} - -const Variable::Info* Variable::getInfo() { - if (nullptr == mFrom) { - return nullptr; - } - auto res = mFrom->requireInfo(); - if (!res) { - return nullptr; - } - return mFrom->mInside->mOutputInfos[mFromIndex]; -} - -bool Variable::resize(INTS dims) { - if (mFrom->get()->type() != OpType_Input) { - MNN_ERROR("Can't resize variable not from input\n"); - return false; - } - auto info = getInfo(); - if (nullptr != info && dims.size() == info->dim.size()) { - bool theSame = true; - for (int i=0; idim[i] != dims[i]) { - theSame = false; - break; - } - } - if (theSame) { - return true; - } - } - std::unique_ptr inputOp(mFrom->get()->UnPack()); - inputOp->main.AsInput()->dims = dims; - mFrom->set(inputOp.get()); - mFrom->mAllocated = false; - mFrom->mContentDirty = true; - mFrom->mInfoDirty = true; - mFrom->mValid = true; - mFrom->mInside->mInputInfos.clear(); - - visitOutputs([](VARP var, int index) { return var->mFrom->setInfoDirty(); }); - return true; -} -void Variable::visit(VARP var, const std::function& before, const std::function& after) { - bool next = before(var); - if (!next) { - return; - } - for (int i = 0; i < var->mFrom->inputs().size(); ++i) { - visit(var->mFrom->inputs()[i], before, after); - } - after(var); -} - -void* Variable::readInternal() { - auto res = mFrom->requireCompute(); - if (!res) { - return nullptr; - } - return mFrom->inside()->onMapContent(mFromIndex); -} - -void Variable::informDirty() { - visitOutputs([](VARP var, int index) { - auto expr = var->mFrom; - auto needRecurse = expr->setContentDirty(index); - return needRecurse; - }); -} - -void* Variable::writeInternal(bool inform) { - auto res = mFrom->requireAlloc(); - if (!res) { - return nullptr; - } - if (inform) { - informDirty(); - } - mFrom->mContentDirty = false; - return mFrom->inside()->onMapContent(mFromIndex); -} - -void Variable::unMap() { - 
mFrom->inside()->onUnMapContent(mFromIndex); -} - -void Variable::visitOutputs(const std::function& visit) { - for (auto iter = mTo.begin(); iter != mTo.end();) { - auto expr = iter->second.lock(); - if (nullptr == expr) { - iter = mTo.erase(iter); - continue; - } - bool recurse = false; - for (auto varIter = expr->mOutputs.begin(); varIter != expr->mOutputs.end();) { - auto var = varIter->lock(); - if (nullptr == var) { - varIter = expr->mOutputs.erase(varIter); - continue; - } - recurse = recurse || visit(var, iter->first); - varIter++; - } - if (recurse) { - for (auto varIter = expr->mOutputs.begin(); varIter != expr->mOutputs.end(); varIter++) { - auto var = varIter->lock(); - var->visitOutputs(visit); - } - } - iter++; - } -} -void Expr::setExecutor(std::shared_ptr exe) { - mExecutor = exe; - mInside->mSolution = nullptr; -} -bool Expr::setContentDirty(int inputIndex) { - if (mContentDirty) { - return false; - } - if (nullptr != mInside) { - if (mInside->mReq.shapeNeedContent[inputIndex]) { - for (auto& w : mOutputs) { - auto var = w.lock(); - if (nullptr != var) { - var->visitOutputs([](VARP var, int index) { return var->mFrom->setInfoDirty(); }); - } - } - return setInfoDirty(); - } - if (!mInside->mReq.contentNeedContent[inputIndex]) { - return false; - } - } - mContentDirty = true; - return true; -} -bool Expr::setInfoDirty() { - if (mInfoDirty && mValid) { - //MNN_PRINT("End Info Dirty for %s\n", mName.c_str()); - return false; - } - //MNN_PRINT("Set Info Dirty for %s\n", mName.c_str()); - mInfoDirty = true; - mAllocated = false; - mContentDirty = true; - mValid = true; - return true; -} - -std::vector Variable::load(const char* fileName) { - AUTOTIME; - FileLoader loader(fileName); - if (!loader.valid()) { - MNN_ERROR("Error for open %s\n", fileName); - return {}; - } - loader.read(); - if (!loader.valid()) { - return {}; - } - AutoStorage buffer; - loader.merge(buffer); - if (buffer.get() == nullptr) { - return {}; - } - flatbuffers::Verifier 
verify((const uint8_t*)(buffer.get()), buffer.size()); - if (false == VerifyNetBuffer(verify)) { - MNN_PRINT("Invalidate buffer to create variable\n"); - return {}; - } - std::unique_ptr source(UnPackNet(buffer.get())); - if (nullptr == source) { - return {}; - } - if (source->oplists.empty()) { - MNN_ERROR("Invalid net\n"); - return {}; - } - // FUNC_PRINT(source->oplists.size()); - - auto opSize = source->oplists.size(); - auto tensorCount = source->tensorName.size(); - if (tensorCount == 0) { - tensorCount = source->tensorNumber; - } - std::vector variable; - variable.reserve(tensorCount); - std::map variableMap; - - // Generate All Exprs by order of net - for (int i = 0; i < opSize; ++i) { - std::vector inputs; - auto op = source->oplists[i].get(); - for (int index = 0; index < op->inputIndexes.size(); ++index) { - auto inputIndex = op->inputIndexes[index]; - if (variableMap.find(inputIndex) == variableMap.end()) { - MNN_ERROR("Can't find variable for %s, the graph is error\n", op->name.c_str()); - break; - } - inputs.emplace_back(variableMap[inputIndex]); - } - EXPRP expr = Expr::create(source->oplists[i].get(), inputs, (int)op->outputIndexes.size()); - expr->setName(source->oplists[i]->name); - - for (int index = 0; index < op->outputIndexes.size(); ++index) { - auto outputIndex = op->outputIndexes[index]; - if (variableMap.find(outputIndex) == variableMap.end()) { - auto newVariable = Variable::create(expr, index); - if (source->tensorName.size() > outputIndex) { - newVariable->setName(source->tensorName[outputIndex]); - } - variableMap[outputIndex] = newVariable; - variable.emplace_back(newVariable); - } - } - } - return variable; -} -std::map Variable::loadMap(const char* fileName) { - AUTOTIME; - auto variables = load(fileName); - std::map varMap; - for (auto v : variables) { - varMap[v->name()] = v; - } - return varMap; -} -std::vector Variable::mapToSequence(const std::map& source) { - std::vector outputs; - outputs.reserve(source.size()); - for (auto& 
iter : source) { - outputs.emplace_back(iter.second); - } - return outputs; -} -void Variable::save(const std::vector& vars, NetT* dest) { - auto executeOrder = getExecuteOrder(vars); - dest->tensorName.resize(executeOrder.size()); - std::map, int> varIndex; - for (int i=0; iexpr()] = i; - } - for (int index = 0; index < executeOrder.size(); ++index) { - auto v = executeOrder[index]; - auto expr = v->expr(); - std::shared_ptr _defer(nullptr, [&](void*) { - if (!v->name().empty()) { - dest->tensorName[index] = v->name(); - return; - } - auto name = v->expr().first->name(); - if (v->expr().second != 0) { - name = name + "_" + numberToString(v->expr().second); - } - dest->tensorName[index] = name; - }); - if (expr.first->visited()) { - continue; - } - auto mOp = expr.first->get(); - std::unique_ptr op(mOp->UnPack()); - op->name = expr.first->name(); - op->inputIndexes.resize(expr.first->inputs().size()); - for (int i = 0; i < op->inputIndexes.size(); ++i) { - op->inputIndexes[i] = varIndex[expr.first->inputs()[i]->expr()]; - } - int outputIndex = (int)dest->tensorName.size(); - if (op->name.empty()) { - op->name = EnumNameOpType(op->type) + numberToString(outputIndex); - } - op->outputIndexes.resize(expr.first->outputSize()); - auto exprOutputs = expr.first->outputs(); - for (auto outputVar : exprOutputs) { - auto out = outputVar.lock(); - if (nullptr == out) { - continue; - } - op->outputIndexes[out->mFromIndex] = varIndex[out->expr()]; - } - dest->oplists.emplace_back(std::move(op)); - expr.first->setVisited(true); - } - for (int index = 0; index < executeOrder.size(); ++index) { - executeOrder[index]->expr().first->setVisited(false); - } - -} - -void Variable::save(const std::vector& vars, const char* fileName) { - std::unique_ptr net(new NetT); - save(vars, net.get()); - // FUNC_PRINT(net->oplists.size()); - flatbuffers::FlatBufferBuilder builder(1024); - auto offset = Net::Pack(builder, net.get()); - builder.Finish(offset); - // TODO, use FileWriter instead - 
FILE* f = fopen(fileName, "wb"); - if (nullptr == f) { - MNN_ERROR("Open %s error\n", fileName); - return; - } - static const size_t block = 4096; - size_t totalSize = builder.GetSize(); - size_t blockSize = UP_DIV(totalSize, block); - for (size_t i = 0; i < blockSize; ++i) { - size_t sta = block * i; - size_t fin = std::min(sta + block, totalSize); - if (fin > sta) { - auto realSize = fwrite((const char*)builder.GetBufferPointer() + sta, 1, fin - sta, f); - if (realSize != fin - sta) { - MNN_ERROR("Write %s error\n", fileName); - } - } - } - fclose(f); -} -std::pair, std::map> Variable::getInputAndOutput(const std::map& allVariable) { - std::pair, std::map> res; - for (auto& iter : allVariable) { - auto var = iter.second; - if (var->expr().first->get()->type() == OpType_Input) { - res.first[var->name()] = var; - } - if (var->linkNumber() == 0) { - res.second[var->name()] = var; - } - } - return res; -} - -std::vector Variable::getExecuteOrder(const std::vector& outputs) { - std::vector sequence; - for (auto output : outputs) { - Variable::visit( - output, [](VARP var) { return !var->expr().first->visited(); }, - [&sequence](VARP var) { - //FUNC_PRINT_ALL(var->name().c_str(), s); - for (auto v : var->expr().first->outputs()) { - auto sharedV = v.lock(); - if (nullptr != sharedV) { - sequence.emplace_back(sharedV); - } - } - var->expr().first->setVisited(true); - return true; - }); - } - for (auto var : sequence) { - var->expr().first->setVisited(false); - } - return sequence; -} - -} // namespace Express -} // namespace MNN diff --git a/express/source/InsideExpr.cpp b/express/source/InsideExpr.cpp deleted file mode 100644 index cd7db9cf6..000000000 --- a/express/source/InsideExpr.cpp +++ /dev/null @@ -1,432 +0,0 @@ -// -// InsideExpr.cpp -// MNN -// -// Created by MNN on 2019/06/25. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "InsideExpr.hpp" -#include "Session.hpp" -#include "TensorUtils.hpp" -#include "Utils.hpp" -#include "BasicOptimizer_generated.h" - -namespace MNN { -namespace Express { -struct Command { - std::vector mInputs; - std::vector mOutputs; - std::shared_ptr mExecution; - ErrorCode resize() { - return mExecution->onResize(mInputs, mOutputs); - } - ErrorCode run() { - return mExecution->onExecute(mInputs, mOutputs); - } -}; -static Tensor::DimensionType getDimType(const Tensor* origin) { - auto dimformat = TensorUtils::getDescribe(origin)->dimensionFormat; - switch (dimformat) { - case MNN_DATA_FORMAT_NHWC: - return Tensor::TENSORFLOW; - case MNN_DATA_FORMAT_NCHW: - return Tensor::CAFFE; - case MNN_DATA_FORMAT_NC4HW4: - return Tensor::CAFFE_C4; - default: - break; - } - return Tensor::CAFFE; -} -class MergeExpr : public Solution { -public: - MergeExpr(const Optimizer::Merge* merge, int inputSize, int outputSize) : Solution(inputSize, outputSize) { - SizeComputerSuite::init(); - MNN_ASSERT(nullptr != merge); - MNN_ASSERT(nullptr != merge->backend()); - MNN_ASSERT(nullptr != merge->oplists()); - MNN_ASSERT(nullptr != merge->outputIndexes()); - - //Create tensors - Schedule::ScheduleInfo schedule; - std::vector pipelineInfos; - schedule.allTensors.resize(merge->tensorNumber()); - for (int i=0; itensorNumber(); ++i) { - schedule.allTensors[i].second.reset(new Tensor); - } - pipelineInfos.resize(merge->oplists()->size()); - for (int i = 0; i < merge->oplists()->size(); ++i) { - auto& pipelineInfo = pipelineInfos[i]; - auto op = merge->oplists()->GetAs(i); - if (nullptr != op->inputIndexes()) { - auto data = op->inputIndexes()->data(); - pipelineInfo.inputs.resize(op->inputIndexes()->size()); - for (int j = 0; j < op->inputIndexes()->size(); ++j) { - auto index = data[j]; - schedule.allTensors[index].first += 1; - pipelineInfo.inputs[j] = schedule.allTensors[index].second.get(); - } - } - if (nullptr != 
op->outputIndexes()) { - auto data = op->outputIndexes()->data(); - pipelineInfo.outputs.resize(op->outputIndexes()->size()); - for (int j = 0; j < op->outputIndexes()->size(); ++j) { - auto index = data[j]; - pipelineInfo.outputs[j] = schedule.allTensors[index].second.get(); - } - } - pipelineInfo.op = op; - } - mOutputs.resize(merge->outputIndexes()->size()); - for (int i=0; ioutputIndexes()->size(); ++i) { - schedule.allTensors[merge->outputIndexes()->data()[i]].first += 1; - mOutputs[i].first = schedule.allTensors[merge->outputIndexes()->data()[i]].second.get(); - } - if (nullptr != merge->inputIndexes()) { - mInputs.resize(merge->inputIndexes()->size()); - for (int i=0; iinputIndexes()->size(); ++i) { - mInputs[i].first = schedule.allTensors[merge->inputIndexes()->data()[i]].second.get(); - mInputs[i].second.reset(new Tensor); - } - } - //Create Backend - auto backendInfo = merge->backend(); - auto creator = MNNGetExtraBackendCreator((MNNForwardType)backendInfo->type()); - if (nullptr == creator) { - mValid = false; - MNN_ERROR("Get Backend Creator Error\n"); - return; - } - Backend::Info info; - info.type = (MNNForwardType)backendInfo->type(); - info.numThread = backendInfo->numberThread(); - info.mode = Backend::Info::INDIRECT; - BackendConfig backendConfig; - backendConfig.memory = (BackendConfig::MemoryMode)backendInfo->memroy(); - backendConfig.power = (BackendConfig::PowerMode)backendInfo->power(); - backendConfig.precision = (BackendConfig::PrecisionMode)backendInfo->precision(); - info.user = &backendConfig; - creator->onValid(info); - mDirect = info.mode == Backend::Info::DIRECT; - schedule.pipelineInfo.emplace_back(std::make_pair(info, pipelineInfos)); - mSession.reset(new Session(schedule)); - } - - ~ MergeExpr () { - //Do nothing - } - virtual Requirement onGetRequirement() const override { - auto size = mInputSize; - Solution::Requirement req; - req.contentNeedContent.resize(size); - req.shapeNeedContent.resize(size); - 
req.supportError.resize(size); - for (int i = 0; i < size; ++i) { - req.contentNeedContent[i] = true; - req.shapeNeedContent[i] = false; - req.supportError[i] = false; - } - return req; - } - virtual ErrorCode onComputeInfo(const std::vector& inputs, - const std::vector& outputs) override { - MNN_ASSERT(outputs.size() == mOutputs.size()); - MNN_ASSERT(inputs.size() == mInputs.size()); - bool needResize = mSession->getNeedResize(); - if (!needResize) { - for (int i=0; idim.size() != check->dimensions()) { - needResize = true; - break; - } - for (int d=0; ddim.size(); ++d) { - if (src->dim[d] != check->length(d)) { - needResize = true; - break; - } - } - if (needResize) { - break; - } - } - } - if (needResize) { - for (int i=0; isetNeedResize(); - auto code = mSession->resize(); - if (NO_ERROR != code) { - return code; - } - } - for (int i=0; i& inputs, - const std::vector& outputs) override { - for (int i=0; ibuffer().host = (uint8_t*)src->ptr; - } - return NO_ERROR; - } - virtual ErrorCode onComputeContent(const std::vector& inputs, - const std::vector& outputs) override { - for (auto& input : mInputs) { - input.first->copyFromHostTensor(input.second.get()); - } - auto code = mSession->run(); - if (NO_ERROR != code) { - return code; - } - for (auto& tensor : mOutputs) { - tensor.first->copyToHostTensor(tensor.second.get()); - } - return NO_ERROR; - } - - // Map output's content to host - virtual void* onMapContent(int index) override { - return mOutputs[index].second->host(); - } - virtual void onUnMapContent(int index) override { - return; - } - - bool valid() const {return mValid;} -private: - std::shared_ptr mSession; - std::vector>> mInputs; - std::vector>> mOutputs; - bool mValid = true; - bool mDirect = true; -}; -class InsideExpr : public Solution { -public: - InsideExpr(std::shared_ptr bn, const Op* op, int inputSize, int outputSize); - ~InsideExpr(); - - virtual ErrorCode onComputeInfo(const std::vector& inputs, - const std::vector& outputs) override; - 
virtual ErrorCode onAlloc(const std::vector& inputs, - const std::vector& outputs) override; - virtual ErrorCode onComputeContent(const std::vector& inputs, - const std::vector& outputs) override; - virtual Solution::Requirement onGetRequirement() const override; - - // Map output's content to host - virtual void* onMapContent(int index) override; - virtual void onUnMapContent(int index) override; - -private: - void _makeInfo(); - - std::vector> mOutputs; - std::vector> mInputs; - std::shared_ptr mCommand; - std::shared_ptr mBackend; - const Op* mOp; -}; - -InsideExpr::InsideExpr(std::shared_ptr bn, const Op* op, int inputSize, int outputSize) - : Solution(inputSize, outputSize) { - MNN_ASSERT(nullptr != bn); - SizeComputerSuite::init(); - mOp = op; - mOutputs.resize(mOutputSize); - for (auto& v : mOutputs) { - v.reset(new Tensor); - } - mBackend = bn; -} -InsideExpr::~InsideExpr() { - for (auto& v : mOutputs) { - if (v->host() != nullptr) { - mBackend->onReleaseBuffer(v.get(), Backend::STATIC); - } - } -} - -void InsideExpr::_makeInfo() { - if (nullptr == mCommand) { - mCommand.reset(new Command); - mCommand->mOutputs.resize(mOutputs.size()); - for (int i = 0; i < mOutputs.size(); ++i) { - mCommand->mOutputs[i] = mOutputs[i].get(); - } - mCommand->mInputs.resize(mInputSize); - mInputs.resize(mInputSize); - for (int i = 0; i < mInputSize; ++i) { - mInputs[i].reset(new Tensor); - mCommand->mInputs[i] = mInputs[i].get(); - } - } -} -Solution::Requirement InsideExpr::onGetRequirement() const { - Solution::Requirement req; - auto op = mOp; - req.contentNeedContent.resize(mInputSize); - req.shapeNeedContent.resize(mInputSize); - req.supportError.resize(mInputSize); - for (int i = 0; i < mInputSize; ++i) { - req.contentNeedContent[i] = SizeComputer::opNeedContent(op->type(), i); - req.shapeNeedContent[i] = false; - if (op->type() != OpType_Concat) { - req.supportError[i] = false; - } else { - req.supportError[i] = true; - } - } - auto needIndexId = 
SizeComputer::needInputContent(mOp); - for (auto index : needIndexId) { - if (index < req.shapeNeedContent.size()) { - req.shapeNeedContent[index] = true; - } - } - return req; -} - -ErrorCode InsideExpr::onComputeInfo(const std::vector& inputs, - const std::vector& outputs) { - _makeInfo(); - auto op = mOp; - // TODO Support Every Op for user defined shape - if (op->type() == OpType_Input) { - auto inputParm = op->main_as_Input(); - auto output = mCommand->mOutputs[0]; - if (nullptr != inputParm->dims()) { - output->buffer().dimensions = inputParm->dims()->size(); - for (int i = 0; i < output->dimensions(); ++i) { - auto dim = inputParm->dims()->data()[i]; - if (-1 == dim && 0 == i) { - dim = 1; - } - if (0 > dim) { -#ifdef MNN_DEBUG_INPUT - MNN_ERROR("The Input %s is not ready: order=%d, pos=%d, dim=%d\n", mOp->name()->c_str(), - inputParm->dformat(), i, dim); -#endif - return COMPUTE_SIZE_ERROR; - } - output->setLength(i, dim); - } - } else { - output->buffer().dimensions = 0; - } - output->setType(inputParm->dtype()); - TensorUtils::getDescribe(output)->dimensionFormat = inputParm->dformat(); - auto shape = outputs[0]; - Utils::copyTensorToInfo(shape, output); - return NO_ERROR; - } - - MNN_ASSERT(inputs.size() == mInputs.size()); - for (int i = 0; i < mInputs.size(); ++i) { - auto tensor = mInputs[i]; - Utils::copyInfoToTensor(tensor.get(), inputs[i]); - } - bool res = SizeComputer::computeOutputSize(op, mCommand->mInputs, mCommand->mOutputs); - if (!res) { - // Compute Error -#ifdef MNN_EXPRESS_ERROR_REPORT - FUNC_PRINT(op->type()); -#endif - return COMPUTE_SIZE_ERROR; - } - for (int i = 0; i < mOutputs.size(); ++i) { - auto tensor = mCommand->mOutputs[i]; - for (int j = 0; j < tensor->dimensions(); ++j) { - if (tensor->length(j) <= 0) { - auto name = op->name()->str(); -#ifdef MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("Error to compute shape for %s\n", op->name()->c_str()); -#endif - return COMPUTE_SIZE_ERROR; - } - } - auto shape = outputs[i]; - 
Utils::copyTensorToInfo(shape, tensor); - } - return NO_ERROR; -} -void* InsideExpr::onMapContent(int index) { - return mOutputs[index]->host(); -} -void InsideExpr::onUnMapContent(int index) { - // Do nothing -} -ErrorCode InsideExpr::onAlloc(const std::vector& inputs, - const std::vector& outputs) { - for (auto& output : mOutputs) { - if (output->host() != nullptr) { - mBackend->onReleaseBuffer(output.get(), Backend::STATIC); - output->buffer().host = nullptr; - } - TensorUtils::setLinearLayout(output.get()); - auto res = mBackend->onAcquireBuffer(output.get(), Backend::STATIC); - if (!res) { - return OUT_OF_MEMORY; - } - } - for (int i = 0; i < outputs.size(); ++i) { - outputs[i]->ptr = mOutputs[i]->host(); - } - auto op = mOp; - if (op->type() == OpType_Input) { - return NO_ERROR; - } - if (nullptr == mCommand->mExecution) { - mCommand->mExecution.reset(mBackend->onCreate(mCommand->mInputs, mCommand->mOutputs, op)); - if (nullptr == mCommand->mExecution) { - return NOT_SUPPORT; - } - } - for (int i = 0; i < mInputs.size(); ++i) { - if (nullptr != inputs[i]) { - mInputs[i]->buffer().host = (uint8_t*)inputs[i]->ptr; - } - } - mCommand->mExecution->onResize(mCommand->mInputs, mCommand->mOutputs); - return NO_ERROR; -} -ErrorCode InsideExpr::onComputeContent(const std::vector& inputs, -const std::vector& outputs) { - auto op = mOp; - if (op->type() == OpType_Input) { - return INPUT_DATA_ERROR; - } - auto code = mCommand->mExecution->onExecute(mCommand->mInputs, mCommand->mOutputs); - return code; -} -DefaultSolutionCreator::DefaultSolutionCreator() { - auto factory = MNNGetExtraBackendCreator(MNN_FORWARD_CPU); - Backend::Info info; - info.numThread = 1; - info.type = MNN_FORWARD_CPU; - mBackend.reset(factory->onCreate(info)); -} -Solution* DefaultSolutionCreator::onCreate(const Op* op, int inputSize, int outputSize) { - if (OpType_Extra != op->type()) { - return new InsideExpr(mBackend, op, inputSize, outputSize); - } - if (nullptr != op->main_as_Extra()) { - if 
(op->main_as_Extra()->type()->str() == "Session") { - auto blob = op->main_as_Extra()->info(); - auto merge = flatbuffers::GetRoot(blob->data()); - return new MergeExpr(merge, inputSize, outputSize); - } - } - return nullptr; -} -}; // namespace Express -}; // namespace MNN diff --git a/express/source/InsideExpr.hpp b/express/source/InsideExpr.hpp deleted file mode 100644 index a238a01d2..000000000 --- a/express/source/InsideExpr.hpp +++ /dev/null @@ -1,28 +0,0 @@ -// -// InsideExpr.hpp -// MNN -// -// Created by MNN on 2019/06/25. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef InsideExpr_hpp -#define InsideExpr_hpp - -#include "Solution.hpp" -namespace MNN { -class Backend; -namespace Express { -class DefaultSolutionCreator : public Executor { -public: - DefaultSolutionCreator(); - virtual ~DefaultSolutionCreator() = default; - virtual Solution* onCreate(const Op* op, int inputSize, int outputSize) override; - -private: - std::shared_ptr mBackend; -}; -}; // namespace Express -}; // namespace MNN - -#endif /* InsideExpr_hpp */ diff --git a/express/source/MathOp.cpp b/express/source/MathOp.cpp deleted file mode 100644 index 3e191b47a..000000000 --- a/express/source/MathOp.cpp +++ /dev/null @@ -1,184 +0,0 @@ -// -// MathOp.cpp -// MNN -// -// Created by MNN on 2019/06/27. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include -#include -#include -#include "ExprCreator.hpp" -#include "MNNDefine.h" -#include "MNN_generated.h" - -namespace MNN { -namespace Express { -static DataType _convertDataType(halide_type_t type) { - if (type.code == halide_type_float) { - return DataType_DT_FLOAT; - } - if (type.code == halide_type_uint && type.bits == 8) { - return DataType_DT_UINT8; - } - if (type.code == halide_type_int && type.bits == 8) { - return DataType_DT_INT8; - } - if (type.code == halide_type_int && type.bits == 32) { - return DataType_DT_INT32; - } - return DataType_DT_INVALID; -} -VARP _Cast(VARP a, halide_type_t srcType, halide_type_t dstType) { - std::unique_ptr op(new OpT); - op->main.type = OpParameter_CastParam; - op->type = OpType_Cast; - op->main.value = new CastParamT; - op->main.AsCastParam()->srcT = _convertDataType(srcType); - op->main.AsCastParam()->dstT = _convertDataType(dstType); - return (Variable::create(Expr::create(std::move(op), {a}))); -} - -static VARP _Binary(VARP x, VARP y, BinaryOpOperation operation) { - std::unique_ptr op(new OpT); - op->main.type = OpParameter_BinaryOp; - op->type = OpType_BinaryOp; - op->main.value = new BinaryOpT; - op->main.AsBinaryOp()->opType = operation; - op->main.AsBinaryOp()->T = DataType_DT_FLOAT; - return (Variable::create(Expr::create(op.get(), {x, y}))); -} -static VARP _Unary(VARP x, UnaryOpOperation operation) { - std::unique_ptr op(new OpT); - op->main.type = OpParameter_UnaryOp; - op->type = OpType_UnaryOp; - op->main.value = new UnaryOpT; - op->main.AsUnaryOp()->opType = operation; - op->main.AsUnaryOp()->T = DataType_DT_FLOAT; - return (Variable::create(Expr::create(op.get(), {x}))); -} -VARP _Mul(VARP x, VARP y) { - return _Binary(x, y, BinaryOpOperation_MUL); -} -VARP _Div(VARP x, VARP y) { - return _Binary(x, y, BinaryOpOperation_REALDIV); -} -VARP _Sub(VARP x, VARP y) { - return _Binary(x, y, BinaryOpOperation_SUB); -} -VARP _Add(VARP x, VARP y) { - 
return _Binary(x, y, BinaryOpOperation_ADD); -} -VARP _Min(VARP x, VARP y) { - return _Binary(x, y, BinaryOpOperation_MINIMUM); -} -VARP _Max(VARP x, VARP y) { - return _Binary(x, y, BinaryOpOperation_MAXIMUM); -} -VARP _Neg(VARP x) { - return _Unary(x, UnaryOpOperation_NEG); -} -VARP _Rsqrt(VARP x) { - return _Unary(x, UnaryOpOperation_RSQRT); -} -VARP _Log(VARP x) { - return _Unary(x, UnaryOpOperation_LOG); -} -VARP _Exp(VARP x) { - return _Unary(x, UnaryOpOperation_EXP); -} -VARP _Square(VARP x) { - return _Unary(x, UnaryOpOperation_SQUARE); -} - -VARP _Tanh(VARP x) { - std::unique_ptr op(new OpT); - op->type = OpType_TanH; - return (Variable::create(Expr::create(op.get(), {x}))); -} - -VARP _Sigmoid(VARP x) { - std::unique_ptr op(new OpT); - op->type = OpType_Sigmoid; - return (Variable::create(Expr::create(op.get(), {x}))); -} -static VARP _Reduce(VARP x, INTS dim, ReductionType type, bool keepDim) { - std::unique_ptr op(new OpT); - op->main.type = OpParameter_ReductionParam; - op->type = OpType_Reduction; - op->main.value = new ReductionParamT; - op->main.AsReductionParam()->dType = DataType_DT_FLOAT; - op->main.AsReductionParam()->operation= type; - op->main.AsReductionParam()->dim = dim; - op->main.AsReductionParam()->keepDims = keepDim; - return (Variable::create(Expr::create(op.get(), {x}))); -} -VARP _ReduceMax(VARP x, INTS dim, bool keepDim) { - return _Reduce(x, dim, ReductionType_MAXIMUM, keepDim); -} -VARP _ReduceMin(VARP x, INTS dim, bool keepDim) { - return _Reduce(x, dim, ReductionType_MINIMUM, keepDim); -} -VARP _Sum(VARP x, INTS dim, bool keepDim) { - return _Reduce(x, dim, ReductionType_SUM, keepDim); -} -VARP _Mean(VARP x, INTS dim, bool keepDim) { - return _Reduce(x, dim, ReductionType_MEAN, keepDim); -} -VARP _Prod(VARP x, INTS dim, bool keepDim) { - return _Reduce(x, dim, ReductionType_PROD, keepDim); -} -VARP _Any(VARP x, INTS dim, bool keepDim) { - return _Reduce(x, dim, ReductionType_ANY, keepDim); -} -VARP _All(VARP x, INTS dim, bool 
keepDim) { - return _Reduce(x, dim, ReductionType_ALL, keepDim); -} -VARP _MatMul(VARP a, VARP b, bool tranposeA, bool tranposeB) { - std::unique_ptr op(new OpT); - op->main.type = OpParameter_MatMul; - op->type = OpType_MatMul; - op->main.value = new MatMulT; - op->main.AsMatMul()->transposeA = tranposeA; - op->main.AsMatMul()->transposeB = tranposeB; - return (Variable::create(Expr::create(op.get(), {a, b}))); -} - -VARP _Normalize(VARP x, int32_t acrossSpatial, int32_t channelShared, float eps, std::vector scale) { - std::unique_ptr op(new OpT); - op->main.type = OpParameter_Normalize; - op->type = OpType_Normalize; - op->main.value = new NormalizeT; - op->main.AsNormalize()->acrossSpatial = acrossSpatial; - op->main.AsNormalize()->channelShared = channelShared; - op->main.AsNormalize()->eps = eps; - op->main.AsNormalize()->scale = scale; - return (Variable::create(Expr::create(std::move(op), {x}))); -} - -static VARP _Eltwise(VARP a, VARP b, EltwiseType type, std::vector coeff) { - std::unique_ptr op(new OpT); - op->main.type = OpParameter_Eltwise; - op->type = OpType_Eltwise; - op->main.value = new EltwiseT; - op->main.AsEltwise()->type = type; - op->main.AsEltwise()->coeff = coeff; - return (Variable::create(Expr::create(std::move(op), {a, b}))); -} - -VARP _Prod(VARP a, VARP b, std::vector coeff) { - return _Eltwise(a, b, EltwiseType_PROD, coeff); -} -VARP _Sum(VARP a, VARP b, std::vector coeff) { - return _Eltwise(a, b, EltwiseType_SUM, coeff); -} -VARP _Max(VARP a, VARP b, std::vector coeff) { - return _Eltwise(a, b, EltwiseType_MAXIMUM, coeff); -} -VARP _Sub(VARP a, VARP b, std::vector coeff) { - return _Eltwise(a, b, EltwiseType_SUB, coeff); -} - -} // namespace Express -} // namespace MNN diff --git a/express/source/NeuralNetWorkOp.cpp b/express/source/NeuralNetWorkOp.cpp deleted file mode 100644 index bfdf49772..000000000 --- a/express/source/NeuralNetWorkOp.cpp +++ /dev/null @@ -1,607 +0,0 @@ -// -// NeuralNetWorkOp.cpp -// MNN -// -// Created by MNN 
on 2019/06/27. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#include -#include -#include -#include "ExprCreator.hpp" -#include "MNNDefine.h" -#include "MNN_generated.h" -#include "Utils.hpp" -namespace MNN { -namespace Express { -static MNN_DATA_FORMAT _convertFormat(Dimensionformat format) { - switch (format) { - case NCHW: - return MNN_DATA_FORMAT_NCHW; - case NHWC: - return MNN_DATA_FORMAT_NHWC; - case NC4HW4: - return MNN_DATA_FORMAT_NC4HW4; - default: - break; - } - return MNN_DATA_FORMAT_UNKNOWN; -} -static PadMode _convertPadMode(PaddingMode mode) { - switch (mode) { - case CAFFE: - return PadMode_CAFFE; - case VALID: - return PadMode_VALID; - case SAME: - return PadMode_SAME; - default: - break; - } - return PadMode_CAFFE; -} -static PoolPadType _convertPoollingPadMode(PaddingMode mode) { - switch (mode) { - case CAFFE: - return PoolPadType_CAFFE; - case VALID: - return PoolPadType_VALID; - case SAME: - return PoolPadType_SAME; - default: - break; - } - return PoolPadType_CAFFE; -} -VARP _Input(INTS dims, Dimensionformat format, halide_type_t type) { - std::unique_ptr input(new OpT); - input->type = OpType_Input; - input->main.type = OpParameter_Input; - input->main.value = new InputT; - input->main.AsInput()->dtype = (MNN::DataType)Utils::convertDataType(type); - MNN_ASSERT(input->main.AsInput()->dtype != DataType_DT_INVALID); - input->main.AsInput()->dims = dims; - input->main.AsInput()->dformat = (MNN_DATA_FORMAT)Utils::convertFormat(format); - return (Variable::create(Expr::create(input.get(), {}))); -} -VARP _Const(const void* ptr, INTS dims, Dimensionformat format, halide_type_t type) { - MNN_ASSERT(type.code == halide_type_float || type.code == halide_type_int); - auto blob = new BlobT; - blob->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(format); - blob->dims = dims; - int length = 1; - for (int i = 0; i < dims.size(); ++i) { - length *= dims[i]; - } - if (type.code == halide_type_float) { - blob->dataType = DataType_DT_FLOAT; - 
blob->float32s.resize(length); - ::memcpy(blob->float32s.data(), ptr, length * sizeof(float)); - } else { - blob->dataType = DataType_DT_INT32; - blob->int32s.resize(length); - ::memcpy(blob->int32s.data(), ptr, length * sizeof(int)); - } - std::unique_ptr op(new OpT); - op->type = OpType_Const; - op->main.type = OpParameter_Blob; - op->main.value = blob; - return (Variable::create(Expr::create(op.get(), {}))); -} - -VARP _Const(float value, INTS dims, Dimensionformat format) { - std::unique_ptr constOp(new OpT); - constOp->type = OpType_Const; - constOp->main.type = OpParameter_Blob; - constOp->main.value = new BlobT; - constOp->main.AsBlob()->dataType = DataType_DT_FLOAT; - constOp->main.AsBlob()->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(format); - constOp->main.AsBlob()->dims = dims; - auto size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); - constOp->main.AsBlob()->float32s.resize(size); - for (int i = 0; i < size; ++i) { - constOp->main.AsBlob()->float32s[i] = value; - } - return (Variable::create(Expr::create(constOp.get(), {}))); -} -VARP _Conv(VARP weight, VARP bias, VARP x, PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads) { - std::unique_ptr convOp(new OpT); - convOp->type = OpType_Convolution; - auto shape = weight->getInfo(); - if (NHWC == shape->order) { - weight = _Transpose(weight, {0, 3, 1, 2}); - shape = weight->getInfo(); - } - auto channel = std::vector{shape->dim[1], shape->dim[0]}; - auto kernelSize = std::vector{shape->dim[3], shape->dim[2]}; - if (channel[0] == channel[1] && channel[0] == group) { - convOp->type = OpType_ConvolutionDepthwise; - } - convOp->main.type = OpParameter_Convolution2D; - convOp->main.value = new Convolution2DT; - auto conv2D = convOp->main.AsConvolution2D(); - conv2D->common.reset(new Convolution2DCommonT); - conv2D->common->padX = pads[0]; - conv2D->common->padY = pads[1]; - conv2D->common->padMode = _convertPadMode(pad); - conv2D->common->strideX = stride[0]; - 
conv2D->common->strideY = stride[1]; - conv2D->common->group = group; - conv2D->common->outputCount = channel[1]; - conv2D->common->inputCount = channel[0]; - conv2D->common->dilateX = dilate[0]; - conv2D->common->dilateY = dilate[1]; - conv2D->common->kernelX = kernelSize[0]; - conv2D->common->kernelY = kernelSize[1]; - INTS weightDims = {channel[1], channel[0] / group, kernelSize[1], kernelSize[0]}; - return (Variable::create(Expr::create(convOp.get(), {x, weight, bias}))); -} -VARP _Conv(std::vector&& weight, std::vector&& bias, VARP x, INTS channel, INTS kernelSize, - PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads) { - std::unique_ptr convOp(new OpT); - convOp->type = OpType_Convolution; - if (channel[0] == channel[1] && channel[0] == group) { - convOp->type = OpType_ConvolutionDepthwise; - } - convOp->main.type = OpParameter_Convolution2D; - convOp->main.value = new Convolution2DT; - auto conv2D = convOp->main.AsConvolution2D(); - conv2D->common.reset(new Convolution2DCommonT); - conv2D->common->padMode = _convertPadMode(pad); - conv2D->common->padX = pads[0]; - conv2D->common->padY = pads[1]; - conv2D->common->strideX = stride[0]; - conv2D->common->strideY = stride[1]; - conv2D->common->group = group; - conv2D->common->outputCount = channel[1]; - conv2D->common->inputCount = channel[0]; - conv2D->common->dilateX = dilate[0]; - conv2D->common->dilateY = dilate[1]; - conv2D->common->kernelX = kernelSize[0]; - conv2D->common->kernelY = kernelSize[1]; - MNN_ASSERT(weight.size() == channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]); - conv2D->weight = std::move(weight); - MNN_ASSERT(bias.size() == channel[1]); - conv2D->bias = std::move(bias); - return (Variable::create(Expr::create(convOp.get(), {x}))); -} - -VARP _Conv(float weight, float bias, VARP x, INTS channel, INTS kernelSize, PaddingMode pad, INTS stride, INTS dilate, - int group) { - std::unique_ptr convOp(new OpT); - convOp->type = OpType_Convolution; - if (channel[0] == 
channel[1] && channel[0] == group) { - convOp->type = OpType_ConvolutionDepthwise; - } - convOp->main.type = OpParameter_Convolution2D; - convOp->main.value = new Convolution2DT; - auto conv2D = convOp->main.AsConvolution2D(); - conv2D->common.reset(new Convolution2DCommonT); - conv2D->common->padMode = _convertPadMode(pad); - conv2D->common->strideX = stride[0]; - conv2D->common->strideY = stride[1]; - conv2D->common->group = group; - conv2D->common->outputCount = channel[1]; - conv2D->common->inputCount = channel[0]; - conv2D->common->dilateX = dilate[0]; - conv2D->common->dilateY = dilate[1]; - conv2D->common->kernelX = kernelSize[0]; - conv2D->common->kernelY = kernelSize[1]; - conv2D->weight.resize(channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]); - std::fill(conv2D->weight.begin(), conv2D->weight.end(), weight); - conv2D->bias.resize(channel[1]); - std::fill(conv2D->bias.begin(), conv2D->bias.end(), bias); - return (Variable::create(Expr::create(convOp.get(), {x}))); -} - -VARP _Deconv(VARP weight, VARP bias, VARP x, PaddingMode pad, INTS stride, - INTS dilate, int group, INTS pads){ - std::unique_ptr convOp(new OpT); - convOp->type = OpType_Deconvolution; - auto shape = weight->getInfo(); - auto channel = std::vector{shape->dim[1], shape->dim[0]}; - auto kernelSize = std::vector{shape->dim[3], shape->dim[2]}; - if (channel[0] == channel[1] && channel[0] == group) { - convOp->type = OpType_DeconvolutionDepthwise; - } - convOp->main.type = OpParameter_Convolution2D; - convOp->main.value = new Convolution2DT; - auto conv2D = convOp->main.AsConvolution2D(); - conv2D->common.reset(new Convolution2DCommonT); - static std::map padmap{ - {CAFFE, PadMode_CAFFE}, - {VALID, PadMode_VALID}, - {SAME, PadMode_SAME}, - }; - conv2D->common->padX = pads[0]; - conv2D->common->padY = pads[1]; - conv2D->common->padMode = padmap[pad]; - conv2D->common->strideX = stride[0]; - conv2D->common->strideY = stride[1]; - conv2D->common->group = group; - 
conv2D->common->outputCount = channel[0]; - conv2D->common->inputCount = channel[1]; - conv2D->common->dilateX = dilate[0]; - conv2D->common->dilateY = dilate[1]; - conv2D->common->kernelX = kernelSize[0]; - conv2D->common->kernelY = kernelSize[1]; - INTS weightDims = {channel[1], channel[0] / group, kernelSize[1], kernelSize[0]}; - return (Variable::create(Expr::create(std::move(convOp), {x, weight, bias}))); -} - -static VARP _Pool(VARP x, INTS kernel, INTS stride, PoolType type, PaddingMode pad, INTS pads) { - std::unique_ptr pool(new OpT); - pool->type = OpType_Pooling; - pool->main.type = OpParameter_Pool; - pool->main.value = new PoolT; - if (kernel[0] == -1 && kernel[1] == -1) { - pool->main.AsPool()->isGlobal = true; - } - pool->main.AsPool()->padX = 0; - pool->main.AsPool()->padY = 0; - if (pads.size() >= 2) { - pool->main.AsPool()->padX = pads[0]; - pool->main.AsPool()->padY = pads[1]; - } - pool->main.AsPool()->padType = _convertPoollingPadMode(pad); - pool->main.AsPool()->kernelX = kernel[0]; - pool->main.AsPool()->kernelY = kernel[1]; - pool->main.AsPool()->strideX = stride[0]; - pool->main.AsPool()->strideY = stride[1]; - pool->main.AsPool()->type = type; - return (Variable::create(Expr::create(pool.get(), {x}))); -} - -VARP _AvePool(VARP x, INTS kernel, INTS stride, PaddingMode pad, INTS pads) { - return _Pool(x, kernel, stride, PoolType_AVEPOOL, pad, pads); -} - -VARP _MaxPool(VARP x, INTS kernel, INTS stride, PaddingMode pad, INTS pads) { - return _Pool(x, kernel, stride, PoolType_MAXPOOL, pad, pads); -} -VARP _Reshape(VARP x, INTS dim, Dimensionformat format) { - std::unique_ptr reshape(new OpT); - reshape->type = OpType_Reshape; - reshape->main.type = OpParameter_Reshape; - reshape->main.value = new ReshapeT; - reshape->main.AsReshape()->dims = dim; - reshape->main.AsReshape()->dimType = _convertFormat(format); - return (Variable::create(Expr::create(reshape.get(), {x}))); -} -VARP _Reshape(VARP x, VARP shape) { - std::unique_ptr reshape(new 
OpT); - reshape->type = OpType_Reshape; - reshape->main.type = OpParameter_Reshape; - reshape->main.value = new ReshapeT; - reshape->main.AsReshape()->dimType = MNN_DATA_FORMAT_NCHW; - return (Variable::create(Expr::create(reshape.get(), {x, shape}))); -} -VARP _Scale(VARP x, int channels, std::vector&& scales, std::vector&& bias) { - std::unique_ptr scale(new OpT); - scale->type = OpType_Scale; - scale->main.type = OpParameter_Scale; - scale->main.value = new ScaleT; - scale->main.AsScale()->channels = channels; - scale->main.AsScale()->scaleData = std::move(scales); - scale->main.AsScale()->biasData = std::move(bias); - return (Variable::create(Expr::create(std::move(scale), {x}))); -} -VARP _Relu(VARP x, float slope) { - std::unique_ptr relu(new OpT); - relu->type = OpType_ReLU; - relu->main.type = OpParameter_Relu; - relu->main.value = new ReluT; - relu->main.AsRelu()->slope = slope; - return (Variable::create(Expr::create(relu.get(), {x}))); -} -VARP _Relu6(VARP x) { - std::unique_ptr relu(new OpT); - relu->type = OpType_ReLU6; - return (Variable::create(Expr::create(relu.get(), {x}))); -} -VARP _PRelu(VARP x, std::vector &&slopes) { - std::unique_ptr prelu(new OpT); - prelu->type = OpType_PReLU; - prelu->main.type = OpParameter_PRelu; - prelu->main.value = new PReluT; - prelu->main.AsPRelu()->slope = slopes; - prelu->main.AsPRelu()->slopeCount = slopes.size(); - return (Variable::create(Expr::create(prelu.get(), {x}))); -} - -VARP _Softmax(VARP x, int axis) { - std::unique_ptr softmax(new OpT); - softmax->type = OpType_Softmax; - softmax->main.type = OpParameter_Axis; - softmax->main.value = new AxisT; - softmax->main.AsAxis()->axis = axis; - return (Variable::create(Expr::create(softmax.get(), {x}))); -} -VARP _Concat(VARPS xs, int axis) { - std::unique_ptr concat(new OpT); - concat->type = OpType_Concat; - concat->main.type = OpParameter_Axis; - concat->main.value = new AxisT; - concat->main.AsAxis()->axis = axis; - return 
(Variable::create(Expr::create(concat.get(), xs))); -} - -VARP _Convert(VARP x, Dimensionformat dest) { - std::unique_ptr convert(new OpT); - if (nullptr == x->getInfo()) { - return x; - } - auto source = x->getInfo()->order; - if (source == dest) { - return x; - } - convert->type = OpType_ConvertTensor; - convert->main.type = OpParameter_TensorConvertInfo; - convert->main.value = new TensorConvertInfoT; - convert->main.AsTensorConvertInfo()->source = (MNN_DATA_FORMAT)Utils::convertFormat(source); - convert->main.AsTensorConvertInfo()->dest = (MNN_DATA_FORMAT)Utils::convertFormat(dest); - return (Variable::create(Expr::create(convert.get(), {x}))); -} - -std::vector _Split(VARP x, INTS points, int axis) { - MNN_ASSERT(points.size() >= 1); - std::unique_ptr op(new OpT); - op->type = OpType_Slice; - op->main.type = OpParameter_Slice; - op->main.value = new SliceT; - op->main.AsSlice()->axis = axis; - op->main.AsSlice()->sourceType = NetSource_TENSORFLOW; - op->main.AsSlice()->slicePoints = points; - - int slices = points.size() == 1 ? 
points[0] : (int)points.size(); - EXPRP expr = Expr::create(std::move(op), {x}, slices); - std::vector res; - for (int i = 0; i < slices; ++i) { - res.emplace_back(Variable::create(expr, i)); - } - return res; -} - -VARP _Slice(VARP x, VARP starts, VARP sizes) { - std::unique_ptr slice(new OpT); - slice->type = OpType_SliceTf; - return (Variable::create(Expr::create(slice.get(), {x, starts, sizes}))); -} - -VARP _Transpose(VARP x, INTS perm) { - auto permVar = _Const((const void*)perm.data(), {static_cast(perm.size())}, NHWC, halide_type_of()); - return _Transpose(x, permVar); -} -VARP _Transpose(VARP x, VARP perm) { - std::unique_ptr transpose(new OpT); - transpose->type = OpType_Transpose; - transpose->main.type = OpParameter_Transpose; - transpose->main.value = new TransposeT; - transpose->main.AsTranspose()->Tperm = DataType_DT_INT32; - return (Variable::create(Expr::create(std::move(transpose), {x, perm}))); -} - -VARP _ChannelShuffle(VARP x, int group) { - x = _Convert(x, NHWC); - x = _Reshape(x, {0, 0, 0, group, -1}, NHWC); - x = _Transpose(x, {0, 1, 2, 4, 3}); - x = _Reshape(x, {0, 0, 0, -1}, NHWC); - x = _Convert(x, NC4HW4); - return x; -} -VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim) { - std::unique_ptr op(new OpT); - op->type = OpType_ReverseSequence; - op->main.type = OpParameter_ReverseSequenceParam; - op->main.value = new ReverseSequenceParamT; - op->main.AsReverseSequenceParam()->batchDim = batchDim; - op->main.AsReverseSequenceParam()->seqDim = seqDim; - return (Variable::create(Expr::create(op.get(), {x, y}))); -} -VARP _ChangeInputFormat(VARP x, Dimensionformat requireInput) { - if (nullptr == x || nullptr == x->getInfo()) { - return nullptr; - } - if (x->getInfo()->order == requireInput) { - return x; - } - auto input = _Input(x->getInfo()->dim, requireInput, x->getInfo()->type); - auto convert = _Convert(input, x->getInfo()->order); - Variable::replace(x, convert); - return input; -} - -VARP _Clone(VARP source, bool deepCopy) 
{ - if (nullptr == source || nullptr == source->expr().first) { - return nullptr; - } - if (!deepCopy) { - return Variable::create(source->expr().first, source->expr().second); - } - auto info = source->getInfo(); - auto sourcePtr = source->readMap(); - if (nullptr == info || nullptr == sourcePtr) { - MNN_ERROR("Source Buffer Not Available\n"); - return nullptr; - } - auto inputVar = _Input(info->dim, info->order, info->type); - auto destPtr = inputVar->writeMap(); - if (nullptr == destPtr) { - MNN_ERROR("Alloc Buffer Error\n"); - return nullptr; - } - ::memcpy(destPtr, sourcePtr, info->size * info->type.bytes()); - return inputVar; -} -VARP _Conv2DBackPropFilter(VARP weight, VARP input, VARP inputGrad, PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads){ - std::unique_ptr convOp(new OpT); - convOp->type = OpType_Conv2DBackPropFilter; - auto shape = weight->getInfo(); - auto channel = std::vector{shape->dim[1], shape->dim[0]}; - auto kernelSize = std::vector{shape->dim[3], shape->dim[2]}; - convOp->main.type = OpParameter_Convolution2D; - convOp->main.value = new Convolution2DT; - auto conv2D = convOp->main.AsConvolution2D(); - conv2D->common.reset(new Convolution2DCommonT); - conv2D->common->padX = pads[0]; - conv2D->common->padY = pads[1]; - conv2D->common->padMode = _convertPadMode(pad); - conv2D->common->strideX = stride[0]; - conv2D->common->strideY = stride[1]; - conv2D->common->group = group; - conv2D->common->outputCount = channel[1]; - conv2D->common->inputCount = channel[0]; - conv2D->common->dilateX = dilate[0]; - conv2D->common->dilateY = dilate[1]; - conv2D->common->kernelX = kernelSize[0]; - conv2D->common->kernelY = kernelSize[1]; - INTS weightDims = {channel[1], channel[0] / group, kernelSize[1], kernelSize[0]}; - - return Variable::create(Expr::create(std::move(convOp), {weight, input, inputGrad})); -} - -VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, INTS stride, PoolingMode type, PaddingMode pad, 
INTS pads){ - std::unique_ptr pool(new OpT); - pool->type = OpType_PoolGrad; - pool->main.type = OpParameter_Pool; - pool->main.value = new PoolT; - if (kernel[0] == -1 && kernel[1] == -1) { - pool->main.AsPool()->isGlobal = true; - } - pool->main.AsPool()->padX = 0; - pool->main.AsPool()->padY = 0; - if (pads.size() >= 2) { - pool->main.AsPool()->padX = pads[0]; - pool->main.AsPool()->padY = pads[1]; - } - pool->main.AsPool()->padType = _convertPoollingPadMode(pad); - pool->main.AsPool()->kernelX = kernel[0]; - pool->main.AsPool()->kernelY = kernel[1]; - pool->main.AsPool()->strideX = stride[0]; - pool->main.AsPool()->strideY = stride[1]; - pool->main.AsPool()->type = (PoolType)type; - return (Variable::create(Expr::create(std::move(pool), {originInput, originOutput, inputGrad}))); -} - -VARP _Crop(VARP x, VARP s, int axis, INTS offset) { - std::unique_ptr crop(new OpT); - crop->type = OpType_Crop; - crop->main.type = OpParameter_Crop; - crop->main.value = new CropT; - crop->main.AsCrop()->axis = axis; - crop->main.AsCrop()->offset = offset; - return (Variable::create(Expr::create(std::move(crop), {x, s}))); -} -VARP _Resize(VARP x, float xScale, float yScale) { - std::unique_ptr resize(new OpT); - resize->type = OpType_Resize; - resize->main.type = OpParameter_Resize; - resize->main.value = new ResizeT; - resize->main.AsResize()->xScale = xScale; - resize->main.AsResize()->yScale = yScale; - return (Variable::create(Expr::create(std::move(resize), {x}))); -} -VARP _Pad(VARP x, VARP pads) { - std::unique_ptr pad(new OpT); - pad->type = OpType_Padding; - return (Variable::create(Expr::create(std::move(pad), {x, pads}))); -} -VARP _ExpandDims(VARP x, int axis) { - std::unique_ptr expand(new OpT); - expand->type = OpType_ExpandDims; - expand->main.type = OpParameter_ExpandDims; - expand->main.value = new ExpandDimsT; - expand->main.AsExpandDims()->axis = axis; - return (Variable::create(Expr::create(std::move(expand), {x}))); -} -VARP _ExpandDims(VARP x, VARP axis) { 
- std::unique_ptr expand(new OpT); - expand->type = OpType_ExpandDims; - expand->main.type = OpParameter_ExpandDims; - expand->main.value = new ExpandDimsT; - return (Variable::create(Expr::create(std::move(expand), {x, axis}))); -} - -VARP _Shape(VARP x) { - std::unique_ptr shape(new OpT); - shape->type = OpType_Shape; - return (Variable::create(Expr::create(std::move(shape), {x}))); -} -VARP _Pack(VARPS xs, halide_type_t dtype, int axis) { - std::unique_ptr pack(new OpT); - pack->type = OpType_Pack; - pack->main.type = OpParameter_PackParam; - pack->main.value = new PackParamT; - pack->main.AsPackParam()->dataType = (MNN::DataType)Utils::convertDataType(dtype); - pack->main.AsPackParam()->axis = axis; - return (Variable::create(Expr::create(std::move(pack), xs))); -} -VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extrapolation, InterpolationMethod method) { - std::unique_ptr car(new OpT); - car->type = OpType_CropAndResize; - car->main.type = OpParameter_CropAndResize; - car->main.value = new CropAndResizeT; - car->main.AsCropAndResize()->extrapolationValue = extrapolation; - switch (method) { - case NEAREST: - car->main.AsCropAndResize()->method = CropAndResizeMethod_NEAREST; - break; - case BILINEAR: - default: - car->main.AsCropAndResize()->method = CropAndResizeMethod_BILINEAR; - break; - } - return (Variable::create(Expr::create(std::move(car), {image, boxes, indexes, sizes}))); -} -VARP _Fill(VARP s, VARP v) { - std::unique_ptr fill(new OpT); - fill->type = OpType_Fill; - fill->main.type = OpParameter_Fill; - fill->main.value = new FillT; - return (Variable::create(Expr::create(std::move(fill), {s, v}))); -} -VARP _Tile(VARP x, VARP mul) { - std::unique_ptr tile(new OpT); - tile->type = OpType_Tile; - return (Variable::create(Expr::create(std::move(tile), {x, mul}))); -} -VARP _Gather(VARP embedding, VARP indices) { - std::unique_ptr gather(new OpT); - gather->type = OpType_Gather; - gather->main.value = new GatherT; - return 
(Variable::create(Expr::create(std::move(gather), {embedding, indices}))); -} -VARP _GatherV2(VARP params, VARP indices, VARP axis) { - std::unique_ptr gather(new OpT); - gather->type = OpType_GatherV2; - gather->main.value = new GatherV2T; - if (axis.get()) { - return (Variable::create(Expr::create(std::move(gather), {params, indices, axis}))); - } else { - return (Variable::create(Expr::create(std::move(gather), {params, indices}))); - } -} - -VARP _Squeeze(VARP x, INTS axes){ - std::unique_ptr squeeze(new OpT); - squeeze->type = OpType_Squeeze; - auto squeezeParam = new SqueezeParamT; - squeezeParam->squeezeDims = axes; - squeeze->main.type = OpParameter_SqueezeParam; - squeeze->main.value = squeezeParam; - return Variable::create(Expr::create(std::move(squeeze), {x})); -} - -VARP _Unsqueeze(VARP x, INTS axes){ - std::unique_ptr squeeze(new OpT); - squeeze->type = OpType_Unsqueeze; - auto squeezeParam = new SqueezeParamT; - squeezeParam->squeezeDims = axes; - squeeze->main.type = OpParameter_SqueezeParam; - squeeze->main.value = squeezeParam; - return Variable::create(Expr::create(std::move(squeeze), {x})); -} - -} // namespace Express -} // namespace MNN diff --git a/express/source/Solution.cpp b/express/source/Solution.cpp deleted file mode 100644 index 51b60de15..000000000 --- a/express/source/Solution.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// -// Solution.cpp -// MNN -// -// Created by MNN on 2019/07/26. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "Solution.hpp" -namespace MNN { -namespace Express { -Solution::Requirement Solution::onGetRequirement() const { - auto size = mInputSize; - Solution::Requirement req; - req.contentNeedContent.resize(size); - req.shapeNeedContent.resize(size); - req.supportError.resize(size); - for (int i = 0; i < size; ++i) { - req.contentNeedContent[i] = true; - req.shapeNeedContent[i] = false; - req.supportError[i] = false; - } - return req; -} -} // namespace Express -} // namespace MNN diff --git a/express/source/Solution.hpp b/express/source/Solution.hpp deleted file mode 100644 index 0397021af..000000000 --- a/express/source/Solution.hpp +++ /dev/null @@ -1,50 +0,0 @@ -// -// Solution.hpp -// MNN -// -// Created by MNN on 2019/07/25. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "ErrorCode.hpp" -#include "Expr.hpp" - -namespace MNN { -namespace Express { -class Solution { -public: - struct Requirement { - std::vector contentNeedContent; - std::vector shapeNeedContent; - std::vector supportError; - }; - - Solution(int inputSize, int outputSize) : mInputSize(inputSize), mOutputSize(outputSize) { - // Do nothing - } - virtual ~Solution() = default; - virtual Requirement onGetRequirement() const; - - virtual ErrorCode onComputeInfo(const std::vector& inputs, - const std::vector& outputs) = 0; - virtual ErrorCode onAlloc(const std::vector& inputs, - const std::vector& outputs) = 0; - virtual ErrorCode onComputeContent(const std::vector& inputs, - const std::vector& outputs) = 0; - - // Map output's content to host - virtual void* onMapContent(int index) = 0; - virtual void onUnMapContent(int index) = 0; - -protected: - const int mInputSize; - const int mOutputSize; -}; -class Executor { -public: - Executor() = default; - virtual ~Executor() = default; - virtual Solution* onCreate(const Op* op, int inputSize, int outputSize) = 0; -}; -} // namespace Express -} // namespace MNN diff --git 
a/express/source/optimizer/MergeOptimizer.cpp b/express/source/optimizer/MergeOptimizer.cpp deleted file mode 100644 index 4da024127..000000000 --- a/express/source/optimizer/MergeOptimizer.cpp +++ /dev/null @@ -1,104 +0,0 @@ -// -// MergeOptimizer.cpp -// MNN -// -// Created by MNN on 2019/08/20. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "MergeOptimizer.hpp" -#include -#include "BasicOptimizer_generated.h" -namespace MNN { -namespace Express { - -MergeOptimizer::MergeOptimizer(MNNForwardType type, int numberThread, BackendConfig* config) { - if (nullptr != config) { - mConfig = *config; - } - mType = type; - mNumberThread = numberThread; -} - -Optimizer::Cost MergeOptimizer::onMeasure(const std::vector& outputs, std::shared_ptr parameters) { - Cost cost; - cost.compute = 0.0f; - cost.memory = 0.0f; - return cost; -} -bool MergeOptimizer::onExecute(const std::vector& outputs, std::shared_ptr parameters) { - auto sequence = Variable::getExecuteOrder(outputs); - if (1 == sequence.size()) { - return true; - } - std::map> worked; - std::map varIndex; - std::vector queue; - std::vector inputs; - std::unique_ptr merge(new MNN::Optimizer::MergeT); - queue.reserve(sequence.size()); - merge->tensorNumber = sequence.size(); - merge->backend.reset(new MNN::Optimizer::BackendConfigT); - merge->backend->numberThread = mNumberThread; - merge->backend->type = (MNN::ForwardType)mType; - merge->backend->power = (int)mConfig.power; - merge->backend->precision = (int)mConfig.precision; - merge->backend->memroy = (int)mConfig.memory; - - for (int i = 0; i < sequence.size(); ++i) { - auto var = sequence[i]; - varIndex[var] = i; - if (var->expr().first->get()->type() == OpType_Input) { - inputs.emplace_back(var); - } - auto exprInfo = var->expr(); - if (exprInfo.first->get()->type() == OpType_Input) { - merge->inputIndexes.emplace_back(i); - continue; - } - if (worked.find(exprInfo.first) != worked.end()) { - worked[exprInfo.first][exprInfo.second] = i; - 
continue; - } - worked.insert(std::make_pair(exprInfo.first, std::vector(exprInfo.first->outputSize()))); - worked[exprInfo.first][exprInfo.second] = i; - queue.emplace_back(exprInfo.first); - } - for (auto expr : queue) { - std::unique_ptr op(expr->get()->UnPack()); - op->outputIndexes = worked[expr]; - auto exprinputs = expr->inputs(); - op->inputIndexes.resize(exprinputs.size()); - for (int i = 0; i < exprinputs.size(); ++i) { - op->inputIndexes[i] = varIndex[exprinputs[i]]; - } - merge->oplists.emplace_back(std::move(op)); - } - for (auto var : outputs) { - merge->outputIndexes.emplace_back(varIndex[var]); - } - - std::unique_ptr mergeOp(new OpT); - mergeOp->type = OpType_Extra; - mergeOp->name = outputs[0]->name(); - mergeOp->main.type = OpParameter_Extra; - mergeOp->main.value = new ExtraT; - auto plugin = mergeOp->main.AsExtra(); - plugin->type = "Session"; - plugin->engine = "MNN"; - - flatbuffers::FlatBufferBuilder builder; - auto offset = MNN::Optimizer::Merge::Pack(builder, merge.get()); - builder.Finish(offset); - plugin->info.resize(builder.GetSize()); - ::memcpy(plugin->info.data(), builder.GetBufferPointer(), builder.GetSize()); - - auto mergeExpr = Expr::create(mergeOp.get(), inputs, (int)outputs.size()); - mergeExpr->setName(outputs[0]->name()); - for (int i = 0; i < outputs.size(); ++i) { - Variable::setExpr(outputs[i], mergeExpr, i); - } - return true; -} -} // namespace Express -} // namespace MNN diff --git a/include/AutoTime.hpp b/include/MNN/AutoTime.hpp similarity index 62% rename from include/AutoTime.hpp rename to include/MNN/AutoTime.hpp index 1575f9bbd..40e477405 100644 --- a/include/AutoTime.hpp +++ b/include/MNN/AutoTime.hpp @@ -11,12 +11,30 @@ #include #include -#include "MNNDefine.h" +#include namespace MNN { +class MNN_PUBLIC Timer { +public: + Timer(); + ~Timer(); + Timer(const Timer&) = delete; + Timer(const Timer&&) = delete; + Timer& operator=(const Timer&) = delete; + Timer& operator=(const Timer&&) = delete; + + // reset timer 
+ void reset(); + // get duration (us) from init or latest reset. + uint64_t durationInUs(); + +protected: + uint64_t mLastResetTime; +}; + /** time tracing util. prints duration between init and deinit. */ -class MNN_PUBLIC AutoTime { +class MNN_PUBLIC AutoTime : Timer { public: AutoTime(int line, const char* func); ~AutoTime(); @@ -28,7 +46,6 @@ class MNN_PUBLIC AutoTime { private: int mLine; char* mName; - uint64_t mCurrentTime; }; } // namespace MNN diff --git a/include/ErrorCode.hpp b/include/MNN/ErrorCode.hpp similarity index 100% rename from include/ErrorCode.hpp rename to include/MNN/ErrorCode.hpp diff --git a/include/HalideRuntime.h b/include/MNN/HalideRuntime.h similarity index 100% rename from include/HalideRuntime.h rename to include/MNN/HalideRuntime.h diff --git a/include/ImageProcess.hpp b/include/MNN/ImageProcess.hpp similarity index 97% rename from include/ImageProcess.hpp rename to include/MNN/ImageProcess.hpp index b0147b7ad..a7f353336 100644 --- a/include/ImageProcess.hpp +++ b/include/MNN/ImageProcess.hpp @@ -9,9 +9,9 @@ #ifndef ImageProcess_hpp #define ImageProcess_hpp -#include "ErrorCode.hpp" -#include "Matrix.h" -#include "Tensor.hpp" +#include +#include +#include namespace MNN { namespace CV { @@ -22,6 +22,7 @@ enum ImageFormat { GRAY, BGRA, YUV_NV21 = 11, + YUV_NV12 = 12, }; enum Filter { NEAREST = 0, BILINEAR = 1, BICUBIC = 2 }; diff --git a/include/Interpreter.hpp b/include/MNN/Interpreter.hpp similarity index 98% rename from include/Interpreter.hpp rename to include/MNN/Interpreter.hpp index ccc116710..b3f547bea 100644 --- a/include/Interpreter.hpp +++ b/include/MNN/Interpreter.hpp @@ -12,9 +12,9 @@ #include #include #include -#include "ErrorCode.hpp" -#include "MNNForwardType.h" -#include "Tensor.hpp" +#include +#include +#include namespace MNN { diff --git a/include/MNNDefine.h b/include/MNN/MNNDefine.h similarity index 98% rename from include/MNNDefine.h rename to include/MNN/MNNDefine.h index 5dc4e8fae..79c134728 100644 --- 
a/include/MNNDefine.h +++ b/include/MNN/MNNDefine.h @@ -13,7 +13,7 @@ #include #if defined(__APPLE__) -#include "TargetConditionals.h" +#include #if TARGET_OS_IPHONE #define MNN_BUILD_FOR_IOS #endif diff --git a/include/MNNForwardType.h b/include/MNN/MNNForwardType.h similarity index 100% rename from include/MNNForwardType.h rename to include/MNN/MNNForwardType.h diff --git a/include/MNNSharedContext.h b/include/MNN/MNNSharedContext.h similarity index 100% rename from include/MNNSharedContext.h rename to include/MNN/MNNSharedContext.h diff --git a/include/Matrix.h b/include/MNN/Matrix.h similarity index 99% rename from include/Matrix.h rename to include/MNN/Matrix.h index 5bba72976..0d59f6634 100644 --- a/include/Matrix.h +++ b/include/MNN/Matrix.h @@ -26,7 +26,7 @@ #include #include -#include "Rect.h" +#include namespace MNN { namespace CV { diff --git a/include/Rect.h b/include/MNN/Rect.h similarity index 99% rename from include/Rect.h rename to include/MNN/Rect.h index 598b64ce9..91c4950f4 100644 --- a/include/Rect.h +++ b/include/MNN/Rect.h @@ -29,7 +29,7 @@ #include #include #include -#include "MNNDefine.h" +#include namespace MNN { namespace CV { diff --git a/include/Tensor.hpp b/include/MNN/Tensor.hpp similarity index 99% rename from include/Tensor.hpp rename to include/MNN/Tensor.hpp index 239c1a6a3..bf6dca7a6 100644 --- a/include/Tensor.hpp +++ b/include/MNN/Tensor.hpp @@ -10,8 +10,8 @@ #define Tensor_hpp #include -#include "HalideRuntime.h" -#include "MNNDefine.h" +#include +#include namespace MNN { @@ -221,7 +221,7 @@ class MNN_PUBLIC Tensor { if (getDimensionType() == TENSORFLOW) { return mBuffer.dim[2].extent; } - + return mBuffer.dim[3].extent; } inline int height() const { diff --git a/include/MNN/expr/Executor.hpp b/include/MNN/expr/Executor.hpp new file mode 100644 index 000000000..426a2e539 --- /dev/null +++ b/include/MNN/expr/Executor.hpp @@ -0,0 +1,45 @@ +// +// Executor.hpp +// MNN +// +// Created by MNN on 2019/07/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include +#include +namespace MNN { +class Backend; +namespace Express { +class Solution; +class MNN_PUBLIC Executor { +public: + struct Requirement { + std::vector contentNeedContent; + std::vector shapeNeedContent; + std::vector supportError; + }; + virtual ~Executor(); + virtual Requirement onGetRequirement(Expr* expr) const; + virtual ErrorCode onComputeInfo(Expr* expr); + virtual ErrorCode onComputeContent(Expr* expr); + void recycle(Expr* expr); + void setGlobalExecutorConfig(MNNForwardType type, const BackendConfig& config, int numberThread); + enum GCFlag { + ALL, + UNACTIVE + }; + void gc(GCFlag flag = ALL); + static std::shared_ptr getGlobalExecutor(); +private: + Executor(std::shared_ptr backend); + std::shared_ptr mBackend; + std::map> mSolutions; + std::mutex mMutex; +}; +} // namespace Express +} // namespace MNN diff --git a/express/include/Expr.hpp b/include/MNN/expr/Expr.hpp similarity index 51% rename from express/include/Expr.hpp rename to include/MNN/expr/Expr.hpp index fa0e5224d..aaf663579 100644 --- a/express/include/Expr.hpp +++ b/include/MNN/expr/Expr.hpp @@ -15,38 +15,92 @@ #include #include #include -#include "HalideRuntime.h" -#include "MNNDefine.h" - -#if defined(_MSC_VER) -#if defined(BUILDING_MNN_EXPRESS_DLL) -#define MNN_EXPRESS_PUBLIC __declspec(dllexport) -#elif defined(USING_MNN_EXPRESS_DLL) -#define MNN_EXPRESS_PUBLIC __declspec(dllimport) -#else -#define MNN_EXPRESS_PUBLIC -#endif -#else -#define MNN_EXPRESS_PUBLIC __attribute__((visibility("default"))) -#endif +#include +#include namespace MNN { struct OpT; struct Op; struct NetT; namespace Express { -class Solution; class Variable; class Expr; class Executor; typedef std::shared_ptr EXPRP; typedef std::weak_ptr WeakEXPRP; -typedef std::shared_ptr VARP; -typedef std::weak_ptr WeakVARP; typedef std::vector INTS; -typedef std::vector VARPS; enum Dimensionformat { NHWC, NC4HW4, NCHW 
}; -class MNN_EXPRESS_PUBLIC Variable { +class MNN_PUBLIC VARP { +public: + VARP() { + // Do nothing + } + VARP(std::shared_ptr c) { + mContent = std::move(c); + } + VARP(Variable* c) { + mContent.reset(c); + } + Variable* get() const { + return mContent.get(); + } + ~ VARP() { + // Do nothing + } + VARP(const VARP& var) { + mContent = var.mContent; + } + VARP(VARP&& var) { + mContent = std::move(var.mContent); + } + VARP operator+(VARP var) const; + VARP operator-(VARP var) const; + VARP operator*(VARP var) const; + VARP operator/(VARP var) const; + VARP mean(INTS dims) const; + VARP sum(INTS dims) const; + + bool operator==(const VARP& var) { + return var.mContent == mContent; + } + bool operator<(const VARP& var) { + return mContent < var.mContent; + } + bool operator<=(const VARP& var) { + return mContent <= var.mContent; + } + VARP& operator=(const VARP& var) { + mContent = var.mContent; + return *this; + } + VARP& operator=(Variable* var) { + mContent.reset(var); + return *this; + } + Variable* operator->() const { + return mContent.get(); + } + enum InputType { + INPUT = 0, + CONST = 1, + TRAINABLE = 2, + }; + bool fix(InputType type) const; +private: + std::shared_ptr mContent; +}; +inline bool operator==(Variable* src, VARP dst) { + return src == dst.get(); +} +inline bool operator!=(Variable* src, VARP dst) { + return src != dst.get(); +} +inline bool operator<(VARP src, VARP dst) { + return src.get() < dst.get(); +} +typedef std::vector VARPS; + +class MNN_PUBLIC Variable { public: struct Info { Dimensionformat order = NHWC; @@ -54,16 +108,13 @@ class MNN_EXPRESS_PUBLIC Variable { halide_type_t type; int size; void* ptr = nullptr; + void syncSize(); }; - const std::string& name() const { - return mName; - } + const std::string& name() const; void setName(const std::string& name); std::pair expr() const { return std::make_pair(mFrom, mFromIndex); } - static void setExpr(VARP dst, EXPRP from, int index); - // If compute info error, return nullptr const 
Info* getInfo(); bool resize(INTS dims); @@ -76,35 +127,29 @@ class MNN_EXPRESS_PUBLIC Variable { T* writeMap() { return (T*)writeInternal(); } - + //Depecerate void unMap(); - + bool input(VARP src); static void replace(VARP dst, VARP src); static VARP create(EXPRP expr, int index = 0); - void visitOutputs(const std::function& visit); - - static void visit(VARP var, const std::function& before, const std::function& after); - static std::vector load(const char* fileName); static std::map loadMap(const char* fileName); static std::pair, std::map> getInputAndOutput(const std::map& allVariable); static std::vector mapToSequence(const std::map& source); - static std::vector getExecuteOrder(const std::vector& output); + static std::vector getExecuteOrder(const std::vector& output); static void save(const std::vector& vars, const char* fileName); static void save(const std::vector& vars, NetT* dest); - size_t linkNumber() const { - return mTo.size(); - } - - const std::list< std::pair >& toExprs() const{ - return mTo; + size_t linkNumber() const; + const std::vector& toExprs() const; + void setExpr(EXPRP expr, int index) { + mFrom = expr; + mFromIndex = index; } - private: Variable(EXPRP expr, int index) { mFrom = expr; @@ -118,43 +163,36 @@ class MNN_EXPRESS_PUBLIC Variable { friend class Expr; EXPRP mFrom; int mFromIndex; - std::string mName; - std::list> mTo; }; -class MNN_EXPRESS_PUBLIC Expr { +class MNN_PUBLIC Expr { public: struct Inside; - static EXPRP create(const OpT* op, std::vector inputs, int outputSize = 1, - std::shared_ptr executor = nullptr); - static EXPRP create(std::unique_ptr&& op, std::vector inputs, int outputSize = 1, - std::shared_ptr executor = nullptr) { - return create(op.get(), inputs, outputSize, executor); + static EXPRP create(Variable::Info&& info); + static EXPRP create(const OpT* op, std::vector inputs, int outputSize = 1); + static EXPRP create(std::unique_ptr&& op, std::vector inputs, int outputSize = 1) { + return create(op.get(), 
inputs, outputSize); } void setName(const std::string& name); - void setExecutor(std::shared_ptr exe); const Op* get() const { return mOp; } - void set(const OpT* op); const std::vector& inputs() const { return mInputs; } - const Variable::Info* outputInfo(int index) const; int outputSize() const { - return mOutputSize; + return mOutputNames.size(); } + static void replace(EXPRP oldExpr, EXPRP newExpr); bool requireInfo(); - bool requireAlloc(); bool requireCompute(); + void visitOutputs(const std::function& visit); + static void visit(EXPRP expr, const std::function& before, const std::function& after); - Solution* inside(); - - const std::list& outputs() const { - return mOutputs; + const std::vector& outputs() const { + return mTo; } - static void setInput(EXPRP dst, VARP src, int index); ~Expr(); bool visited() const { @@ -166,28 +204,39 @@ class MNN_EXPRESS_PUBLIC Expr { const std::string& name() const { return mName; } + const std::string& outputName(int index) { + return mOutputNames[index]; + } + VARP::InputType inputType() const {return mType;} + Variable::Info* outputInfo(int index); + std::pair, int> extra() const { + return std::make_pair(mExtraBuffer, mOpBufferSize); + } + bool setInfoDirty(); private: + void set(const OpT* op); + static void _addLinkForInputs(EXPRP expr); bool setContentDirty(int inputIndex); - bool setInfoDirty(); Expr(int outputSize); friend class Variable; + friend class VARP; + VARP::InputType mType; const Op* mOp; std::vector mInputs; - std::list mOutputs; - const int mOutputSize; + std::vector mOutputNames; bool mValid = true; bool mInfoDirty = true; - bool mAllocated = false; bool mContentDirty = true; - char* mExtraBuffer = nullptr; + std::shared_ptr mExtraBuffer; + int mOpBufferSize = 0; std::string mName; std::shared_ptr mInside = nullptr; bool mVisited = false; - std::shared_ptr mExecutor; + std::vector mTo; }; } // namespace Express } // namespace MNN diff --git a/express/include/ExprCreator.hpp 
b/include/MNN/expr/ExprCreator.hpp similarity index 64% rename from express/include/ExprCreator.hpp rename to include/MNN/expr/ExprCreator.hpp index 0619d5bee..0a896011c 100644 --- a/express/include/ExprCreator.hpp +++ b/include/MNN/expr/ExprCreator.hpp @@ -9,8 +9,8 @@ #ifndef ExprCreator_hpp #define ExprCreator_hpp -#include "Expr.hpp" -#include "MathOp.hpp" -#include "NeuralNetWorkOp.hpp" +#include +#include +#include #endif diff --git a/include/MNN/expr/MathOp.hpp b/include/MNN/expr/MathOp.hpp new file mode 100644 index 000000000..0722d2ec7 --- /dev/null +++ b/include/MNN/expr/MathOp.hpp @@ -0,0 +1,83 @@ +// +// MathOp.hpp +// MNN +// +// Created by MNN on 2019/06/27. +// Copyright © 2018, Alibaba Group Holding Limited +// + +namespace MNN { +namespace Express { +//BinaryOPs +MNN_PUBLIC VARP _Add(VARP x, VARP y); +MNN_PUBLIC VARP _Subtract(VARP x, VARP y); +MNN_PUBLIC VARP _Multiply(VARP x, VARP y); +MNN_PUBLIC VARP _Divide(VARP x, VARP y); +MNN_PUBLIC VARP _Pow(VARP x, VARP y); +MNN_PUBLIC VARP _Minimum(VARP x, VARP y); +MNN_PUBLIC VARP _Maximum(VARP x, VARP y); +MNN_PUBLIC VARP _BiasAdd(VARP value, VARP bias); +MNN_PUBLIC VARP _Greater(VARP x, VARP y); +MNN_PUBLIC VARP _GreaterEqual(VARP x, VARP y); +MNN_PUBLIC VARP _Less(VARP x, VARP y); +MNN_PUBLIC VARP _FloorDiv(VARP x, VARP y); +MNN_PUBLIC VARP _SquaredDifference(VARP x, VARP y); +MNN_PUBLIC VARP _Equal(VARP x, VARP y); +MNN_PUBLIC VARP _LessEqual(VARP x, VARP y); +MNN_PUBLIC VARP _FloorMod(VARP x, VARP y); + +//UnaryOPs +MNN_PUBLIC VARP _Sign(VARP a); +MNN_PUBLIC VARP _Abs(VARP x); +MNN_PUBLIC VARP _Negative(VARP x); +MNN_PUBLIC VARP _Floor(VARP x); +MNN_PUBLIC VARP _Ceil(VARP x); +MNN_PUBLIC VARP _Square(VARP x); +MNN_PUBLIC VARP _Sqrt(VARP x); +MNN_PUBLIC VARP _Rsqrt(VARP x); +MNN_PUBLIC VARP _Exp(VARP x); +MNN_PUBLIC VARP _Log(VARP x); +MNN_PUBLIC VARP _Sin(VARP x); +MNN_PUBLIC VARP _Cos(VARP x); +MNN_PUBLIC VARP _Tan(VARP x); +MNN_PUBLIC VARP _Asin(VARP x); +MNN_PUBLIC VARP _Acos(VARP x); +MNN_PUBLIC 
VARP _Atan(VARP x); +MNN_PUBLIC VARP _Reciprocal(VARP x); +MNN_PUBLIC VARP _Log1p(VARP x); +//Only one but not in UnaryOPs +MNN_PUBLIC VARP _Tanh(VARP x); +MNN_PUBLIC VARP _Sigmoid(VARP x); + + +//ReduceOPs +MNN_PUBLIC VARP _ReduceSum(VARP input_variable, INTS axis = {}, bool keepDims = false); +MNN_PUBLIC VARP _ReduceMean(VARP input_variable, INTS axis = {}, bool keepDims = false); +MNN_PUBLIC VARP _ReduceMax(VARP input_variable, INTS axis = {}, bool keepDims = false); +MNN_PUBLIC VARP _ReduceMin(VARP input_variable, INTS axis = {}, bool keepDims = false); +MNN_PUBLIC VARP _ReduceProd(VARP input_variable, INTS axis = {}, bool keepDims = false); +MNN_PUBLIC VARP _ReduceAny(VARP input_variable, INTS axis = {}, bool keepDims = false); +MNN_PUBLIC VARP _ReduceAll(VARP input_variable, INTS axis = {}, bool keepDims = false); + +//EltwiseOPs +MNN_PUBLIC VARP _Prod(VARP a, VARP b, std::vector coeff); +MNN_PUBLIC VARP _Sum(VARP a, VARP b, std::vector coeff); +MNN_PUBLIC VARP _Max(VARP a, VARP b, std::vector coeff); +MNN_PUBLIC VARP _Sub(VARP a, VARP b, std::vector coeff); + +//OtherOPs +template +MNN_PUBLIC VARP _Cast(VARP x) { + return _Cast(x, halide_type_of()); +} +MNN_PUBLIC VARP _Cast(VARP x, halide_type_t dtype); +MNN_PUBLIC VARP _MatMul(VARP a, VARP b, bool tranposeA = false, bool tranposeB = false); +MNN_PUBLIC VARP _Normalize(VARP x, int32_t acrossSpatial, int32_t channelShared, float eps, std::vector scale); +MNN_PUBLIC VARP _ArgMax(VARP input, int axis = 0); +MNN_PUBLIC VARP _BatchMatMul(VARP x, VARP y, bool adj_x = false, bool adj_y = false); +MNN_PUBLIC VARP _UnravelIndex(VARP indices, VARP dims); +MNN_PUBLIC VARP _ScatterNd(VARP indices, VARP updates, VARP shape); +MNN_PUBLIC VARP _OneHot(VARP indices, VARP depth, VARP onValue, VARP offValue, int axis = -1); +MNN_PUBLIC VARP _BroadcastTo(VARP a, VARP shape); +}; // namespace Express +}; // namespace MNN diff --git a/include/MNN/expr/NeuralNetWorkOp.hpp b/include/MNN/expr/NeuralNetWorkOp.hpp new file mode 
100644 index 000000000..527c51f7e --- /dev/null +++ b/include/MNN/expr/NeuralNetWorkOp.hpp @@ -0,0 +1,100 @@ +// +// NeuralNetWorkOp.hpp +// MNN +// +// Created by MNN on 2019/06/27. +// Copyright © 2018, Alibaba Group Holding Limited +// + +namespace MNN { +namespace Express { +enum PaddingMode {CAFFE, VALID, SAME}; +enum PoolingMode {MAXPOOL, AVEPOOL}; +enum PadValueMode {CONSTANT, REFLECT, SYMMETRIC}; +MNN_PUBLIC VARP _Input(INTS dims = {}, Dimensionformat format = NC4HW4, halide_type_t type = halide_type_of()); +MNN_PUBLIC VARP _Clone(VARP source, bool deepCopy=false); + +MNN_PUBLIC VARP _Scalar(const void* ptr, halide_type_t type); + +template +VARP _Scalar(T value) { + return _Scalar(&value, halide_type_of()); +} + + +MNN_PUBLIC VARP _Const(float value, INTS dims = {}, Dimensionformat format = NHWC); +MNN_PUBLIC VARP _Const(const void* ptr, INTS dims = {}, Dimensionformat format = NHWC, + halide_type_t type = halide_type_of()); +MNN_PUBLIC VARP _TrainableParam(float value, INTS dims, Dimensionformat format); +MNN_PUBLIC VARP _TrainableParam(const void* ptr, INTS dims, Dimensionformat format, + halide_type_t type = halide_type_of()); +MNN_PUBLIC VARP _Conv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1}, + INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); + +MNN_PUBLIC VARP _Conv(float weight, float bias, VARP x, INTS channel, INTS kernelSize, PaddingMode pad = VALID, + INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1); +MNN_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, VARP x, INTS channel, INTS kernelSize, + PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); +MNN_PUBLIC VARP _Deconv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1}, + INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); +MNN_PUBLIC VARP _MaxPool(VARP x, INTS kernel, INTS stride, PaddingMode pad = VALID, INTS pads= {0, 0}); +MNN_PUBLIC VARP 
_AvePool(VARP x, INTS kernel, INTS stride, PaddingMode pad = VALID, INTS pads= {0, 0}); +MNN_PUBLIC VARP _Reshape(VARP x, INTS dim, Dimensionformat format = NHWC); +MNN_PUBLIC VARP _Reshape(VARP x, VARP shape); +MNN_PUBLIC VARP _Scale(VARP x, int channels, std::vector&& scales, std::vector&& bias); + +MNN_PUBLIC VARP _Relu(VARP x, float slope = 0.0f); +MNN_PUBLIC VARP _Relu6(VARP x); +MNN_PUBLIC VARP _PRelu(VARP x, std::vector &&slopes); +MNN_PUBLIC VARP _Softmax(VARP x, int axis); +MNN_PUBLIC VARP _Softplus(VARP x); +MNN_PUBLIC VARP _Softsign(VARP x); +MNN_PUBLIC std::vector _Split(VARP x, INTS points, int axis); +MNN_PUBLIC VARP _Slice(VARP x, VARP starts, VARP sizes); +MNN_PUBLIC VARP _StridedSlice(VARP x, VARP begin, VARP end, VARP strided, halide_type_t type, + int32_t beginMask, int32_t endMask, int32_t ellipsisMask, + int32_t newAxisMask, int32_t shrinkAxisMask); +MNN_PUBLIC VARP _Concat(VARPS xs, int axis); +MNN_PUBLIC VARP _Convert(VARP x, Dimensionformat dest); +MNN_PUBLIC VARP _Transpose(VARP x, INTS perm); +MNN_PUBLIC VARP _Transpose(VARP x, VARP perm); +MNN_PUBLIC VARP _ChannelShuffle(VARP x, int group); +MNN_PUBLIC VARP _ChangeInputFormat(VARP x, Dimensionformat requireInput); +MNN_PUBLIC VARP _Conv2DBackPropFilter(VARP weight, VARP input, VARP inputGrad, PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); +MNN_PUBLIC VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, INTS stride, PoolingMode type, PaddingMode pad = VALID, INTS pads= {0, 0}); +// FIXME: move the api to Array Ops +MNN_PUBLIC VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim); +// FIXME: move the api to Image Ops +MNN_PUBLIC VARP _Crop(VARP x, VARP s, int axis, INTS offset); +MNN_PUBLIC VARP _Resize(VARP x, float xScale, float yScale); +MNN_PUBLIC VARP _Pad(VARP x, VARP pads, PadValueMode mode = CONSTANT); +MNN_PUBLIC VARP _ExpandDims(VARP x, int axis); +MNN_PUBLIC VARP _ExpandDims(VARP x, 
VARP axis); + +MNN_PUBLIC VARP _Shape(VARP x); +MNN_PUBLIC VARP _Stack(VARPS values, int axis=0); +enum InterpolationMethod {BILINEAR, NEAREST}; +MNN_PUBLIC VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extrapolation, InterpolationMethod method); +MNN_PUBLIC VARP _Fill(VARP s, VARP v); +MNN_PUBLIC VARP _Tile(VARP x, VARP mul); +MNN_PUBLIC VARP _Gather(VARP embedding, VARP indices); +MNN_PUBLIC VARP _GatherV2(VARP params, VARP indices, VARP axis = nullptr); +MNN_PUBLIC VARP _Squeeze(VARP x, INTS axes = {}); +MNN_PUBLIC VARP _Unsqueeze(VARP x, INTS axes = {}); +MNN_PUBLIC VARP _BatchToSpaceND(VARP input, VARP block_shape, VARP crops); +MNN_PUBLIC VARP _GatherND(VARP params, VARP indices); +MNN_PUBLIC VARP _Selu(VARP features, float scale, float alpha); +MNN_PUBLIC VARP _Size(VARP input); +MNN_PUBLIC VARP _Elu(VARP features, float alpha=1.0); +MNN_PUBLIC VARP _MatrixBandPart(VARP input, VARP num_lower, VARP num_upper); +MNN_PUBLIC std::vector _Moments(VARP x, INTS axis, VARP shift, bool keepDims); +MNN_PUBLIC VARP _SetDiff1D(VARP x, VARP y); +MNN_PUBLIC VARP _SpaceToDepth(VARP input, int block_size); +MNN_PUBLIC VARP _SpaceToBatchND(VARP input, VARP block_shape, VARP paddings); +MNN_PUBLIC VARP _ZerosLike(VARP input); +MNN_PUBLIC std::vector _Unstack(VARP value, int axis=0); +MNN_PUBLIC VARP _Rank(VARP input); +MNN_PUBLIC VARP _Range(VARP start, VARP limit, VARP delta); +MNN_PUBLIC VARP _Interp(VARPS xs, float widthScale, float heightScale, int outputWidth, int outputHeight, int resizeType, bool alignCorners); +} // namespace Express +} // namespace MNN diff --git a/express/include/Optimizer.hpp b/include/MNN/expr/Optimizer.hpp similarity index 78% rename from express/include/Optimizer.hpp rename to include/MNN/expr/Optimizer.hpp index 19a6092b6..b1304c8d8 100644 --- a/express/include/Optimizer.hpp +++ b/include/MNN/expr/Optimizer.hpp @@ -7,10 +7,12 @@ // #ifndef Optimizer_hpp #define Optimizer_hpp -#include "Expr.hpp" +#include +#include 
+ namespace MNN { namespace Express { -class MNN_EXPRESS_PUBLIC Optimizer { +class MNN_PUBLIC Optimizer { public: enum Device { CPU = 0, @@ -18,7 +20,12 @@ class MNN_EXPRESS_PUBLIC Optimizer { OTHER = 2, AUTO = 3 }; - static std::shared_ptr create(Device device = CPU); + struct Config { + Device device = CPU; + MNNForwardType forwardType = MNN_FORWARD_ALL; + int numThread = 4; + }; + static std::shared_ptr create(Config config); struct Cost { float compute; // MFlops float memory; // MB @@ -42,9 +49,9 @@ class MNN_EXPRESS_PUBLIC Optimizer { virtual std::shared_ptr onGetParameters(const std::vector& outputs) { return nullptr; } - + //Given paramters and measure cost, the parameters must be the same as onGetParameters - virtual Cost onMeasure(const std::vector& outputs, std::shared_ptr parameters = nullptr) = 0; + virtual Cost onMeasure(const std::vector& outputs, std::shared_ptr parameters = nullptr) = 0; //Modify the output directly, the parameters must be the same as onGetParameters virtual bool onExecute(const std::vector& outputs, std::shared_ptr parameters = nullptr) = 0; diff --git a/project/android/build.gradle b/project/android/build.gradle index 50868efb9..c9fb8d2eb 100644 --- a/project/android/build.gradle +++ b/project/android/build.gradle @@ -7,25 +7,15 @@ buildscript { } dependencies { classpath 'com.android.tools.build:gradle:3.2.1' - - classpath 'com.novoda:bintray-release:0.9.1' } } repositories { - google() - jcenter() mavenLocal() } apply plugin: 'com.android.library' -ext { - group = 'com.alibaba.android' - artifactName = 'mnn' - version = '0.0.4' -} - android { compileSdkVersion 28 @@ -51,37 +41,6 @@ android { } apply from: "nativepub.gradle" -apply plugin: 'com.novoda.bintray-release' - -publishing { - publications { - nativepacked(MavenPublication) { - groupId = project.publish.groupId - artifactId project.publish.artifactId - version = project.publish.publishVersion - - artifact "${project.buildDir}/outputs/aar/${project.name}-release.aar" - 
pom.packaging "aar" - } - } - -} - -publish { - groupId = project.ext.group - artifactId = project.ext.artifactName - publishVersion = project.ext.version - - bintrayUser = 'MNN' - userOrg = 'mnnteam' - repoName = 'maven' - dryRun = false - - desc = 'MNN Library' - website = 'https://github.com/alibaba/MNN' - publications = ["nativepacked"] -} - task wrapper(type: Wrapper) { gradleVersion = '4.1' diff --git a/project/android/build_32.sh b/project/android/build_32.sh index 684ac3a4d..98e2c5cf2 100755 --- a/project/android/build_32.sh +++ b/project/android/build_32.sh @@ -8,7 +8,6 @@ cmake ../../../ \ -DANDROID_NATIVE_API_LEVEL=android-21 \ -DANDROID_TOOLCHAIN=clang \ -DMNN_BUILD_FOR_ANDROID_COMMAND=true \ --DMNN_DEBUG=false \ --DNATIVE_LIBRARY_OUTPUT=. $1 $2 $3 +-DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. $1 $2 $3 make -j4 diff --git a/project/android/build_32_ndk14.sh b/project/android/build_32_ndk14.sh index 8986a9119..8ed322237 100755 --- a/project/android/build_32_ndk14.sh +++ b/project/android/build_32_ndk14.sh @@ -8,7 +8,6 @@ cmake ../../../ \ -DANDROID_NATIVE_API_LEVEL=android-14 \ -DANDROID_TOOLCHAIN=gcc \ -DMNN_BUILD_FOR_ANDROID_COMMAND=true \ --DMNN_DEBUG=false \ --DNATIVE_LIBRARY_OUTPUT=. $1 $2 $3 +-DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. $1 $2 $3 make -j4 diff --git a/project/android/build_32_shared.sh b/project/android/build_32_shared.sh index 319e46258..137dced1e 100755 --- a/project/android/build_32_shared.sh +++ b/project/android/build_32_shared.sh @@ -8,7 +8,6 @@ cmake ../../../ \ -DANDROID_NATIVE_API_LEVEL=android-21 \ -DANDROID_TOOLCHAIN=clang \ -DMNN_BUILD_FOR_ANDROID_COMMAND=true \ --DMNN_DEBUG=false \ --DNATIVE_LIBRARY_OUTPUT=. $1 $2 $3 +-DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. 
$1 $2 $3 make -j4 diff --git a/project/android/build_32_stl_shared.sh b/project/android/build_32_stl_shared.sh index 319e46258..137dced1e 100755 --- a/project/android/build_32_stl_shared.sh +++ b/project/android/build_32_stl_shared.sh @@ -8,7 +8,6 @@ cmake ../../../ \ -DANDROID_NATIVE_API_LEVEL=android-21 \ -DANDROID_TOOLCHAIN=clang \ -DMNN_BUILD_FOR_ANDROID_COMMAND=true \ --DMNN_DEBUG=false \ --DNATIVE_LIBRARY_OUTPUT=. $1 $2 $3 +-DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. $1 $2 $3 make -j4 diff --git a/project/android/build_64.sh b/project/android/build_64.sh index f69803b35..655d61f29 100755 --- a/project/android/build_64.sh +++ b/project/android/build_64.sh @@ -1,13 +1,13 @@ #!/bin/bash cmake ../../../ \ -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \ - -DCMAKE_BUILD_TYPE=Release \ - -DMNN_DEBUG=false \ +-DCMAKE_BUILD_TYPE=Release \ +-DMNN_OPENCL=ON \ +-DMNN_BUILD_TRAIN=ON \ -DANDROID_ABI="arm64-v8a" \ -DANDROID_STL=c++_static \ -DANDROID_NATIVE_API_LEVEL=android-21 \ --DMNN_DEBUG=false \ -DMNN_BUILD_FOR_ANDROID_COMMAND=true \ --DNATIVE_LIBRARY_OUTPUT=. $1 $2 +-DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. $1 $2 make -j4 diff --git a/project/android/build_gnu_32.sh b/project/android/build_gnu_32.sh index 091f9c347..52dfb1653 100755 --- a/project/android/build_gnu_32.sh +++ b/project/android/build_gnu_32.sh @@ -8,7 +8,6 @@ cmake ../../../ \ -DANDROID_NATIVE_API_LEVEL=android-21 \ -DANDROID_TOOLCHAIN=gcc \ -DMNN_BUILD_FOR_ANDROID_COMMAND=true \ --DMNN_DEBUG=false \ --DNATIVE_LIBRARY_OUTPUT=. $1 $2 $3 +-DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. 
$1 $2 $3 make -j4 diff --git a/project/android/build_vulkan.sh b/project/android/build_vulkan.sh index 7bb680a1d..38874403e 100755 --- a/project/android/build_vulkan.sh +++ b/project/android/build_vulkan.sh @@ -29,8 +29,7 @@ cmake ../../ \ -DMNN_OPENGL=OFF \ -DMNN_OPENCL=OFF \ -DMNN_VULKAN=ON \ --DMNN_DEBUG=false \ -DMNN_BUILD_FOR_ANDROID_COMMAND=true \ --DNATIVE_LIBRARY_OUTPUT=. $1 $2 +-DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. $1 $2 make -j4 diff --git a/project/android/updateTest.sh b/project/android/updateTest.sh index 77813cb6f..e89d6efaf 100755 --- a/project/android/updateTest.sh +++ b/project/android/updateTest.sh @@ -1,11 +1,11 @@ #!/bin/bash make -j16 adb push ./libMNN.so /data/local/tmp/MNN/libMNN.so -adb push ./source/backend/opencl/libMNN_CL.so /data/local/tmp/MNN/libMNN_CL.so -adb push ./source/backend/vulkan/libMNN_Vulkan.so /data/local/tmp/MNN/libMNN_Vulkan.so -adb push ./source/backend/opengl/libMNN_GL.so /data/local/tmp/MNN/libMNN_GL.so -adb push ./express/libMNN_Express.so /data/local/tmp/MNN/libMNN_Express.so -adb push ./source/backend/arm82/libMNN_Arm82.so /data/local/tmp/MNN/libMNN_Arm82.so +adb push ./libMNN_CL.so /data/local/tmp/MNN/libMNN_CL.so +adb push ./libMNN_Vulkan.so /data/local/tmp/MNN/libMNN_Vulkan.so +adb push ./libMNN_GL.so /data/local/tmp/MNN/libMNN_GL.so +adb push ./libMNN_Express.so /data/local/tmp/MNN/libMNN_Express.so +adb push ./libMNN_Arm82.so /data/local/tmp/MNN/libMNN_Arm82.so adb push ./MNNV2Basic.out /data/local/tmp/MNN/MNNV2Basic.out adb shell "cd /data/local/tmp/MNN && rm -r output" adb shell "cd /data/local/tmp/MNN && mkdir output" @@ -15,3 +15,4 @@ adb push ./testModelWithDescrisbe.out /data/local/tmp/MNN/testModelWithDescrisbe adb push ./backendTest.out /data/local/tmp/MNN/backendTest.out adb push ./timeProfile.out /data/local/tmp/MNN/timeProfile.out +adb push ./train.out /data/local/tmp/MNN/train.out diff --git a/project/ios/MNN.xcodeproj/project.pbxproj b/project/ios/MNN.xcodeproj/project.pbxproj index 
9585184ec..7cf5e813d 100644 --- a/project/ios/MNN.xcodeproj/project.pbxproj +++ b/project/ios/MNN.xcodeproj/project.pbxproj @@ -7,477 +7,759 @@ objects = { /* Begin PBXBuildFile section */ - 0F22069B211060A000EAE225 /* MNNForwardType.h in Headers */ = {isa = PBXBuildFile; fileRef = 4805294B2105BADB00AA776E /* MNNForwardType.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 11EDD60A22E55A09007F3793 /* CPUDepthToSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 11EDD60622E55A09007F3793 /* CPUDepthToSpace.cpp */; }; - 11EDD60B22E55A09007F3793 /* CPUDepthToSpace.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 11EDD60722E55A09007F3793 /* CPUDepthToSpace.hpp */; }; - 11EDD60C22E55A09007F3793 /* CPUSpaceToDepth.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 11EDD60822E55A09007F3793 /* CPUSpaceToDepth.cpp */; }; - 11EDD60D22E55A09007F3793 /* CPUSpaceToDepth.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 11EDD60922E55A09007F3793 /* CPUSpaceToDepth.hpp */; }; + 1F501EEF2397BA26004E8721 /* ImageSampler.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EEA2397BA26004E8721 /* ImageSampler.hpp */; }; + 1F501EF02397BA26004E8721 /* ImageFloatBlitter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EEB2397BA26004E8721 /* ImageFloatBlitter.hpp */; }; + 1F501EF12397BA26004E8721 /* SkNx_neon.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EEC2397BA26004E8721 /* SkNx_neon.h */; }; + 1F501EF22397BA26004E8721 /* ImageBlitter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EED2397BA26004E8721 /* ImageBlitter.hpp */; }; + 1F501EF32397BA26004E8721 /* SkNx.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EEE2397BA26004E8721 /* SkNx.h */; }; + 1F501EF72397BA31004E8721 /* Matrix.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EF42397BA31004E8721 /* Matrix.hpp */; }; + 1F501EF82397BA31004E8721 /* Vec4.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EF52397BA31004E8721 /* Vec4.hpp */; }; + 1F501EF92397BA31004E8721 /* 
WingoradGenerater.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EF62397BA31004E8721 /* WingoradGenerater.hpp */; }; + 1F501F362397BA4D004E8721 /* MetalQuantizedReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EFA2397BA49004E8721 /* MetalQuantizedReshape.hpp */; }; + 1F501F372397BA4D004E8721 /* MetalInterp.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EFB2397BA49004E8721 /* MetalInterp.hpp */; }; + 1F501F382397BA4D004E8721 /* MetalBatchToSpaceND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EFC2397BA49004E8721 /* MetalBatchToSpaceND.hpp */; }; + 1F501F392397BA4D004E8721 /* MNNMetalContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EFD2397BA49004E8721 /* MNNMetalContext.h */; }; + 1F501F3A2397BA4D004E8721 /* MetalSpatialProduct.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EFE2397BA49004E8721 /* MetalSpatialProduct.hpp */; }; + 1F501F3B2397BA4D004E8721 /* MetalUnary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501EFF2397BA49004E8721 /* MetalUnary.hpp */; }; + 1F501F3C2397BA4D004E8721 /* MetalBackend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F002397BA49004E8721 /* MetalBackend.hpp */; }; + 1F501F3D2397BA4D004E8721 /* MetalCast.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F012397BA49004E8721 /* MetalCast.hpp */; }; + 1F501F3E2397BA4D004E8721 /* MetalBinary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F022397BA49004E8721 /* MetalBinary.hpp */; }; + 1F501F3F2397BA4D004E8721 /* MetalLSTM.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F032397BA49004E8721 /* MetalLSTM.hpp */; }; + 1F501F402397BA4D004E8721 /* MetalROIPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F042397BA49004E8721 /* MetalROIPooling.hpp */; }; + 1F501F412397BA4D004E8721 /* MetalReLU6.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F052397BA49004E8721 /* MetalReLU6.hpp */; }; + 1F501F422397BA4D004E8721 /* MetalGather.hpp in Headers */ = {isa = PBXBuildFile; fileRef 
= 1F501F062397BA4A004E8721 /* MetalGather.hpp */; }; + 1F501F432397BA4D004E8721 /* MetalSliceTF.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F072397BA4A004E8721 /* MetalSliceTF.hpp */; }; + 1F501F442397BA4D004E8721 /* MetalResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F082397BA4A004E8721 /* MetalResize.hpp */; }; + 1F501F452397BA4D004E8721 /* MetalConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F092397BA4A004E8721 /* MetalConvolutionDepthwise.hpp */; }; + 1F501F462397BA4D004E8721 /* MetalDefine.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F0A2397BA4A004E8721 /* MetalDefine.h */; }; + 1F501F472397BA4D004E8721 /* MetalSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F0B2397BA4A004E8721 /* MetalSoftmax.hpp */; }; + 1F501F482397BA4D004E8721 /* MetalMatMul.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F0C2397BA4A004E8721 /* MetalMatMul.hpp */; }; + 1F501F492397BA4D004E8721 /* MetalTensorConverter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F0D2397BA4A004E8721 /* MetalTensorConverter.hpp */; }; + 1F501F4A2397BA4D004E8721 /* MetalCropAndResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F0E2397BA4A004E8721 /* MetalCropAndResize.hpp */; }; + 1F501F4B2397BA4D004E8721 /* MetalRank.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F0F2397BA4A004E8721 /* MetalRank.hpp */; }; + 1F501F4C2397BA4D004E8721 /* MetalTranspose.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F102397BA4A004E8721 /* MetalTranspose.hpp */; }; + 1F501F4D2397BA4D004E8721 /* MetalConvolutionCommon.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F112397BA4A004E8721 /* MetalConvolutionCommon.hpp */; }; + 1F501F4E2397BA4D004E8721 /* MetalQuantizedMaxPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F122397BA4A004E8721 /* MetalQuantizedMaxPool.hpp */; }; + 1F501F4F2397BA4D004E8721 /* MetalFill.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 
1F501F132397BA4B004E8721 /* MetalFill.hpp */; }; + 1F501F502397BA4D004E8721 /* MetalSqueeze.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F142397BA4B004E8721 /* MetalSqueeze.hpp */; }; + 1F501F512397BA4D004E8721 /* MetalCrop.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F152397BA4B004E8721 /* MetalCrop.hpp */; }; + 1F501F522397BA4D004E8721 /* MetalScale.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F162397BA4B004E8721 /* MetalScale.hpp */; }; + 1F501F532397BA4D004E8721 /* MetalTile.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F172397BA4B004E8721 /* MetalTile.hpp */; }; + 1F501F542397BA4D004E8721 /* MetalSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F182397BA4B004E8721 /* MetalSlice.hpp */; }; + 1F501F552397BA4D004E8721 /* MetalConvolution1x1.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F192397BA4B004E8721 /* MetalConvolution1x1.hpp */; }; + 1F501F562397BA4D004E8721 /* MetalPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F1A2397BA4B004E8721 /* MetalPooling.hpp */; }; + 1F501F572397BA4D004E8721 /* MetalQuantizedAdd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F1B2397BA4B004E8721 /* MetalQuantizedAdd.hpp */; }; + 1F501F582397BA4D004E8721 /* MetalTanH.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F1C2397BA4B004E8721 /* MetalTanH.hpp */; }; + 1F501F592397BA4D004E8721 /* MetalTFQuantizedConv2D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F1D2397BA4B004E8721 /* MetalTFQuantizedConv2D.hpp */; }; + 1F501F5A2397BA4D004E8721 /* MetalConvolutionWinograd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F1E2397BA4B004E8721 /* MetalConvolutionWinograd.hpp */; }; + 1F501F5B2397BA4D004E8721 /* MetalSpaceToBatchND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F1F2397BA4B004E8721 /* MetalSpaceToBatchND.hpp */; }; + 1F501F5C2397BA4D004E8721 /* MetalEltwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F202397BA4B004E8721 /* MetalEltwise.hpp */; 
}; + 1F501F5D2397BA4D004E8721 /* MetalReLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F212397BA4B004E8721 /* MetalReLU.hpp */; }; + 1F501F5E2397BA4D004E8721 /* MetalSize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F222397BA4B004E8721 /* MetalSize.hpp */; }; + 1F501F5F2397BA4D004E8721 /* MetalSigmoid.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F232397BA4B004E8721 /* MetalSigmoid.hpp */; }; + 1F501F602397BA4D004E8721 /* MetalSeLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F242397BA4B004E8721 /* MetalSeLU.hpp */; }; + 1F501F612397BA4D004E8721 /* MetalNormalize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F252397BA4C004E8721 /* MetalNormalize.hpp */; }; + 1F501F622397BA4D004E8721 /* MetalQuantizedSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F262397BA4C004E8721 /* MetalQuantizedSoftmax.hpp */; }; + 1F501F632397BA4D004E8721 /* MetalRange.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F272397BA4C004E8721 /* MetalRange.hpp */; }; + 1F501F642397BA4D004E8721 /* MetalDequantize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F282397BA4C004E8721 /* MetalDequantize.hpp */; }; + 1F501F652397BA4D004E8721 /* MetalConvolutionGEMM.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F292397BA4C004E8721 /* MetalConvolutionGEMM.hpp */; }; + 1F501F662397BA4D004E8721 /* MetalGatherV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F2A2397BA4C004E8721 /* MetalGatherV2.hpp */; }; + 1F501F672397BA4D004E8721 /* MetalConvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F2B2397BA4C004E8721 /* MetalConvolution.hpp */; }; + 1F501F682397BA4D004E8721 /* MetalConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F2C2397BA4C004E8721 /* MetalConcat.hpp */; }; + 1F501F692397BA4D004E8721 /* MetalPack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F2D2397BA4C004E8721 /* MetalPack.hpp */; }; + 1F501F6A2397BA4D004E8721 /* MetalPermute.hpp in Headers */ = {isa = 
PBXBuildFile; fileRef = 1F501F2E2397BA4C004E8721 /* MetalPermute.hpp */; }; + 1F501F6B2397BA4D004E8721 /* MetalLRN.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F2F2397BA4C004E8721 /* MetalLRN.hpp */; }; + 1F501F6C2397BA4D004E8721 /* MetalPReLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F302397BA4C004E8721 /* MetalPReLU.hpp */; }; + 1F501F6D2397BA4D004E8721 /* MetalDeconvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F312397BA4C004E8721 /* MetalDeconvolution.hpp */; }; + 1F501F6E2397BA4D004E8721 /* MetalQuantizedAvgPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F322397BA4C004E8721 /* MetalQuantizedAvgPool.hpp */; }; + 1F501F6F2397BA4D004E8721 /* MetalStridedSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F332397BA4C004E8721 /* MetalStridedSlice.hpp */; }; + 1F501F702397BA4D004E8721 /* MetalReduction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F342397BA4D004E8721 /* MetalReduction.hpp */; }; + 1F501F712397BA4D004E8721 /* MetalReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F352397BA4D004E8721 /* MetalReshape.hpp */; }; + 1F501F7F2397BA5B004E8721 /* HalideRuntime.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F722397BA5A004E8721 /* HalideRuntime.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F802397BA5B004E8721 /* MNNDefine.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F732397BA5A004E8721 /* MNNDefine.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F812397BA5B004E8721 /* AutoTime.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F742397BA5A004E8721 /* AutoTime.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F822397BA5B004E8721 /* Interpreter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F752397BA5A004E8721 /* Interpreter.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F842397BA5B004E8721 /* ImageProcess.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F772397BA5A004E8721 /* ImageProcess.hpp */; 
settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F852397BA5B004E8721 /* ErrorCode.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F782397BA5A004E8721 /* ErrorCode.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F862397BA5B004E8721 /* Rect.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F792397BA5A004E8721 /* Rect.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F872397BA5B004E8721 /* Matrix.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F7A2397BA5A004E8721 /* Matrix.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F882397BA5B004E8721 /* Tensor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F7B2397BA5A004E8721 /* Tensor.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F892397BA5B004E8721 /* MNNForwardType.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F7C2397BA5A004E8721 /* MNNForwardType.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F8B2397BA5B004E8721 /* MNNSharedContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F7E2397BA5B004E8721 /* MNNSharedContext.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 1F501F9D2397BB00004E8721 /* expr in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F762397BA5A004E8721 /* expr */; settings = {ATTRIBUTES = (Public, ); }; }; + 1FD952C023A89CA100888FC3 /* CPUQuantizedAvgPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD951E923A89C7200888FC3 /* CPUQuantizedAvgPool.cpp */; }; + 1FD952C123A89CA100888FC3 /* CPUUnravelIndex.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951EA23A89C7200888FC3 /* CPUUnravelIndex.hpp */; }; + 1FD952C223A89CA100888FC3 /* CPUNonMaxSuppressionV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951EB23A89C7200888FC3 /* CPUNonMaxSuppressionV2.hpp */; }; + 1FD952C323A89CA100888FC3 /* CPUQuantizedLogistic.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951EC23A89C7200888FC3 /* CPUQuantizedLogistic.hpp */; }; + 1FD952C423A89CA100888FC3 /* CPUBackend.cpp in Sources */ = {isa = PBXBuildFile; 
fileRef = 1FD951ED23A89C7300888FC3 /* CPUBackend.cpp */; }; + 1FD952C523A89CA100888FC3 /* CPUProposal.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD951EE23A89C7300888FC3 /* CPUProposal.cpp */; }; + 1FD952C623A89CA100888FC3 /* CPUMoments.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951EF23A89C7300888FC3 /* CPUMoments.hpp */; }; + 1FD952C723A89CA100888FC3 /* CPUStridedSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951F023A89C7300888FC3 /* CPUStridedSlice.hpp */; }; + 1FD952C823A89CA100888FC3 /* CPUInt8ToFloat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD951F123A89C7300888FC3 /* CPUInt8ToFloat.cpp */; }; + 1FD952C923A89CA100888FC3 /* CPUEltwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD951F223A89C7300888FC3 /* CPUEltwise.cpp */; }; + 1FD952CA23A89CA100888FC3 /* CPUPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951F323A89C7400888FC3 /* CPUPool.hpp */; }; + 1FD952CB23A89CA100888FC3 /* CPUReduction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951F423A89C7400888FC3 /* CPUReduction.hpp */; }; + 1FD952CC23A89CA100888FC3 /* CPUConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD951F523A89C7400888FC3 /* CPUConcat.cpp */; }; + 1FD952CD23A89CA100888FC3 /* CPUPermute.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951F623A89C7400888FC3 /* CPUPermute.hpp */; }; + 1FD952CE23A89CA100888FC3 /* CPUQuantizedConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD951F723A89C7400888FC3 /* CPUQuantizedConcat.cpp */; }; + 1FD952CF23A89CA100888FC3 /* CPUDetectionOutput.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951F823A89C7400888FC3 /* CPUDetectionOutput.hpp */; }; + 1FD952D023A89CA100888FC3 /* CPUQuantizedLogistic.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD951F923A89C7500888FC3 /* CPUQuantizedLogistic.cpp */; }; + 1FD952D123A89CA100888FC3 /* CPUReluGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD951FA23A89C7500888FC3 /* CPUReluGrad.cpp */; }; + 
1FD952D223A89CA100888FC3 /* CPUReverseSequence.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951FB23A89C7500888FC3 /* CPUReverseSequence.hpp */; }; + 1FD952D323A89CA100888FC3 /* CPUReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951FC23A89C7500888FC3 /* CPUReshape.hpp */; }; + 1FD952D423A89CA100888FC3 /* CPUPack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951FD23A89C7500888FC3 /* CPUPack.hpp */; }; + 1FD952D523A89CA100888FC3 /* CPUScale.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951FE23A89C7500888FC3 /* CPUScale.hpp */; }; + 1FD952D623A89CA100888FC3 /* CPULinSpace.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD951FF23A89C7600888FC3 /* CPULinSpace.hpp */; }; + 1FD952D723A89CA100888FC3 /* CPUSpatialProduct.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520023A89C7600888FC3 /* CPUSpatialProduct.hpp */; }; + 1FD952D823A89CA100888FC3 /* CPUDeconvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520123A89C7600888FC3 /* CPUDeconvolutionDepthwise.hpp */; }; + 1FD952D923A89CA100888FC3 /* CPUArgMax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9520223A89C7600888FC3 /* CPUArgMax.cpp */; }; + 1FD952DA23A89CA100888FC3 /* CPUFill.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9520323A89C7600888FC3 /* CPUFill.cpp */; }; + 1FD952DB23A89CA100888FC3 /* CPUQuantizedReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9520423A89C7600888FC3 /* CPUQuantizedReshape.cpp */; }; + 1FD952DC23A89CA100888FC3 /* CPUQuanConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520523A89C7600888FC3 /* CPUQuanConvolutionDepthwise.hpp */; }; + 1FD952DD23A89CA100888FC3 /* CPUSqueeze.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9520623A89C7700888FC3 /* CPUSqueeze.cpp */; }; + 1FD952DE23A89CA100888FC3 /* CPUInstanceNorm.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520723A89C7700888FC3 /* CPUInstanceNorm.hpp */; }; + 1FD952DF23A89CA100888FC3 /* CPUSpaceToDepth.hpp 
in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520823A89C7700888FC3 /* CPUSpaceToDepth.hpp */; }; + 1FD952E023A89CA100888FC3 /* CPUFill.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520923A89C7700888FC3 /* CPUFill.hpp */; }; + 1FD952E123A89CA100888FC3 /* CPUPoolGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9520A23A89C7700888FC3 /* CPUPoolGrad.cpp */; }; + 1FD952E223A89CA100888FC3 /* CPUInnerProduct.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520B23A89C7700888FC3 /* CPUInnerProduct.hpp */; }; + 1FD952E323A89CA100888FC3 /* CPUMatrixBandPart.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520C23A89C7700888FC3 /* CPUMatrixBandPart.hpp */; }; + 1FD952E423A89CA100888FC3 /* CPUBatchToSpaceND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9520D23A89C7800888FC3 /* CPUBatchToSpaceND.cpp */; }; + 1FD952E523A89CA100888FC3 /* CPUElu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9520E23A89C7800888FC3 /* CPUElu.hpp */; }; + 1FD952E623A89CA100888FC3 /* CPUNormalize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9520F23A89C7800888FC3 /* CPUNormalize.cpp */; }; + 1FD952E723A89CA100888FC3 /* CPUQuantizedMaxPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9521023A89C7800888FC3 /* CPUQuantizedMaxPool.hpp */; }; + 1FD952E823A89CA100888FC3 /* CPUUnary.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521123A89C7800888FC3 /* CPUUnary.cpp */; }; + 1FD952E923A89CA100888FC3 /* CPURNNSequenceGRU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9521223A89C7800888FC3 /* CPURNNSequenceGRU.hpp */; }; + 1FD952EA23A89CA100888FC3 /* CPULinSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521323A89C7800888FC3 /* CPULinSpace.cpp */; }; + 1FD952EB23A89CA100888FC3 /* CPUUnravelIndex.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521423A89C7900888FC3 /* CPUUnravelIndex.cpp */; }; + 1FD952EC23A89CA100888FC3 /* CPUArgMax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9521523A89C7900888FC3 /* 
CPUArgMax.hpp */; }; + 1FD952ED23A89CA100888FC3 /* CPUGatherV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521623A89C7900888FC3 /* CPUGatherV2.cpp */; }; + 1FD952EE23A89CA100888FC3 /* CPUQuantizedSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9521723A89C7900888FC3 /* CPUQuantizedSoftmax.hpp */; }; + 1FD952EF23A89CA100888FC3 /* CPUDetectionOutput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521823A89C7900888FC3 /* CPUDetectionOutput.cpp */; }; + 1FD952F023A89CA100888FC3 /* CPUTranspose.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521923A89C7900888FC3 /* CPUTranspose.cpp */; }; + 1FD952F123A89CA100888FC3 /* CPUMatrixBandPart.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521A23A89C7900888FC3 /* CPUMatrixBandPart.cpp */; }; + 1FD952F223A89CA100888FC3 /* CPUCosineSimilarity.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521B23A89C7900888FC3 /* CPUCosineSimilarity.cpp */; }; + 1FD952F323A89CA100888FC3 /* CPUDequantize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521C23A89C7A00888FC3 /* CPUDequantize.cpp */; }; + 1FD952F423A89CA100888FC3 /* CPUROIPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9521D23A89C7A00888FC3 /* CPUROIPooling.hpp */; }; + 1FD952F523A89CA100888FC3 /* CPUDepthwiseConvInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9521E23A89C7A00888FC3 /* CPUDepthwiseConvInt8.cpp */; }; + 1FD952F623A89CA100888FC3 /* CPUAsString.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9521F23A89C7A00888FC3 /* CPUAsString.hpp */; }; + 1FD952F723A89CA100888FC3 /* CPUGatherV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522023A89C7A00888FC3 /* CPUGatherV2.hpp */; }; + 1FD952F823A89CA100888FC3 /* CPUConvInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9522123A89C7A00888FC3 /* CPUConvInt8.cpp */; }; + 1FD952F923A89CA100888FC3 /* CPUSliceTf.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9522223A89C7A00888FC3 /* CPUSliceTf.cpp */; }; + 
1FD952FA23A89CA100888FC3 /* CPUBackend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522323A89C7B00888FC3 /* CPUBackend.hpp */; }; + 1FD952FB23A89CA100888FC3 /* CPUMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9522423A89C7B00888FC3 /* CPUMatMul.cpp */; }; + 1FD952FC23A89CA100888FC3 /* CPUScatterNd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9522523A89C7B00888FC3 /* CPUScatterNd.cpp */; }; + 1FD952FD23A89CA100888FC3 /* CPUConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522623A89C7B00888FC3 /* CPUConvolutionDepthwise.hpp */; }; + 1FD952FE23A89CA100888FC3 /* CPUCropAndResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522723A89C7B00888FC3 /* CPUCropAndResize.hpp */; }; + 1FD952FF23A89CA100888FC3 /* CPUPriorbox.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522823A89C7B00888FC3 /* CPUPriorbox.hpp */; }; + 1FD9530023A89CA100888FC3 /* CPULRN.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9522923A89C7C00888FC3 /* CPULRN.cpp */; }; + 1FD9530123A89CA100888FC3 /* CPUPadding.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522A23A89C7C00888FC3 /* CPUPadding.hpp */; }; + 1FD9530223A89CA100888FC3 /* CPUScatterNd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522B23A89C7C00888FC3 /* CPUScatterNd.hpp */; }; + 1FD9530323A89CA100888FC3 /* CPUResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9522C23A89C7C00888FC3 /* CPUResize.cpp */; }; + 1FD9530423A89CA100888FC3 /* CPUGather.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522D23A89C7C00888FC3 /* CPUGather.hpp */; }; + 1FD9530523A89CA100888FC3 /* CPUNonMaxSuppressionV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9522E23A89C7C00888FC3 /* CPUNonMaxSuppressionV2.cpp */; }; + 1FD9530623A89CA100888FC3 /* CPUQuantizationUtils.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9522F23A89C7C00888FC3 /* CPUQuantizationUtils.hpp */; }; + 1FD9530723A89CA100888FC3 /* CPUUnpack.cpp in Sources */ = {isa = 
PBXBuildFile; fileRef = 1FD9523023A89C7C00888FC3 /* CPUUnpack.cpp */; }; + 1FD9530823A89CA100888FC3 /* CPUConvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523123A89C7D00888FC3 /* CPUConvolutionDepthwise.cpp */; }; + 1FD9530923A89CA100888FC3 /* CPUUnary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9523223A89C7D00888FC3 /* CPUUnary.hpp */; }; + 1FD9530A23A89CA100888FC3 /* CPUBatchMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523323A89C7D00888FC3 /* CPUBatchMatMul.cpp */; }; + 1FD9530B23A89CA100888FC3 /* CPUCosineSimilarity.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9523423A89C7D00888FC3 /* CPUCosineSimilarity.hpp */; }; + 1FD9530C23A89CA100888FC3 /* CPUBinary.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523523A89C7D00888FC3 /* CPUBinary.cpp */; }; + 1FD9530D23A89CA100888FC3 /* CPUConst.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523623A89C7D00888FC3 /* CPUConst.cpp */; }; + 1FD9530E23A89CA100888FC3 /* CPUDilation2D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523723A89C7D00888FC3 /* CPUDilation2D.cpp */; }; + 1FD9530F23A89CA100888FC3 /* CPUConvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523823A89C7D00888FC3 /* CPUConvolution.cpp */; }; + 1FD9531023A89CA100888FC3 /* CPURank.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9523923A89C7E00888FC3 /* CPURank.hpp */; }; + 1FD9531123A89CA100888FC3 /* CPUPoolInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523A23A89C7E00888FC3 /* CPUPoolInt8.cpp */; }; + 1FD9531223A89CA100888FC3 /* CPUQuantizedMaxPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523B23A89C7E00888FC3 /* CPUQuantizedMaxPool.cpp */; }; + 1FD9531323A89CA100888FC3 /* CPUShape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9523C23A89C7E00888FC3 /* CPUShape.hpp */; }; + 1FD9531423A89CA100888FC3 /* CPUPriorbox.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523D23A89C7E00888FC3 /* CPUPriorbox.cpp */; }; + 
1FD9531523A89CA100888FC3 /* CPUTopKV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523E23A89C7E00888FC3 /* CPUTopKV2.cpp */; }; + 1FD9531623A89CA100888FC3 /* CPUTile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9523F23A89C7F00888FC3 /* CPUTile.cpp */; }; + 1FD9531723A89CA100888FC3 /* CPUPoolInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524023A89C7F00888FC3 /* CPUPoolInt8.hpp */; }; + 1FD9531823A89CA100888FC3 /* CPUTensorConvert.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524123A89C7F00888FC3 /* CPUTensorConvert.hpp */; }; + 1FD9531923A89CA100888FC3 /* CPUThreshold.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524223A89C7F00888FC3 /* CPUThreshold.hpp */; }; + 1FD9531A23A89CA100888FC3 /* CPUReduction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9524323A89C7F00888FC3 /* CPUReduction.cpp */; }; + 1FD9531B23A89CA100888FC3 /* CPUNormalize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524423A89C7F00888FC3 /* CPUNormalize.hpp */; }; + 1FD9531C23A89CA100888FC3 /* CPUTensorConvert.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9524523A89C7F00888FC3 /* CPUTensorConvert.cpp */; }; + 1FD9531D23A89CA100888FC3 /* ThreadPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524623A89C8000888FC3 /* ThreadPool.hpp */; }; + 1FD9531E23A89CA100888FC3 /* CPUSize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524723A89C8000888FC3 /* CPUSize.hpp */; }; + 1FD9531F23A89CA100888FC3 /* CPUFixedPoint.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524823A89C8000888FC3 /* CPUFixedPoint.hpp */; }; + 1FD9532023A89CA100888FC3 /* CPUPadding.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9524923A89C8000888FC3 /* CPUPadding.cpp */; }; + 1FD9532123A89CA100888FC3 /* CPUReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9524A23A89C8000888FC3 /* CPUReshape.cpp */; }; + 1FD9532223A89CA100888FC3 /* CPURelu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524B23A89C8000888FC3 /* 
CPURelu.hpp */; }; + 1FD9532323A89CA100888FC3 /* CPUDetectionPostProcess.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524C23A89C8000888FC3 /* CPUDetectionPostProcess.hpp */; }; + 1FD9532423A89CA100888FC3 /* CPUSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524D23A89C8000888FC3 /* CPUSlice.hpp */; }; + 1FD9532523A89CA100888FC3 /* CPUReduceJoin.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9524E23A89C8100888FC3 /* CPUReduceJoin.cpp */; }; + 1FD9532623A89CA100888FC3 /* CPUBroadcastTo.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9524F23A89C8100888FC3 /* CPUBroadcastTo.hpp */; }; + 1FD9532723A89CA100888FC3 /* CPUInterp.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9525023A89C8100888FC3 /* CPUInterp.hpp */; }; + 1FD9532823A89CA100888FC3 /* CPUSize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525123A89C8100888FC3 /* CPUSize.cpp */; }; + 1FD9532923A89CA100888FC3 /* CPUOPRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525223A89C8100888FC3 /* CPUOPRegister.cpp */; }; + 1FD9532A23A89CA100888FC3 /* CPUTanh.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9525323A89C8200888FC3 /* CPUTanh.hpp */; }; + 1FD9532B23A89CA100888FC3 /* ThreadPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525423A89C8200888FC3 /* ThreadPool.cpp */; }; + 1FD9532C23A89CA100888FC3 /* CPUSoftmax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525523A89C8200888FC3 /* CPUSoftmax.cpp */; }; + 1FD9532D23A89CA100888FC3 /* CPURange.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525623A89C8300888FC3 /* CPURange.cpp */; }; + 1FD9532E23A89CA100888FC3 /* CPUQuantizedReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9525723A89C8300888FC3 /* CPUQuantizedReshape.hpp */; }; + 1FD9532F23A89CA100888FC3 /* CPUOneHot.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9525823A89C8300888FC3 /* CPUOneHot.hpp */; }; + 1FD9533023A89CA100888FC3 /* CPUExpandDims.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 
1FD9525923A89C8300888FC3 /* CPUExpandDims.hpp */; }; + 1FD9533123A89CA100888FC3 /* CPUProposal.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9525A23A89C8300888FC3 /* CPUProposal.hpp */; }; + 1FD9533223A89CA100888FC3 /* CPUDeconvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525B23A89C8300888FC3 /* CPUDeconvolutionDepthwise.cpp */; }; + 1FD9533323A89CA100888FC3 /* CPUGatherND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9525C23A89C8400888FC3 /* CPUGatherND.hpp */; }; + 1FD9533423A89CA100888FC3 /* CPUEltwiseInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525D23A89C8400888FC3 /* CPUEltwiseInt8.cpp */; }; + 1FD9533523A89CA100888FC3 /* CPUSigmoid.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525E23A89C8400888FC3 /* CPUSigmoid.cpp */; }; + 1FD9533623A89CA100888FC3 /* CPURank.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9525F23A89C8400888FC3 /* CPURank.cpp */; }; + 1FD9533723A89CA100888FC3 /* CPUROIPooling.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9526023A89C8500888FC3 /* CPUROIPooling.cpp */; }; + 1FD9533823A89CA100888FC3 /* CPUQuantizedSoftmax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9526123A89C8500888FC3 /* CPUQuantizedSoftmax.cpp */; }; + 1FD9533923A89CA100888FC3 /* CPURelu.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9526223A89C8500888FC3 /* CPURelu.cpp */; }; + 1FD9533A23A89CA100888FC3 /* CPUQuantizedAdd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9526323A89C8500888FC3 /* CPUQuantizedAdd.hpp */; }; + 1FD9533B23A89CA100888FC3 /* CPUCrop.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9526423A89C8500888FC3 /* CPUCrop.hpp */; }; + 1FD9533C23A89CA100888FC3 /* CPUSliceTf.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9526523A89C8500888FC3 /* CPUSliceTf.hpp */; }; + 1FD9533D23A89CA100888FC3 /* CPUInnerProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9526623A89C8500888FC3 /* CPUInnerProduct.cpp */; }; + 1FD9533E23A89CA100888FC3 /* 
CPUUnpack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9526723A89C8600888FC3 /* CPUUnpack.hpp */; }; + 1FD9533F23A89CA100888FC3 /* CPUConv2DBackPropFilter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9526823A89C8600888FC3 /* CPUConv2DBackPropFilter.cpp */; }; + 1FD9534023A89CA100888FC3 /* CPUSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9526923A89C8600888FC3 /* CPUSlice.cpp */; }; + 1FD9534123A89CA100888FC3 /* CPUSelu.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9526A23A89C9800888FC3 /* CPUSelu.cpp */; }; + 1FD9534223A89CA100888FC3 /* CPUSetDiff1D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9526B23A89C9800888FC3 /* CPUSetDiff1D.hpp */; }; + 1FD9534323A89CA100888FC3 /* CPURuntime.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9526C23A89C9800888FC3 /* CPURuntime.cpp */; }; + 1FD9534423A89CA100888FC3 /* CPUFloatToInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9526D23A89C9800888FC3 /* CPUFloatToInt8.hpp */; }; + 1FD9534523A89CA100888FC3 /* CPUDilation2D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9526E23A89C9800888FC3 /* CPUDilation2D.hpp */; }; + 1FD9534623A89CA100888FC3 /* CPUZeroLike.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9526F23A89C9800888FC3 /* CPUZeroLike.hpp */; }; + 1FD9534723A89CA100888FC3 /* CPUCropAndResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9527023A89C9800888FC3 /* CPUCropAndResize.cpp */; }; + 1FD9534823A89CA100888FC3 /* CPUMoments.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9527123A89C9800888FC3 /* CPUMoments.cpp */; }; + 1FD9534923A89CA100888FC3 /* CPUScale.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9527223A89C9800888FC3 /* CPUScale.cpp */; }; + 1FD9534A23A89CA100888FC3 /* CPUCast.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9527323A89C9900888FC3 /* CPUCast.cpp */; }; + 1FD9534B23A89CA200888FC3 /* CPUSpatialProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9527423A89C9900888FC3 /* 
CPUSpatialProduct.cpp */; }; + 1FD9534C23A89CA200888FC3 /* CPUReluGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9527523A89C9900888FC3 /* CPUReluGrad.hpp */; }; + 1FD9534D23A89CA200888FC3 /* CPUResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9527623A89C9900888FC3 /* CPUResize.hpp */; }; + 1FD9534E23A89CA200888FC3 /* CPUQuanConvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9527723A89C9900888FC3 /* CPUQuanConvolutionDepthwise.cpp */; }; + 1FD9534F23A89CA200888FC3 /* CPUBatchToSpaceND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9527823A89C9900888FC3 /* CPUBatchToSpaceND.hpp */; }; + 1FD9535023A89CA200888FC3 /* CPUEltwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9527923A89C9900888FC3 /* CPUEltwise.hpp */; }; + 1FD9535123A89CA200888FC3 /* CPUConvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9527A23A89C9900888FC3 /* CPUConvolution.hpp */; }; + 1FD9535223A89CA200888FC3 /* CPUConvInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9527B23A89C9A00888FC3 /* CPUConvInt8.hpp */; }; + 1FD9535323A89CA200888FC3 /* CPUSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9527C23A89C9A00888FC3 /* CPUSoftmax.hpp */; }; + 1FD9535423A89CA200888FC3 /* CPUDepthToSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9527D23A89C9A00888FC3 /* CPUDepthToSpace.cpp */; }; + 1FD9535523A89CA200888FC3 /* CPURuntime.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9527E23A89C9A00888FC3 /* CPURuntime.hpp */; }; + 1FD9535623A89CA200888FC3 /* CPUExpandDims.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9527F23A89C9A00888FC3 /* CPUExpandDims.cpp */; }; + 1FD9535723A89CA200888FC3 /* CPUDetectionPostProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9528023A89C9A00888FC3 /* CPUDetectionPostProcess.cpp */; }; + 1FD9535823A89CA200888FC3 /* CPUDepthToSpace.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528123A89C9A00888FC3 /* CPUDepthToSpace.hpp */; }; + 
1FD9535923A89CA200888FC3 /* CPUMatMul.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528223A89C9A00888FC3 /* CPUMatMul.hpp */; }; + 1FD9535A23A89CA200888FC3 /* CPUSelect.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9528323A89C9A00888FC3 /* CPUSelect.cpp */; }; + 1FD9535B23A89CA200888FC3 /* CPUSelu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528423A89C9A00888FC3 /* CPUSelu.hpp */; }; + 1FD9535C23A89CA200888FC3 /* CPUSoftmaxGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9528523A89C9B00888FC3 /* CPUSoftmaxGrad.cpp */; }; + 1FD9535D23A89CA200888FC3 /* CPUTFQuantizedConv2D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528623A89C9B00888FC3 /* CPUTFQuantizedConv2D.hpp */; }; + 1FD9535E23A89CA200888FC3 /* CPUTranspose.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528723A89C9B00888FC3 /* CPUTranspose.hpp */; }; + 1FD9535F23A89CA200888FC3 /* CPUTopKV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528823A89C9B00888FC3 /* CPUTopKV2.hpp */; }; + 1FD9536023A89CA200888FC3 /* CPUGather.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9528923A89C9B00888FC3 /* CPUGather.cpp */; }; + 1FD9536123A89CA200888FC3 /* CPUCast.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528A23A89C9B00888FC3 /* CPUCast.hpp */; }; + 1FD9536223A89CA200888FC3 /* CPUSelect.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528B23A89C9B00888FC3 /* CPUSelect.hpp */; }; + 1FD9536323A89CA200888FC3 /* CPUBinary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528C23A89C9B00888FC3 /* CPUBinary.hpp */; }; + 1FD9536423A89CA200888FC3 /* CPUCrop.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9528D23A89C9B00888FC3 /* CPUCrop.cpp */; }; + 1FD9536523A89CA200888FC3 /* CPUFloatToInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9528E23A89C9B00888FC3 /* CPUFloatToInt8.cpp */; }; + 1FD9536623A89CA200888FC3 /* CPUConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9528F23A89C9C00888FC3 /* CPUConcat.hpp */; }; + 
1FD9536723A89CA200888FC3 /* CPUSetDiff1D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9529023A89C9C00888FC3 /* CPUSetDiff1D.cpp */; }; + 1FD9536823A89CA200888FC3 /* CPUSpaceToBatchND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9529123A89C9C00888FC3 /* CPUSpaceToBatchND.cpp */; }; + 1FD9536923A89CA200888FC3 /* CPUAsString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9529223A89C9C00888FC3 /* CPUAsString.cpp */; }; + 1FD9536A23A89CA200888FC3 /* CPUReduceJoin.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9529323A89C9C00888FC3 /* CPUReduceJoin.hpp */; }; + 1FD9536B23A89CA200888FC3 /* CPUConst.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9529423A89C9C00888FC3 /* CPUConst.hpp */; }; + 1FD9536C23A89CA200888FC3 /* CPUSpaceToDepth.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9529523A89C9C00888FC3 /* CPUSpaceToDepth.cpp */; }; + 1FD9536D23A89CA200888FC3 /* CPUQuantizedAvgPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9529623A89C9C00888FC3 /* CPUQuantizedAvgPool.hpp */; }; + 1FD9536E23A89CA200888FC3 /* CPUThreshold.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9529723A89C9C00888FC3 /* CPUThreshold.cpp */; }; + 1FD9536F23A89CA200888FC3 /* CPUPoolGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9529823A89C9C00888FC3 /* CPUPoolGrad.hpp */; }; + 1FD9537023A89CA200888FC3 /* CPULSTM.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9529923A89C9D00888FC3 /* CPULSTM.hpp */; }; + 1FD9537123A89CA200888FC3 /* CPUSigmoid.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9529A23A89C9D00888FC3 /* CPUSigmoid.hpp */; }; + 1FD9537223A89CA200888FC3 /* CPUZeroLike.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9529B23A89C9D00888FC3 /* CPUZeroLike.cpp */; }; + 1FD9537323A89CA200888FC3 /* CPUOneHot.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9529C23A89C9D00888FC3 /* CPUOneHot.cpp */; }; + 1FD9537423A89CA200888FC3 /* CPUWhere.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
1FD9529D23A89C9D00888FC3 /* CPUWhere.cpp */; }; + 1FD9537523A89CA200888FC3 /* CPUSqueeze.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9529E23A89C9D00888FC3 /* CPUSqueeze.hpp */; }; + 1FD9537623A89CA200888FC3 /* CPUShape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9529F23A89C9D00888FC3 /* CPUShape.cpp */; }; + 1FD9537723A89CA200888FC3 /* CPUBatchMatMul.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952A023A89C9D00888FC3 /* CPUBatchMatMul.hpp */; }; + 1FD9537823A89CA200888FC3 /* CPUQuantizedConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952A123A89C9E00888FC3 /* CPUQuantizedConcat.hpp */; }; + 1FD9537923A89CA200888FC3 /* CPUDeconvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952A223A89C9E00888FC3 /* CPUDeconvolution.hpp */; }; + 1FD9537A23A89CA200888FC3 /* CPURNNSequenceGRU.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952A323A89C9E00888FC3 /* CPURNNSequenceGRU.cpp */; }; + 1FD9537B23A89CA200888FC3 /* CPUPack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952A423A89C9E00888FC3 /* CPUPack.cpp */; }; + 1FD9537C23A89CA200888FC3 /* CPUInterp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952A523A89C9E00888FC3 /* CPUInterp.cpp */; }; + 1FD9537D23A89CA200888FC3 /* CPUSpaceToBatchND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952A623A89C9E00888FC3 /* CPUSpaceToBatchND.hpp */; }; + 1FD9537E23A89CA200888FC3 /* CPUTFQuantizedConv2D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952A723A89C9E00888FC3 /* CPUTFQuantizedConv2D.cpp */; }; + 1FD9537F23A89CA200888FC3 /* CPUQuantizedAdd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952A823A89C9E00888FC3 /* CPUQuantizedAdd.cpp */; }; + 1FD9538023A89CA200888FC3 /* CPURange.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952A923A89C9E00888FC3 /* CPURange.hpp */; }; + 1FD9538123A89CA200888FC3 /* CPUDepthwiseConvInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952AA23A89C9F00888FC3 /* CPUDepthwiseConvInt8.hpp */; }; + 
1FD9538223A89CA200888FC3 /* CPUInt8ToFloat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952AB23A89C9F00888FC3 /* CPUInt8ToFloat.hpp */; }; + 1FD9538323A89CA200888FC3 /* CPUBroadcastTo.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952AC23A89C9F00888FC3 /* CPUBroadcastTo.cpp */; }; + 1FD9538423A89CA200888FC3 /* CPUConvolution3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952AD23A89C9F00888FC3 /* CPUConvolution3D.cpp */; }; + 1FD9538523A89CA200888FC3 /* CPUConvolution3D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952AE23A89C9F00888FC3 /* CPUConvolution3D.hpp */; }; + 1FD9538623A89CA200888FC3 /* CPULRN.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952AF23A89C9F00888FC3 /* CPULRN.hpp */; }; + 1FD9538723A89CA200888FC3 /* CPUDeconvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952B023A89C9F00888FC3 /* CPUDeconvolution.cpp */; }; + 1FD9538823A89CA200888FC3 /* CPUStridedSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952B123A89C9F00888FC3 /* CPUStridedSlice.cpp */; }; + 1FD9538923A89CA200888FC3 /* CPUEltwiseInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952B223A89C9F00888FC3 /* CPUEltwiseInt8.hpp */; }; + 1FD9538A23A89CA200888FC3 /* CPUConv2DBackPropFilter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952B323A89C9F00888FC3 /* CPUConv2DBackPropFilter.hpp */; }; + 1FD9538B23A89CA200888FC3 /* CPUSoftmaxGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952B423A89CA000888FC3 /* CPUSoftmaxGrad.hpp */; }; + 1FD9538C23A89CA200888FC3 /* CPUPermute.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952B523A89CA000888FC3 /* CPUPermute.cpp */; }; + 1FD9538D23A89CA200888FC3 /* CPUInstanceNorm.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952B623A89CA000888FC3 /* CPUInstanceNorm.cpp */; }; + 1FD9538E23A89CA200888FC3 /* CPUElu.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952B723A89CA000888FC3 /* CPUElu.cpp */; }; + 1FD9538F23A89CA200888FC3 /* CPUPool.cpp in Sources */ = 
{isa = PBXBuildFile; fileRef = 1FD952B823A89CA000888FC3 /* CPUPool.cpp */; }; + 1FD9539023A89CA200888FC3 /* CPUTanh.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952B923A89CA000888FC3 /* CPUTanh.cpp */; }; + 1FD9539123A89CA200888FC3 /* CPUWhere.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952BA23A89CA000888FC3 /* CPUWhere.hpp */; }; + 1FD9539223A89CA200888FC3 /* CPUGatherND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952BB23A89CA000888FC3 /* CPUGatherND.cpp */; }; + 1FD9539323A89CA200888FC3 /* CPUDequantize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952BC23A89CA000888FC3 /* CPUDequantize.hpp */; }; + 1FD9539423A89CA200888FC3 /* CPULSTM.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952BD23A89CA000888FC3 /* CPULSTM.cpp */; }; + 1FD9539523A89CA200888FC3 /* CPUReverseSequence.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD952BE23A89CA100888FC3 /* CPUReverseSequence.cpp */; }; + 1FD9539623A89CA200888FC3 /* CPUTile.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD952BF23A89CA100888FC3 /* CPUTile.hpp */; }; + 1FD953B823A89CD000888FC3 /* Backend.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9539723A89CCE00888FC3 /* Backend.cpp */; }; + 1FD953B923A89CD000888FC3 /* FileLoader.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9539823A89CCE00888FC3 /* FileLoader.cpp */; }; + 1FD953BA23A89CD000888FC3 /* Concurrency.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9539923A89CCE00888FC3 /* Concurrency.h */; }; + 1FD953BB23A89CD000888FC3 /* BufferAllocator.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9539A23A89CCE00888FC3 /* BufferAllocator.hpp */; }; + 1FD953BC23A89CD000888FC3 /* Interpreter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9539B23A89CCE00888FC3 /* Interpreter.cpp */; }; + 1FD953BD23A89CD000888FC3 /* Execution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9539C23A89CCE00888FC3 /* Execution.hpp */; }; + 1FD953BE23A89CD000888FC3 /* Session.hpp in Headers */ = {isa = 
PBXBuildFile; fileRef = 1FD9539D23A89CCE00888FC3 /* Session.hpp */; }; + 1FD953BF23A89CD000888FC3 /* TensorUtils.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9539E23A89CCE00888FC3 /* TensorUtils.hpp */; }; + 1FD953C023A89CD100888FC3 /* AutoTime.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9539F23A89CCE00888FC3 /* AutoTime.cpp */; }; + 1FD953C123A89CD100888FC3 /* BufferAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953A023A89CCE00888FC3 /* BufferAllocator.cpp */; }; + 1FD953C223A89CD100888FC3 /* AutoStorage.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953A123A89CCE00888FC3 /* AutoStorage.h */; }; + 1FD953C323A89CD100888FC3 /* Pipeline.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953A223A89CCF00888FC3 /* Pipeline.cpp */; }; + 1FD953C423A89CD100888FC3 /* BackendFactory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953A323A89CCF00888FC3 /* BackendFactory.cpp */; }; + 1FD953C523A89CD100888FC3 /* SizeComputer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953A423A89CCF00888FC3 /* SizeComputer.hpp */; }; + 1FD953C623A89CD100888FC3 /* Schedule.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953A523A89CCF00888FC3 /* Schedule.cpp */; }; + 1FD953C723A89CD100888FC3 /* MNNMemoryUtils.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953A623A89CCF00888FC3 /* MNNMemoryUtils.h */; }; + 1FD953C823A89CD100888FC3 /* DirectedAcyclicGraph.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953A723A89CCF00888FC3 /* DirectedAcyclicGraph.hpp */; }; + 1FD953C923A89CD100888FC3 /* Pipeline.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953A823A89CCF00888FC3 /* Pipeline.hpp */; }; + 1FD953CA23A89CD100888FC3 /* TensorUtils.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953A923A89CCF00888FC3 /* TensorUtils.cpp */; }; + 1FD953CB23A89CD100888FC3 /* NonCopyable.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953AA23A89CCF00888FC3 /* NonCopyable.hpp */; }; + 1FD953CC23A89CD100888FC3 /* MNNMemoryUtils.cpp 
in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953AB23A89CCF00888FC3 /* MNNMemoryUtils.cpp */; }; + 1FD953CD23A89CD100888FC3 /* FileLoader.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953AC23A89CCF00888FC3 /* FileLoader.hpp */; }; + 1FD953CE23A89CD100888FC3 /* WrapExecution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953AD23A89CCF00888FC3 /* WrapExecution.hpp */; }; + 1FD953CF23A89CD100888FC3 /* Schedule.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953AE23A89CD000888FC3 /* Schedule.hpp */; }; + 1FD953D023A89CD100888FC3 /* Tensor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953AF23A89CD000888FC3 /* Tensor.cpp */; }; + 1FD953D123A89CD100888FC3 /* SizeComputer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953B023A89CD000888FC3 /* SizeComputer.cpp */; }; + 1FD953D223A89CD100888FC3 /* Session.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953B123A89CD000888FC3 /* Session.cpp */; }; + 1FD953D323A89CD100888FC3 /* Execution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953B223A89CD000888FC3 /* Execution.cpp */; }; + 1FD953D423A89CD100888FC3 /* BackendFactory.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953B323A89CD000888FC3 /* BackendFactory.hpp */; }; + 1FD953D523A89CD100888FC3 /* BackendRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953B423A89CD000888FC3 /* BackendRegister.cpp */; }; + 1FD953D623A89CD100888FC3 /* WrapExecution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953B523A89CD000888FC3 /* WrapExecution.cpp */; }; + 1FD953D723A89CD100888FC3 /* Macro.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953B623A89CD000888FC3 /* Macro.h */; }; + 1FD953D823A89CD100888FC3 /* Backend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD953B723A89CD000888FC3 /* Backend.hpp */; }; + 1FD9541F23A89CF300888FC3 /* ShapeInterp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953D923A89CED00888FC3 /* ShapeInterp.cpp */; }; + 1FD9542023A89CF300888FC3 /* 
ShapeDetectionPostProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953DA23A89CED00888FC3 /* ShapeDetectionPostProcess.cpp */; }; + 1FD9542123A89CF300888FC3 /* ShapePadding.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953DB23A89CEE00888FC3 /* ShapePadding.cpp */; }; + 1FD9542223A89CF300888FC3 /* ShapeScatterNd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953DC23A89CEE00888FC3 /* ShapeScatterNd.cpp */; }; + 1FD9542323A89CF300888FC3 /* ShapeLinSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953DD23A89CEE00888FC3 /* ShapeLinSpace.cpp */; }; + 1FD9542423A89CF300888FC3 /* ShapeSize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953DE23A89CEE00888FC3 /* ShapeSize.cpp */; }; + 1FD9542523A89CF300888FC3 /* ShapeTile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953DF23A89CEE00888FC3 /* ShapeTile.cpp */; }; + 1FD9542623A89CF300888FC3 /* ShapeAsString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E023A89CEE00888FC3 /* ShapeAsString.cpp */; }; + 1FD9542723A89CF300888FC3 /* ShapeRank.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E123A89CEE00888FC3 /* ShapeRank.cpp */; }; + 1FD9542823A89CF300888FC3 /* ShapeMoments.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E223A89CEE00888FC3 /* ShapeMoments.cpp */; }; + 1FD9542923A89CF300888FC3 /* ShapePack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E323A89CEE00888FC3 /* ShapePack.cpp */; }; + 1FD9542A23A89CF300888FC3 /* ShapeProposal.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E423A89CEE00888FC3 /* ShapeProposal.cpp */; }; + 1FD9542B23A89CF300888FC3 /* ShapeBinaryOp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E523A89CEE00888FC3 /* ShapeBinaryOp.cpp */; }; + 1FD9542C23A89CF300888FC3 /* ShapePermute.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E623A89CEE00888FC3 /* ShapePermute.cpp */; }; + 1FD9542D23A89CF300888FC3 /* ShapeBroadcastTo.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
1FD953E723A89CEE00888FC3 /* ShapeBroadcastTo.cpp */; }; + 1FD9542E23A89CF300888FC3 /* ShapeUnravelIndex.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E823A89CEE00888FC3 /* ShapeUnravelIndex.cpp */; }; + 1FD9542F23A89CF300888FC3 /* ShapePool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953E923A89CEF00888FC3 /* ShapePool.cpp */; }; + 1FD9543023A89CF300888FC3 /* ShapeNonMaxSuppressionV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953EA23A89CEF00888FC3 /* ShapeNonMaxSuppressionV2.cpp */; }; + 1FD9543123A89CF300888FC3 /* ShapeCropAndResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953EB23A89CEF00888FC3 /* ShapeCropAndResize.cpp */; }; + 1FD9543223A89CF300888FC3 /* ShapeTFQuantizedConv2D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953EC23A89CEF00888FC3 /* ShapeTFQuantizedConv2D.cpp */; }; + 1FD9543323A89CF300888FC3 /* ShapeShape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953ED23A89CEF00888FC3 /* ShapeShape.cpp */; }; + 1FD9543423A89CF300888FC3 /* ShapeDepthToSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953EE23A89CEF00888FC3 /* ShapeDepthToSpace.cpp */; }; + 1FD9543523A89CF300888FC3 /* ShapeTopKV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953EF23A89CEF00888FC3 /* ShapeTopKV2.cpp */; }; + 1FD9543623A89CF300888FC3 /* ShapeRange.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F023A89CEF00888FC3 /* ShapeRange.cpp */; }; + 1FD9543723A89CF300888FC3 /* ShapeQuantizedReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F123A89CEF00888FC3 /* ShapeQuantizedReshape.cpp */; }; + 1FD9543823A89CF300888FC3 /* ShapeLSTM.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F223A89CEF00888FC3 /* ShapeLSTM.cpp */; }; + 1FD9543923A89CF300888FC3 /* ShapeOneHot.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F323A89CEF00888FC3 /* ShapeOneHot.cpp */; }; + 1FD9543A23A89CF300888FC3 /* ShapeEltwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F423A89CEF00888FC3 /* 
ShapeEltwise.cpp */; }; + 1FD9543B23A89CF300888FC3 /* ShapeInnerProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F523A89CEF00888FC3 /* ShapeInnerProduct.cpp */; }; + 1FD9543C23A89CF300888FC3 /* ShapeSliceTf.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F623A89CF000888FC3 /* ShapeSliceTf.cpp */; }; + 1FD9543D23A89CF300888FC3 /* ShapeUnpack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F723A89CF000888FC3 /* ShapeUnpack.cpp */; }; + 1FD9543E23A89CF300888FC3 /* ShapeSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F823A89CF000888FC3 /* ShapeSlice.cpp */; }; + 1FD9543F23A89CF300888FC3 /* ShapeConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953F923A89CF000888FC3 /* ShapeConcat.cpp */; }; + 1FD9544023A89CF300888FC3 /* ShapeBatchMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953FA23A89CF000888FC3 /* ShapeBatchMatMul.cpp */; }; + 1FD9544123A89CF300888FC3 /* ShapeReduceJoin.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953FB23A89CF000888FC3 /* ShapeReduceJoin.cpp */; }; + 1FD9544223A89CF300888FC3 /* ShapeArgMax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953FC23A89CF000888FC3 /* ShapeArgMax.cpp */; }; + 1FD9544323A89CF300888FC3 /* ShapeConvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953FD23A89CF000888FC3 /* ShapeConvolution.cpp */; }; + 1FD9544423A89CF300888FC3 /* ShapeRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953FE23A89CF000888FC3 /* ShapeRegister.cpp */; }; + 1FD9544523A89CF300888FC3 /* ShapeWhere.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD953FF23A89CF000888FC3 /* ShapeWhere.cpp */; }; + 1FD9544623A89CF300888FC3 /* ShapeDetectionOutput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540023A89CF100888FC3 /* ShapeDetectionOutput.cpp */; }; + 1FD9544723A89CF300888FC3 /* ShapePool3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540123A89CF100888FC3 /* ShapePool3D.cpp */; }; + 1FD9544823A89CF300888FC3 /* ShapeFill.cpp 
in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540223A89CF100888FC3 /* ShapeFill.cpp */; }; + 1FD9544923A89CF300888FC3 /* ShapeDequantize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540323A89CF100888FC3 /* ShapeDequantize.cpp */; }; + 1FD9544A23A89CF300888FC3 /* ShapeConst.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540423A89CF100888FC3 /* ShapeConst.cpp */; }; + 1FD9544B23A89CF300888FC3 /* ShapeSpaceToDepth.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540523A89CF100888FC3 /* ShapeSpaceToDepth.cpp */; }; + 1FD9544C23A89CF300888FC3 /* ShapeTranspose.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540623A89CF100888FC3 /* ShapeTranspose.cpp */; }; + 1FD9544D23A89CF300888FC3 /* ShapeGatherND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540723A89CF100888FC3 /* ShapeGatherND.cpp */; }; + 1FD9544E23A89CF300888FC3 /* ShapeReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540823A89CF100888FC3 /* ShapeReshape.cpp */; }; + 1FD9544F23A89CF300888FC3 /* ShapeGatherV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540923A89CF100888FC3 /* ShapeGatherV2.cpp */; }; + 1FD9545023A89CF300888FC3 /* ShapeRNNSequenceGRU.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540A23A89CF100888FC3 /* ShapeRNNSequenceGRU.cpp */; }; + 1FD9545123A89CF300888FC3 /* ShapeROIPooling.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540B23A89CF100888FC3 /* ShapeROIPooling.cpp */; }; + 1FD9545223A89CF300888FC3 /* ShapeExpandDims.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540C23A89CF100888FC3 /* ShapeExpandDims.cpp */; }; + 1FD9545323A89CF300888FC3 /* ShapeReduction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540D23A89CF200888FC3 /* ShapeReduction.cpp */; }; + 1FD9545423A89CF300888FC3 /* ShapeGather.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9540E23A89CF200888FC3 /* ShapeGather.cpp */; }; + 1FD9545523A89CF300888FC3 /* ShapeConvolution3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
1FD9540F23A89CF200888FC3 /* ShapeConvolution3D.cpp */; }; + 1FD9545623A89CF300888FC3 /* ShapeCrop.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541023A89CF200888FC3 /* ShapeCrop.cpp */; }; + 1FD9545723A89CF300888FC3 /* ShapeDeconvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541123A89CF200888FC3 /* ShapeDeconvolution.cpp */; }; + 1FD9545823A89CF300888FC3 /* ShapeQuantizedMaxPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541223A89CF200888FC3 /* ShapeQuantizedMaxPool.cpp */; }; + 1FD9545923A89CF300888FC3 /* ShapeResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541323A89CF200888FC3 /* ShapeResize.cpp */; }; + 1FD9545A23A89CF300888FC3 /* ShapeTensorConvert.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541423A89CF200888FC3 /* ShapeTensorConvert.cpp */; }; + 1FD9545B23A89CF300888FC3 /* ShapeSelect.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541523A89CF200888FC3 /* ShapeSelect.cpp */; }; + 1FD9545C23A89CF300888FC3 /* ShapeQuantizedAvgPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541623A89CF200888FC3 /* ShapeQuantizedAvgPool.cpp */; }; + 1FD9545D23A89CF300888FC3 /* ShapeSqueeze.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541723A89CF200888FC3 /* ShapeSqueeze.cpp */; }; + 1FD9545E23A89CF300888FC3 /* ShapeCosineSimilarity.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541823A89CF300888FC3 /* ShapeCosineSimilarity.cpp */; }; + 1FD9545F23A89CF300888FC3 /* ShapeBatchToSpaceND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541923A89CF300888FC3 /* ShapeBatchToSpaceND.cpp */; }; + 1FD9546023A89CF300888FC3 /* ShapeSpaceToBatchND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541A23A89CF300888FC3 /* ShapeSpaceToBatchND.cpp */; }; + 1FD9546123A89CF300888FC3 /* ShapeCast.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541B23A89CF300888FC3 /* ShapeCast.cpp */; }; + 1FD9546223A89CF300888FC3 /* ShapeMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
1FD9541C23A89CF300888FC3 /* ShapeMatMul.cpp */; }; + 1FD9546323A89CF300888FC3 /* ShapeStridedSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541D23A89CF300888FC3 /* ShapeStridedSlice.cpp */; }; + 1FD9546423A89CF300888FC3 /* ShapePriorbox.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9541E23A89CF300888FC3 /* ShapePriorbox.cpp */; }; + 1FD9548B23A89D1300888FC3 /* ConvolutionWinograd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9546523A89D1000888FC3 /* ConvolutionWinograd.cpp */; }; + 1FD9548C23A89D1300888FC3 /* DeconvolutionWithStride.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9546623A89D1000888FC3 /* DeconvolutionWithStride.hpp */; }; + 1FD9548D23A89D1300888FC3 /* ConvolutionWinograd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9546723A89D1000888FC3 /* ConvolutionWinograd.hpp */; }; + 1FD9548E23A89D1300888FC3 /* ConvolutionWinograd3D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9546823A89D1100888FC3 /* ConvolutionWinograd3D.hpp */; }; + 1FD9548F23A89D1300888FC3 /* Convolution3D3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9546923A89D1100888FC3 /* Convolution3D3x3.hpp */; }; + 1FD9549023A89D1300888FC3 /* OptimizedComputer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9546A23A89D1100888FC3 /* OptimizedComputer.hpp */; }; + 1FD9549123A89D1300888FC3 /* Convolution3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9546B23A89D1100888FC3 /* Convolution3x3.cpp */; }; + 1FD9549223A89D1300888FC3 /* ConvolutionDepthwise3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9546C23A89D1100888FC3 /* ConvolutionDepthwise3x3.hpp */; }; + 1FD9549323A89D1300888FC3 /* StrassenMatmulComputor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9546D23A89D1100888FC3 /* StrassenMatmulComputor.hpp */; }; + 1FD9549423A89D1300888FC3 /* ConvolutionGroup.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9546E23A89D1100888FC3 /* ConvolutionGroup.cpp */; }; + 1FD9549523A89D1300888FC3 /* 
ConvolutionInt8Executor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9546F23A89D1100888FC3 /* ConvolutionInt8Executor.cpp */; }; + 1FD9549623A89D1300888FC3 /* Convolution1x1Strassen.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9547023A89D1100888FC3 /* Convolution1x1Strassen.hpp */; }; + 1FD9549723A89D1300888FC3 /* ConvolutionWinograd3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547123A89D1100888FC3 /* ConvolutionWinograd3D.cpp */; }; + 1FD9549823A89D1300888FC3 /* StrassenMatmulComputor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547223A89D1100888FC3 /* StrassenMatmulComputor.cpp */; }; + 1FD9549923A89D1300888FC3 /* DeconvolutionWithStride.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547323A89D1100888FC3 /* DeconvolutionWithStride.cpp */; }; + 1FD9549A23A89D1300888FC3 /* WinogradOptFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547423A89D1100888FC3 /* WinogradOptFunction.cpp */; }; + 1FD9549B23A89D1300888FC3 /* Convolution1x1Strassen.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547523A89D1200888FC3 /* Convolution1x1Strassen.cpp */; }; + 1FD9549C23A89D1300888FC3 /* OptimizedComputer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547623A89D1200888FC3 /* OptimizedComputer.cpp */; }; + 1FD9549D23A89D1300888FC3 /* WinogradOptFunction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9547723A89D1200888FC3 /* WinogradOptFunction.hpp */; }; + 1FD9549E23A89D1300888FC3 /* ResizeFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547823A89D1200888FC3 /* ResizeFunction.cpp */; }; + 1FD9549F23A89D1300888FC3 /* Int8FunctionsOpt.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9547923A89D1200888FC3 /* Int8FunctionsOpt.h */; }; + 1FD954A023A89D1300888FC3 /* ConvolutionFloatFactory.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9547A23A89D1200888FC3 /* ConvolutionFloatFactory.h */; }; + 1FD954A123A89D1300888FC3 /* ConvolutionIntFactory.cpp in Sources */ = {isa = 
PBXBuildFile; fileRef = 1FD9547B23A89D1200888FC3 /* ConvolutionIntFactory.cpp */; }; + 1FD954A223A89D1300888FC3 /* Convolution3D3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547C23A89D1200888FC3 /* Convolution3D3x3.cpp */; }; + 1FD954A323A89D1300888FC3 /* ConvolutionDepthwise3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547D23A89D1200888FC3 /* ConvolutionDepthwise3x3.cpp */; }; + 1FD954A423A89D1300888FC3 /* CommonOptFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547E23A89D1200888FC3 /* CommonOptFunction.cpp */; }; + 1FD954A523A89D1300888FC3 /* Int8FunctionsOpt.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9547F23A89D1200888FC3 /* Int8FunctionsOpt.cpp */; }; + 1FD954A623A89D1400888FC3 /* ConvolutionTiledExecutor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9548023A89D1200888FC3 /* ConvolutionTiledExecutor.cpp */; }; + 1FD954A723A89D1400888FC3 /* ResizeFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9548123A89D1300888FC3 /* ResizeFunction.h */; }; + 1FD954A823A89D1400888FC3 /* ConvOpt.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9548223A89D1300888FC3 /* ConvOpt.cpp */; }; + 1FD954A923A89D1400888FC3 /* Convolution3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9548323A89D1300888FC3 /* Convolution3x3.hpp */; }; + 1FD954AA23A89D1400888FC3 /* CommonOptFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9548423A89D1300888FC3 /* CommonOptFunction.h */; }; + 1FD954AB23A89D1400888FC3 /* ConvolutionIntFactory.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9548523A89D1300888FC3 /* ConvolutionIntFactory.hpp */; }; + 1FD954AC23A89D1400888FC3 /* ConvolutionTiledExecutor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9548623A89D1300888FC3 /* ConvolutionTiledExecutor.hpp */; }; + 1FD954AD23A89D1400888FC3 /* ConvolutionGroup.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9548723A89D1300888FC3 /* ConvolutionGroup.hpp */; }; + 1FD954AE23A89D1400888FC3 /* 
ConvolutionInt8Executor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9548823A89D1300888FC3 /* ConvolutionInt8Executor.hpp */; }; + 1FD954AF23A89D1400888FC3 /* ConvOpt.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9548923A89D1300888FC3 /* ConvOpt.h */; }; + 1FD954B023A89D1400888FC3 /* ConvolutionFloatFactory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9548A23A89D1300888FC3 /* ConvolutionFloatFactory.cpp */; }; + 1FD954F123A89D3500888FC3 /* MNNNV21ToBGRUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B123A89D2F00888FC3 /* MNNNV21ToBGRUnit.S */; }; + 1FD954F223A89D3500888FC3 /* MNNConvRunForUnitDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B223A89D2F00888FC3 /* MNNConvRunForUnitDepthWiseInt8.S */; }; + 1FD954F323A89D3500888FC3 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B323A89D2F00888FC3 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */; }; + 1FD954F423A89D3500888FC3 /* MNNConvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B423A89D2F00888FC3 /* MNNConvRunForUnitDepthWise.S */; }; + 1FD954F523A89D3500888FC3 /* MNNMatrixProd.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B523A89D3000888FC3 /* MNNMatrixProd.S */; }; + 1FD954F623A89D3500888FC3 /* MNNCoefLine.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B623A89D3000888FC3 /* MNNCoefLine.S */; }; + 1FD954F723A89D3500888FC3 /* MNNWinogradMatrixProductRight.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B723A89D3000888FC3 /* MNNWinogradMatrixProductRight.S */; }; + 1FD954F823A89D3500888FC3 /* MNNInt8ScaleToFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B823A89D3000888FC3 /* MNNInt8ScaleToFloat.S */; }; + 1FD954F923A89D3500888FC3 /* MNNQuanToDestUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954B923A89D3000888FC3 /* MNNQuanToDestUint8.S */; }; + 1FD954FA23A89D3500888FC3 /* MNNConvRunForLineDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef 
= 1FD954BA23A89D3000888FC3 /* MNNConvRunForLineDepthWiseUint8.S */; }; + 1FD954FB23A89D3500888FC3 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954BB23A89D3000888FC3 /* MNNUInt8ToInt16WithOffsetC4Common.S */; }; + 1FD954FC23A89D3500888FC3 /* MNNConvSlideWindowMiddle.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954BC23A89D3000888FC3 /* MNNConvSlideWindowMiddle.S */; }; + 1FD954FD23A89D3500888FC3 /* MNNAddC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954BD23A89D3000888FC3 /* MNNAddC4WithStride.S */; }; + 1FD954FE23A89D3500888FC3 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954BE23A89D3000888FC3 /* MNNConvRunForUnitDepthWiseUint8.S */; }; + 1FD954FF23A89D3500888FC3 /* MNNSamplerC4NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954BF23A89D3000888FC3 /* MNNSamplerC4NearestOpt.S */; }; + 1FD9550023A89D3500888FC3 /* MNNMatrixAdd.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C023A89D3000888FC3 /* MNNMatrixAdd.S */; }; + 1FD9550123A89D3500888FC3 /* MNNConvDwF23SourceTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C123A89D3100888FC3 /* MNNConvDwF23SourceTransUnit.S */; }; + 1FD9550223A89D3500888FC3 /* MNNSamplerC1BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C223A89D3100888FC3 /* MNNSamplerC1BilinearOpt.S */; }; + 1FD9550323A89D3500888FC3 /* MNNConvDwF23MulTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C323A89D3100888FC3 /* MNNConvDwF23MulTransUnit.S */; }; + 1FD9550423A89D3500888FC3 /* MNNMaxFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C423A89D3100888FC3 /* MNNMaxFloat.S */; }; + 1FD9550523A89D3500888FC3 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C523A89D3100888FC3 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */; }; + 1FD9550623A89D3500888FC3 /* MNNLoadU8AndSum.S in Sources */ = {isa = PBXBuildFile; fileRef = 
1FD954C623A89D3100888FC3 /* MNNLoadU8AndSum.S */; }; + 1FD9550723A89D3500888FC3 /* MNNSamplerC1NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C723A89D3100888FC3 /* MNNSamplerC1NearestOpt.S */; }; + 1FD9550823A89D3500888FC3 /* MNNGemmFloatCommon_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C823A89D3100888FC3 /* MNNGemmFloatCommon_4.S */; }; + 1FD9550923A89D3500888FC3 /* MNNBlitC3ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954C923A89D3100888FC3 /* MNNBlitC3ToFloatRGBA.S */; }; + 1FD9550A23A89D3500888FC3 /* MNNPowC8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954CA23A89D3100888FC3 /* MNNPowC8.S */; }; + 1FD9550B23A89D3500888FC3 /* MNNReluWithSlope.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954CB23A89D3100888FC3 /* MNNReluWithSlope.S */; }; + 1FD9550C23A89D3500888FC3 /* MNNBlitC1ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954CC23A89D3200888FC3 /* MNNBlitC1ToFloatRGBA.S */; }; + 1FD9550D23A89D3500888FC3 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954CD23A89D3200888FC3 /* MNNDepthWiseInt8AddBiasScaleUnit.S */; }; + 1FD9550E23A89D3500888FC3 /* MNNUnPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954CE23A89D3200888FC3 /* MNNUnPackC4.S */; }; + 1FD9550F23A89D3500888FC3 /* MNNConvSlideWindowBorder.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954CF23A89D3200888FC3 /* MNNConvSlideWindowBorder.S */; }; + 1FD9551023A89D3500888FC3 /* MNNCopyC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D023A89D3200888FC3 /* MNNCopyC4WithStride.S */; }; + 1FD9551123A89D3500888FC3 /* MNNPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D123A89D3200888FC3 /* MNNPackC4.S */; }; + 1FD9551223A89D3500888FC3 /* MNNExpC8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D223A89D3200888FC3 /* MNNExpC8.S */; }; + 1FD9551323A89D3500888FC3 /* MNNConvRunForLineDepthwise.S in Sources */ = {isa = PBXBuildFile; fileRef = 
1FD954D323A89D3200888FC3 /* MNNConvRunForLineDepthwise.S */; }; + 1FD9551423A89D3500888FC3 /* MNNAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D423A89D3200888FC3 /* MNNAddBias.S */; }; + 1FD9551523A89D3500888FC3 /* MNNReluWithSlopeChannel.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D523A89D3200888FC3 /* MNNReluWithSlopeChannel.S */; }; + 1FD9551623A89D3500888FC3 /* MNNAddBiasRelu.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D623A89D3200888FC3 /* MNNAddBiasRelu.S */; }; + 1FD9551723A89D3500888FC3 /* MNNGemmInt16to32_4x4_Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D723A89D3200888FC3 /* MNNGemmInt16to32_4x4_Common.S */; }; + 1FD9551823A89D3500888FC3 /* MNNAddBiasRelu6.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D823A89D3300888FC3 /* MNNAddBiasRelu6.S */; }; + 1FD9551923A89D3500888FC3 /* MNNMatrixSub.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954D923A89D3300888FC3 /* MNNMatrixSub.S */; }; + 1FD9551A23A89D3500888FC3 /* MNNStrassenMergeCFunction.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954DA23A89D3300888FC3 /* MNNStrassenMergeCFunction.S */; }; + 1FD9551B23A89D3500888FC3 /* MNNBilinearProcC1.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954DB23A89D3300888FC3 /* MNNBilinearProcC1.S */; }; + 1FD9551C23A89D3500888FC3 /* MNNGemmFloatUnit_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954DC23A89D3300888FC3 /* MNNGemmFloatUnit_4.S */; }; + 1FD9551D23A89D3500888FC3 /* MNNReluInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954DD23A89D3300888FC3 /* MNNReluInt8.S */; }; + 1FD9551E23A89D3500888FC3 /* MNNScaleAndAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954DE23A89D3300888FC3 /* MNNScaleAndAddBias.S */; }; + 1FD9551F23A89D3500888FC3 /* MNNNV21ToRGBUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954DF23A89D3300888FC3 /* MNNNV21ToRGBUnit.S */; }; + 1FD9552023A89D3500888FC3 /* MNNSamplerC4BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 
1FD954E023A89D3300888FC3 /* MNNSamplerC4BilinearOpt.S */; }; + 1FD9552123A89D3500888FC3 /* MNNScaleBias2FloatC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E123A89D3300888FC3 /* MNNScaleBias2FloatC4.S */; }; + 1FD9552223A89D3500888FC3 /* MNNFloat2Int8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E223A89D3300888FC3 /* MNNFloat2Int8.S */; }; + 1FD9552323A89D3500888FC3 /* MNNDeconvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E323A89D3400888FC3 /* MNNDeconvRunForUnitDepthWise.S */; }; + 1FD9552423A89D3500888FC3 /* MNNScaleAddInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E423A89D3400888FC3 /* MNNScaleAddInt8.S */; }; + 1FD9552523A89D3500888FC3 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E523A89D3400888FC3 /* MNNGemmInt8toFloat32_8x4_Unit.S */; }; + 1FD9552623A89D3500888FC3 /* MNNGemmInt16to32_4x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E623A89D3400888FC3 /* MNNGemmInt16to32_4x4_Unit.S */; }; + 1FD9552723A89D3500888FC3 /* MNNCubicSampleC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E723A89D3400888FC3 /* MNNCubicSampleC4.S */; }; + 1FD9552823A89D3500888FC3 /* MNNConvRunForLineDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E823A89D3400888FC3 /* MNNConvRunForLineDepthWiseInt8.S */; }; + 1FD9552923A89D3500888FC3 /* MNNMatrixMax.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954E923A89D3400888FC3 /* MNNMatrixMax.S */; }; + 1FD9552A23A89D3500888FC3 /* MNNGemmFloatOne_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954EA23A89D3400888FC3 /* MNNGemmFloatOne_4.S */; }; + 1FD9552B23A89D3500888FC3 /* MNNNV21ToRGBAUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954EB23A89D3400888FC3 /* MNNNV21ToRGBAUnit.S */; }; + 1FD9552C23A89D3500888FC3 /* MNNGemmint8to32_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954EC23A89D3400888FC3 /* MNNGemmint8to32_8x4_Unit.S */; }; + 1FD9552D23A89D3500888FC3 /* 
MNNMinFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954ED23A89D3400888FC3 /* MNNMinFloat.S */; }; + 1FD9552E23A89D3500888FC3 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954EE23A89D3400888FC3 /* MNNUInt8ToInt16WithOffsetC4Fast.S */; }; + 1FD9552F23A89D3500888FC3 /* MNNCubicLineC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954EF23A89D3500888FC3 /* MNNCubicLineC4.S */; }; + 1FD9553023A89D3500888FC3 /* MNNWinogradMatrixProductLeft.S in Sources */ = {isa = PBXBuildFile; fileRef = 1FD954F023A89D3500888FC3 /* MNNWinogradMatrixProductLeft.S */; }; + 1FD9553B23A89D4F00888FC3 /* SkNx_neon.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9553123A89D4E00888FC3 /* SkNx_neon.h */; }; + 1FD9553C23A89D4F00888FC3 /* ImageProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9553223A89D4E00888FC3 /* ImageProcess.cpp */; }; + 1FD9553D23A89D4F00888FC3 /* ImageFloatBlitter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9553323A89D4E00888FC3 /* ImageFloatBlitter.cpp */; }; + 1FD9553E23A89D4F00888FC3 /* Matrix_CV.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9553423A89D4E00888FC3 /* Matrix_CV.cpp */; }; + 1FD9553F23A89D4F00888FC3 /* ImageBlitter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9553523A89D4F00888FC3 /* ImageBlitter.hpp */; }; + 1FD9554023A89D4F00888FC3 /* ImageBlitter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9553623A89D4F00888FC3 /* ImageBlitter.cpp */; }; + 1FD9554123A89D4F00888FC3 /* ImageFloatBlitter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9553723A89D4F00888FC3 /* ImageFloatBlitter.hpp */; }; + 1FD9554223A89D4F00888FC3 /* ImageSampler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9553823A89D4F00888FC3 /* ImageSampler.cpp */; }; + 1FD9554323A89D4F00888FC3 /* ImageSampler.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9553923A89D4F00888FC3 /* ImageSampler.hpp */; }; + 1FD9554423A89D4F00888FC3 /* SkNx.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 1FD9553A23A89D4F00888FC3 /* SkNx.h */; }; + 1FD9554A23A89D5B00888FC3 /* WingoradGenerater.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9554523A89D5B00888FC3 /* WingoradGenerater.cpp */; }; + 1FD9554B23A89D5B00888FC3 /* Matrix.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9554623A89D5B00888FC3 /* Matrix.cpp */; }; + 1FD9554C23A89D5B00888FC3 /* WingoradGenerater.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9554723A89D5B00888FC3 /* WingoradGenerater.hpp */; }; + 1FD9554D23A89D5B00888FC3 /* Matrix.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9554823A89D5B00888FC3 /* Matrix.hpp */; }; + 1FD9554E23A89D5B00888FC3 /* Vec4.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9554923A89D5B00888FC3 /* Vec4.hpp */; }; + 1FD9560023A89D8A00888FC3 /* MetalTFQuantizedConv2D.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9554F23A89D7700888FC3 /* MetalTFQuantizedConv2D.metal */; }; + 1FD9560123A89D8A00888FC3 /* MetalDeconvolution.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555023A89D7700888FC3 /* MetalDeconvolution.mm */; }; + 1FD9560223A89D8A00888FC3 /* MetalQuantizedSoftmax.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555123A89D7700888FC3 /* MetalQuantizedSoftmax.metal */; }; + 1FD9560323A89D8A00888FC3 /* MetalROIPooling.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555223A89D7700888FC3 /* MetalROIPooling.metal */; }; + 1FD9560423A89D8A00888FC3 /* MetalDequantize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555323A89D7800888FC3 /* MetalDequantize.mm */; }; + 1FD9560523A89D8A00888FC3 /* MetalReLU6.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9555423A89D7800888FC3 /* MetalReLU6.hpp */; }; + 1FD9560623A89D8A00888FC3 /* MetalConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9555523A89D7800888FC3 /* MetalConcat.hpp */; }; + 1FD9560723A89D8A00888FC3 /* MetalNormalize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555623A89D7800888FC3 /* 
MetalNormalize.metal */; }; + 1FD9560823A89D8A00888FC3 /* MetalConvolutionGEMM.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555723A89D7800888FC3 /* MetalConvolutionGEMM.mm */; }; + 1FD9560923A89D8A00888FC3 /* MetalCrop.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9555823A89D7800888FC3 /* MetalCrop.hpp */; }; + 1FD9560A23A89D8A00888FC3 /* MetalResize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555923A89D7800888FC3 /* MetalResize.metal */; }; + 1FD9560B23A89D8A00888FC3 /* MetalQuantizedAdd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9555A23A89D7800888FC3 /* MetalQuantizedAdd.hpp */; }; + 1FD9560C23A89D8A00888FC3 /* MetalMatMul.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555B23A89D7800888FC3 /* MetalMatMul.metal */; }; + 1FD9560D23A89D8A00888FC3 /* MetalScale.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9555C23A89D7800888FC3 /* MetalScale.hpp */; }; + 1FD9560E23A89D8A00888FC3 /* MetalFill.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9555D23A89D7800888FC3 /* MetalFill.hpp */; }; + 1FD9560F23A89D8A00888FC3 /* MetalSpaceToBatchND.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555E23A89D7800888FC3 /* MetalSpaceToBatchND.mm */; }; + 1FD9561023A89D8A00888FC3 /* MetalCropAndResize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9555F23A89D7800888FC3 /* MetalCropAndResize.mm */; }; + 1FD9561123A89D8A00888FC3 /* MetalConvolution1x1.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556023A89D7900888FC3 /* MetalConvolution1x1.metal */; }; + 1FD9561223A89D8A00888FC3 /* MetalSigmoid.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556123A89D7900888FC3 /* MetalSigmoid.metal */; }; + 1FD9561323A89D8A00888FC3 /* MetalSlice.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556223A89D7900888FC3 /* MetalSlice.mm */; }; + 1FD9561423A89D8A00888FC3 /* MetalFixedPoint.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556323A89D7900888FC3 /* MetalFixedPoint.metal */; }; + 
1FD9561523A89D8A00888FC3 /* MetalTanH.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9556423A89D7900888FC3 /* MetalTanH.hpp */; }; + 1FD9561623A89D8A00888FC3 /* MetalPack.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556523A89D7900888FC3 /* MetalPack.metal */; }; + 1FD9561723A89D8A00888FC3 /* MetalBackend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9556623A89D7900888FC3 /* MetalBackend.hpp */; }; + 1FD9561823A89D8A00888FC3 /* MetalBatchToSpaceND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9556723A89D7900888FC3 /* MetalBatchToSpaceND.hpp */; }; + 1FD9561923A89D8A00888FC3 /* MetalLRN.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556823A89D7900888FC3 /* MetalLRN.mm */; }; + 1FD9561A23A89D8A00888FC3 /* MetalBinary.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556923A89D7900888FC3 /* MetalBinary.mm */; }; + 1FD9561B23A89D8A00888FC3 /* MetalReLU.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556A23A89D7900888FC3 /* MetalReLU.metal */; }; + 1FD9561C23A89D8A00888FC3 /* MetalConvolutionCommon.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9556B23A89D7900888FC3 /* MetalConvolutionCommon.hpp */; }; + 1FD9561D23A89D8A00888FC3 /* MetalReLU6.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556C23A89D7900888FC3 /* MetalReLU6.metal */; }; + 1FD9561E23A89D8A00888FC3 /* MetalNormalize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9556D23A89D7A00888FC3 /* MetalNormalize.hpp */; }; + 1FD9561F23A89D8A00888FC3 /* MetalDefine.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556E23A89D7A00888FC3 /* MetalDefine.metal */; }; + 1FD9562023A89D8A00888FC3 /* MetalResize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9556F23A89D7A00888FC3 /* MetalResize.mm */; }; + 1FD9562123A89D8A00888FC3 /* MetalCropAndResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9557023A89D7A00888FC3 /* MetalCropAndResize.hpp */; }; + 1FD9562223A89D8A00888FC3 /* MetalPermute.metal in Sources */ = {isa = 
PBXBuildFile; fileRef = 1FD9557223A89D7A00888FC3 /* MetalPermute.metal */; }; + 1FD9562323A89D8A00888FC3 /* MetalBackend.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9557323A89D7A00888FC3 /* MetalBackend.metal */; }; + 1FD9562423A89D8A00888FC3 /* MetalConvolution.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9557423A89D7A00888FC3 /* MetalConvolution.mm */; }; + 1FD9562523A89D8A00888FC3 /* MetalConvolutionWinograd.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9557523A89D7A00888FC3 /* MetalConvolutionWinograd.metal */; }; + 1FD9562623A89D8A00888FC3 /* MetalPermute.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9557623A89D7A00888FC3 /* MetalPermute.hpp */; }; + 1FD9562723A89D8A00888FC3 /* MetalQuantizedReshape.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9557723A89D7A00888FC3 /* MetalQuantizedReshape.mm */; }; + 1FD9562823A89D8A00888FC3 /* MetalSliceTF.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9557823A89D7A00888FC3 /* MetalSliceTF.mm */; }; + 1FD9562923A89D8A00888FC3 /* MetalSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9557923A89D7B00888FC3 /* MetalSoftmax.hpp */; }; + 1FD9562A23A89D8A00888FC3 /* MetalTensorConverter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9557A23A89D7B00888FC3 /* MetalTensorConverter.hpp */; }; + 1FD9562B23A89D8A00888FC3 /* MetalDeconvolution.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9557B23A89D7B00888FC3 /* MetalDeconvolution.metal */; }; + 1FD9562C23A89D8A00888FC3 /* MetalTFQuantizedConv2D.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9557C23A89D7B00888FC3 /* MetalTFQuantizedConv2D.mm */; }; + 1FD9562D23A89D8A00888FC3 /* MetalReduction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9557D23A89D7B00888FC3 /* MetalReduction.hpp */; }; + 1FD9562E23A89D8A00888FC3 /* MetalReshape.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9557E23A89D7B00888FC3 /* MetalReshape.mm */; }; + 1FD9562F23A89D8A00888FC3 /* MetalFill.metal in Sources */ = 
{isa = PBXBuildFile; fileRef = 1FD9557F23A89D7B00888FC3 /* MetalFill.metal */; }; + 1FD9563023A89D8A00888FC3 /* MetalPReLU.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558023A89D7B00888FC3 /* MetalPReLU.mm */; }; + 1FD9563123A89D8A00888FC3 /* MetalSeLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9558123A89D7B00888FC3 /* MetalSeLU.hpp */; }; + 1FD9563223A89D8A00888FC3 /* MetalBinary.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558223A89D7B00888FC3 /* MetalBinary.metal */; }; + 1FD9563323A89D8A00888FC3 /* MetalGather.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558323A89D7B00888FC3 /* MetalGather.metal */; }; + 1FD9563423A89D8A00888FC3 /* MetalStridedSlice.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558423A89D7B00888FC3 /* MetalStridedSlice.metal */; }; + 1FD9563523A89D8A00888FC3 /* MetalSeLU.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558523A89D7B00888FC3 /* MetalSeLU.metal */; }; + 1FD9563623A89D8A00888FC3 /* MetalPReLU.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558623A89D7B00888FC3 /* MetalPReLU.metal */; }; + 1FD9563723A89D8A00888FC3 /* MetalCast.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9558723A89D7B00888FC3 /* MetalCast.hpp */; }; + 1FD9563823A89D8A00888FC3 /* MNNMetalContext.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558823A89D7B00888FC3 /* MNNMetalContext.mm */; }; + 1FD9563923A89D8A00888FC3 /* MetalBinary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9558923A89D7B00888FC3 /* MetalBinary.hpp */; }; + 1FD9563A23A89D8A00888FC3 /* MetalTanH.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558A23A89D7B00888FC3 /* MetalTanH.metal */; }; + 1FD9563B23A89D8A00888FC3 /* MetalReLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9558B23A89D7B00888FC3 /* MetalReLU.hpp */; }; + 1FD9563C23A89D8A00888FC3 /* MetalTranspose.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558C23A89D7B00888FC3 /* MetalTranspose.mm */; }; + 
1FD9563D23A89D8A00888FC3 /* MetalDequantize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9558D23A89D7C00888FC3 /* MetalDequantize.hpp */; }; + 1FD9563E23A89D8A00888FC3 /* MetalCast.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9558E23A89D7C00888FC3 /* MetalCast.mm */; }; + 1FD9563F23A89D8A00888FC3 /* MetalConvolution1x1.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9558F23A89D7C00888FC3 /* MetalConvolution1x1.hpp */; }; + 1FD9564023A89D8A00888FC3 /* MetalPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9559023A89D7C00888FC3 /* MetalPooling.hpp */; }; + 1FD9564123A89D8A00888FC3 /* MetalSpaceToBatchND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9559123A89D7C00888FC3 /* MetalSpaceToBatchND.hpp */; }; + 1FD9564223A89D8A00888FC3 /* MetalDequantize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559223A89D7C00888FC3 /* MetalDequantize.metal */; }; + 1FD9564323A89D8A00888FC3 /* MetalGatherV2.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559323A89D7C00888FC3 /* MetalGatherV2.mm */; }; + 1FD9564423A89D8A00888FC3 /* MetalReLU.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559423A89D7C00888FC3 /* MetalReLU.mm */; }; + 1FD9564523A89D8A00888FC3 /* MetalUnary.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559523A89D7C00888FC3 /* MetalUnary.metal */; }; + 1FD9564623A89D8A00888FC3 /* MetalFill.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559623A89D7C00888FC3 /* MetalFill.mm */; }; + 1FD9564723A89D8A00888FC3 /* MetalGatherV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9559723A89D7C00888FC3 /* MetalGatherV2.hpp */; }; + 1FD9564823A89D8A00888FC3 /* MetalMatMul.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559823A89D7C00888FC3 /* MetalMatMul.mm */; }; + 1FD9564923A89D8A00888FC3 /* MetalNormalize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559923A89D7C00888FC3 /* MetalNormalize.mm */; }; + 1FD9564A23A89D8A00888FC3 /* MetalSpatialProduct.hpp in Headers */ = {isa = 
PBXBuildFile; fileRef = 1FD9559A23A89D7C00888FC3 /* MetalSpatialProduct.hpp */; }; + 1FD9564B23A89D8A00888FC3 /* MetalInterp.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9559B23A89D7D00888FC3 /* MetalInterp.hpp */; }; + 1FD9564C23A89D8A00888FC3 /* MetalRank.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9559C23A89D7D00888FC3 /* MetalRank.hpp */; }; + 1FD9564D23A89D8A00888FC3 /* MetalSpaceToBatchND.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559D23A89D7D00888FC3 /* MetalSpaceToBatchND.metal */; }; + 1FD9564E23A89D8A00888FC3 /* MetalConcat.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD9559E23A89D7D00888FC3 /* MetalConcat.mm */; }; + 1FD9564F23A89D8A00888FC3 /* MetalConvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD9559F23A89D7D00888FC3 /* MetalConvolution.hpp */; }; + 1FD9565023A89D8A00888FC3 /* MetalResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955A023A89D7D00888FC3 /* MetalResize.hpp */; }; + 1FD9565123A89D8A00888FC3 /* MetalConvolutionWinograd.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955A123A89D7D00888FC3 /* MetalConvolutionWinograd.mm */; }; + 1FD9565223A89D8A00888FC3 /* MetalRange.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955A223A89D7D00888FC3 /* MetalRange.metal */; }; + 1FD9565323A89D8A00888FC3 /* MetalPack.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955A323A89D7D00888FC3 /* MetalPack.mm */; }; + 1FD9565423A89D8A00888FC3 /* MetalTile.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955A423A89D7D00888FC3 /* MetalTile.mm */; }; + 1FD9565523A89D8A00888FC3 /* MetalBatchToSpaceND.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955A523A89D7D00888FC3 /* MetalBatchToSpaceND.mm */; }; + 1FD9565623A89D8A00888FC3 /* MetalTranspose.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955A623A89D7E00888FC3 /* MetalTranspose.metal */; }; + 1FD9565723A89D8A00888FC3 /* MetalConvolution.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955A723A89D7E00888FC3 
/* MetalConvolution.metal */; }; + 1FD9565823A89D8A00888FC3 /* MetalReduction.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955A823A89D7E00888FC3 /* MetalReduction.mm */; }; + 1FD9565923A89D8A00888FC3 /* MetalQuantizedSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955A923A89D7E00888FC3 /* MetalQuantizedSoftmax.hpp */; }; + 1FD9565A23A89D8A00888FC3 /* MetalConvolutionDepthwise.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955AA23A89D7E00888FC3 /* MetalConvolutionDepthwise.mm */; }; + 1FD9565B23A89D8A00888FC3 /* MetalQuantizedMaxPool.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955AB23A89D7E00888FC3 /* MetalQuantizedMaxPool.mm */; }; + 1FD9565C23A89D8A00888FC3 /* MetalInterp.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955AC23A89D7E00888FC3 /* MetalInterp.mm */; }; + 1FD9565D23A89D8A00888FC3 /* MetalConvolutionGEMM.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955AD23A89D7E00888FC3 /* MetalConvolutionGEMM.metal */; }; + 1FD9565E23A89D8A00888FC3 /* MetalQuantizedAvgPool.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955AE23A89D7E00888FC3 /* MetalQuantizedAvgPool.metal */; }; + 1FD9565F23A89D8A00888FC3 /* MetalConvolutionDepthwise.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955AF23A89D7E00888FC3 /* MetalConvolutionDepthwise.metal */; }; + 1FD9566023A89D8A00888FC3 /* MetalPooling.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955B023A89D7E00888FC3 /* MetalPooling.metal */; }; + 1FD9566123A89D8A00888FC3 /* MetalCast.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955B123A89D7E00888FC3 /* MetalCast.metal */; }; + 1FD9566223A89D8A00888FC3 /* MetalSize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955B223A89D7F00888FC3 /* MetalSize.mm */; }; + 1FD9566323A89D8A00888FC3 /* MetalSliceTF.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955B323A89D7F00888FC3 /* MetalSliceTF.hpp */; }; + 1FD9566423A89D8A00888FC3 /* MetalTensorConverter.mm in Sources */ = {isa = PBXBuildFile; 
fileRef = 1FD955B423A89D7F00888FC3 /* MetalTensorConverter.mm */; }; + 1FD9566523A89D8A00888FC3 /* MetalTile.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955B523A89D7F00888FC3 /* MetalTile.hpp */; }; + 1FD9566623A89D8A00888FC3 /* MetalSqueeze.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955B623A89D7F00888FC3 /* MetalSqueeze.mm */; }; + 1FD9566723A89D8A00888FC3 /* MetalGather.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955B723A89D7F00888FC3 /* MetalGather.hpp */; }; + 1FD9566823A89D8A00888FC3 /* MetalReLU6.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955B823A89D7F00888FC3 /* MetalReLU6.mm */; }; + 1FD9566923A89D8A00888FC3 /* MetalRange.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955B923A89D7F00888FC3 /* MetalRange.hpp */; }; + 1FD9566A23A89D8A00888FC3 /* MetalPack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955BA23A89D7F00888FC3 /* MetalPack.hpp */; }; + 1FD9566B23A89D8A00888FC3 /* MetalLRN.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955BB23A89D7F00888FC3 /* MetalLRN.hpp */; }; + 1FD9566C23A89D8A00888FC3 /* MetalUnary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955BC23A89D7F00888FC3 /* MetalUnary.hpp */; }; + 1FD9566D23A89D8A00888FC3 /* MetalDefine.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955BD23A89D8000888FC3 /* MetalDefine.h */; }; + 1FD9566E23A89D8A00888FC3 /* MetalQuantizedAvgPool.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955BE23A89D8000888FC3 /* MetalQuantizedAvgPool.mm */; }; + 1FD9566F23A89D8A00888FC3 /* MetalSqueeze.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955BF23A89D8000888FC3 /* MetalSqueeze.hpp */; }; + 1FD9567023A89D8A00888FC3 /* MetalEltwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955C023A89D8000888FC3 /* MetalEltwise.hpp */; }; + 1FD9567123A89D8A00888FC3 /* MetalROIPooling.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955C123A89D8000888FC3 /* MetalROIPooling.mm */; }; + 1FD9567223A89D8A00888FC3 /* MetalMatMul.hpp in Headers 
*/ = {isa = PBXBuildFile; fileRef = 1FD955C223A89D8000888FC3 /* MetalMatMul.hpp */; }; + 1FD9567323A89D8A00888FC3 /* MetalSigmoid.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955C323A89D8000888FC3 /* MetalSigmoid.hpp */; }; + 1FD9567423A89D8A00888FC3 /* MetalSize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955C423A89D8000888FC3 /* MetalSize.hpp */; }; + 1FD9567523A89D8A00888FC3 /* MetalQuantizedMaxPool.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955C523A89D8000888FC3 /* MetalQuantizedMaxPool.metal */; }; + 1FD9567623A89D8A00888FC3 /* MetalReshape.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955C623A89D8000888FC3 /* MetalReshape.metal */; }; + 1FD9567723A89D8A00888FC3 /* MetalSoftmax.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955C723A89D8000888FC3 /* MetalSoftmax.mm */; }; + 1FD9567823A89D8A00888FC3 /* MetalCrop.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955C823A89D8000888FC3 /* MetalCrop.mm */; }; + 1FD9567923A89D8A00888FC3 /* MetalROIPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955C923A89D8100888FC3 /* MetalROIPooling.hpp */; }; + 1FD9567A23A89D8A00888FC3 /* MetalDeconvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955CA23A89D8100888FC3 /* MetalDeconvolution.hpp */; }; + 1FD9567B23A89D8A00888FC3 /* MetalQuantizedAvgPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955CB23A89D8100888FC3 /* MetalQuantizedAvgPool.hpp */; }; + 1FD9567C23A89D8A00888FC3 /* MetalTanH.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955CC23A89D8100888FC3 /* MetalTanH.mm */; }; + 1FD9567D23A89D8A00888FC3 /* MetalPooling.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955CD23A89D8100888FC3 /* MetalPooling.mm */; }; + 1FD9567E23A89D8A00888FC3 /* MetalEltwise.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955CE23A89D8100888FC3 /* MetalEltwise.metal */; }; + 1FD9567F23A89D8A00888FC3 /* MetalConvolutionWinograd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 
1FD955CF23A89D8100888FC3 /* MetalConvolutionWinograd.hpp */; }; + 1FD9568023A89D8A00888FC3 /* MetalTile.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955D023A89D8100888FC3 /* MetalTile.metal */; }; + 1FD9568123A89D8A00888FC3 /* MetalSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955D123A89D8100888FC3 /* MetalSlice.hpp */; }; + 1FD9568223A89D8A00888FC3 /* MetalLSTM.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955D223A89D8100888FC3 /* MetalLSTM.mm */; }; + 1FD9568323A89D8A00888FC3 /* MetalQuantizedAdd.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955D323A89D8200888FC3 /* MetalQuantizedAdd.metal */; }; + 1FD9568423A89D8A00888FC3 /* MetalGatherV2.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955D423A89D8200888FC3 /* MetalGatherV2.metal */; }; + 1FD9568523A89D8A00888FC3 /* MetalQuantizedSoftmax.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955D523A89D8200888FC3 /* MetalQuantizedSoftmax.mm */; }; + 1FD9568623A89D8A00888FC3 /* MetalOPRegister.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955D623A89D8200888FC3 /* MetalOPRegister.mm */; }; + 1FD9568723A89D8A00888FC3 /* MetalPermute.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955D723A89D8200888FC3 /* MetalPermute.mm */; }; + 1FD9568823A89D8A00888FC3 /* MetalReduction.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955D823A89D8200888FC3 /* MetalReduction.metal */; }; + 1FD9568923A89D8A00888FC3 /* MetalPReLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955D923A89D8200888FC3 /* MetalPReLU.hpp */; }; + 1FD9568A23A89D8A00888FC3 /* MetalSeLU.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955DA23A89D8200888FC3 /* MetalSeLU.mm */; }; + 1FD9568B23A89D8A00888FC3 /* MetalScale.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955DB23A89D8200888FC3 /* MetalScale.metal */; }; + 1FD9568C23A89D8A00888FC3 /* MetalSoftmax.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955DC23A89D8200888FC3 /* MetalSoftmax.metal */; }; + 
1FD9568D23A89D8A00888FC3 /* MetalStridedSlice.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955DD23A89D8200888FC3 /* MetalStridedSlice.mm */; }; + 1FD9568E23A89D8A00888FC3 /* MetalCropAndResize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955DE23A89D8300888FC3 /* MetalCropAndResize.metal */; }; + 1FD9568F23A89D8A00888FC3 /* MetalConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955DF23A89D8300888FC3 /* MetalConvolutionDepthwise.hpp */; }; + 1FD9569023A89D8A00888FC3 /* MetalLRN.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955E023A89D8300888FC3 /* MetalLRN.metal */; }; + 1FD9569123A89D8A00888FC3 /* MetalRange.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955E123A89D8300888FC3 /* MetalRange.mm */; }; + 1FD9569223A89D8A00888FC3 /* MetalSpatialProduct.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955E223A89D8300888FC3 /* MetalSpatialProduct.metal */; }; + 1FD9569323A89D8A00888FC3 /* MetalQuantizedAdd.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955E323A89D8600888FC3 /* MetalQuantizedAdd.mm */; }; + 1FD9569423A89D8A00888FC3 /* MetalLSTM.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955E423A89D8600888FC3 /* MetalLSTM.hpp */; }; + 1FD9569523A89D8A00888FC3 /* MetalRank.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955E523A89D8700888FC3 /* MetalRank.metal */; }; + 1FD9569623A89D8A00888FC3 /* MetalLSTM.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955E623A89D8700888FC3 /* MetalLSTM.metal */; }; + 1FD9569723A89D8A00888FC3 /* MetalSliceTF.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955E723A89D8700888FC3 /* MetalSliceTF.metal */; }; + 1FD9569823A89D8A00888FC3 /* MetalCrop.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955E823A89D8700888FC3 /* MetalCrop.metal */; }; + 1FD9569923A89D8A00888FC3 /* MetalQuantizedReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955E923A89D8700888FC3 /* MetalQuantizedReshape.hpp */; }; + 1FD9569A23A89D8A00888FC3 
/* MetalReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955EA23A89D8700888FC3 /* MetalReshape.hpp */; }; + 1FD9569B23A89D8A00888FC3 /* MetalScale.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955EB23A89D8700888FC3 /* MetalScale.mm */; }; + 1FD9569C23A89D8A00888FC3 /* MetalSize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955EC23A89D8700888FC3 /* MetalSize.metal */; }; + 1FD9569D23A89D8A00888FC3 /* MetalEltwise.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955ED23A89D8700888FC3 /* MetalEltwise.mm */; }; + 1FD9569E23A89D8A00888FC3 /* MetalConvolutionCommon.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955EE23A89D8700888FC3 /* MetalConvolutionCommon.mm */; }; + 1FD9569F23A89D8A00888FC3 /* MetalConvolutionActivation.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955EF23A89D8800888FC3 /* MetalConvolutionActivation.metal */; }; + 1FD956A023A89D8A00888FC3 /* MetalSigmoid.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955F023A89D8800888FC3 /* MetalSigmoid.mm */; }; + 1FD956A123A89D8A00888FC3 /* MetalStridedSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955F123A89D8800888FC3 /* MetalStridedSlice.hpp */; }; + 1FD956A223A89D8A00888FC3 /* MetalTranspose.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955F223A89D8800888FC3 /* MetalTranspose.hpp */; }; + 1FD956A323A89D8A00888FC3 /* MetalGather.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955F323A89D8800888FC3 /* MetalGather.mm */; }; + 1FD956A423A89D8A00888FC3 /* MetalTFQuantizedConv2D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955F423A89D8800888FC3 /* MetalTFQuantizedConv2D.hpp */; }; + 1FD956A523A89D8A00888FC3 /* MetalBackend.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955F523A89D8800888FC3 /* MetalBackend.mm */; }; + 1FD956A623A89D8A00888FC3 /* MetalQuantizedMaxPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955F623A89D8800888FC3 /* MetalQuantizedMaxPool.hpp */; }; + 1FD956A723A89D8A00888FC3 /* 
MetalBatchToSpaceND.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955F723A89D8900888FC3 /* MetalBatchToSpaceND.metal */; }; + 1FD956A823A89D8A00888FC3 /* MNNMetalContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955F823A89D8900888FC3 /* MNNMetalContext.h */; }; + 1FD956A923A89D8A00888FC3 /* MetalConvolutionGEMM.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 1FD955F923A89D8900888FC3 /* MetalConvolutionGEMM.hpp */; }; + 1FD956AA23A89D8A00888FC3 /* MetalConcat.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955FA23A89D8900888FC3 /* MetalConcat.metal */; }; + 1FD956AB23A89D8A00888FC3 /* MetalRank.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955FB23A89D8900888FC3 /* MetalRank.mm */; }; + 1FD956AC23A89D8A00888FC3 /* MetalSpatialProduct.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955FC23A89D8900888FC3 /* MetalSpatialProduct.mm */; }; + 1FD956AD23A89D8A00888FC3 /* MetalUnary.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955FD23A89D8900888FC3 /* MetalUnary.mm */; }; + 1FD956AE23A89D8A00888FC3 /* MetalConvolution1x1.mm in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955FE23A89D8900888FC3 /* MetalConvolution1x1.mm */; }; + 1FD956AF23A89D8A00888FC3 /* MetalSlice.metal in Sources */ = {isa = PBXBuildFile; fileRef = 1FD955FF23A89D8900888FC3 /* MetalSlice.metal */; }; 22EA50A92051677800C3906C /* Metal.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0F78AC261FCD495800205A7C /* Metal.framework */; settings = {ATTRIBUTES = (Required, ); }; }; 22EA50B02051681600C3906C /* MNN.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0F1465B71FA18D1000F9860A /* MNN.framework */; }; - 480529622105DDA400AA776E /* Interpreter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 480529612105DDA400AA776E /* Interpreter.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 48057D852330A8F900F922BE /* ShapeGatherND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48057D842330A8F900F922BE /* ShapeGatherND.cpp */; }; - 
48057D882330A90900F922BE /* CPUGatherND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48057D862330A90900F922BE /* CPUGatherND.hpp */; }; - 48057D892330A90900F922BE /* CPUGatherND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48057D872330A90900F922BE /* CPUGatherND.cpp */; }; - 48057D8C2330E85C00F922BE /* CPUMatrixBandPart.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48057D8A2330E85C00F922BE /* CPUMatrixBandPart.hpp */; }; - 48057D8D2330E85C00F922BE /* CPUMatrixBandPart.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48057D8B2330E85C00F922BE /* CPUMatrixBandPart.cpp */; }; - 48070719231A7B5100528CE5 /* CPUReverseSequence.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48070717231A7B5000528CE5 /* CPUReverseSequence.hpp */; }; - 4807071A231A7B5100528CE5 /* CPUReverseSequence.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48070718231A7B5000528CE5 /* CPUReverseSequence.cpp */; }; - 48070732231E512D00528CE5 /* NeuralNetWorkOp.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4807071E231E512D00528CE5 /* NeuralNetWorkOp.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 48070733231E512D00528CE5 /* Expr.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4807071F231E512D00528CE5 /* Expr.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 48070734231E512D00528CE5 /* MathOp.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48070720231E512D00528CE5 /* MathOp.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 48070735231E512D00528CE5 /* ExprCreator.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48070721231E512D00528CE5 /* ExprCreator.hpp */; }; - 48070736231E512D00528CE5 /* Optimizer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48070722231E512D00528CE5 /* Optimizer.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 48070737231E512D00528CE5 /* Optimizer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48070724231E512D00528CE5 /* Optimizer.cpp */; }; - 48070738231E512D00528CE5 /* Utils.cpp in Sources */ = {isa = PBXBuildFile; fileRef 
= 48070725231E512D00528CE5 /* Utils.cpp */; }; - 48070739231E512D00528CE5 /* Solution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48070726231E512D00528CE5 /* Solution.cpp */; }; - 4807073A231E512D00528CE5 /* MergeOptimizer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48070728231E512D00528CE5 /* MergeOptimizer.hpp */; }; - 4807073B231E512D00528CE5 /* MergeOptimizer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48070729231E512D00528CE5 /* MergeOptimizer.cpp */; }; - 4807073C231E512D00528CE5 /* InsideExpr.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4807072A231E512D00528CE5 /* InsideExpr.hpp */; }; - 4807073D231E512D00528CE5 /* Expr.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4807072B231E512D00528CE5 /* Expr.cpp */; }; - 4807073E231E512D00528CE5 /* MathOp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4807072C231E512D00528CE5 /* MathOp.cpp */; }; - 4807073F231E512D00528CE5 /* InsideExpr.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4807072D231E512D00528CE5 /* InsideExpr.cpp */; }; - 48070740231E512D00528CE5 /* Utils.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4807072E231E512D00528CE5 /* Utils.hpp */; }; - 48070741231E512D00528CE5 /* Solution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4807072F231E512D00528CE5 /* Solution.hpp */; }; - 48070742231E512D00528CE5 /* NeuralNetWorkOp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48070730231E512D00528CE5 /* NeuralNetWorkOp.cpp */; }; - 48070744231E52E300528CE5 /* BasicOptimizer_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 48070743231E52E300528CE5 /* BasicOptimizer_generated.h */; }; - 48265469210ABA3000B2CFEA /* AutoTime.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48265468210ABA3000B2CFEA /* AutoTime.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 4826546C210AF76E00B2CFEA /* HalideRuntime.h in Headers */ = {isa = PBXBuildFile; fileRef = 4826546A210AF76D00B2CFEA /* HalideRuntime.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 
4829D55022AF5C340093E3BE /* CPUSetDiff1D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829D54E22AF5C340093E3BE /* CPUSetDiff1D.cpp */; }; - 4829D55122AF5C340093E3BE /* CPUSetDiff1D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4829D54F22AF5C340093E3BE /* CPUSetDiff1D.hpp */; }; - 483CD482216B1C7B00B05BE9 /* DeconvolutionWithStride.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 483CD480216B1C7B00B05BE9 /* DeconvolutionWithStride.cpp */; }; - 483CD483216B1C7B00B05BE9 /* DeconvolutionWithStride.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 483CD481216B1C7B00B05BE9 /* DeconvolutionWithStride.hpp */; }; - 483CD486216B2F0400B05BE9 /* WinogradOptFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 483CD484216B2F0400B05BE9 /* WinogradOptFunction.cpp */; }; - 483CD487216B2F0400B05BE9 /* WinogradOptFunction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 483CD485216B2F0400B05BE9 /* WinogradOptFunction.hpp */; }; - 483CD489216CDDA100B05BE9 /* MNNAddC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 483CD488216CDDA100B05BE9 /* MNNAddC4WithStride.S */; }; - 483CD48B216CE20D00B05BE9 /* MNNAddC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 483CD48A216CE20D00B05BE9 /* MNNAddC4WithStride.S */; }; - 483CD48D216CE3B500B05BE9 /* MNNCopyC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 483CD48C216CE3B500B05BE9 /* MNNCopyC4WithStride.S */; }; - 483CD48F216CE3BB00B05BE9 /* MNNCopyC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 483CD48E216CE3BB00B05BE9 /* MNNCopyC4WithStride.S */; }; - 4841B5F621EAE98B002E5D66 /* SizeComputer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4841B5F221EAE98B002E5D66 /* SizeComputer.hpp */; }; - 4841B5F721EAE98B002E5D66 /* Backend.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4841B5F321EAE98B002E5D66 /* Backend.cpp */; }; - 4841B5F821EAE98B002E5D66 /* SizeComputer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4841B5F421EAE98B002E5D66 /* SizeComputer.cpp */; }; - 
4841B5F921EAE98B002E5D66 /* Backend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4841B5F521EAE98B002E5D66 /* Backend.hpp */; }; - 4841B5FC21EAE998002E5D66 /* Execution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4841B5FA21EAE998002E5D66 /* Execution.cpp */; }; - 4841B5FD21EAE998002E5D66 /* Execution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4841B5FB21EAE998002E5D66 /* Execution.hpp */; }; - 4841B60C21EC607E002E5D66 /* CPUQuantizedConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4841B60621EC607D002E5D66 /* CPUQuantizedConcat.cpp */; }; - 4841B60D21EC607E002E5D66 /* CPUQuantizedLogistic.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4841B60721EC607D002E5D66 /* CPUQuantizedLogistic.hpp */; }; - 4841B60E21EC607E002E5D66 /* CPUDequantize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4841B60821EC607D002E5D66 /* CPUDequantize.hpp */; }; - 4841B60F21EC607E002E5D66 /* CPUQuantizedConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4841B60921EC607D002E5D66 /* CPUQuantizedConcat.hpp */; }; - 4841B61021EC607E002E5D66 /* CPUQuantizedLogistic.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4841B60A21EC607D002E5D66 /* CPUQuantizedLogistic.cpp */; }; - 4841B61121EC607E002E5D66 /* CPUDequantize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4841B60B21EC607D002E5D66 /* CPUDequantize.cpp */; }; - 4841B61421EC6267002E5D66 /* ShapeDequantize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4841B61221EC6267002E5D66 /* ShapeDequantize.cpp */; }; - 4843AA5422A7E9AB00889A63 /* CPUConv2DBackPropFilter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4843AA4C22A7E9AA00889A63 /* CPUConv2DBackPropFilter.hpp */; }; - 4843AA5522A7E9AB00889A63 /* CPUReluGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4843AA4D22A7E9AA00889A63 /* CPUReluGrad.cpp */; }; - 4843AA5622A7E9AB00889A63 /* CPUPoolGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4843AA4E22A7E9AB00889A63 /* CPUPoolGrad.hpp */; }; - 4843AA5722A7E9AB00889A63 /* 
CPUReluGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4843AA4F22A7E9AB00889A63 /* CPUReluGrad.hpp */; }; - 4843AA5822A7E9AB00889A63 /* CPUPoolGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4843AA5022A7E9AB00889A63 /* CPUPoolGrad.cpp */; }; - 4843AA5922A7E9AB00889A63 /* CPUConv2DBackPropFilter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4843AA5122A7E9AB00889A63 /* CPUConv2DBackPropFilter.cpp */; }; - 4843AA5A22A7E9AB00889A63 /* CPUSoftmaxGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4843AA5222A7E9AB00889A63 /* CPUSoftmaxGrad.cpp */; }; - 4843AA5B22A7E9AB00889A63 /* CPUSoftmaxGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4843AA5322A7E9AB00889A63 /* CPUSoftmaxGrad.hpp */; }; - 4847D41D22C0739A0049F3CA /* ShapePadding.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4847D41C22C0739A0049F3CA /* ShapePadding.cpp */; }; - 4847D42022C07E850049F3CA /* CPUPadding.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4847D41E22C07E850049F3CA /* CPUPadding.cpp */; }; - 4847D42122C07E850049F3CA /* CPUPadding.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4847D41F22C07E850049F3CA /* CPUPadding.hpp */; }; - 4851BE102122C1BC009BB0AC /* Tensor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4851BE0F2122C1BC009BB0AC /* Tensor.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 485DD411217F495500129159 /* CPUQuantizedAdd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 485DD40B217F495400129159 /* CPUQuantizedAdd.hpp */; }; - 485DD412217F495500129159 /* CPUQuantizedSoftmax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 485DD40C217F495500129159 /* CPUQuantizedSoftmax.cpp */; }; - 485DD414217F495500129159 /* CPUQuantizedAdd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 485DD40E217F495500129159 /* CPUQuantizedAdd.cpp */; }; - 485DD415217F495500129159 /* CPUQuantizedSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 485DD40F217F495500129159 /* CPUQuantizedSoftmax.hpp */; }; - 485DD418217F49C500129159 /* 
CPUQuanConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 485DD416217F49C500129159 /* CPUQuanConvolutionDepthwise.hpp */; }; - 485DD419217F49C500129159 /* CPUQuanConvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 485DD417217F49C500129159 /* CPUQuanConvolutionDepthwise.cpp */; }; - 485DD423217F4C7600129159 /* CPUFixedPoint.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 485DD422217F4C7600129159 /* CPUFixedPoint.hpp */; }; - 485DD425218161E100129159 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD424218161E100129159 /* MNNConvRunForUnitDepthWiseUint8.S */; }; - 485DD4272181898C00129159 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD4262181898C00129159 /* MNNUInt8ToInt16WithOffsetC4Common.S */; }; - 485DD4292181938C00129159 /* MNNQuanToDestUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD4282181938C00129159 /* MNNQuanToDestUint8.S */; }; - 485DD42B21819FB000129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD42A21819FB000129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S */; }; - 485DD42D2181A68F00129159 /* MNNConvRunForLineDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD42C2181A68F00129159 /* MNNConvRunForLineDepthWiseUint8.S */; }; - 485DD4302181E94300129159 /* MNNQuanToDestUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD42E2181E94300129159 /* MNNQuanToDestUint8.S */; }; - 485DD4312181E94300129159 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD42F2181E94300129159 /* MNNUInt8ToInt16WithOffsetC4Common.S */; }; - 485DD4342182AE8100129159 /* MNNConvRunForLineDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD4322182AE8000129159 /* MNNConvRunForLineDepthWiseUint8.S */; }; - 485DD4352182AE8100129159 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 
485DD4332182AE8100129159 /* MNNConvRunForUnitDepthWiseUint8.S */; }; - 485DD4372182B07B00129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */ = {isa = PBXBuildFile; fileRef = 485DD4362182B07B00129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S */; }; - 486B4BB9222901D6001E73E3 /* MNNMatrixProd.S in Sources */ = {isa = PBXBuildFile; fileRef = 486B4BB8222901D5001E73E3 /* MNNMatrixProd.S */; }; - 486B4BBB222901E5001E73E3 /* MNNMatrixProd.S in Sources */ = {isa = PBXBuildFile; fileRef = 486B4BBA222901E5001E73E3 /* MNNMatrixProd.S */; }; - 486B4BC1222D4831001E73E3 /* MNNMatrixMax.S in Sources */ = {isa = PBXBuildFile; fileRef = 486B4BC0222D4831001E73E3 /* MNNMatrixMax.S */; }; - 486B4BC3222D4845001E73E3 /* MNNMatrixMax.S in Sources */ = {isa = PBXBuildFile; fileRef = 486B4BC2222D4845001E73E3 /* MNNMatrixMax.S */; }; - 486FDF40223E495B00F487FB /* CPUBinary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 486FDF3C223E495A00F487FB /* CPUBinary.hpp */; }; - 486FDF41223E495B00F487FB /* CPUBinary.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 486FDF3D223E495A00F487FB /* CPUBinary.cpp */; }; - 486FDF42223E495B00F487FB /* CPUUnary.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 486FDF3E223E495A00F487FB /* CPUUnary.cpp */; }; - 486FDF43223E495B00F487FB /* CPUUnary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 486FDF3F223E495B00F487FB /* CPUUnary.hpp */; }; 486FDF47223E4B2800F487FB /* MetalBinary.mm in Sources */ = {isa = PBXBuildFile; fileRef = 486FDF44223E4B2700F487FB /* MetalBinary.mm */; }; 486FDF48223E4B2800F487FB /* MetalBinary.metal in Sources */ = {isa = PBXBuildFile; fileRef = 486FDF45223E4B2800F487FB /* MetalBinary.metal */; }; - 486FDF49223E4B2800F487FB /* MetalBinary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 486FDF46223E4B2800F487FB /* MetalBinary.hpp */; }; - 486FDF4C2241E95700F487FB /* CPURuntime.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 486FDF4A2241E95700F487FB /* CPURuntime.cpp */; }; - 486FDF4D2241E95700F487FB /* CPURuntime.hpp in 
Headers */ = {isa = PBXBuildFile; fileRef = 486FDF4B2241E95700F487FB /* CPURuntime.hpp */; }; - 487970D422C9BF4B00795502 /* CPUFloatToInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 487970CC22C9BF4A00795502 /* CPUFloatToInt8.hpp */; }; - 487970D522C9BF4B00795502 /* CPUDepthwiseConvInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 487970CD22C9BF4A00795502 /* CPUDepthwiseConvInt8.cpp */; }; - 487970D622C9BF4B00795502 /* CPUConvInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 487970CE22C9BF4A00795502 /* CPUConvInt8.hpp */; }; - 487970D722C9BF4B00795502 /* CPUDepthwiseConvInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 487970CF22C9BF4A00795502 /* CPUDepthwiseConvInt8.hpp */; }; - 487970D822C9BF4B00795502 /* CPUInt8ToFloat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 487970D022C9BF4A00795502 /* CPUInt8ToFloat.cpp */; }; - 487970D922C9BF4B00795502 /* CPUInt8ToFloat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 487970D122C9BF4A00795502 /* CPUInt8ToFloat.hpp */; }; - 487970DA22C9BF4B00795502 /* CPUConvInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 487970D222C9BF4B00795502 /* CPUConvInt8.cpp */; }; - 487970DB22C9BF4B00795502 /* CPUFloatToInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 487970D322C9BF4B00795502 /* CPUFloatToInt8.cpp */; }; - 487970E222C9BF5E00795502 /* MNNGemmInt8AddBiasScale_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970DC22C9BF5E00795502 /* MNNGemmInt8AddBiasScale_8x4_Unit.S */; }; - 487970E322C9BF5E00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970DD22C9BF5E00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */; }; - 487970E422C9BF5E00795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970DE22C9BF5E00795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S */; }; - 487970E522C9BF5E00795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970DF22C9BF5E00795502 /* 
MNNLineDepthWiseInt8AddBiasScaleUnit.S */; }; - 487970E622C9BF5E00795502 /* MNNInt8ScaleToFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970E022C9BF5E00795502 /* MNNInt8ScaleToFloat.S */; }; - 487970E722C9BF5E00795502 /* MNNReluInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970E122C9BF5E00795502 /* MNNReluInt8.S */; }; - 487970ED22C9BF7200795502 /* MNNInt8ScaleToFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970E822C9BF7200795502 /* MNNInt8ScaleToFloat.S */; }; - 487970EE22C9BF7200795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970E922C9BF7200795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S */; }; - 487970EF22C9BF7200795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970EA22C9BF7200795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */; }; - 487970F022C9BF7200795502 /* MNNReluInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970EB22C9BF7200795502 /* MNNReluInt8.S */; }; - 487970F422C9C07000795502 /* CPUPoolInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 487970F222C9C07000795502 /* CPUPoolInt8.hpp */; }; - 487970F522C9C07000795502 /* CPUPoolInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 487970F322C9C07000795502 /* CPUPoolInt8.cpp */; }; - 487970F722C9C19F00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 487970F622C9C19F00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */; }; - 4887145A215153F900CCE0D8 /* ErrorCode.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48871459215153F900CCE0D8 /* ErrorCode.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 48871465215225D600CCE0D8 /* ImageProcess.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48871464215225D600CCE0D8 /* ImageProcess.hpp */; settings = {ATTRIBUTES = (Public, ); }; }; - 4887147A215249EA00CCE0D8 /* Matrix.h in Headers */ = {isa = PBXBuildFile; fileRef = 48871478215249EA00CCE0D8 /* Matrix.h */; settings = {ATTRIBUTES = 
(Public, ); }; }; - 4887147B215249EA00CCE0D8 /* Rect.h in Headers */ = {isa = PBXBuildFile; fileRef = 48871479215249EA00CCE0D8 /* Rect.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 48887582215B639F0079B12E /* TensorUtils.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873AF215B639D0079B12E /* TensorUtils.hpp */; }; - 48887584215B639F0079B12E /* Concurrency.h in Headers */ = {isa = PBXBuildFile; fileRef = 488873B1215B639D0079B12E /* Concurrency.h */; }; - 48887588215B639F0079B12E /* AutoStorage.h in Headers */ = {isa = PBXBuildFile; fileRef = 488873B5215B639D0079B12E /* AutoStorage.h */; }; - 4888758A215B639F0079B12E /* Macro.h in Headers */ = {isa = PBXBuildFile; fileRef = 488873B7215B639D0079B12E /* Macro.h */; }; - 4888758C215B639F0079B12E /* MNNMemoryUtils.h in Headers */ = {isa = PBXBuildFile; fileRef = 488873B9215B639D0079B12E /* MNNMemoryUtils.h */; }; - 4888758D215B639F0079B12E /* TensorUtils.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 488873BA215B639D0079B12E /* TensorUtils.cpp */; }; - 48887590215B639F0079B12E /* BufferAllocator.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873BD215B639D0079B12E /* BufferAllocator.hpp */; }; - 48887595215B639F0079B12E /* Tensor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 488873C2215B639D0079B12E /* Tensor.cpp */; }; - 48887596215B639F0079B12E /* MNNMemoryUtils.c in Sources */ = {isa = PBXBuildFile; fileRef = 488873C3215B639D0079B12E /* MNNMemoryUtils.c */; }; - 48887597215B639F0079B12E /* AutoTime.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 488873C4215B639D0079B12E /* AutoTime.cpp */; }; - 48887599215B639F0079B12E /* NonCopyable.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873C6215B639D0079B12E /* NonCopyable.hpp */; }; 4888759B215B639F0079B12E /* MetalSpatialProduct.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873C9215B639D0079B12E /* MetalSpatialProduct.mm */; }; 4888759C215B639F0079B12E /* MetalReshape.metal in Sources */ = {isa = PBXBuildFile; fileRef = 
488873CA215B639D0079B12E /* MetalReshape.metal */; }; 4888759D215B639F0079B12E /* MetalLRN.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873CB215B639D0079B12E /* MetalLRN.metal */; }; - 4888759E215B639F0079B12E /* MetalLRN.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873CC215B639D0079B12E /* MetalLRN.hpp */; }; 4888759F215B639F0079B12E /* MetalPermute.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873CD215B639D0079B12E /* MetalPermute.metal */; }; - 488875A0215B639F0079B12E /* MetalReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873CE215B639D0079B12E /* MetalReshape.hpp */; }; 488875A1215B639F0079B12E /* MetalTanH.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873CF215B639D0079B12E /* MetalTanH.mm */; }; 488875A2215B639F0079B12E /* MetalSoftmax.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873D0215B639D0079B12E /* MetalSoftmax.mm */; }; 488875A3215B639F0079B12E /* MetalReLU.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873D1215B639D0079B12E /* MetalReLU.metal */; }; - 488875A5215B639F0079B12E /* MetalROIPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873D3215B639D0079B12E /* MetalROIPooling.hpp */; }; - 488875A6215B639F0079B12E /* MetalTanH.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873D4215B639D0079B12E /* MetalTanH.hpp */; }; 488875A7215B639F0079B12E /* MetalLRN.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873D5215B639D0079B12E /* MetalLRN.mm */; }; 488875A8215B639F0079B12E /* MetalNormalize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873D6215B639D0079B12E /* MetalNormalize.mm */; }; 488875A9215B639F0079B12E /* MNNMetalContext.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873D7215B639D0079B12E /* MNNMetalContext.mm */; }; 488875AB215B639F0079B12E /* MetalLSTM.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873D9215B639D0079B12E /* MetalLSTM.metal */; }; - 488875AC215B639F0079B12E /* MetalNormalize.hpp in Headers */ = {isa = PBXBuildFile; 
fileRef = 488873DA215B639D0079B12E /* MetalNormalize.hpp */; }; - 488875AD215B639F0079B12E /* MetalBackend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873DB215B639D0079B12E /* MetalBackend.hpp */; }; 488875AE215B639F0079B12E /* MetalConvolution.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873DC215B639D0079B12E /* MetalConvolution.metal */; }; 488875AF215B639F0079B12E /* MetalSlice.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873DD215B639D0079B12E /* MetalSlice.mm */; }; 488875B0215B639F0079B12E /* MetalEltwise.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873DE215B639D0079B12E /* MetalEltwise.mm */; }; 488875B2215B639F0079B12E /* MetalBackend.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873E0215B639D0079B12E /* MetalBackend.metal */; }; 488875B3215B639F0079B12E /* MetalConvolution.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873E1215B639D0079B12E /* MetalConvolution.mm */; }; - 488875B4215B639F0079B12E /* MetalReLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873E2215B639D0079B12E /* MetalReLU.hpp */; }; - 488875B5215B639F0079B12E /* MetalEltwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873E3215B639D0079B12E /* MetalEltwise.hpp */; }; - 488875B6215B639F0079B12E /* MetalPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873E4215B639D0079B12E /* MetalPooling.hpp */; }; - 488875B7215B639F0079B12E /* MetalSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873E5215B639D0079B12E /* MetalSlice.hpp */; }; 488875B8215B639F0079B12E /* MetalConcat.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873E6215B639D0079B12E /* MetalConcat.mm */; }; 488875BA215B639F0079B12E /* MetalNormalize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873E8215B639D0079B12E /* MetalNormalize.metal */; }; 488875BB215B639F0079B12E /* MetalSoftmax.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873E9215B639D0079B12E /* MetalSoftmax.metal */; }; - 488875BC215B639F0079B12E /* 
MetalLSTM.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873EA215B639D0079B12E /* MetalLSTM.hpp */; }; - 488875BD215B639F0079B12E /* MetalPReLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873EB215B639D0079B12E /* MetalPReLU.hpp */; }; - 488875BE215B639F0079B12E /* MetalPermute.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873EC215B639D0079B12E /* MetalPermute.hpp */; }; - 488875C2215B639F0079B12E /* MetalSpatialProduct.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873F0215B639D0079B12E /* MetalSpatialProduct.hpp */; }; - 488875C3215B639F0079B12E /* MetalResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873F1215B639D0079B12E /* MetalResize.hpp */; }; 488875C4215B639F0079B12E /* MetalDeconvolution.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873F2215B639D0079B12E /* MetalDeconvolution.mm */; }; 488875C5215B639F0079B12E /* MetalReLU.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873F3215B639D0079B12E /* MetalReLU.mm */; }; 488875C6215B639F0079B12E /* MetalPooling.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873F4215B639D0079B12E /* MetalPooling.mm */; }; 488875C8215B639F0079B12E /* MetalScale.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873F6215B639D0079B12E /* MetalScale.metal */; }; - 488875C9215B639F0079B12E /* MetalConvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873F7215B639D0079B12E /* MetalConvolution.hpp */; }; 488875CA215B639F0079B12E /* MetalScale.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873F8215B639D0079B12E /* MetalScale.mm */; }; - 488875CB215B639F0079B12E /* MetalSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873F9215B639D0079B12E /* MetalSoftmax.hpp */; }; 488875CC215B639F0079B12E /* MetalReshape.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873FA215B639D0079B12E /* MetalReshape.mm */; }; 488875CD215B639F0079B12E /* MetalTanH.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873FB215B639D0079B12E /* MetalTanH.metal */; }; 
488875CE215B639F0079B12E /* MetalDeconvolution.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873FC215B639D0079B12E /* MetalDeconvolution.metal */; }; - 488875CF215B639F0079B12E /* MetalDeconvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488873FD215B639D0079B12E /* MetalDeconvolution.hpp */; }; 488875D0215B639F0079B12E /* MetalPooling.metal in Sources */ = {isa = PBXBuildFile; fileRef = 488873FE215B639D0079B12E /* MetalPooling.metal */; }; 488875D1215B639F0079B12E /* MetalInterp.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873FF215B639D0079B12E /* MetalInterp.mm */; }; 488875D2215B639F0079B12E /* MetalROIPooling.metal in Sources */ = {isa = PBXBuildFile; fileRef = 48887400215B639D0079B12E /* MetalROIPooling.metal */; }; 488875D3215B639F0079B12E /* MetalSpatialProduct.metal in Sources */ = {isa = PBXBuildFile; fileRef = 48887401215B639D0079B12E /* MetalSpatialProduct.metal */; }; 488875D4215B639F0079B12E /* MetalROIPooling.mm in Sources */ = {isa = PBXBuildFile; fileRef = 48887402215B639D0079B12E /* MetalROIPooling.mm */; }; - 488875D5215B639F0079B12E /* MetalInterp.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887403215B639D0079B12E /* MetalInterp.hpp */; }; - 488875D6215B639F0079B12E /* MNNMetalContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 48887404215B639D0079B12E /* MNNMetalContext.h */; }; 488875D7215B639F0079B12E /* MetalBackend.mm in Sources */ = {isa = PBXBuildFile; fileRef = 48887405215B639D0079B12E /* MetalBackend.mm */; }; - 488875D8215B639F0079B12E /* MetalScale.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887406215B639D0079B12E /* MetalScale.hpp */; }; 488875D9215B639F0079B12E /* MetalSlice.metal in Sources */ = {isa = PBXBuildFile; fileRef = 48887407215B639D0079B12E /* MetalSlice.metal */; }; 488875DA215B639F0079B12E /* MetalResize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 48887408215B639D0079B12E /* MetalResize.metal */; }; 488875DB215B639F0079B12E /* MetalPReLU.metal in Sources */ = 
{isa = PBXBuildFile; fileRef = 48887409215B639D0079B12E /* MetalPReLU.metal */; }; - 488875DC215B639F0079B12E /* MetalConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888740A215B639D0079B12E /* MetalConcat.hpp */; }; 488875DD215B639F0079B12E /* MetalPermute.mm in Sources */ = {isa = PBXBuildFile; fileRef = 4888740B215B639D0079B12E /* MetalPermute.mm */; }; 488875DE215B639F0079B12E /* MetalPReLU.mm in Sources */ = {isa = PBXBuildFile; fileRef = 4888740C215B639D0079B12E /* MetalPReLU.mm */; }; 488875DF215B639F0079B12E /* MetalLSTM.mm in Sources */ = {isa = PBXBuildFile; fileRef = 4888740D215B639D0079B12E /* MetalLSTM.mm */; }; 488875E0215B639F0079B12E /* MetalResize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 4888740E215B639D0079B12E /* MetalResize.mm */; }; 488875E1215B639F0079B12E /* MetalEltwise.metal in Sources */ = {isa = PBXBuildFile; fileRef = 4888740F215B639D0079B12E /* MetalEltwise.metal */; }; - 488875E2215B639F0079B12E /* CPUCropAndResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887411215B639D0079B12E /* CPUCropAndResize.cpp */; }; - 488875E3215B639F0079B12E /* CPUSelu.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887412215B639D0079B12E /* CPUSelu.cpp */; }; - 488875E4215B639F0079B12E /* CPUArgMax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887413215B639D0079B12E /* CPUArgMax.cpp */; }; - 488875E5215B639F0079B12E /* CPURange.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887414215B639D0079B12E /* CPURange.hpp */; }; - 488875E6215B639F0079B12E /* CPUConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887415215B639D0079B12E /* CPUConvolutionDepthwise.hpp */; }; - 488875E8215B639F0079B12E /* CPUTFQuantizedConv2D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887417215B639D0079B12E /* CPUTFQuantizedConv2D.hpp */; }; - 488875EA215B639F0079B12E /* CPUScale.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887419215B639D0079B12E /* CPUScale.cpp */; }; - 488875EB215B639F0079B12E /* 
CPUTensorConvert.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888741A215B639D0079B12E /* CPUTensorConvert.hpp */; }; - 488875EC215B639F0079B12E /* CPUQuantizationUtils.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888741B215B639D0079B12E /* CPUQuantizationUtils.hpp */; }; - 488875ED215B639F0079B12E /* CPUSoftmax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888741C215B639D0079B12E /* CPUSoftmax.cpp */; }; - 488875EE215B639F0079B12E /* CPUTile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888741D215B639D0079B12E /* CPUTile.cpp */; }; - 488875EF215B639F0079B12E /* CPUQuantizedAvgPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888741E215B639D0079B12E /* CPUQuantizedAvgPool.hpp */; }; - 488875F1215B639F0079B12E /* CPUConvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887420215B639D0079B12E /* CPUConvolution.hpp */; }; - 488875F2215B639F0079B12E /* CPUEltwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887421215B639D0079B12E /* CPUEltwise.hpp */; }; - 488875F3215B639F0079B12E /* CPUResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887422215B639D0079B12E /* CPUResize.cpp */; }; - 488875F4215B639F0079B12E /* CPUCast.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887423215B639D0079B12E /* CPUCast.cpp */; }; - 488875F5215B639F0079B12E /* CPUGather.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887424215B639D0079B12E /* CPUGather.cpp */; }; - 488875F6215B639F0079B12E /* CPUAsString.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887425215B639D0079B12E /* CPUAsString.hpp */; }; - 488875F7215B639F0079B12E /* CPUProposal.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887426215B639D0079B12E /* CPUProposal.cpp */; }; - 488875F8215B639F0079B12E /* CPUTanh.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887427215B639D0079B12E /* CPUTanh.hpp */; }; - 488875F9215B639F0079B12E /* CPUReduceJoin.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887428215B639D0079B12E /* CPUReduceJoin.hpp 
*/; }; - 488875FA215B639F0079B12E /* CPUInterp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887429215B639D0079B12E /* CPUInterp.cpp */; }; - 488875FB215B639F0079B12E /* CPUConst.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888742A215B639D0079B12E /* CPUConst.cpp */; }; - 488875FC215B639F0079B12E /* CPUQuantizedReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888742B215B639D0079B12E /* CPUQuantizedReshape.hpp */; }; - 488875FD215B639F0079B12E /* CPUDetectionOutput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888742C215B639D0079B12E /* CPUDetectionOutput.cpp */; }; - 488875FE215B639F0079B12E /* CPUPriorbox.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888742D215B639D0079B12E /* CPUPriorbox.hpp */; }; - 488875FF215B639F0079B12E /* CPUSize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888742E215B639D0079B12E /* CPUSize.cpp */; }; - 48887600215B639F0079B12E /* CPUMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888742F215B639D0079B12E /* CPUMatMul.cpp */; }; - 48887602215B639F0079B12E /* CPUPermute.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887431215B639D0079B12E /* CPUPermute.hpp */; }; - 48887603215B639F0079B12E /* CPUFill.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887432215B639D0079B12E /* CPUFill.hpp */; }; - 48887604215B639F0079B12E /* CPUTranspose.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887433215B639D0079B12E /* CPUTranspose.hpp */; }; - 48887605215B639F0079B12E /* CPUSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887434215B639D0079B12E /* CPUSlice.hpp */; }; - 48887606215B639F0079B12E /* CPULRN.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887435215B639D0079B12E /* CPULRN.hpp */; }; - 48887607215B639F0079B12E /* CPUStridedSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887436215B639D0079B12E /* CPUStridedSlice.hpp */; }; - 48887608215B639F0079B12E /* CPUWhere.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887437215B639D0079B12E /* 
CPUWhere.cpp */; }; - 48887609215B639F0079B12E /* CPUTopKV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887438215B639D0079B12E /* CPUTopKV2.hpp */; }; - 4888760A215B639F0079B12E /* CPUROIPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887439215B639D0079B12E /* CPUROIPooling.hpp */; }; - 4888760B215B639F0079B12E /* CPUUnpack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888743A215B639D0079B12E /* CPUUnpack.hpp */; }; - 4888760C215B639F0079B12E /* CPUQuantizedMaxPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888743B215B639D0079B12E /* CPUQuantizedMaxPool.cpp */; }; - 4888760D215B639F0079B12E /* CPUExpandDims.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888743C215B639D0079B12E /* CPUExpandDims.hpp */; }; - 4888760E215B639F0079B12E /* CPUReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888743D215B639D0079B12E /* CPUReshape.hpp */; }; - 4888760F215B639F0079B12E /* CPUReduction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888743E215B639D0079B12E /* CPUReduction.hpp */; }; - 48887610215B639F0079B12E /* CPUDeconvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888743F215B639D0079B12E /* CPUDeconvolution.cpp */; }; - 48887611215B639F0079B12E /* CPURelu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887440215B639D0079B12E /* CPURelu.hpp */; }; - 48887612215B639F0079B12E /* CPUSigmoid.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887441215B639D0079B12E /* CPUSigmoid.hpp */; }; - 48887616215B639F0079B12E /* CPUNormalize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887445215B639D0079B12E /* CPUNormalize.hpp */; }; - 48887617215B639F0079B12E /* CPULSTM.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887446215B639D0079B12E /* CPULSTM.hpp */; }; - 48887618215B639F0079B12E /* CPUPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887447215B639D0079B12E /* CPUPool.cpp */; }; - 48887619215B639F0079B12E /* CPUDeconvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef 
= 48887448215B639D0079B12E /* CPUDeconvolutionDepthwise.hpp */; }; - 4888761C215B639F0079B12E /* CPURank.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888744B215B639D0079B12E /* CPURank.hpp */; }; - 4888761D215B639F0079B12E /* CPUSpatialProduct.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888744C215B639D0079B12E /* CPUSpatialProduct.hpp */; }; - 4888761E215B639F0079B12E /* CPUShape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888744D215B639D0079B12E /* CPUShape.cpp */; }; - 4888761F215B639F0079B12E /* CPUBackend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888744E215B639D0079B12E /* CPUBackend.hpp */; }; - 48887620215B639F0079B12E /* CPUConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888744F215B639D0079B12E /* CPUConcat.cpp */; }; - 48887621215B639F0079B12E /* CPUInnerProduct.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887450215B639D0079B12E /* CPUInnerProduct.hpp */; }; - 48887623215B639F0079B12E /* CPUPack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887452215B639D0079B12E /* CPUPack.hpp */; }; - 48887624215B639F0079B12E /* CPUNonMaxSuppressionV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887453215B639D0079B12E /* CPUNonMaxSuppressionV2.hpp */; }; - 48887625215B639F0079B12E /* CPUSliceTf.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887454215B639D0079B12E /* CPUSliceTf.hpp */; }; - 48887626215B639F0079B12E /* CPUGatherV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887455215B639D0079B12E /* CPUGatherV2.hpp */; }; - 48887628215B639F0079B12E /* CPUSqueeze.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887457215B639D0079B12E /* CPUSqueeze.hpp */; }; - 48887629215B639F0079B12E /* CPUTranspose.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887458215B639D0079B12E /* CPUTranspose.cpp */; }; - 4888762A215B639F0079B12E /* CPUFill.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887459215B639D0079B12E /* CPUFill.cpp */; }; - 4888762B215B639F0079B12E /* CPUSlice.cpp in Sources */ 
= {isa = PBXBuildFile; fileRef = 4888745A215B639D0079B12E /* CPUSlice.cpp */; }; - 4888762C215B639F0079B12E /* CPUWhere.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888745B215B639D0079B12E /* CPUWhere.hpp */; }; - 4888762D215B639F0079B12E /* CPULRN.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888745C215B639D0079B12E /* CPULRN.cpp */; }; - 4888762E215B639F0079B12E /* CPUStridedSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888745D215B639D0079B12E /* CPUStridedSlice.cpp */; }; - 4888762F215B639F0079B12E /* CPUROIPooling.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888745E215B639D0079B12E /* CPUROIPooling.cpp */; }; - 48887630215B639F0079B12E /* CPUTopKV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888745F215B639D0079B12E /* CPUTopKV2.cpp */; }; - 48887631215B639F0079B12E /* CPUUnpack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887460215B639D0079B12E /* CPUUnpack.cpp */; }; - 48887633215B639F0079B12E /* CPUSigmoid.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887462215B639D0079B12E /* CPUSigmoid.cpp */; }; - 48887634215B639F0079B12E /* CPUQuantizedMaxPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887463215B639D0079B12E /* CPUQuantizedMaxPool.hpp */; }; - 48887635215B639F0079B12E /* CPUReduction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887464215B639D0079B12E /* CPUReduction.cpp */; }; - 48887636215B639F0079B12E /* CPUReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887465215B639D0079B12E /* CPUReshape.cpp */; }; - 48887637215B639F0079B12E /* CPUDeconvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887466215B639D0079B12E /* CPUDeconvolution.hpp */; }; - 48887638215B639F0079B12E /* CPUExpandDims.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887467215B639D0079B12E /* CPUExpandDims.cpp */; }; - 48887639215B639F0079B12E /* CPURelu.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887468215B639D0079B12E /* CPURelu.cpp */; }; - 4888763B215B639F0079B12E /* 
CommonOptFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888746B215B639D0079B12E /* CommonOptFunction.cpp */; }; - 4888763D215B639F0079B12E /* Convolution3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888746D215B639D0079B12E /* Convolution3x3.cpp */; }; - 4888763F215B639F0079B12E /* CommonOptFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 4888746F215B639D0079B12E /* CommonOptFunction.h */; }; - 48887640215B639F0079B12E /* ConvolutionWinograd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887470215B639D0079B12E /* ConvolutionWinograd.cpp */; }; - 48887641215B639F0079B12E /* Int8FunctionsOpt.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887471215B639D0079B12E /* Int8FunctionsOpt.cpp */; }; - 48887643215B639F0079B12E /* ConvOpt.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887473215B639D0079B12E /* ConvOpt.cpp */; }; - 48887644215B639F0079B12E /* ConvolutionTiledExecutor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887474215B639D0079B12E /* ConvolutionTiledExecutor.hpp */; }; - 48887646215B639F0079B12E /* ConvolutionIntFactory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887476215B639D0079B12E /* ConvolutionIntFactory.cpp */; }; - 48887647215B639F0079B12E /* ConvolutionGroup.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887477215B639D0079B12E /* ConvolutionGroup.hpp */; }; - 48887648215B639F0079B12E /* ConvolutionFloatFactory.h in Headers */ = {isa = PBXBuildFile; fileRef = 48887478215B639D0079B12E /* ConvolutionFloatFactory.h */; }; - 4888764A215B639F0079B12E /* ConvolutionInt8Executor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888747A215B639D0079B12E /* ConvolutionInt8Executor.cpp */; }; - 4888764B215B639F0079B12E /* ResizeFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 4888747B215B639D0079B12E /* ResizeFunction.h */; }; - 4888764D215B639F0079B12E /* ConvolutionIntFactory.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888747D215B639D0079B12E /* 
ConvolutionIntFactory.hpp */; }; - 4888764F215B639F0079B12E /* ConvolutionGroup.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888747F215B639D0079B12E /* ConvolutionGroup.cpp */; }; - 48887651215B639F0079B12E /* ConvolutionFloatFactory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887481215B639D0079B12E /* ConvolutionFloatFactory.cpp */; }; - 48887652215B639F0079B12E /* ConvolutionInt8Executor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887482215B639D0079B12E /* ConvolutionInt8Executor.hpp */; }; - 48887653215B639F0079B12E /* ResizeFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887483215B639D0079B12E /* ResizeFunction.cpp */; }; - 48887654215B639F0079B12E /* Convolution3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887484215B639D0079B12E /* Convolution3x3.hpp */; }; - 48887655215B639F0079B12E /* ConvOpt.h in Headers */ = {isa = PBXBuildFile; fileRef = 48887485215B639D0079B12E /* ConvOpt.h */; }; - 48887657215B639F0079B12E /* ConvolutionWinograd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887487215B639D0079B12E /* ConvolutionWinograd.hpp */; }; - 48887659215B639F0079B12E /* Int8FunctionsOpt.h in Headers */ = {isa = PBXBuildFile; fileRef = 48887489215B639D0079B12E /* Int8FunctionsOpt.h */; }; - 4888765A215B639F0079B12E /* ConvolutionTiledExecutor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888748A215B639D0079B12E /* ConvolutionTiledExecutor.cpp */; }; - 4888765C215B639F0079B12E /* CPUNormalize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888748C215B639E0079B12E /* CPUNormalize.cpp */; }; - 4888765F215B639F0079B12E /* CPULSTM.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888748F215B639E0079B12E /* CPULSTM.cpp */; }; - 48887661215B639F0079B12E /* CPUDeconvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887491215B639E0079B12E /* CPUDeconvolutionDepthwise.cpp */; }; - 48887662215B639F0079B12E /* CPUPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 
48887492215B639E0079B12E /* CPUPool.hpp */; }; - 48887664215B639F0079B12E /* CPUSpatialProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887494215B639E0079B12E /* CPUSpatialProduct.cpp */; }; - 48887665215B639F0079B12E /* CPURank.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887495215B639E0079B12E /* CPURank.cpp */; }; - 48887667215B639F0079B12E /* CPUShape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887497215B639E0079B12E /* CPUShape.hpp */; }; - 48887668215B639F0079B12E /* CPUInnerProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887498215B639E0079B12E /* CPUInnerProduct.cpp */; }; - 48887669215B639F0079B12E /* CPUBackend.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887499215B639E0079B12E /* CPUBackend.cpp */; }; - 4888766A215B639F0079B12E /* CPUConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888749A215B639E0079B12E /* CPUConcat.hpp */; }; - 4888766B215B639F0079B12E /* CPUNonMaxSuppressionV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888749B215B639E0079B12E /* CPUNonMaxSuppressionV2.cpp */; }; - 4888766C215B639F0079B12E /* CPUSliceTf.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888749C215B639E0079B12E /* CPUSliceTf.cpp */; }; - 4888766D215B639F0079B12E /* CPUPack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888749D215B639E0079B12E /* CPUPack.cpp */; }; - 4888766E215B639F0079B12E /* CPUGatherV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888749E215B639E0079B12E /* CPUGatherV2.cpp */; }; - 4888766F215B639F0079B12E /* CPUSqueeze.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888749F215B639E0079B12E /* CPUSqueeze.cpp */; }; - 48887671215B639F0079B12E /* CPUArgMax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488874A1215B639E0079B12E /* CPUArgMax.hpp */; }; - 48887672215B639F0079B12E /* CPUSelu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488874A2215B639E0079B12E /* CPUSelu.hpp */; }; - 48887673215B639F0079B12E /* CPUCropAndResize.hpp in Headers */ = {isa = 
PBXBuildFile; fileRef = 488874A3215B639E0079B12E /* CPUCropAndResize.hpp */; }; - 48887675215B639F0079B12E /* CPUConvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 488874A5215B639E0079B12E /* CPUConvolutionDepthwise.cpp */; }; - 48887676215B639F0079B12E /* CPURange.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 488874A6215B639E0079B12E /* CPURange.cpp */; }; - 48887678215B639F0079B12E /* CPUTFQuantizedConv2D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 488874A8215B639E0079B12E /* CPUTFQuantizedConv2D.cpp */; }; - 48887679215B639F0079B12E /* CPUScale.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488874A9215B639E0079B12E /* CPUScale.hpp */; }; - 4888767A215B639F0079B12E /* CPUTensorConvert.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 488874AA215B639E0079B12E /* CPUTensorConvert.cpp */; }; - 4888767B215B639F0079B12E /* CPUTile.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488874AB215B639E0079B12E /* CPUTile.hpp */; }; - 4888767C215B639F0079B12E /* CPUSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 488874AC215B639E0079B12E /* CPUSoftmax.hpp */; }; - 4888767D215B639F0079B12E /* MNNAsmGlobal.h in Headers */ = {isa = PBXBuildFile; fileRef = 488874AE215B639E0079B12E /* MNNAsmGlobal.h */; }; - 4888767E215B639F0079B12E /* MNNFloat2Int8.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874B0215B639E0079B12E /* MNNFloat2Int8.S */; }; - 4888767F215B639F0079B12E /* MNNGemmFloatUnit_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874B1215B639E0079B12E /* MNNGemmFloatUnit_4.S */; }; - 48887680215B639F0079B12E /* MNNConvRunForLineDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874B2215B639E0079B12E /* MNNConvRunForLineDepthWiseInt8.S */; }; - 48887681215B639F0079B12E /* MNNGemmInt16to32_4x4_Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874B3215B639E0079B12E /* MNNGemmInt16to32_4x4_Common.S */; }; - 48887682215B639F0079B12E /* MNNConvRunForUnitDepthWiseInt8.S in Sources */ = {isa = 
PBXBuildFile; fileRef = 488874B4215B639E0079B12E /* MNNConvRunForUnitDepthWiseInt8.S */; }; - 48887683215B639F0079B12E /* MNNMinFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874B5215B639E0079B12E /* MNNMinFloat.S */; }; - 48887684215B639F0079B12E /* MNNBilinearProcC1.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874B6215B639E0079B12E /* MNNBilinearProcC1.S */; }; - 48887685215B639F0079B12E /* MNNMaxFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874B7215B639E0079B12E /* MNNMaxFloat.S */; }; - 48887689215B639F0079B12E /* MNNCubicLineC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874BB215B639E0079B12E /* MNNCubicLineC4.S */; }; - 48887690215B639F0079B12E /* MNNDeconvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874C2215B639E0079B12E /* MNNDeconvRunForUnitDepthWise.S */; }; - 48887696215B639F0079B12E /* MNNWinogradMatrixProductLeft.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874C8215B639E0079B12E /* MNNWinogradMatrixProductLeft.S */; }; - 48887699215B639F0079B12E /* MNNConvRunForLineDepthwise.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874CB215B639E0079B12E /* MNNConvRunForLineDepthwise.S */; }; - 4888769A215B639F0079B12E /* MNNConvSlideWindowMiddle.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874CC215B639E0079B12E /* MNNConvSlideWindowMiddle.S */; }; - 4888769B215B639F0079B12E /* MNNScaleBias2FloatC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874CD215B639E0079B12E /* MNNScaleBias2FloatC4.S */; }; - 4888769D215B639F0079B12E /* MNNAddBiasRelu6.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874CF215B639E0079B12E /* MNNAddBiasRelu6.S */; }; - 4888769E215B639F0079B12E /* MNNGemmInt16to32_4x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874D0215B639E0079B12E /* MNNGemmInt16to32_4x4_Unit.S */; }; - 4888769F215B639F0079B12E /* MNNScaleAndAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874D1215B639E0079B12E /* MNNScaleAndAddBias.S */; }; - 
488876A2215B639F0079B12E /* MNNGemmFloatCommon_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874D4215B639E0079B12E /* MNNGemmFloatCommon_4.S */; }; - 488876A3215B639F0079B12E /* MNNCoefLine.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874D5215B639E0079B12E /* MNNCoefLine.S */; }; - 488876A5215B639F0079B12E /* MNNWinogradMatrixProductRight.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874D7215B639E0079B12E /* MNNWinogradMatrixProductRight.S */; }; - 488876A7215B639F0079B12E /* MNNFloat2Int8.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874DA215B639E0079B12E /* MNNFloat2Int8.S */; }; - 488876A8215B639F0079B12E /* MNNGemmFloatUnit_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874DB215B639E0079B12E /* MNNGemmFloatUnit_4.S */; }; - 488876A9215B639F0079B12E /* MNNConvRunForLineDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874DC215B639E0079B12E /* MNNConvRunForLineDepthWiseInt8.S */; }; - 488876AA215B639F0079B12E /* MNNGemmInt16to32_4x4_Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874DD215B639E0079B12E /* MNNGemmInt16to32_4x4_Common.S */; }; - 488876AB215B639F0079B12E /* MNNConvRunForUnitDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874DE215B639E0079B12E /* MNNConvRunForUnitDepthWiseInt8.S */; }; - 488876AC215B639F0079B12E /* MNNMinFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874DF215B639E0079B12E /* MNNMinFloat.S */; }; - 488876AD215B639F0079B12E /* MNNBilinearProcC1.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874E0215B639E0079B12E /* MNNBilinearProcC1.S */; }; - 488876AE215B639F0079B12E /* MNNMaxFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874E1215B639E0079B12E /* MNNMaxFloat.S */; }; - 488876B2215B639F0079B12E /* MNNCubicLineC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874E5215B639E0079B12E /* MNNCubicLineC4.S */; }; - 488876B9215B639F0079B12E /* MNNDeconvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 
488874EC215B639E0079B12E /* MNNDeconvRunForUnitDepthWise.S */; }; - 488876C0215B639F0079B12E /* MNNWinogradMatrixProductLeft.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874F3215B639E0079B12E /* MNNWinogradMatrixProductLeft.S */; }; - 488876C4215B639F0079B12E /* MNNConvRunForLineDepthwise.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874F7215B639E0079B12E /* MNNConvRunForLineDepthwise.S */; }; - 488876C5215B639F0079B12E /* MNNConvSlideWindowMiddle.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874F8215B639E0079B12E /* MNNConvSlideWindowMiddle.S */; }; - 488876C6215B639F0079B12E /* MNNScaleBias2FloatC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874F9215B639E0079B12E /* MNNScaleBias2FloatC4.S */; }; - 488876C8215B639F0079B12E /* MNNAddBiasRelu6.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874FB215B639E0079B12E /* MNNAddBiasRelu6.S */; }; - 488876C9215B639F0079B12E /* MNNGemmInt16to32_4x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874FC215B639E0079B12E /* MNNGemmInt16to32_4x4_Unit.S */; }; - 488876CA215B639F0079B12E /* MNNScaleAndAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 488874FD215B639E0079B12E /* MNNScaleAndAddBias.S */; }; - 488876CD215B639F0079B12E /* MNNGemmFloatCommon_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48887500215B639E0079B12E /* MNNGemmFloatCommon_4.S */; }; - 488876CE215B639F0079B12E /* MNNCoefLine.S in Sources */ = {isa = PBXBuildFile; fileRef = 48887501215B639E0079B12E /* MNNCoefLine.S */; }; - 488876D0215B639F0079B12E /* MNNWinogradMatrixProductRight.S in Sources */ = {isa = PBXBuildFile; fileRef = 48887503215B639E0079B12E /* MNNWinogradMatrixProductRight.S */; }; - 488876D1215B639F0079B12E /* CPUQuantizedAvgPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887504215B639E0079B12E /* CPUQuantizedAvgPool.cpp */; }; - 488876D2215B639F0079B12E /* CPUConvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887505215B639E0079B12E /* CPUConvolution.cpp */; }; - 
488876D3215B639F0079B12E /* CPUEltwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887506215B639E0079B12E /* CPUEltwise.cpp */; }; - 488876D5215B639F0079B12E /* CPUCast.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887508215B639E0079B12E /* CPUCast.hpp */; }; - 488876D6215B639F0079B12E /* CPUResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887509215B639E0079B12E /* CPUResize.hpp */; }; - 488876D7215B639F0079B12E /* CPUGather.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888750A215B639E0079B12E /* CPUGather.hpp */; }; - 488876D8215B639F0079B12E /* CPUAsString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888750B215B639E0079B12E /* CPUAsString.cpp */; }; - 488876D9215B639F0079B12E /* CPUTanh.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888750C215B639E0079B12E /* CPUTanh.cpp */; }; - 488876DA215B639F0079B12E /* CPUProposal.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888750D215B639E0079B12E /* CPUProposal.hpp */; }; - 488876DB215B639F0079B12E /* CPUInterp.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 4888750E215B639E0079B12E /* CPUInterp.hpp */; }; - 488876DC215B639F0079B12E /* CPUReduceJoin.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888750F215B639E0079B12E /* CPUReduceJoin.cpp */; }; - 488876DD215B639F0079B12E /* CPUConst.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887510215B639E0079B12E /* CPUConst.hpp */; }; - 488876DE215B639F0079B12E /* CPUPriorbox.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887511215B639E0079B12E /* CPUPriorbox.cpp */; }; - 488876DF215B639F0079B12E /* CPUSize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887512215B639E0079B12E /* CPUSize.hpp */; }; - 488876E0215B639F0079B12E /* CPUQuantizedReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887513215B639E0079B12E /* CPUQuantizedReshape.cpp */; }; - 488876E1215B639F0079B12E /* CPUDetectionOutput.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887514215B639E0079B12E /* CPUDetectionOutput.hpp 
*/; }; - 488876E2215B639F0079B12E /* CPUMatMul.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887515215B639E0079B12E /* CPUMatMul.hpp */; }; - 488876E4215B639F0079B12E /* CPUPermute.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887517215B639E0079B12E /* CPUPermute.cpp */; }; 48887728215B639F0079B12E /* WingoradGenerater.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48887567215B639E0079B12E /* WingoradGenerater.cpp */; }; - 48887729215B639F0079B12E /* Matrix.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887568215B639E0079B12E /* Matrix.hpp */; }; - 4888772A215B639F0079B12E /* WingoradGenerater.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48887569215B639E0079B12E /* WingoradGenerater.hpp */; }; 4888772B215B639F0079B12E /* Matrix.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4888756A215B639E0079B12E /* Matrix.cpp */; }; - 4888773E215CD3BF0079B12E /* MNNBlitC3ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 4888773D215CD3BF0079B12E /* MNNBlitC3ToFloatRGBA.S */; }; - 48887740215CD3D00079B12E /* MNNBlitC1ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 4888773F215CD3D00079B12E /* MNNBlitC1ToFloatRGBA.S */; }; - 48887743215CFF7B0079B12E /* MNNBlitC3ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 48887741215CFF7B0079B12E /* MNNBlitC3ToFloatRGBA.S */; }; - 48887744215CFF7B0079B12E /* MNNBlitC1ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 48887742215CFF7B0079B12E /* MNNBlitC1ToFloatRGBA.S */; }; - 489BFA2C230E3D1F00F6B785 /* FileLoader.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 489BFA2A230E3D1F00F6B785 /* FileLoader.hpp */; }; - 489BFA2D230E3D1F00F6B785 /* FileLoader.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 489BFA2B230E3D1F00F6B785 /* FileLoader.cpp */; }; - 48A8A60221CDF55E00C2B9A7 /* MNNSamplerC1NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A60121CDF55E00C2B9A7 /* MNNSamplerC1NearestOpt.S */; }; - 48A8A60521CDF87000C2B9A7 /* MNNSamplerC1NearestOpt.S 
in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A60321CDF86F00C2B9A7 /* MNNSamplerC1NearestOpt.S */; }; - 48A8A60621CDF87000C2B9A7 /* MNNSamplerC4NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A60421CDF86F00C2B9A7 /* MNNSamplerC4NearestOpt.S */; }; - 48A8A60F21D101A700C2B9A7 /* ImageSampler.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48A8A60821D101A700C2B9A7 /* ImageSampler.hpp */; }; - 48A8A61021D101A700C2B9A7 /* ImageBlitter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48A8A60921D101A700C2B9A7 /* ImageBlitter.hpp */; }; - 48A8A61121D101A700C2B9A7 /* ImageFloatBlitter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48A8A60A21D101A700C2B9A7 /* ImageFloatBlitter.hpp */; }; 48A8A61221D101A700C2B9A7 /* ImageProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A60B21D101A700C2B9A7 /* ImageProcess.cpp */; }; 48A8A61321D101A700C2B9A7 /* ImageSampler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A60C21D101A700C2B9A7 /* ImageSampler.cpp */; }; 48A8A61421D101A700C2B9A7 /* ImageBlitter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A60D21D101A700C2B9A7 /* ImageBlitter.cpp */; }; 48A8A61521D101A700C2B9A7 /* ImageFloatBlitter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A60E21D101A700C2B9A7 /* ImageFloatBlitter.cpp */; }; - 48A8A61921D101DE00C2B9A7 /* SkNx_neon.h in Headers */ = {isa = PBXBuildFile; fileRef = 48A8A61621D101DD00C2B9A7 /* SkNx_neon.h */; }; 48A8A61A21D101DE00C2B9A7 /* Matrix_CV.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A61721D101DD00C2B9A7 /* Matrix_CV.cpp */; }; - 48A8A61B21D101DE00C2B9A7 /* SkNx.h in Headers */ = {isa = PBXBuildFile; fileRef = 48A8A61821D101DE00C2B9A7 /* SkNx.h */; }; - 48A8A61D21D20BE700C2B9A7 /* MNNNV21ToRGBUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A61C21D20BE700C2B9A7 /* MNNNV21ToRGBUnit.S */; }; - 48A8A61F21D235DF00C2B9A7 /* MNNNV21ToRGBUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A61E21D235DF00C2B9A7 /* MNNNV21ToRGBUnit.S */; 
}; - 48A8A62121D3569800C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A62021D3569800C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S */; }; - 48A8A62321D37FB500C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A62221D37FB500C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S */; }; - 48A8A62621D47B5A00C2B9A7 /* OptimizedComputer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48A8A62421D47B5A00C2B9A7 /* OptimizedComputer.hpp */; }; - 48A8A62721D47B5A00C2B9A7 /* OptimizedComputer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A62521D47B5A00C2B9A7 /* OptimizedComputer.cpp */; }; - 48A8A62921D5FE1E00C2B9A7 /* MNNNV21ToRGBAUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A62821D5FE1D00C2B9A7 /* MNNNV21ToRGBAUnit.S */; }; - 48A8A62B21D5FE3100C2B9A7 /* MNNNV21ToRGBAUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A62A21D5FE3100C2B9A7 /* MNNNV21ToRGBAUnit.S */; }; - 48A8A63721D8A43D00C2B9A7 /* BufferAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48A8A63621D8A43D00C2B9A7 /* BufferAllocator.cpp */; }; - 48AE9E9F2211950B009DB6F4 /* StrassenMatmulComputor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9E9D2211950B009DB6F4 /* StrassenMatmulComputor.cpp */; }; - 48AE9EA02211950B009DB6F4 /* StrassenMatmulComputor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48AE9E9E2211950B009DB6F4 /* StrassenMatmulComputor.hpp */; }; - 48AE9EA32212B2C2009DB6F4 /* Convolution1x1Strassen.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9EA12212B2C2009DB6F4 /* Convolution1x1Strassen.cpp */; }; - 48AE9EA42212B2C2009DB6F4 /* Convolution1x1Strassen.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48AE9EA22212B2C2009DB6F4 /* Convolution1x1Strassen.hpp */; }; - 48AE9EA62212D3F9009DB6F4 /* MNNMatrixSub.S in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9EA52212D3F9009DB6F4 /* MNNMatrixSub.S */; }; - 48AE9EA82212D403009DB6F4 /* MNNMatrixAdd.S in Sources */ = {isa = 
PBXBuildFile; fileRef = 48AE9EA72212D403009DB6F4 /* MNNMatrixAdd.S */; }; - 48AE9EAB2212E94F009DB6F4 /* MNNMatrixAdd.S in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9EA92212E94F009DB6F4 /* MNNMatrixAdd.S */; }; - 48AE9EAC2212E94F009DB6F4 /* MNNMatrixSub.S in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9EAA2212E94F009DB6F4 /* MNNMatrixSub.S */; }; - 48AE9EAE22151E20009DB6F4 /* MNNStrassenMergeCFunction.S in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9EAD22151E20009DB6F4 /* MNNStrassenMergeCFunction.S */; }; - 48AE9EB0221539C3009DB6F4 /* MNNStrassenMergeCFunction.S in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9EAF221539C2009DB6F4 /* MNNStrassenMergeCFunction.S */; }; - 48AE9EB222154C9D009DB6F4 /* MNNGemmFloatOne_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9EB122154C9D009DB6F4 /* MNNGemmFloatOne_4.S */; }; - 48AE9EB42215628E009DB6F4 /* MNNGemmFloatOne_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48AE9EB32215628D009DB6F4 /* MNNGemmFloatOne_4.S */; }; - 48B904A222953DFF003116BB /* CPUSelect.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48B904A022953DFF003116BB /* CPUSelect.cpp */; }; - 48B904A322953DFF003116BB /* CPUSelect.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48B904A122953DFF003116BB /* CPUSelect.hpp */; }; - 48B904A622953E0F003116BB /* CPUZeroLike.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48B904A422953E0F003116BB /* CPUZeroLike.cpp */; }; - 48B904A722953E0F003116BB /* CPUZeroLike.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48B904A522953E0F003116BB /* CPUZeroLike.hpp */; }; - 48B904A9229550CF003116BB /* ShapeSelect.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48B904A8229550CF003116BB /* ShapeSelect.cpp */; }; - 48BF218221A3E4C300AFF78E /* MNNSamplerC4BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 48BF218121A3E4C300AFF78E /* MNNSamplerC4BilinearOpt.S */; }; - 48BF218421A4073500AFF78E /* MNNSamplerC4BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 
48BF218321A4073500AFF78E /* MNNSamplerC4BilinearOpt.S */; }; - 48BF218621A4257500AFF78E /* MNNSamplerC1BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 48BF218521A4257500AFF78E /* MNNSamplerC1BilinearOpt.S */; }; - 48BF218821A4380A00AFF78E /* MNNSamplerC1BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 48BF218721A4380A00AFF78E /* MNNSamplerC1BilinearOpt.S */; }; - 48BF21BE21ABBDA300AFF78E /* MNNLoadU8AndSum.S in Sources */ = {isa = PBXBuildFile; fileRef = 48BF21BD21ABBDA300AFF78E /* MNNLoadU8AndSum.S */; }; - 48BF21C221ABC45100AFF78E /* MNNLoadU8AndSum.S in Sources */ = {isa = PBXBuildFile; fileRef = 48BF21C021ABC45100AFF78E /* MNNLoadU8AndSum.S */; }; - 48BF21F421CA43AE00AFF78E /* MNNSamplerC4NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 48BF21F321CA43AE00AFF78E /* MNNSamplerC4NearestOpt.S */; }; 48C054882201996200E91945 /* MetalConvolutionWinograd.mm in Sources */ = {isa = PBXBuildFile; fileRef = 48C054862201996200E91945 /* MetalConvolutionWinograd.mm */; }; - 48C054892201996200E91945 /* MetalConvolutionWinograd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48C054872201996200E91945 /* MetalConvolutionWinograd.hpp */; }; - 48C054922205B91A00E91945 /* MNNPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054912205B91A00E91945 /* MNNPackC4.S */; }; - 48C054942205B94400E91945 /* MNNUnPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054932205B94400E91945 /* MNNUnPackC4.S */; }; - 48C054962205B9A500E91945 /* MNNPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054952205B9A500E91945 /* MNNPackC4.S */; }; - 48C054982205B9B400E91945 /* MNNUnPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054972205B9B400E91945 /* MNNUnPackC4.S */; }; - 48C0549A2205BB8400E91945 /* MNNConvSlideWindowBorder.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054992205BB8400E91945 /* MNNConvSlideWindowBorder.S */; }; - 48C0549C2205BC8C00E91945 /* MNNConvSlideWindowBorder.S in Sources */ = {isa = PBXBuildFile; 
fileRef = 48C0549B2205BC8C00E91945 /* MNNConvSlideWindowBorder.S */; }; - 48C0549F22081AC200E91945 /* MNNAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C0549E22081AC200E91945 /* MNNAddBias.S */; }; - 48C054A122081B5B00E91945 /* MNNReluWithSlope.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054A022081B5B00E91945 /* MNNReluWithSlope.S */; }; - 48C054A322081C9B00E91945 /* MNNAddBiasRelu.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054A222081C9B00E91945 /* MNNAddBiasRelu.S */; }; - 48C054A522081CDA00E91945 /* MNNReluWithSlopeChannel.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054A422081CDA00E91945 /* MNNReluWithSlopeChannel.S */; }; - 48C054A7220A745900E91945 /* MNNAddBiasRelu.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054A6220A745900E91945 /* MNNAddBiasRelu.S */; }; - 48C054A9220A749100E91945 /* MNNAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054A8220A749100E91945 /* MNNAddBias.S */; }; - 48C054AB220A74B200E91945 /* MNNReluWithSlope.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054AA220A74B200E91945 /* MNNReluWithSlope.S */; }; - 48C054AD220A74D800E91945 /* MNNReluWithSlopeChannel.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054AC220A74D800E91945 /* MNNReluWithSlopeChannel.S */; }; - 48C054AF220A758B00E91945 /* MNNCubicSampleC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054AE220A758B00E91945 /* MNNCubicSampleC4.S */; }; - 48C054B1220A762C00E91945 /* MNNConvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054B0220A762C00E91945 /* MNNConvRunForUnitDepthWise.S */; }; - 48C054B3220A7A4600E91945 /* MNNCubicSampleC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054B2220A7A4600E91945 /* MNNCubicSampleC4.S */; }; - 48C054B5220A7A9600E91945 /* MNNConvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C054B4220A7A9600E91945 /* MNNConvRunForUnitDepthWise.S */; }; - 48C5E79B22FBF87700EAC2A6 /* ShapeSpaceToDepth.cpp in Sources */ = {isa = PBXBuildFile; 
fileRef = 48C5E79922FBF87600EAC2A6 /* ShapeSpaceToDepth.cpp */; }; - 48C5E79C22FBF87700EAC2A6 /* ShapeDepthToSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48C5E79A22FBF87600EAC2A6 /* ShapeDepthToSpace.cpp */; }; - 48C5E79E2306C84400EAC2A6 /* MNNGemmint8to32_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C5E79D2306C84400EAC2A6 /* MNNGemmint8to32_8x4_Unit.S */; }; - 48C5E7A02306C84D00EAC2A6 /* MNNGemmint8to32_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48C5E79F2306C84D00EAC2A6 /* MNNGemmint8to32_8x4_Unit.S */; }; - 48DA297D21F1F7CF00E3BEB2 /* MNNExpC8.S in Sources */ = {isa = PBXBuildFile; fileRef = 48DA297C21F1F7CF00E3BEB2 /* MNNExpC8.S */; }; - 48DA297F21F2051800E3BEB2 /* MNNExpC8.S in Sources */ = {isa = PBXBuildFile; fileRef = 48DA297E21F2051800E3BEB2 /* MNNExpC8.S */; }; - 48EB45E62254B9D2006C2322 /* ConvolutionDepthwise3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48EB45E42254B9D2006C2322 /* ConvolutionDepthwise3x3.cpp */; }; - 48EB45E72254B9D2006C2322 /* ConvolutionDepthwise3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48EB45E52254B9D2006C2322 /* ConvolutionDepthwise3x3.hpp */; }; - 48EB45E922559525006C2322 /* MNNConvDwF23MulTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48EB45E822559525006C2322 /* MNNConvDwF23MulTransUnit.S */; }; - 48EB45EB2255B70C006C2322 /* MNNConvDwF23SourceTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48EB45EA2255B70C006C2322 /* MNNConvDwF23SourceTransUnit.S */; }; - 48EB45EE2255D271006C2322 /* MNNConvDwF23MulTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48EB45EC2255D270006C2322 /* MNNConvDwF23MulTransUnit.S */; }; - 48EB45EF2255D271006C2322 /* MNNConvDwF23SourceTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 48EB45ED2255D270006C2322 /* MNNConvDwF23SourceTransUnit.S */; }; - 71E8789F2203E88500268E24 /* MNNNV21ToBGRUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 71E8789E2203E88500268E24 /* MNNNV21ToBGRUnit.S */; }; - 
71E878A32203E9D200268E24 /* MNNNV21ToBGRUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 71E878A12203E9D200268E24 /* MNNNV21ToBGRUnit.S */; }; + 48FA474423AA127B00172C3B /* MergeOptimizer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48FA473C23AA127A00172C3B /* MergeOptimizer.hpp */; }; + 48FA474523AA127B00172C3B /* Executor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48FA473D23AA127B00172C3B /* Executor.cpp */; }; + 48FA474623AA127B00172C3B /* NeuralNetWorkOp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48FA473E23AA127B00172C3B /* NeuralNetWorkOp.cpp */; }; + 48FA474723AA127B00172C3B /* Optimizer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48FA473F23AA127B00172C3B /* Optimizer.cpp */; }; + 48FA474823AA127B00172C3B /* Expr.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48FA474023AA127B00172C3B /* Expr.cpp */; }; + 48FA474923AA127B00172C3B /* MathOp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48FA474123AA127B00172C3B /* MathOp.cpp */; }; + 48FA474A23AA127B00172C3B /* Utils.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48FA474223AA127B00172C3B /* Utils.cpp */; }; + 48FA474B23AA127B00172C3B /* Utils.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 48FA474323AA127B00172C3B /* Utils.hpp */; }; + 48FA474D23AA136300172C3B /* MergeOptimizer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 48FA474C23AA136300172C3B /* MergeOptimizer.cpp */; }; 9200049921EDBDF600BCE892 /* TensorTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9200045D21EDBDF600BCE892 /* TensorTest.cpp */; }; 9200049A21EDBDF600BCE892 /* ImageProcessTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9200045F21EDBDF600BCE892 /* ImageProcessTest.cpp */; }; 9200049B21EDBDF600BCE892 /* MatrixTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9200046021EDBDF600BCE892 /* MatrixTest.cpp */; }; @@ -534,240 +816,548 @@ 920004D421EDBE1100BCE892 /* TestUtils.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9200045721EDBCF700BCE892 /* TestUtils.mm */; }; 
920004D821EDC30E00BCE892 /* MetalDequantize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 920004D521EDC30E00BCE892 /* MetalDequantize.mm */; }; 920004D921EDC30E00BCE892 /* MetalDequantize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 920004D621EDC30E00BCE892 /* MetalDequantize.metal */; }; - 920004DA21EDC30E00BCE892 /* MetalDequantize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 920004D721EDC30E00BCE892 /* MetalDequantize.hpp */; }; - 921722EE21DDF63A004583BF /* MNN_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 921722CF21DDF63A004583BF /* MNN_generated.h */; }; - 921722EF21DDF63A004583BF /* Type_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 921722D021DDF63A004583BF /* Type_generated.h */; }; - 921722F021DDF63A004583BF /* GpuLibrary_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 921722D121DDF63A004583BF /* GpuLibrary_generated.h */; }; - 921722F221DDF63A004583BF /* Tensor_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 921722D321DDF63A004583BF /* Tensor_generated.h */; }; - 921722F421DDF63A004583BF /* TFQuantizeOp_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 921722D521DDF63A004583BF /* TFQuantizeOp_generated.h */; }; 9223E10F21D327F40067544A /* MetalSqueeze.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9223E10D21D327F40067544A /* MetalSqueeze.mm */; }; - 9223E11021D327F40067544A /* MetalSqueeze.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9223E10E21D327F40067544A /* MetalSqueeze.hpp */; }; 9223E11821D34BE40067544A /* MetalSpaceToBatchND.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9223E11621D34BE40067544A /* MetalSpaceToBatchND.mm */; }; - 9223E11921D34BE40067544A /* MetalSpaceToBatchND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9223E11721D34BE40067544A /* MetalSpaceToBatchND.hpp */; }; 9223E11B21D34C260067544A /* MetalSpaceToBatchND.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9223E11A21D34C260067544A /* MetalSpaceToBatchND.metal */; }; - 
9223E12021D34C6B0067544A /* CPUBatchToSpaceND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9223E11C21D34C6B0067544A /* CPUBatchToSpaceND.hpp */; }; - 9223E12121D34C6B0067544A /* CPUBatchToSpaceND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9223E11D21D34C6B0067544A /* CPUBatchToSpaceND.cpp */; }; - 9223E12221D34C6B0067544A /* CPUSpaceToBatchND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 9223E11E21D34C6B0067544A /* CPUSpaceToBatchND.cpp */; }; - 9223E12321D34C6B0067544A /* CPUSpaceToBatchND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9223E11F21D34C6B0067544A /* CPUSpaceToBatchND.hpp */; }; 9223E12921D3755F0067544A /* MetalBatchToSpaceND.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9223E12721D3755F0067544A /* MetalBatchToSpaceND.mm */; }; - 9223E12A21D3755F0067544A /* MetalBatchToSpaceND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9223E12821D3755F0067544A /* MetalBatchToSpaceND.hpp */; }; 9223E12C21D3756B0067544A /* MetalBatchToSpaceND.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9223E12B21D3756B0067544A /* MetalBatchToSpaceND.metal */; }; 92256935219D14CD00F251E2 /* MetalSliceTF.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92256933219D14CD00F251E2 /* MetalSliceTF.mm */; }; - 92256936219D14CD00F251E2 /* MetalSliceTF.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92256934219D14CD00F251E2 /* MetalSliceTF.hpp */; }; 92256938219D150900F251E2 /* MetalSliceTF.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92256937219D150900F251E2 /* MetalSliceTF.metal */; }; 92256947219D698100F251E2 /* MetalRank.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92256945219D698100F251E2 /* MetalRank.mm */; }; - 92256948219D698100F251E2 /* MetalRank.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92256946219D698100F251E2 /* MetalRank.hpp */; }; 9225694A219D698900F251E2 /* MetalRank.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92256949219D698900F251E2 /* MetalRank.metal */; }; 92256950219D6E0200F251E2 /* MetalRange.mm in 
Sources */ = {isa = PBXBuildFile; fileRef = 9225694E219D6E0200F251E2 /* MetalRange.mm */; }; - 92256951219D6E0200F251E2 /* MetalRange.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9225694F219D6E0200F251E2 /* MetalRange.hpp */; }; 92256953219D6E1000F251E2 /* MetalRange.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92256952219D6E1000F251E2 /* MetalRange.metal */; }; 92351C8721992AB2002CA341 /* MetalQuantizedAdd.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92351C8521992AB2002CA341 /* MetalQuantizedAdd.mm */; }; - 92351C8821992AB2002CA341 /* MetalQuantizedAdd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92351C8621992AB2002CA341 /* MetalQuantizedAdd.hpp */; }; 92351C8A21992AC6002CA341 /* MetalQuantizedAdd.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92351C8921992AC6002CA341 /* MetalQuantizedAdd.metal */; }; 92369E62222544DE009D3A05 /* MetalConvolutionGEMM.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92369E61222544DD009D3A05 /* MetalConvolutionGEMM.mm */; }; 92369E64222544FE009D3A05 /* MetalConvolutionGEMM.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92369E63222544FE009D3A05 /* MetalConvolutionGEMM.metal */; }; 923B7F8921A653AB002AFCE0 /* MetalGather.mm in Sources */ = {isa = PBXBuildFile; fileRef = 923B7F8721A653AB002AFCE0 /* MetalGather.mm */; }; - 923B7F8A21A653AB002AFCE0 /* MetalGather.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 923B7F8821A653AB002AFCE0 /* MetalGather.hpp */; }; 923B7F8C21A653BB002AFCE0 /* MetalGather.metal in Sources */ = {isa = PBXBuildFile; fileRef = 923B7F8B21A653BB002AFCE0 /* MetalGather.metal */; }; 923B7F9221A68091002AFCE0 /* MetalGatherV2.mm in Sources */ = {isa = PBXBuildFile; fileRef = 923B7F9021A68091002AFCE0 /* MetalGatherV2.mm */; }; - 923B7F9321A68091002AFCE0 /* MetalGatherV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 923B7F9121A68091002AFCE0 /* MetalGatherV2.hpp */; }; 923B7F9521A680A1002AFCE0 /* MetalGatherV2.metal in Sources */ = {isa = PBXBuildFile; fileRef = 
923B7F9421A680A1002AFCE0 /* MetalGatherV2.metal */; }; 923B7F9B21A69E2E002AFCE0 /* MetalQuantizedReshape.mm in Sources */ = {isa = PBXBuildFile; fileRef = 923B7F9921A69E2E002AFCE0 /* MetalQuantizedReshape.mm */; }; - 923B7F9C21A69E2E002AFCE0 /* MetalQuantizedReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 923B7F9A21A69E2E002AFCE0 /* MetalQuantizedReshape.hpp */; }; 923B7FA421A6C92F002AFCE0 /* MetalCropAndResize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 923B7FA221A6C92F002AFCE0 /* MetalCropAndResize.mm */; }; - 923B7FA521A6C92F002AFCE0 /* MetalCropAndResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 923B7FA321A6C92F002AFCE0 /* MetalCropAndResize.hpp */; }; 923B7FA721A6C940002AFCE0 /* MetalCropAndResize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 923B7FA621A6C940002AFCE0 /* MetalCropAndResize.metal */; }; 9243106C2239FE0B0016DA25 /* MetalSize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9243106A2239FE0A0016DA25 /* MetalSize.mm */; }; - 9243106D2239FE0B0016DA25 /* MetalSize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9243106B2239FE0B0016DA25 /* MetalSize.hpp */; }; 9243106F2239FE190016DA25 /* MetalSize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9243106E2239FE190016DA25 /* MetalSize.metal */; }; 924F131921A81C74006D46A4 /* MetalTranspose.mm in Sources */ = {isa = PBXBuildFile; fileRef = 924F131721A81C74006D46A4 /* MetalTranspose.mm */; }; - 924F131A21A81C74006D46A4 /* MetalTranspose.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 924F131821A81C74006D46A4 /* MetalTranspose.hpp */; }; 924F131C21A81C80006D46A4 /* MetalTranspose.metal in Sources */ = {isa = PBXBuildFile; fileRef = 924F131B21A81C80006D46A4 /* MetalTranspose.metal */; }; 924F132221ABD470006D46A4 /* MetalQuantizedSoftmax.mm in Sources */ = {isa = PBXBuildFile; fileRef = 924F132021ABD470006D46A4 /* MetalQuantizedSoftmax.mm */; }; - 924F132321ABD470006D46A4 /* MetalQuantizedSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 
924F132121ABD470006D46A4 /* MetalQuantizedSoftmax.hpp */; }; 924F132521ABD47F006D46A4 /* MetalQuantizedSoftmax.metal in Sources */ = {isa = PBXBuildFile; fileRef = 924F132421ABD47F006D46A4 /* MetalQuantizedSoftmax.metal */; }; 924F132721ABEA28006D46A4 /* MetalFixedPoint.metal in Sources */ = {isa = PBXBuildFile; fileRef = 924F132621ABEA28006D46A4 /* MetalFixedPoint.metal */; }; 925702D021EF0F5300A2A3CA /* TensorUtilsTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925702CE21EF0F5300A2A3CA /* TensorUtilsTest.cpp */; }; 925702D221EF270D00A2A3CA /* BufferAllocatorTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925702D121EF270D00A2A3CA /* BufferAllocatorTest.cpp */; }; 925702F621EF604400A2A3CA /* SizeComputerTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925702F521EF604400A2A3CA /* SizeComputerTest.cpp */; }; - 92575979219EA07F00918499 /* MetalStridedSlice.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92575977219EA07F00918499 /* MetalStridedSlice.mm */; }; - 9257597A219EA07F00918499 /* MetalStridedSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92575978219EA07F00918499 /* MetalStridedSlice.hpp */; }; - 9257597C219EA08400918499 /* MetalStridedSlice.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9257597B219EA08400918499 /* MetalStridedSlice.metal */; }; - 9258013E2223B77C00555D43 /* MetalConvolutionDepthwise.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9258013C2223B77C00555D43 /* MetalConvolutionDepthwise.mm */; }; - 9258013F2223B77C00555D43 /* MetalConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9258013D2223B77C00555D43 /* MetalConvolutionDepthwise.hpp */; }; - 925801412223B79600555D43 /* MetalConvolutionDepthwise.metal in Sources */ = {isa = PBXBuildFile; fileRef = 925801402223B79600555D43 /* MetalConvolutionDepthwise.metal */; }; - 925801442223B8D100555D43 /* MetalConvolutionCommon.mm in Sources */ = {isa = PBXBuildFile; fileRef = 925801422223B8D100555D43 /* MetalConvolutionCommon.mm */; }; - 
925801452223B8D100555D43 /* MetalConvolutionCommon.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 925801432223B8D100555D43 /* MetalConvolutionCommon.hpp */; }; - 925A89122223951200D22428 /* MetalConvolutionActivation.metal in Sources */ = {isa = PBXBuildFile; fileRef = 925A89112223951200D22428 /* MetalConvolutionActivation.metal */; }; - 925A8915222395ED00D22428 /* MetalConvolution1x1.mm in Sources */ = {isa = PBXBuildFile; fileRef = 925A8913222395ED00D22428 /* MetalConvolution1x1.mm */; }; - 925A8916222395ED00D22428 /* MetalConvolution1x1.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 925A8914222395ED00D22428 /* MetalConvolution1x1.hpp */; }; - 925A89182223961F00D22428 /* MetalConvolution1x1.metal in Sources */ = {isa = PBXBuildFile; fileRef = 925A89172223961F00D22428 /* MetalConvolution1x1.metal */; }; - 925E87E0220447900000192E /* MetalConvolutionWinograd.metal in Sources */ = {isa = PBXBuildFile; fileRef = 925E87DF220447900000192E /* MetalConvolutionWinograd.metal */; }; 925F018921FF1E0B00E648A1 /* SqueezeNetTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925F018821FF1E0B00E648A1 /* SqueezeNetTest.cpp */; }; 925F018B21FF222E00E648A1 /* model in Resources */ = {isa = PBXBuildFile; fileRef = 925F018A21FF222E00E648A1 /* model */; }; 925F018D21FFF3D300E648A1 /* MobileNetTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925F018C21FFF3D300E648A1 /* MobileNetTest.cpp */; }; - 9260B27221A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9260B27021A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm */; }; - 9260B27321A7C5CD00D48C97 /* MetalQuantizedMaxPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9260B27121A7C5CD00D48C97 /* MetalQuantizedMaxPool.hpp */; }; - 9260B27521A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9260B27421A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal */; }; - 9260B27821A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm in Sources */ = {isa = 
PBXBuildFile; fileRef = 9260B27621A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm */; }; - 9260B27921A7C5EA00D48C97 /* MetalQuantizedAvgPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 9260B27721A7C5EA00D48C97 /* MetalQuantizedAvgPool.hpp */; }; - 9260B27B21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9260B27A21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal */; }; - 92682C4D2181729200B52B9D /* MetalTile.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92682C4B2181729200B52B9D /* MetalTile.mm */; }; - 92682C4E2181729200B52B9D /* MetalTile.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92682C4C2181729200B52B9D /* MetalTile.hpp */; }; - 92682C50218172A300B52B9D /* MetalTile.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92682C4F218172A300B52B9D /* MetalTile.metal */; }; - 92682C5321819BF100B52B9D /* MetalSeLU.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92682C5121819BF100B52B9D /* MetalSeLU.mm */; }; - 92682C5421819BF100B52B9D /* MetalSeLU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92682C5221819BF100B52B9D /* MetalSeLU.hpp */; }; - 92682C5621819BFA00B52B9D /* MetalSeLU.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92682C5521819BFA00B52B9D /* MetalSeLU.metal */; }; - 92682C5F2181A2EF00B52B9D /* MetalFill.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92682C5D2181A2EF00B52B9D /* MetalFill.mm */; }; - 92682C602181A2EF00B52B9D /* MetalFill.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92682C5E2181A2EF00B52B9D /* MetalFill.hpp */; }; - 92682C622181A2F900B52B9D /* MetalFill.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92682C612181A2F900B52B9D /* MetalFill.metal */; }; 9273AB4F1FE7BE4D00477B22 /* AppDelegate.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9273AB4E1FE7BE4D00477B22 /* AppDelegate.mm */; }; 9273AB571FE7BE4D00477B22 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 9273AB561FE7BE4D00477B22 /* Assets.xcassets */; }; 9273AB5D1FE7BE4D00477B22 /* 
main.m in Sources */ = {isa = PBXBuildFile; fileRef = 9273AB5C1FE7BE4D00477B22 /* main.m */; }; - 92921A86219C24CD00B063D1 /* MetalPack.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92921A84219C24CD00B063D1 /* MetalPack.mm */; }; - 92921A87219C24CD00B063D1 /* MetalPack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92921A85219C24CD00B063D1 /* MetalPack.hpp */; }; - 92921A89219C272B00B063D1 /* MetalPack.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92921A88219C272B00B063D1 /* MetalPack.metal */; }; - 92965EDE2175B3C300B86ABE /* MetalConcat.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92965EDD2175B3C300B86ABE /* MetalConcat.metal */; }; 92A4E0FC21F05A4F000B0919 /* MemoryUtilsTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92A4E0FB21F05A4F000B0919 /* MemoryUtilsTest.cpp */; }; 92A4E10321F07C76000B0919 /* AutoStorageTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92A4E10221F07C76000B0919 /* AutoStorageTest.cpp */; }; - 92A8D70021A40695009C2201 /* MetalTFQuantizedConv2D.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92A8D6FE21A40695009C2201 /* MetalTFQuantizedConv2D.mm */; }; - 92A8D70121A40695009C2201 /* MetalTFQuantizedConv2D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92A8D6FF21A40695009C2201 /* MetalTFQuantizedConv2D.hpp */; }; - 92A8D70321A406A8009C2201 /* MetalTFQuantizedConv2D.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92A8D70221A406A8009C2201 /* MetalTFQuantizedConv2D.metal */; }; - 92A8D70821A54087009C2201 /* MetalDefine.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92A8D70721A54087009C2201 /* MetalDefine.metal */; }; - 92C674F922549A1600011D33 /* MetalReLU6.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92C674F722549A1600011D33 /* MetalReLU6.mm */; }; - 92C674FA22549A1600011D33 /* MetalReLU6.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92C674F822549A1600011D33 /* MetalReLU6.hpp */; }; - 92C674FC22549A2500011D33 /* MetalReLU6.metal in Sources */ = {isa = PBXBuildFile; fileRef = 
92C674FB22549A2500011D33 /* MetalReLU6.metal */; }; 92C674FF22549C9900011D33 /* ReLU6Test.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92C674FD22549C9900011D33 /* ReLU6Test.cpp */; }; - 92D765992228176500178BE5 /* TensorflowOp_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 92D765962228176500178BE5 /* TensorflowOp_generated.h */; }; - 92D7659A2228176500178BE5 /* CaffeOp_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 92D765972228176500178BE5 /* CaffeOp_generated.h */; }; - 92D7659B2228176500178BE5 /* UserDefine_generated.h in Headers */ = {isa = PBXBuildFile; fileRef = 92D765982228176500178BE5 /* UserDefine_generated.h */; }; - 92D765AA2228188700178BE5 /* Pipeline.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D7659C2228188500178BE5 /* Pipeline.cpp */; }; - 92D765AB2228188700178BE5 /* WrapExecution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D7659D2228188500178BE5 /* WrapExecution.cpp */; }; - 92D765AD2228188700178BE5 /* Session.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92D7659F2228188500178BE5 /* Session.hpp */; }; - 92D765AE2228188700178BE5 /* Schedule.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92D765A02228188600178BE5 /* Schedule.hpp */; }; - 92D765AF2228188700178BE5 /* Interpreter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D765A12228188600178BE5 /* Interpreter.cpp */; }; - 92D765B02228188700178BE5 /* WrapExecution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92D765A22228188600178BE5 /* WrapExecution.hpp */; }; - 92D765B12228188700178BE5 /* BackendFactory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D765A32228188600178BE5 /* BackendFactory.cpp */; }; - 92D765B22228188700178BE5 /* BackendFactory.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92D765A42228188600178BE5 /* BackendFactory.hpp */; }; - 92D765B32228188700178BE5 /* Session.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D765A52228188600178BE5 /* Session.cpp */; }; - 92D765B52228188700178BE5 /* Schedule.cpp in 
Sources */ = {isa = PBXBuildFile; fileRef = 92D765A72228188700178BE5 /* Schedule.cpp */; }; - 92D765B62228188700178BE5 /* Pipeline.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92D765A82228188700178BE5 /* Pipeline.hpp */; }; 92D765BB222819EF00178BE5 /* BackendTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D765B8222819EF00178BE5 /* BackendTest.cpp */; }; 92D765BC222819EF00178BE5 /* ScheduleTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D765B9222819EF00178BE5 /* ScheduleTest.cpp */; }; 92D765BD222819EF00178BE5 /* DirectedAcyclicGraphTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D765BA222819EF00178BE5 /* DirectedAcyclicGraphTest.cpp */; }; - 92D765BF22281D0000178BE5 /* DirectedAcyclicGraph.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92D765BE22281CFF00178BE5 /* DirectedAcyclicGraph.hpp */; }; 92EAC19921CB3CD60056F4C2 /* MetalCast.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92EAC19721CB3CD60056F4C2 /* MetalCast.mm */; }; - 92EAC19A21CB3CD60056F4C2 /* MetalCast.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92EAC19821CB3CD60056F4C2 /* MetalCast.hpp */; }; 92EAC19C21CB3CE20056F4C2 /* MetalCast.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92EAC19B21CB3CE20056F4C2 /* MetalCast.metal */; }; - 92EEFEB2217F0CBB00F89377 /* CPUCrop.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92EEFE3B217F0CBB00F89377 /* CPUCrop.hpp */; }; - 92EEFEFF217F0CBC00F89377 /* CPUCrop.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92EEFE8B217F0CBB00F89377 /* CPUCrop.cpp */; }; 92EEFF27217F0EEF00F89377 /* MetalCrop.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92EEFF25217F0EEF00F89377 /* MetalCrop.mm */; }; - 92EEFF28217F0EEF00F89377 /* MetalCrop.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92EEFF26217F0EEF00F89377 /* MetalCrop.hpp */; }; 92EEFF2A217F0F0F00F89377 /* MetalCrop.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92EEFF29217F0F0F00F89377 /* MetalCrop.metal */; }; 92EEFF302180159600F89377 /* MetalReduction.mm 
in Sources */ = {isa = PBXBuildFile; fileRef = 92EEFF2E2180159600F89377 /* MetalReduction.mm */; }; - 92EEFF312180159600F89377 /* MetalReduction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92EEFF2F2180159600F89377 /* MetalReduction.hpp */; }; 92EEFF33218015A300F89377 /* MetalReduction.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92EEFF32218015A300F89377 /* MetalReduction.metal */; }; - AE7BE47C22816FC9002CEEA6 /* ShapeMoments.cpp in Sources */ = {isa = PBXBuildFile; fileRef = AE7BE47A22816FC9002CEEA6 /* ShapeMoments.cpp */; }; - AE7BE4B7228555A2002CEEA6 /* BackendRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = AE7BE4B6228555A2002CEEA6 /* BackendRegister.cpp */; }; - AE7BE4B922855638002CEEA6 /* ShapeRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = AE7BE4B822855638002CEEA6 /* ShapeRegister.cpp */; }; - AE7BE4BB2285564F002CEEA6 /* CPUOPRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = AE7BE4BA2285564F002CEEA6 /* CPUOPRegister.cpp */; }; + 92FF025523AA0B5A00AC97F6 /* CPUTanh.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00D323AA0B4800AC97F6 /* CPUTanh.cpp */; }; + 92FF025623AA0B5A00AC97F6 /* CPUPadding.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00D423AA0B4800AC97F6 /* CPUPadding.hpp */; }; + 92FF025723AA0B5A00AC97F6 /* CPUQuanConvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00D523AA0B4800AC97F6 /* CPUQuanConvolutionDepthwise.cpp */; }; + 92FF025823AA0B5A00AC97F6 /* CPUSqueeze.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00D623AA0B4800AC97F6 /* CPUSqueeze.hpp */; }; + 92FF025923AA0B5A00AC97F6 /* CPUPoolInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00D723AA0B4800AC97F6 /* CPUPoolInt8.cpp */; }; + 92FF025A23AA0B5A00AC97F6 /* CPUDilation2D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00D823AA0B4800AC97F6 /* CPUDilation2D.hpp */; }; + 92FF025B23AA0B5A00AC97F6 /* CPUPoolGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 
92FF00D923AA0B4800AC97F6 /* CPUPoolGrad.hpp */; }; + 92FF025C23AA0B5A00AC97F6 /* CPUGatherV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00DA23AA0B4800AC97F6 /* CPUGatherV2.hpp */; }; + 92FF025D23AA0B5A00AC97F6 /* CPUInterp.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00DB23AA0B4800AC97F6 /* CPUInterp.hpp */; }; + 92FF025E23AA0B5A00AC97F6 /* CPUROIPooling.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00DC23AA0B4900AC97F6 /* CPUROIPooling.cpp */; }; + 92FF025F23AA0B5A00AC97F6 /* CPUPadding.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00DD23AA0B4900AC97F6 /* CPUPadding.cpp */; }; + 92FF026023AA0B5A00AC97F6 /* CPURNNSequenceGRU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00DE23AA0B4900AC97F6 /* CPURNNSequenceGRU.hpp */; }; + 92FF026123AA0B5A00AC97F6 /* CPUCropAndResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00DF23AA0B4900AC97F6 /* CPUCropAndResize.cpp */; }; + 92FF026223AA0B5A00AC97F6 /* CPUSelect.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00E023AA0B4900AC97F6 /* CPUSelect.hpp */; }; + 92FF026323AA0B5A00AC97F6 /* CPUFloatToInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00E123AA0B4900AC97F6 /* CPUFloatToInt8.cpp */; }; + 92FF026423AA0B5A00AC97F6 /* CPUExpandDims.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00E223AA0B4900AC97F6 /* CPUExpandDims.hpp */; }; + 92FF026523AA0B5A00AC97F6 /* CPUQuantizedAvgPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00E323AA0B4900AC97F6 /* CPUQuantizedAvgPool.cpp */; }; + 92FF026623AA0B5A00AC97F6 /* CPUProposal.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00E423AA0B4900AC97F6 /* CPUProposal.hpp */; }; + 92FF026723AA0B5A00AC97F6 /* CPUReduceJoin.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00E523AA0B4900AC97F6 /* CPUReduceJoin.cpp */; }; + 92FF026823AA0B5A00AC97F6 /* CPUStridedSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00E623AA0B4900AC97F6 /* CPUStridedSlice.hpp */; }; + 
92FF026923AA0B5A00AC97F6 /* CPUSelu.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00E723AA0B4900AC97F6 /* CPUSelu.cpp */; }; + 92FF026A23AA0B5A00AC97F6 /* CPUNonMaxSuppressionV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00E823AA0B4900AC97F6 /* CPUNonMaxSuppressionV2.cpp */; }; + 92FF026B23AA0B5A00AC97F6 /* CPUReverseSequence.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00E923AA0B4900AC97F6 /* CPUReverseSequence.hpp */; }; + 92FF026C23AA0B5A00AC97F6 /* CPUSliceTf.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00EA23AA0B4900AC97F6 /* CPUSliceTf.hpp */; }; + 92FF026D23AA0B5A00AC97F6 /* CPUMatrixBandPart.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00EB23AA0B4900AC97F6 /* CPUMatrixBandPart.cpp */; }; + 92FF026E23AA0B5A00AC97F6 /* CPUQuantizationUtils.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00EC23AA0B4900AC97F6 /* CPUQuantizationUtils.hpp */; }; + 92FF026F23AA0B5A00AC97F6 /* CPUInt8ToFloat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00ED23AA0B4900AC97F6 /* CPUInt8ToFloat.cpp */; }; + 92FF027023AA0B5A00AC97F6 /* CPUElu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00EE23AA0B4A00AC97F6 /* CPUElu.hpp */; }; + 92FF027123AA0B5A00AC97F6 /* CPUUnpack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00EF23AA0B4A00AC97F6 /* CPUUnpack.hpp */; }; + 92FF027223AA0B5A00AC97F6 /* CPUScatterNd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00F023AA0B4A00AC97F6 /* CPUScatterNd.hpp */; }; + 92FF027323AA0B5A00AC97F6 /* CPUPoolInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00F123AA0B4A00AC97F6 /* CPUPoolInt8.hpp */; }; + 92FF027423AA0B5A00AC97F6 /* CPUArgMax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00F223AA0B4A00AC97F6 /* CPUArgMax.cpp */; }; + 92FF027523AA0B5A00AC97F6 /* CPUConvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00F323AA0B4A00AC97F6 /* CPUConvolution.cpp */; }; + 92FF027623AA0B5A00AC97F6 /* CPUInt8ToFloat.hpp in Headers */ = {isa = 
PBXBuildFile; fileRef = 92FF00F423AA0B4A00AC97F6 /* CPUInt8ToFloat.hpp */; }; + 92FF027723AA0B5A00AC97F6 /* CPUUnary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00F523AA0B4A00AC97F6 /* CPUUnary.hpp */; }; + 92FF027823AA0B5A00AC97F6 /* CPUFill.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00F623AA0B4A00AC97F6 /* CPUFill.hpp */; }; + 92FF027923AA0B5A00AC97F6 /* CPUSpaceToBatchND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00F723AA0B4A00AC97F6 /* CPUSpaceToBatchND.cpp */; }; + 92FF027A23AA0B5A00AC97F6 /* CPUPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00F823AA0B4A00AC97F6 /* CPUPool.hpp */; }; + 92FF027B23AA0B5A00AC97F6 /* CPUQuanConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00F923AA0B4A00AC97F6 /* CPUQuanConvolutionDepthwise.hpp */; }; + 92FF027C23AA0B5A00AC97F6 /* CPUAsString.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00FA23AA0B4A00AC97F6 /* CPUAsString.hpp */; }; + 92FF027D23AA0B5A00AC97F6 /* CPUReverseSequence.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00FB23AA0B4A00AC97F6 /* CPUReverseSequence.cpp */; }; + 92FF027E23AA0B5A00AC97F6 /* CPUTranspose.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00FC23AA0B4A00AC97F6 /* CPUTranspose.cpp */; }; + 92FF027F23AA0B5A00AC97F6 /* CPUDeconvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00FD23AA0B4A00AC97F6 /* CPUDeconvolutionDepthwise.cpp */; }; + 92FF028023AA0B5A00AC97F6 /* CPUFloatToInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF00FE23AA0B4B00AC97F6 /* CPUFloatToInt8.hpp */; }; + 92FF028123AA0B5A00AC97F6 /* CPUCosineSimilarity.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF00FF23AA0B4B00AC97F6 /* CPUCosineSimilarity.cpp */; }; + 92FF028223AA0B5A00AC97F6 /* CPUSoftmaxGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010023AA0B4B00AC97F6 /* CPUSoftmaxGrad.hpp */; }; + 92FF028323AA0B5A00AC97F6 /* CPUSize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 
92FF010123AA0B4B00AC97F6 /* CPUSize.hpp */; }; + 92FF028423AA0B5A00AC97F6 /* CPUPriorbox.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF010223AA0B4B00AC97F6 /* CPUPriorbox.cpp */; }; + 92FF028523AA0B5A00AC97F6 /* CPUBroadcastTo.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF010323AA0B4B00AC97F6 /* CPUBroadcastTo.cpp */; }; + 92FF028623AA0B5A00AC97F6 /* CPUDeconvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010423AA0B4B00AC97F6 /* CPUDeconvolution.hpp */; }; + 92FF028723AA0B5A00AC97F6 /* CPUFixedPoint.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010523AA0B4B00AC97F6 /* CPUFixedPoint.hpp */; }; + 92FF028823AA0B5A00AC97F6 /* CPUDequantize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010623AA0B4B00AC97F6 /* CPUDequantize.hpp */; }; + 92FF028923AA0B5A00AC97F6 /* CPUConv2DBackPropFilter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010723AA0B4B00AC97F6 /* CPUConv2DBackPropFilter.hpp */; }; + 92FF028A23AA0B5A00AC97F6 /* CPUSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010823AA0B4B00AC97F6 /* CPUSoftmax.hpp */; }; + 92FF028B23AA0B5A00AC97F6 /* CPUBatchToSpaceND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF010923AA0B4B00AC97F6 /* CPUBatchToSpaceND.cpp */; }; + 92FF028C23AA0B5A00AC97F6 /* CPUReduction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010A23AA0B4B00AC97F6 /* CPUReduction.hpp */; }; + 92FF028D23AA0B5A00AC97F6 /* CPUWhere.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010B23AA0B4B00AC97F6 /* CPUWhere.hpp */; }; + 92FF028E23AA0B5A00AC97F6 /* CPULinSpace.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010C23AA0B4B00AC97F6 /* CPULinSpace.hpp */; }; + 92FF028F23AA0B5A00AC97F6 /* CPUDetectionOutput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF010D23AA0B4C00AC97F6 /* CPUDetectionOutput.cpp */; }; + 92FF029023AA0B5A00AC97F6 /* CPULRN.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010E23AA0B4C00AC97F6 /* CPULRN.hpp */; }; + 
92FF029123AA0B5A00AC97F6 /* CPUSpatialProduct.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF010F23AA0B4C00AC97F6 /* CPUSpatialProduct.hpp */; }; + 92FF029223AA0B5A00AC97F6 /* CPUConv2DBackPropFilter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF011023AA0B4C00AC97F6 /* CPUConv2DBackPropFilter.cpp */; }; + 92FF029323AA0B5A00AC97F6 /* CPURange.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF011123AA0B4C00AC97F6 /* CPURange.hpp */; }; + 92FF029423AA0B5A00AC97F6 /* CPUMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF011223AA0B4C00AC97F6 /* CPUMatMul.cpp */; }; + 92FF029523AA0B5A00AC97F6 /* CPUBatchMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF011323AA0B4C00AC97F6 /* CPUBatchMatMul.cpp */; }; + 92FF029623AA0B5A00AC97F6 /* CPUCast.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF011423AA0B4C00AC97F6 /* CPUCast.hpp */; }; + 92FF029723AA0B5A00AC97F6 /* CPUEltwiseInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF011523AA0B4C00AC97F6 /* CPUEltwiseInt8.hpp */; }; + 92FF029823AA0B5A00AC97F6 /* CPUTFQuantizedConv2D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF011623AA0B4C00AC97F6 /* CPUTFQuantizedConv2D.cpp */; }; + 92FF029923AA0B5A00AC97F6 /* CPUSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF011723AA0B4C00AC97F6 /* CPUSlice.cpp */; }; + 92FF029A23AA0B5A00AC97F6 /* CPUQuantizedMaxPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF011823AA0B4C00AC97F6 /* CPUQuantizedMaxPool.cpp */; }; + 92FF029B23AA0B5A00AC97F6 /* CPUScale.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF011923AA0B4C00AC97F6 /* CPUScale.hpp */; }; + 92FF029C23AA0B5A00AC97F6 /* CPUPack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF011A23AA0B4C00AC97F6 /* CPUPack.cpp */; }; + 92FF029D23AA0B5A00AC97F6 /* CPULRN.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF011B23AA0B4C00AC97F6 /* CPULRN.cpp */; }; + 92FF029E23AA0B5A00AC97F6 /* CPUDeconvolutionDepthwise.hpp in Headers */ = {isa = 
PBXBuildFile; fileRef = 92FF011C23AA0B4D00AC97F6 /* CPUDeconvolutionDepthwise.hpp */; }; + 92FF029F23AA0B5A00AC97F6 /* CPUReluGrad.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF011D23AA0B4D00AC97F6 /* CPUReluGrad.hpp */; }; + 92FF02A023AA0B5A00AC97F6 /* CPUZeroLike.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF011E23AA0B4D00AC97F6 /* CPUZeroLike.hpp */; }; + 92FF02A123AA0B5A00AC97F6 /* CPUDepthwiseConvInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF011F23AA0B4D00AC97F6 /* CPUDepthwiseConvInt8.hpp */; }; + 92FF02A223AA0B5A00AC97F6 /* CPUSize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012023AA0B4D00AC97F6 /* CPUSize.cpp */; }; + 92FF02A323AA0B5A00AC97F6 /* CPUQuantizedLogistic.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012123AA0B4D00AC97F6 /* CPUQuantizedLogistic.cpp */; }; + 92FF02A423AA0B5A00AC97F6 /* CPUBinary.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012223AA0B4D00AC97F6 /* CPUBinary.cpp */; }; + 92FF02A523AA0B5A00AC97F6 /* CPUZeroLike.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012323AA0B4D00AC97F6 /* CPUZeroLike.cpp */; }; + 92FF02A623AA0B5A00AC97F6 /* CPUQuantizedMaxPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF012423AA0B4D00AC97F6 /* CPUQuantizedMaxPool.hpp */; }; + 92FF02A723AA0B5A00AC97F6 /* CPUSelu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF012523AA0B4D00AC97F6 /* CPUSelu.hpp */; }; + 92FF02A823AA0B5A00AC97F6 /* CPUGather.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF012623AA0B4D00AC97F6 /* CPUGather.hpp */; }; + 92FF02A923AA0B5A00AC97F6 /* CPUCropAndResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF012723AA0B4D00AC97F6 /* CPUCropAndResize.hpp */; }; + 92FF02AA23AA0B5A00AC97F6 /* CPUSpaceToDepth.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012823AA0B4D00AC97F6 /* CPUSpaceToDepth.cpp */; }; + 92FF02AB23AA0B5A00AC97F6 /* CPUConst.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012923AA0B4D00AC97F6 /* CPUConst.cpp */; 
}; + 92FF02AC23AA0B5A00AC97F6 /* CPUQuantizedSoftmax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012A23AA0B4D00AC97F6 /* CPUQuantizedSoftmax.cpp */; }; + 92FF02AD23AA0B5A00AC97F6 /* CPUConvInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012B23AA0B4D00AC97F6 /* CPUConvInt8.cpp */; }; + 92FF02AE23AA0B5A00AC97F6 /* CPUProposal.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012C23AA0B4D00AC97F6 /* CPUProposal.cpp */; }; + 92FF02AF23AA0B5A00AC97F6 /* CPUConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF012D23AA0B4D00AC97F6 /* CPUConcat.hpp */; }; + 92FF02B023AA0B5A00AC97F6 /* CPUDequantize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012E23AA0B4E00AC97F6 /* CPUDequantize.cpp */; }; + 92FF02B123AA0B5A00AC97F6 /* CPUBackend.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF012F23AA0B4E00AC97F6 /* CPUBackend.cpp */; }; + 92FF02B223AA0B5A00AC97F6 /* CPUBroadcastTo.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF013023AA0B4E00AC97F6 /* CPUBroadcastTo.hpp */; }; + 92FF02B323AA0B5A00AC97F6 /* CPUInstanceNorm.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013123AA0B4E00AC97F6 /* CPUInstanceNorm.cpp */; }; + 92FF02B423AA0B5A00AC97F6 /* CPUMoments.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF013223AA0B4E00AC97F6 /* CPUMoments.hpp */; }; + 92FF02B523AA0B5A00AC97F6 /* CPUTopKV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013323AA0B4E00AC97F6 /* CPUTopKV2.cpp */; }; + 92FF02B623AA0B5A00AC97F6 /* CPUUnary.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013423AA0B4E00AC97F6 /* CPUUnary.cpp */; }; + 92FF02B723AA0B5A00AC97F6 /* CPUQuantizedAdd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF013523AA0B4E00AC97F6 /* CPUQuantizedAdd.hpp */; }; + 92FF02B823AA0B5A00AC97F6 /* CPUWhere.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013623AA0B4E00AC97F6 /* CPUWhere.cpp */; }; + 92FF02B923AA0B5A00AC97F6 /* CPUSoftmaxGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
92FF013723AA0B4E00AC97F6 /* CPUSoftmaxGrad.cpp */; }; + 92FF02BA23AA0B5A00AC97F6 /* CMakeLists.txt in Resources */ = {isa = PBXBuildFile; fileRef = 92FF013923AA0B4E00AC97F6 /* CMakeLists.txt */; }; + 92FF02BB23AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013B23AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S */; }; + 92FF02BC23AA0B5A00AC97F6 /* MNNScaleAddInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013C23AA0B4E00AC97F6 /* MNNScaleAddInt8.S */; }; + 92FF02BD23AA0B5A00AC97F6 /* MNNMatrixProd.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013D23AA0B4E00AC97F6 /* MNNMatrixProd.S */; }; + 92FF02BE23AA0B5A00AC97F6 /* MNNFloat2Int8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013E23AA0B4E00AC97F6 /* MNNFloat2Int8.S */; }; + 92FF02BF23AA0B5A00AC97F6 /* MNNSamplerC4NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF013F23AA0B4E00AC97F6 /* MNNSamplerC4NearestOpt.S */; }; + 92FF02C023AA0B5A00AC97F6 /* MNNAddC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014023AA0B4E00AC97F6 /* MNNAddC4WithStride.S */; }; + 92FF02C123AA0B5A00AC97F6 /* MNNQuanToDestUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014123AA0B4E00AC97F6 /* MNNQuanToDestUint8.S */; }; + 92FF02C223AA0B5A00AC97F6 /* MNNLoadU8AndSum.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014223AA0B4E00AC97F6 /* MNNLoadU8AndSum.S */; }; + 92FF02C323AA0B5A00AC97F6 /* MNNCubicLineC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014323AA0B4E00AC97F6 /* MNNCubicLineC4.S */; }; + 92FF02C423AA0B5A00AC97F6 /* MNNAddBiasRelu6.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014423AA0B4E00AC97F6 /* MNNAddBiasRelu6.S */; }; + 92FF02C523AA0B5A00AC97F6 /* MNNStrassenMergeCFunction.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014523AA0B4E00AC97F6 /* MNNStrassenMergeCFunction.S */; }; + 92FF02C623AA0B5A00AC97F6 /* MNNBlitC1ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 
92FF014623AA0B4E00AC97F6 /* MNNBlitC1ToFloatRGBA.S */; }; + 92FF02C723AA0B5A00AC97F6 /* MNNCopyC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014723AA0B4E00AC97F6 /* MNNCopyC4WithStride.S */; }; + 92FF02C823AA0B5A00AC97F6 /* MNNNV21ToBGRUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014823AA0B4E00AC97F6 /* MNNNV21ToBGRUnit.S */; }; + 92FF02C923AA0B5A00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014923AA0B4E00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */; }; + 92FF02CA23AA0B5A00AC97F6 /* MNNUnPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014A23AA0B4E00AC97F6 /* MNNUnPackC4.S */; }; + 92FF02CB23AA0B5A00AC97F6 /* MNNSamplerC1NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014B23AA0B4E00AC97F6 /* MNNSamplerC1NearestOpt.S */; }; + 92FF02CC23AA0B5A00AC97F6 /* MNNGemmFloatCommon_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014C23AA0B4E00AC97F6 /* MNNGemmFloatCommon_4.S */; }; + 92FF02CD23AA0B5A00AC97F6 /* MNNNV21ToRGBUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014D23AA0B4E00AC97F6 /* MNNNV21ToRGBUnit.S */; }; + 92FF02CE23AA0B5A00AC97F6 /* MNNPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014E23AA0B4E00AC97F6 /* MNNPackC4.S */; }; + 92FF02CF23AA0B5A00AC97F6 /* MNNMinFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF014F23AA0B4E00AC97F6 /* MNNMinFloat.S */; }; + 92FF02D023AA0B5A00AC97F6 /* MNNGemmInt16to32_4x4_Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015023AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Common.S */; }; + 92FF02D123AA0B5A00AC97F6 /* MNNMaxFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015123AA0B4E00AC97F6 /* MNNMaxFloat.S */; }; + 92FF02D223AA0B5A00AC97F6 /* MNNNV21ToRGBAUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015223AA0B4E00AC97F6 /* MNNNV21ToRGBAUnit.S */; }; + 92FF02D323AA0B5A00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S in Sources */ = {isa = PBXBuildFile; 
fileRef = 92FF015323AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S */; }; + 92FF02D423AA0B5A00AC97F6 /* MNNScaleBias2FloatC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015423AA0B4E00AC97F6 /* MNNScaleBias2FloatC4.S */; }; + 92FF02D523AA0B5A00AC97F6 /* MNNMatrixMax.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015523AA0B4E00AC97F6 /* MNNMatrixMax.S */; }; + 92FF02D623AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015623AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S */; }; + 92FF02D723AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015723AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S */; }; + 92FF02D823AA0B5A00AC97F6 /* MNNGemmInt8AddBiasScale_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015823AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_8x4_Unit.S */; }; + 92FF02D923AA0B5A00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015923AA0B4E00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S */; }; + 92FF02DA23AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015A23AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S */; }; + 92FF02DB23AA0B5A00AC97F6 /* MNNScaleAndAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015B23AA0B4E00AC97F6 /* MNNScaleAndAddBias.S */; }; + 92FF02DC23AA0B5A00AC97F6 /* MNNReluInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015C23AA0B4E00AC97F6 /* MNNReluInt8.S */; }; + 92FF02DD23AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015D23AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S */; }; + 92FF02DE23AA0B5A00AC97F6 /* MNNSamplerC4BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF015E23AA0B4E00AC97F6 /* MNNSamplerC4BilinearOpt.S */; }; + 92FF02DF23AA0B5A00AC97F6 /* MNNBilinearProcC1.S in Sources */ = {isa = PBXBuildFile; fileRef 
= 92FF015F23AA0B4E00AC97F6 /* MNNBilinearProcC1.S */; }; + 92FF02E023AA0B5A00AC97F6 /* MNNMatrixSub.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016023AA0B4E00AC97F6 /* MNNMatrixSub.S */; }; + 92FF02E123AA0B5A00AC97F6 /* MNNPowC8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016123AA0B4E00AC97F6 /* MNNPowC8.S */; }; + 92FF02E223AA0B5A00AC97F6 /* MNNMatrixAdd.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016223AA0B4E00AC97F6 /* MNNMatrixAdd.S */; }; + 92FF02E323AA0B5A00AC97F6 /* MNNExpC8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016323AA0B4E00AC97F6 /* MNNExpC8.S */; }; + 92FF02E423AA0B5A00AC97F6 /* MNNAddBiasRelu.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016423AA0B4E00AC97F6 /* MNNAddBiasRelu.S */; }; + 92FF02E523AA0B5A00AC97F6 /* MNNConvDwF23SourceTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016523AA0B4E00AC97F6 /* MNNConvDwF23SourceTransUnit.S */; }; + 92FF02E623AA0B5A00AC97F6 /* MNNWinogradMatrixProductLeft.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016623AA0B4E00AC97F6 /* MNNWinogradMatrixProductLeft.S */; }; + 92FF02E723AA0B5A00AC97F6 /* MNNDeconvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016723AA0B4E00AC97F6 /* MNNDeconvRunForUnitDepthWise.S */; }; + 92FF02E823AA0B5A00AC97F6 /* MNNSamplerC1BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016823AA0B4E00AC97F6 /* MNNSamplerC1BilinearOpt.S */; }; + 92FF02E923AA0B5A00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016923AA0B4E00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S */; }; + 92FF02EA23AA0B5A00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016A23AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */; }; + 92FF02EB23AA0B5A00AC97F6 /* MNNGemmFloatOne_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016B23AA0B4E00AC97F6 /* MNNGemmFloatOne_4.S */; }; + 92FF02EC23AA0B5A00AC97F6 /* 
MNNWinogradMatrixProductRight.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016C23AA0B4E00AC97F6 /* MNNWinogradMatrixProductRight.S */; }; + 92FF02EE23AA0B5A00AC97F6 /* MNNReluWithSlopeChannel.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016E23AA0B4E00AC97F6 /* MNNReluWithSlopeChannel.S */; }; + 92FF02EF23AA0B5A00AC97F6 /* MNNAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF016F23AA0B4E00AC97F6 /* MNNAddBias.S */; }; + 92FF02F023AA0B5A00AC97F6 /* MNNCubicSampleC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017023AA0B4E00AC97F6 /* MNNCubicSampleC4.S */; }; + 92FF02F123AA0B5A00AC97F6 /* MNNCoefLine.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017123AA0B4E00AC97F6 /* MNNCoefLine.S */; }; + 92FF02F223AA0B5A00AC97F6 /* MNNBlitC3ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017223AA0B4E00AC97F6 /* MNNBlitC3ToFloatRGBA.S */; }; + 92FF02F323AA0B5A00AC97F6 /* MNNConvSlideWindowMiddle.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017323AA0B4E00AC97F6 /* MNNConvSlideWindowMiddle.S */; }; + 92FF02F423AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017423AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S */; }; + 92FF02F523AA0B5A00AC97F6 /* MNNInt8ScaleToFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017523AA0B4E00AC97F6 /* MNNInt8ScaleToFloat.S */; }; + 92FF02F623AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017623AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWise.S */; }; + 92FF02F723AA0B5A00AC97F6 /* MNNConvDwF23MulTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017723AA0B4E00AC97F6 /* MNNConvDwF23MulTransUnit.S */; }; + 92FF02F823AA0B5A00AC97F6 /* MNNConvRunForLineDepthwise.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017823AA0B4E00AC97F6 /* MNNConvRunForLineDepthwise.S */; }; + 92FF02F923AA0B5A00AC97F6 /* MNNGemmint8to32_8x4_Unit.S in Sources */ = {isa = 
PBXBuildFile; fileRef = 92FF017923AA0B4E00AC97F6 /* MNNGemmint8to32_8x4_Unit.S */; }; + 92FF02FA23AA0B5A00AC97F6 /* MNNGemmFloatUnit_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017A23AA0B4E00AC97F6 /* MNNGemmFloatUnit_4.S */; }; + 92FF02FB23AA0B5A00AC97F6 /* MNNConvSlideWindowBorder.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017B23AA0B4E00AC97F6 /* MNNConvSlideWindowBorder.S */; }; + 92FF02FC23AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017D23AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S */; }; + 92FF02FD23AA0B5A00AC97F6 /* MNNScaleAddInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017E23AA0B4E00AC97F6 /* MNNScaleAddInt8.S */; }; + 92FF02FE23AA0B5A00AC97F6 /* MNNMatrixProd.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF017F23AA0B4E00AC97F6 /* MNNMatrixProd.S */; }; + 92FF02FF23AA0B5A00AC97F6 /* MNNFloat2Int8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018023AA0B4E00AC97F6 /* MNNFloat2Int8.S */; }; + 92FF030023AA0B5A00AC97F6 /* MNNSamplerC4NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018123AA0B4E00AC97F6 /* MNNSamplerC4NearestOpt.S */; }; + 92FF030123AA0B5A00AC97F6 /* MNNAddC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018223AA0B4E00AC97F6 /* MNNAddC4WithStride.S */; }; + 92FF030223AA0B5A00AC97F6 /* MNNQuanToDestUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018323AA0B4E00AC97F6 /* MNNQuanToDestUint8.S */; }; + 92FF030323AA0B5A00AC97F6 /* MNNLoadU8AndSum.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018423AA0B4E00AC97F6 /* MNNLoadU8AndSum.S */; }; + 92FF030423AA0B5A00AC97F6 /* MNNCubicLineC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018523AA0B4E00AC97F6 /* MNNCubicLineC4.S */; }; + 92FF030523AA0B5A00AC97F6 /* MNNAddBiasRelu6.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018623AA0B4E00AC97F6 /* MNNAddBiasRelu6.S */; }; + 92FF030623AA0B5A00AC97F6 /* MNNStrassenMergeCFunction.S in 
Sources */ = {isa = PBXBuildFile; fileRef = 92FF018723AA0B4E00AC97F6 /* MNNStrassenMergeCFunction.S */; }; + 92FF030723AA0B5A00AC97F6 /* MNNBlitC1ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018823AA0B4E00AC97F6 /* MNNBlitC1ToFloatRGBA.S */; }; + 92FF030823AA0B5A00AC97F6 /* MNNCopyC4WithStride.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018923AA0B4E00AC97F6 /* MNNCopyC4WithStride.S */; }; + 92FF030923AA0B5A00AC97F6 /* MNNNV21ToBGRUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018A23AA0B4E00AC97F6 /* MNNNV21ToBGRUnit.S */; }; + 92FF030A23AA0B5A00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018B23AA0B4E00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */; }; + 92FF030B23AA0B5A00AC97F6 /* MNNUnPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018C23AA0B4E00AC97F6 /* MNNUnPackC4.S */; }; + 92FF030C23AA0B5A00AC97F6 /* MNNSamplerC1NearestOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018D23AA0B4E00AC97F6 /* MNNSamplerC1NearestOpt.S */; }; + 92FF030D23AA0B5A00AC97F6 /* MNNGemmFloatCommon_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018E23AA0B4E00AC97F6 /* MNNGemmFloatCommon_4.S */; }; + 92FF030E23AA0B5A00AC97F6 /* MNNNV21ToRGBUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF018F23AA0B4E00AC97F6 /* MNNNV21ToRGBUnit.S */; }; + 92FF030F23AA0B5A00AC97F6 /* MNNPackC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019023AA0B4E00AC97F6 /* MNNPackC4.S */; }; + 92FF031023AA0B5A00AC97F6 /* MNNMinFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019123AA0B4E00AC97F6 /* MNNMinFloat.S */; }; + 92FF031123AA0B5A00AC97F6 /* MNNGemmInt16to32_4x4_Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019223AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Common.S */; }; + 92FF031223AA0B5A00AC97F6 /* MNNMaxFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019323AA0B4E00AC97F6 /* MNNMaxFloat.S */; }; + 92FF031323AA0B5A00AC97F6 /* 
MNNNV21ToRGBAUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019423AA0B4E00AC97F6 /* MNNNV21ToRGBAUnit.S */; }; + 92FF031423AA0B5A00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019523AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S */; }; + 92FF031523AA0B5A00AC97F6 /* MNNScaleBias2FloatC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019623AA0B4E00AC97F6 /* MNNScaleBias2FloatC4.S */; }; + 92FF031623AA0B5A00AC97F6 /* MNNMatrixMax.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019723AA0B4E00AC97F6 /* MNNMatrixMax.S */; }; + 92FF031723AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019823AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S */; }; + 92FF031823AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019923AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S */; }; + 92FF031923AA0B5A00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019A23AA0B4E00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S */; }; + 92FF031A23AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019B23AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S */; }; + 92FF031B23AA0B5A00AC97F6 /* MNNScaleAndAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019C23AA0B4E00AC97F6 /* MNNScaleAndAddBias.S */; }; + 92FF031C23AA0B5A00AC97F6 /* MNNReluInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019D23AA0B4E00AC97F6 /* MNNReluInt8.S */; }; + 92FF031D23AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019E23AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S */; }; + 92FF031E23AA0B5A00AC97F6 /* MNNSamplerC4BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF019F23AA0B4E00AC97F6 /* MNNSamplerC4BilinearOpt.S */; }; + 92FF031F23AA0B5A00AC97F6 /* MNNBilinearProcC1.S in 
Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A023AA0B4E00AC97F6 /* MNNBilinearProcC1.S */; }; + 92FF032023AA0B5A00AC97F6 /* MNNMatrixSub.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A123AA0B4E00AC97F6 /* MNNMatrixSub.S */; }; + 92FF032123AA0B5A00AC97F6 /* MNNPowC8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A223AA0B4E00AC97F6 /* MNNPowC8.S */; }; + 92FF032223AA0B5A00AC97F6 /* MNNMatrixAdd.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A323AA0B4E00AC97F6 /* MNNMatrixAdd.S */; }; + 92FF032323AA0B5A00AC97F6 /* MNNExpC8.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A423AA0B4E00AC97F6 /* MNNExpC8.S */; }; + 92FF032423AA0B5A00AC97F6 /* MNNAddBiasRelu.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A523AA0B4E00AC97F6 /* MNNAddBiasRelu.S */; }; + 92FF032523AA0B5A00AC97F6 /* MNNConvDwF23SourceTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A623AA0B4E00AC97F6 /* MNNConvDwF23SourceTransUnit.S */; }; + 92FF032623AA0B5A00AC97F6 /* MNNWinogradMatrixProductLeft.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A723AA0B4E00AC97F6 /* MNNWinogradMatrixProductLeft.S */; }; + 92FF032723AA0B5A00AC97F6 /* MNNDeconvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A823AA0B4E00AC97F6 /* MNNDeconvRunForUnitDepthWise.S */; }; + 92FF032823AA0B5A00AC97F6 /* MNNSamplerC1BilinearOpt.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01A923AA0B4E00AC97F6 /* MNNSamplerC1BilinearOpt.S */; }; + 92FF032923AA0B5A00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01AA23AA0B4E00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S */; }; + 92FF032A23AA0B5A00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01AB23AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */; }; + 92FF032B23AA0B5A00AC97F6 /* MNNGemmFloatOne_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01AC23AA0B4E00AC97F6 /* MNNGemmFloatOne_4.S */; }; 
+ 92FF032C23AA0B5A00AC97F6 /* MNNWinogradMatrixProductRight.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01AD23AA0B4E00AC97F6 /* MNNWinogradMatrixProductRight.S */; }; + 92FF032E23AA0B5A00AC97F6 /* MNNReluWithSlopeChannel.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01AF23AA0B4E00AC97F6 /* MNNReluWithSlopeChannel.S */; }; + 92FF032F23AA0B5A00AC97F6 /* MNNAddBias.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B023AA0B4E00AC97F6 /* MNNAddBias.S */; }; + 92FF033023AA0B5A00AC97F6 /* MNNCubicSampleC4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B123AA0B4E00AC97F6 /* MNNCubicSampleC4.S */; }; + 92FF033123AA0B5A00AC97F6 /* MNNCoefLine.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B223AA0B4E00AC97F6 /* MNNCoefLine.S */; }; + 92FF033223AA0B5A00AC97F6 /* MNNBlitC3ToFloatRGBA.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B323AA0B4E00AC97F6 /* MNNBlitC3ToFloatRGBA.S */; }; + 92FF033323AA0B5A00AC97F6 /* MNNConvSlideWindowMiddle.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B423AA0B4E00AC97F6 /* MNNConvSlideWindowMiddle.S */; }; + 92FF033423AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B523AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S */; }; + 92FF033523AA0B5A00AC97F6 /* MNNInt8ScaleToFloat.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B623AA0B4E00AC97F6 /* MNNInt8ScaleToFloat.S */; }; + 92FF033623AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWise.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B723AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWise.S */; }; + 92FF033723AA0B5A00AC97F6 /* MNNConvDwF23MulTransUnit.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B823AA0B4E00AC97F6 /* MNNConvDwF23MulTransUnit.S */; }; + 92FF033823AA0B5A00AC97F6 /* MNNConvRunForLineDepthwise.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01B923AA0B4E00AC97F6 /* MNNConvRunForLineDepthwise.S */; }; + 92FF033923AA0B5A00AC97F6 /* MNNGemmint8to32_8x4_Unit.S in 
Sources */ = {isa = PBXBuildFile; fileRef = 92FF01BA23AA0B4E00AC97F6 /* MNNGemmint8to32_8x4_Unit.S */; }; + 92FF033A23AA0B5A00AC97F6 /* MNNGemmFloatUnit_4.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01BB23AA0B4E00AC97F6 /* MNNGemmFloatUnit_4.S */; }; + 92FF033B23AA0B5A00AC97F6 /* MNNConvSlideWindowBorder.S in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01BC23AA0B4E00AC97F6 /* MNNConvSlideWindowBorder.S */; }; + 92FF033C23AA0B5A00AC97F6 /* MNNAsmGlobal.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01BD23AA0B4E00AC97F6 /* MNNAsmGlobal.h */; }; + 92FF033D23AA0B5A00AC97F6 /* CPUReluGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01BE23AA0B4E00AC97F6 /* CPUReluGrad.cpp */; }; + 92FF033E23AA0B5A00AC97F6 /* CPUDilation2D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01BF23AA0B4E00AC97F6 /* CPUDilation2D.cpp */; }; + 92FF033F23AA0B5A00AC97F6 /* CPUArgMax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01C023AA0B4E00AC97F6 /* CPUArgMax.hpp */; }; + 92FF034023AA0B5A00AC97F6 /* CPUShape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01C123AA0B4E00AC97F6 /* CPUShape.cpp */; }; + 92FF034123AA0B5A00AC97F6 /* CPURank.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01C223AA0B4E00AC97F6 /* CPURank.hpp */; }; + 92FF034223AA0B5A00AC97F6 /* CPUReduction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01C323AA0B4F00AC97F6 /* CPUReduction.cpp */; }; + 92FF034323AA0B5A00AC97F6 /* CPUStridedSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01C423AA0B4F00AC97F6 /* CPUStridedSlice.cpp */; }; + 92FF034423AA0B5A00AC97F6 /* CPUGatherND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01C523AA0B4F00AC97F6 /* CPUGatherND.cpp */; }; + 92FF034523AA0B5A00AC97F6 /* CPUQuantizedAvgPool.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01C623AA0B4F00AC97F6 /* CPUQuantizedAvgPool.hpp */; }; + 92FF034623AA0B5A00AC97F6 /* CPUGatherND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01C723AA0B4F00AC97F6 /* 
CPUGatherND.hpp */; }; + 92FF034723AA0B5A00AC97F6 /* CPUConvolution3D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01C823AA0B4F00AC97F6 /* CPUConvolution3D.hpp */; }; + 92FF034823AA0B5A00AC97F6 /* CPUSpaceToDepth.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01C923AA0B4F00AC97F6 /* CPUSpaceToDepth.hpp */; }; + 92FF034923AA0B5A00AC97F6 /* CPUSpatialProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01CA23AA0B4F00AC97F6 /* CPUSpatialProduct.cpp */; }; + 92FF034A23AA0B5A00AC97F6 /* CPUTanh.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01CB23AA0B4F00AC97F6 /* CPUTanh.hpp */; }; + 92FF034B23AA0B5A00AC97F6 /* CPUTile.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01CC23AA0B4F00AC97F6 /* CPUTile.hpp */; }; + 92FF034C23AA0B5A00AC97F6 /* CPUSetDiff1D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01CD23AA0B4F00AC97F6 /* CPUSetDiff1D.hpp */; }; + 92FF034D23AA0B5A00AC97F6 /* CPUCast.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01CE23AA0B4F00AC97F6 /* CPUCast.cpp */; }; + 92FF034E23AA0B5A00AC97F6 /* CPUDepthToSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01CF23AA0B4F00AC97F6 /* CPUDepthToSpace.cpp */; }; + 92FF034F23AA0B5A00AC97F6 /* CPUSliceTf.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01D023AA0B4F00AC97F6 /* CPUSliceTf.cpp */; }; + 92FF035023AA0B5A00AC97F6 /* CPUOneHot.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01D123AA0B4F00AC97F6 /* CPUOneHot.hpp */; }; + 92FF035123AA0B5A00AC97F6 /* CPUCrop.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01D223AA0B4F00AC97F6 /* CPUCrop.cpp */; }; + 92FF035223AA0B5A00AC97F6 /* CPUThreshold.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01D323AA0B4F00AC97F6 /* CPUThreshold.hpp */; }; + 92FF035323AA0B5A00AC97F6 /* CPUScatterNd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01D423AA0B5000AC97F6 /* CPUScatterNd.cpp */; }; + 92FF035423AA0B5A00AC97F6 /* CPUSelect.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
92FF01D523AA0B5000AC97F6 /* CPUSelect.cpp */; }; + 92FF035523AA0B5A00AC97F6 /* CPUElu.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01D623AA0B5000AC97F6 /* CPUElu.cpp */; }; + 92FF035623AA0B5A00AC97F6 /* CPUConvolution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01D723AA0B5000AC97F6 /* CPUConvolution.hpp */; }; + 92FF035723AA0B5A00AC97F6 /* CPUOneHot.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01D823AA0B5000AC97F6 /* CPUOneHot.cpp */; }; + 92FF035823AA0B5A00AC97F6 /* CPUTFQuantizedConv2D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01D923AA0B5000AC97F6 /* CPUTFQuantizedConv2D.hpp */; }; + 92FF035923AA0B5A00AC97F6 /* CPUAsString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01DA23AA0B5000AC97F6 /* CPUAsString.cpp */; }; + 92FF035A23AA0B5A00AC97F6 /* CPUDetectionPostProcess.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01DB23AA0B5000AC97F6 /* CPUDetectionPostProcess.hpp */; }; + 92FF035B23AA0B5A00AC97F6 /* CPURelu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01DC23AA0B5000AC97F6 /* CPURelu.hpp */; }; + 92FF035C23AA0B5A00AC97F6 /* CPUPermute.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01DD23AA0B5000AC97F6 /* CPUPermute.cpp */; }; + 92FF035D23AA0B5A00AC97F6 /* CPUQuantizedReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01DE23AA0B5000AC97F6 /* CPUQuantizedReshape.cpp */; }; + 92FF035E23AA0B5A00AC97F6 /* CPUConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01DF23AA0B5000AC97F6 /* CPUConcat.cpp */; }; + 92FF035F23AA0B5A00AC97F6 /* CPUShape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01E023AA0B5000AC97F6 /* CPUShape.hpp */; }; + 92FF036023AA0B5A00AC97F6 /* CPUInnerProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01E123AA0B5000AC97F6 /* CPUInnerProduct.cpp */; }; + 92FF036123AA0B5A00AC97F6 /* CPUQuantizedConcat.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01E223AA0B5000AC97F6 /* CPUQuantizedConcat.hpp */; }; + 92FF036223AA0B5A00AC97F6 
/* CPUFill.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01E323AA0B5000AC97F6 /* CPUFill.cpp */; }; + 92FF036323AA0B5A00AC97F6 /* CPUScale.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01E423AA0B5100AC97F6 /* CPUScale.cpp */; }; + 92FF036423AA0B5A00AC97F6 /* CPUUnravelIndex.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01E523AA0B5100AC97F6 /* CPUUnravelIndex.cpp */; }; + 92FF036523AA0B5A00AC97F6 /* CPUResize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01E623AA0B5100AC97F6 /* CPUResize.hpp */; }; + 92FF036623AA0B5A00AC97F6 /* CPUDetectionOutput.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01E723AA0B5100AC97F6 /* CPUDetectionOutput.hpp */; }; + 92FF036723AA0B5A00AC97F6 /* CPURuntime.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01E823AA0B5100AC97F6 /* CPURuntime.hpp */; }; + 92FF036823AA0B5A00AC97F6 /* CPUPermute.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01E923AA0B5100AC97F6 /* CPUPermute.hpp */; }; + 92FF036923AA0B5A00AC97F6 /* CPUInnerProduct.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01EA23AA0B5100AC97F6 /* CPUInnerProduct.hpp */; }; + 92FF036A23AA0B5A00AC97F6 /* CPURNNSequenceGRU.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01EB23AA0B5100AC97F6 /* CPURNNSequenceGRU.cpp */; }; + 92FF036B23AA0B5A00AC97F6 /* CPUResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01EC23AA0B5100AC97F6 /* CPUResize.cpp */; }; + 92FF036C23AA0B5A00AC97F6 /* CPUConst.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01ED23AA0B5100AC97F6 /* CPUConst.hpp */; }; + 92FF036D23AA0B5A00AC97F6 /* CPULSTM.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01EE23AA0B5100AC97F6 /* CPULSTM.cpp */; }; + 92FF036E23AA0B5A00AC97F6 /* CPUQuantizedSoftmax.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01EF23AA0B5100AC97F6 /* CPUQuantizedSoftmax.hpp */; }; + 92FF036F23AA0B5A00AC97F6 /* CPURuntime.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01F023AA0B5200AC97F6 /* CPURuntime.cpp 
*/; }; + 92FF037023AA0B5A00AC97F6 /* CPUPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01F123AA0B5200AC97F6 /* CPUPool.cpp */; }; + 92FF037123AA0B5A00AC97F6 /* CPUBatchToSpaceND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01F223AA0B5200AC97F6 /* CPUBatchToSpaceND.hpp */; }; + 92FF037223AA0B5A00AC97F6 /* CPUExpandDims.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01F323AA0B5200AC97F6 /* CPUExpandDims.cpp */; }; + 92FF037323AA0B5A00AC97F6 /* CPUEltwiseInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01F423AA0B5200AC97F6 /* CPUEltwiseInt8.cpp */; }; + 92FF037423AA0B5A00AC97F6 /* CPUConvolution3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01F523AA0B5200AC97F6 /* CPUConvolution3D.cpp */; }; + 92FF037523AA0B5A00AC97F6 /* CPUUnpack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01F623AA0B5200AC97F6 /* CPUUnpack.cpp */; }; + 92FF037623AA0B5A00AC97F6 /* CPUBinary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01F723AA0B5200AC97F6 /* CPUBinary.hpp */; }; + 92FF037723AA0B5A00AC97F6 /* CPUConvolutionDepthwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01F823AA0B5200AC97F6 /* CPUConvolutionDepthwise.cpp */; }; + 92FF037823AA0B5A00AC97F6 /* CPUROIPooling.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01F923AA0B5200AC97F6 /* CPUROIPooling.hpp */; }; + 92FF037923AA0B5A00AC97F6 /* CPUInstanceNorm.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01FA23AA0B5200AC97F6 /* CPUInstanceNorm.hpp */; }; + 92FF037A23AA0B5A00AC97F6 /* CPUSigmoid.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01FB23AA0B5200AC97F6 /* CPUSigmoid.cpp */; }; + 92FF037B23AA0B5A00AC97F6 /* CPUQuantizedConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01FC23AA0B5200AC97F6 /* CPUQuantizedConcat.cpp */; }; + 92FF037C23AA0B5A00AC97F6 /* CPULSTM.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF01FD23AA0B5200AC97F6 /* CPULSTM.hpp */; }; + 92FF037D23AA0B5A00AC97F6 /* CPURelu.cpp in Sources */ = {isa = 
PBXBuildFile; fileRef = 92FF01FE23AA0B5200AC97F6 /* CPURelu.cpp */; }; + 92FF037E23AA0B5A00AC97F6 /* CPUDetectionPostProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF01FF23AA0B5200AC97F6 /* CPUDetectionPostProcess.cpp */; }; + 92FF037F23AA0B5A00AC97F6 /* CPUSlice.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020023AA0B5300AC97F6 /* CPUSlice.hpp */; }; + 92FF038023AA0B5A00AC97F6 /* CPUPoolGrad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF020123AA0B5300AC97F6 /* CPUPoolGrad.cpp */; }; + 92FF038123AA0B5A00AC97F6 /* CPUNormalize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF020223AA0B5300AC97F6 /* CPUNormalize.cpp */; }; + 92FF038223AA0B5A00AC97F6 /* CPUSetDiff1D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF020323AA0B5300AC97F6 /* CPUSetDiff1D.cpp */; }; + 92FF038323AA0B5A00AC97F6 /* CPUSoftmax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF020423AA0B5300AC97F6 /* CPUSoftmax.cpp */; }; + 92FF038423AA0B5A00AC97F6 /* CPUBatchMatMul.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020523AA0B5300AC97F6 /* CPUBatchMatMul.hpp */; }; + 92FF038523AA0B5A00AC97F6 /* CPUMoments.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF020623AA0B5300AC97F6 /* CPUMoments.cpp */; }; + 92FF038623AA0B5A00AC97F6 /* CPULinSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF020723AA0B5300AC97F6 /* CPULinSpace.cpp */; }; + 92FF038723AA0B5A00AC97F6 /* CPUTensorConvert.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020823AA0B5300AC97F6 /* CPUTensorConvert.hpp */; }; + 92FF038823AA0B5A00AC97F6 /* CPUQuantizedLogistic.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020923AA0B5300AC97F6 /* CPUQuantizedLogistic.hpp */; }; + 92FF038923AA0B5A00AC97F6 /* CPUSigmoid.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020A23AA0B5300AC97F6 /* CPUSigmoid.hpp */; }; + 92FF038A23AA0B5A00AC97F6 /* CPURange.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF020B23AA0B5300AC97F6 /* CPURange.cpp */; }; + 
92FF038B23AA0B5A00AC97F6 /* CPUUnravelIndex.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020C23AA0B5500AC97F6 /* CPUUnravelIndex.hpp */; }; + 92FF038C23AA0B5A00AC97F6 /* CPUEltwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020D23AA0B5500AC97F6 /* CPUEltwise.hpp */; }; + 92FF038D23AA0B5A00AC97F6 /* CPUMatrixBandPart.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020E23AA0B5500AC97F6 /* CPUMatrixBandPart.hpp */; }; + 92FF038E23AA0B5A00AC97F6 /* CPUQuantizedReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF020F23AA0B5500AC97F6 /* CPUQuantizedReshape.hpp */; }; + 92FF038F23AA0B5A00AC97F6 /* CPUPriorbox.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF021023AA0B5500AC97F6 /* CPUPriorbox.hpp */; }; + 92FF039023AA0B5A00AC97F6 /* CPUGather.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF021123AA0B5600AC97F6 /* CPUGather.cpp */; }; + 92FF039123AA0B5A00AC97F6 /* CPUBackend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF021223AA0B5600AC97F6 /* CPUBackend.hpp */; }; + 92FF039223AA0B5A00AC97F6 /* CPUDeconvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF021323AA0B5600AC97F6 /* CPUDeconvolution.cpp */; }; + 92FF039323AA0B5A00AC97F6 /* CPUQuantizedAdd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF021423AA0B5600AC97F6 /* CPUQuantizedAdd.cpp */; }; + 92FF039423AA0B5A00AC97F6 /* CPUSpaceToBatchND.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF021523AA0B5600AC97F6 /* CPUSpaceToBatchND.hpp */; }; + 92FF039523AA0B5A00AC97F6 /* CPUSqueeze.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF021623AA0B5600AC97F6 /* CPUSqueeze.cpp */; }; + 92FF039623AA0B5A00AC97F6 /* CPUDepthwiseConvInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF021723AA0B5600AC97F6 /* CPUDepthwiseConvInt8.cpp */; }; + 92FF039723AA0B5A00AC97F6 /* CPUCrop.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF021823AA0B5600AC97F6 /* CPUCrop.hpp */; }; + 92FF039823AA0B5A00AC97F6 /* CPUThreshold.cpp in Sources */ = 
{isa = PBXBuildFile; fileRef = 92FF021923AA0B5600AC97F6 /* CPUThreshold.cpp */; }; + 92FF039923AA0B5A00AC97F6 /* CPUReshape.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF021A23AA0B5600AC97F6 /* CPUReshape.hpp */; }; + 92FF039A23AA0B5A00AC97F6 /* Convolution1x1Strassen.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF021C23AA0B5600AC97F6 /* Convolution1x1Strassen.hpp */; }; + 92FF039B23AA0B5A00AC97F6 /* CommonOptFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF021D23AA0B5600AC97F6 /* CommonOptFunction.cpp */; }; + 92FF039C23AA0B5A00AC97F6 /* Convolution3D3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF021E23AA0B5600AC97F6 /* Convolution3D3x3.cpp */; }; + 92FF039D23AA0B5A00AC97F6 /* StrassenMatmulComputor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF021F23AA0B5600AC97F6 /* StrassenMatmulComputor.cpp */; }; + 92FF039E23AA0B5A00AC97F6 /* Convolution3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022023AA0B5600AC97F6 /* Convolution3x3.cpp */; }; + 92FF039F23AA0B5A00AC97F6 /* CommonOptFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF022123AA0B5600AC97F6 /* CommonOptFunction.h */; }; + 92FF03A023AA0B5A00AC97F6 /* ConvolutionWinograd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022223AA0B5600AC97F6 /* ConvolutionWinograd.cpp */; }; + 92FF03A123AA0B5A00AC97F6 /* Int8FunctionsOpt.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022323AA0B5600AC97F6 /* Int8FunctionsOpt.cpp */; }; + 92FF03A223AA0B5A00AC97F6 /* ConvolutionWinograd3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022423AA0B5600AC97F6 /* ConvolutionWinograd3D.cpp */; }; + 92FF03A323AA0B5A00AC97F6 /* ConvOpt.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022523AA0B5600AC97F6 /* ConvOpt.cpp */; }; + 92FF03A423AA0B5A00AC97F6 /* OptimizedComputer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022623AA0B5600AC97F6 /* OptimizedComputer.cpp */; }; + 92FF03A523AA0B5A00AC97F6 /* DeconvolutionWithStride.hpp in 
Headers */ = {isa = PBXBuildFile; fileRef = 92FF022723AA0B5600AC97F6 /* DeconvolutionWithStride.hpp */; }; + 92FF03A623AA0B5A00AC97F6 /* ConvolutionTiledExecutor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF022823AA0B5600AC97F6 /* ConvolutionTiledExecutor.hpp */; }; + 92FF03A723AA0B5A00AC97F6 /* ConvolutionIntFactory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022923AA0B5600AC97F6 /* ConvolutionIntFactory.cpp */; }; + 92FF03A823AA0B5A00AC97F6 /* WinogradOptFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022A23AA0B5600AC97F6 /* WinogradOptFunction.cpp */; }; + 92FF03A923AA0B5A00AC97F6 /* ConvolutionGroup.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF022B23AA0B5600AC97F6 /* ConvolutionGroup.hpp */; }; + 92FF03AA23AA0B5A00AC97F6 /* ConvolutionFloatFactory.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF022C23AA0B5600AC97F6 /* ConvolutionFloatFactory.h */; }; + 92FF03AB23AA0B5A00AC97F6 /* ConvolutionInt8Executor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022D23AA0B5600AC97F6 /* ConvolutionInt8Executor.cpp */; }; + 92FF03AC23AA0B5A00AC97F6 /* ResizeFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF022E23AA0B5600AC97F6 /* ResizeFunction.h */; }; + 92FF03AD23AA0B5A00AC97F6 /* ConvolutionDepthwise3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF022F23AA0B5600AC97F6 /* ConvolutionDepthwise3x3.cpp */; }; + 92FF03AE23AA0B5A00AC97F6 /* ConvolutionIntFactory.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023023AA0B5600AC97F6 /* ConvolutionIntFactory.hpp */; }; + 92FF03AF23AA0B5A00AC97F6 /* WinogradOptFunction.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023123AA0B5600AC97F6 /* WinogradOptFunction.hpp */; }; + 92FF03B023AA0B5A00AC97F6 /* ConvolutionGroup.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF023223AA0B5600AC97F6 /* ConvolutionGroup.cpp */; }; + 92FF03B123AA0B5A00AC97F6 /* ConvolutionFloatFactory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
92FF023323AA0B5600AC97F6 /* ConvolutionFloatFactory.cpp */; }; + 92FF03B223AA0B5A00AC97F6 /* ConvolutionInt8Executor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023423AA0B5600AC97F6 /* ConvolutionInt8Executor.hpp */; }; + 92FF03B323AA0B5A00AC97F6 /* ConvolutionDepthwise3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023523AA0B5600AC97F6 /* ConvolutionDepthwise3x3.hpp */; }; + 92FF03B423AA0B5A00AC97F6 /* Convolution1x1Strassen.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF023623AA0B5600AC97F6 /* Convolution1x1Strassen.cpp */; }; + 92FF03B523AA0B5A00AC97F6 /* ResizeFunction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF023723AA0B5600AC97F6 /* ResizeFunction.cpp */; }; + 92FF03B623AA0B5A00AC97F6 /* StrassenMatmulComputor.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023823AA0B5600AC97F6 /* StrassenMatmulComputor.hpp */; }; + 92FF03B723AA0B5A00AC97F6 /* Convolution3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023923AA0B5600AC97F6 /* Convolution3x3.hpp */; }; + 92FF03B823AA0B5A00AC97F6 /* Convolution3D3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023A23AA0B5600AC97F6 /* Convolution3D3x3.hpp */; }; + 92FF03B923AA0B5A00AC97F6 /* ConvOpt.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023B23AA0B5600AC97F6 /* ConvOpt.h */; }; + 92FF03BA23AA0B5A00AC97F6 /* ConvolutionWinograd.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023C23AA0B5600AC97F6 /* ConvolutionWinograd.hpp */; }; + 92FF03BB23AA0B5A00AC97F6 /* ConvolutionWinograd3D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023D23AA0B5600AC97F6 /* ConvolutionWinograd3D.hpp */; }; + 92FF03BC23AA0B5A00AC97F6 /* OptimizedComputer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023E23AA0B5600AC97F6 /* OptimizedComputer.hpp */; }; + 92FF03BD23AA0B5A00AC97F6 /* Int8FunctionsOpt.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF023F23AA0B5600AC97F6 /* Int8FunctionsOpt.h */; }; + 92FF03BE23AA0B5A00AC97F6 /* 
DeconvolutionWithStride.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF024023AA0B5600AC97F6 /* DeconvolutionWithStride.cpp */; }; + 92FF03BF23AA0B5A00AC97F6 /* ConvolutionTiledExecutor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF024123AA0B5600AC97F6 /* ConvolutionTiledExecutor.cpp */; }; + 92FF03C023AA0B5A00AC97F6 /* CPUPack.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024223AA0B5600AC97F6 /* CPUPack.hpp */; }; + 92FF03C123AA0B5A00AC97F6 /* CPURank.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF024323AA0B5600AC97F6 /* CPURank.cpp */; }; + 92FF03C223AA0B5A00AC97F6 /* CPUTile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF024423AA0B5600AC97F6 /* CPUTile.cpp */; }; + 92FF03C323AA0B5A00AC97F6 /* CPUEltwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF024523AA0B5700AC97F6 /* CPUEltwise.cpp */; }; + 92FF03C423AA0B5A00AC97F6 /* CPUInterp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF024623AA0B5700AC97F6 /* CPUInterp.cpp */; }; + 92FF03C523AA0B5A00AC97F6 /* CPUReduceJoin.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024723AA0B5700AC97F6 /* CPUReduceJoin.hpp */; }; + 92FF03C623AA0B5A00AC97F6 /* CPUNonMaxSuppressionV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024823AA0B5700AC97F6 /* CPUNonMaxSuppressionV2.hpp */; }; + 92FF03C723AA0B5A00AC97F6 /* CPUTranspose.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024923AA0B5700AC97F6 /* CPUTranspose.hpp */; }; + 92FF03C823AA0B5A00AC97F6 /* CPUNormalize.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024A23AA0B5700AC97F6 /* CPUNormalize.hpp */; }; + 92FF03C923AA0B5A00AC97F6 /* CPUMatMul.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024B23AA0B5700AC97F6 /* CPUMatMul.hpp */; }; + 92FF03CA23AA0B5A00AC97F6 /* CPUConvolutionDepthwise.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024C23AA0B5700AC97F6 /* CPUConvolutionDepthwise.hpp */; }; + 92FF03CB23AA0B5A00AC97F6 /* CPUGatherV2.cpp in Sources */ = {isa = PBXBuildFile; 
fileRef = 92FF024D23AA0B5700AC97F6 /* CPUGatherV2.cpp */; }; + 92FF03CC23AA0B5A00AC97F6 /* CPUDepthToSpace.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024E23AA0B5700AC97F6 /* CPUDepthToSpace.hpp */; }; + 92FF03CD23AA0B5A00AC97F6 /* CPUConvInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF024F23AA0B5900AC97F6 /* CPUConvInt8.hpp */; }; + 92FF03CE23AA0B5A00AC97F6 /* CPUOPRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF025023AA0B5900AC97F6 /* CPUOPRegister.cpp */; }; + 92FF03CF23AA0B5A00AC97F6 /* CPUCosineSimilarity.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF025123AA0B5900AC97F6 /* CPUCosineSimilarity.hpp */; }; + 92FF03D023AA0B5A00AC97F6 /* CPUTensorConvert.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF025223AA0B5900AC97F6 /* CPUTensorConvert.cpp */; }; + 92FF03D123AA0B5A00AC97F6 /* CPUTopKV2.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF025323AA0B5900AC97F6 /* CPUTopKV2.hpp */; }; + 92FF03D223AA0B5A00AC97F6 /* CPUReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF025423AA0B5900AC97F6 /* CPUReshape.cpp */; }; + 92FF041923AA0B7100AC97F6 /* ShapeQuantizedMaxPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03D323AA0B6C00AC97F6 /* ShapeQuantizedMaxPool.cpp */; }; + 92FF041A23AA0B7100AC97F6 /* ShapeFill.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03D423AA0B6C00AC97F6 /* ShapeFill.cpp */; }; + 92FF041B23AA0B7100AC97F6 /* ShapeUnpack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03D523AA0B6C00AC97F6 /* ShapeUnpack.cpp */; }; + 92FF041C23AA0B7100AC97F6 /* ShapeNonMaxSuppressionV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03D623AA0B6C00AC97F6 /* ShapeNonMaxSuppressionV2.cpp */; }; + 92FF041D23AA0B7100AC97F6 /* ShapePool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03D723AA0B6C00AC97F6 /* ShapePool.cpp */; }; + 92FF041E23AA0B7100AC97F6 /* ShapeRange.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03D823AA0B6C00AC97F6 /* ShapeRange.cpp */; 
}; + 92FF041F23AA0B7100AC97F6 /* ShapeRank.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03D923AA0B6C00AC97F6 /* ShapeRank.cpp */; }; + 92FF042023AA0B7100AC97F6 /* ShapePack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03DA23AA0B6D00AC97F6 /* ShapePack.cpp */; }; + 92FF042123AA0B7100AC97F6 /* ShapeDeconvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03DB23AA0B6D00AC97F6 /* ShapeDeconvolution.cpp */; }; + 92FF042223AA0B7100AC97F6 /* ShapeConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03DC23AA0B6D00AC97F6 /* ShapeConcat.cpp */; }; + 92FF042323AA0B7100AC97F6 /* ShapeScatterNd.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03DD23AA0B6D00AC97F6 /* ShapeScatterNd.cpp */; }; + 92FF042423AA0B7100AC97F6 /* ShapeROIPooling.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03DE23AA0B6D00AC97F6 /* ShapeROIPooling.cpp */; }; + 92FF042523AA0B7100AC97F6 /* ShapeSize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03DF23AA0B6D00AC97F6 /* ShapeSize.cpp */; }; + 92FF042623AA0B7100AC97F6 /* ShapeCosineSimilarity.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E023AA0B6D00AC97F6 /* ShapeCosineSimilarity.cpp */; }; + 92FF042723AA0B7100AC97F6 /* ShapeMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E123AA0B6D00AC97F6 /* ShapeMatMul.cpp */; }; + 92FF042823AA0B7100AC97F6 /* ShapeInterp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E223AA0B6D00AC97F6 /* ShapeInterp.cpp */; }; + 92FF042923AA0B7100AC97F6 /* ShapeLinSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E323AA0B6D00AC97F6 /* ShapeLinSpace.cpp */; }; + 92FF042A23AA0B7100AC97F6 /* ShapeBatchMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E423AA0B6D00AC97F6 /* ShapeBatchMatMul.cpp */; }; + 92FF042B23AA0B7100AC97F6 /* ShapeOneHot.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E523AA0B6D00AC97F6 /* ShapeOneHot.cpp */; }; + 92FF042C23AA0B7100AC97F6 /* ShapeReduceJoin.cpp in Sources */ = {isa = 
PBXBuildFile; fileRef = 92FF03E623AA0B6D00AC97F6 /* ShapeReduceJoin.cpp */; }; + 92FF042D23AA0B7100AC97F6 /* ShapePadding.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E723AA0B6D00AC97F6 /* ShapePadding.cpp */; }; + 92FF042E23AA0B7100AC97F6 /* ShapeProposal.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E823AA0B6D00AC97F6 /* ShapeProposal.cpp */; }; + 92FF042F23AA0B7100AC97F6 /* ShapeSliceTf.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03E923AA0B6D00AC97F6 /* ShapeSliceTf.cpp */; }; + 92FF043023AA0B7100AC97F6 /* ShapeQuantizedAvgPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03EA23AA0B6D00AC97F6 /* ShapeQuantizedAvgPool.cpp */; }; + 92FF043123AA0B7100AC97F6 /* ShapeSpaceToBatchND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03EB23AA0B6E00AC97F6 /* ShapeSpaceToBatchND.cpp */; }; + 92FF043223AA0B7100AC97F6 /* ShapeWhere.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03EC23AA0B6E00AC97F6 /* ShapeWhere.cpp */; }; + 92FF043323AA0B7100AC97F6 /* ShapeCrop.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03ED23AA0B6E00AC97F6 /* ShapeCrop.cpp */; }; + 92FF043423AA0B7100AC97F6 /* ShapeStridedSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03EE23AA0B6E00AC97F6 /* ShapeStridedSlice.cpp */; }; + 92FF043523AA0B7100AC97F6 /* ShapeConvolution3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03EF23AA0B6E00AC97F6 /* ShapeConvolution3D.cpp */; }; + 92FF043623AA0B7100AC97F6 /* ShapeSelect.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F023AA0B6E00AC97F6 /* ShapeSelect.cpp */; }; + 92FF043723AA0B7100AC97F6 /* ShapeDetectionOutput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F123AA0B6E00AC97F6 /* ShapeDetectionOutput.cpp */; }; + 92FF043823AA0B7100AC97F6 /* ShapeUnravelIndex.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F223AA0B6E00AC97F6 /* ShapeUnravelIndex.cpp */; }; + 92FF043923AA0B7100AC97F6 /* ShapeDequantize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
92FF03F323AA0B6E00AC97F6 /* ShapeDequantize.cpp */; }; + 92FF043A23AA0B7100AC97F6 /* ShapePermute.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F423AA0B6E00AC97F6 /* ShapePermute.cpp */; }; + 92FF043B23AA0B7100AC97F6 /* ShapeDetectionPostProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F523AA0B6E00AC97F6 /* ShapeDetectionPostProcess.cpp */; }; + 92FF043C23AA0B7100AC97F6 /* ShapeExpandDims.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F623AA0B6E00AC97F6 /* ShapeExpandDims.cpp */; }; + 92FF043D23AA0B7100AC97F6 /* ShapeGatherV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F723AA0B6E00AC97F6 /* ShapeGatherV2.cpp */; }; + 92FF043E23AA0B7100AC97F6 /* ShapeBatchToSpaceND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F823AA0B6E00AC97F6 /* ShapeBatchToSpaceND.cpp */; }; + 92FF043F23AA0B7100AC97F6 /* ShapeTensorConvert.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03F923AA0B6F00AC97F6 /* ShapeTensorConvert.cpp */; }; + 92FF044023AA0B7100AC97F6 /* ShapeSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03FA23AA0B6F00AC97F6 /* ShapeSlice.cpp */; }; + 92FF044123AA0B7100AC97F6 /* ShapeMoments.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03FB23AA0B6F00AC97F6 /* ShapeMoments.cpp */; }; + 92FF044223AA0B7100AC97F6 /* ShapeQuantizedReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03FC23AA0B6F00AC97F6 /* ShapeQuantizedReshape.cpp */; }; + 92FF044323AA0B7100AC97F6 /* ShapeTopKV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03FD23AA0B6F00AC97F6 /* ShapeTopKV2.cpp */; }; + 92FF044423AA0B7100AC97F6 /* ShapeLSTM.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03FE23AA0B6F00AC97F6 /* ShapeLSTM.cpp */; }; + 92FF044523AA0B7100AC97F6 /* ShapeSpaceToDepth.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF03FF23AA0B6F00AC97F6 /* ShapeSpaceToDepth.cpp */; }; + 92FF044623AA0B7100AC97F6 /* ShapeInnerProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
92FF040023AA0B6F00AC97F6 /* ShapeInnerProduct.cpp */; }; + 92FF044723AA0B7100AC97F6 /* ShapeSqueeze.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040123AA0B6F00AC97F6 /* ShapeSqueeze.cpp */; }; + 92FF044823AA0B7100AC97F6 /* ShapeGather.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040223AA0B6F00AC97F6 /* ShapeGather.cpp */; }; + 92FF044923AA0B7100AC97F6 /* ShapeGatherND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040323AA0B6F00AC97F6 /* ShapeGatherND.cpp */; }; + 92FF044A23AA0B7100AC97F6 /* ShapeConvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040423AA0B6F00AC97F6 /* ShapeConvolution.cpp */; }; + 92FF044B23AA0B7100AC97F6 /* ShapeTile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040523AA0B6F00AC97F6 /* ShapeTile.cpp */; }; + 92FF044C23AA0B7100AC97F6 /* ShapePool3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040623AA0B6F00AC97F6 /* ShapePool3D.cpp */; }; + 92FF044D23AA0B7100AC97F6 /* ShapeConst.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040723AA0B6F00AC97F6 /* ShapeConst.cpp */; }; + 92FF044E23AA0B7100AC97F6 /* ShapeEltwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040823AA0B7000AC97F6 /* ShapeEltwise.cpp */; }; + 92FF044F23AA0B7100AC97F6 /* ShapeDepthToSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040923AA0B7000AC97F6 /* ShapeDepthToSpace.cpp */; }; + 92FF045023AA0B7100AC97F6 /* ShapeCropAndResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040A23AA0B7000AC97F6 /* ShapeCropAndResize.cpp */; }; + 92FF045123AA0B7100AC97F6 /* ShapeArgMax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040B23AA0B7000AC97F6 /* ShapeArgMax.cpp */; }; + 92FF045223AA0B7100AC97F6 /* ShapeResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040C23AA0B7000AC97F6 /* ShapeResize.cpp */; }; + 92FF045323AA0B7100AC97F6 /* ShapeAsString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040D23AA0B7000AC97F6 /* ShapeAsString.cpp */; }; + 92FF045423AA0B7100AC97F6 
/* ShapeRNNSequenceGRU.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040E23AA0B7000AC97F6 /* ShapeRNNSequenceGRU.cpp */; }; + 92FF045523AA0B7100AC97F6 /* ShapePriorbox.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF040F23AA0B7000AC97F6 /* ShapePriorbox.cpp */; }; + 92FF045623AA0B7100AC97F6 /* ShapeReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041023AA0B7000AC97F6 /* ShapeReshape.cpp */; }; + 92FF045723AA0B7100AC97F6 /* ShapeTranspose.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041123AA0B7000AC97F6 /* ShapeTranspose.cpp */; }; + 92FF045823AA0B7100AC97F6 /* ShapeReduction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041223AA0B7000AC97F6 /* ShapeReduction.cpp */; }; + 92FF045923AA0B7100AC97F6 /* ShapeRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041323AA0B7000AC97F6 /* ShapeRegister.cpp */; }; + 92FF045A23AA0B7100AC97F6 /* ShapeBinaryOp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041423AA0B7000AC97F6 /* ShapeBinaryOp.cpp */; }; + 92FF045B23AA0B7100AC97F6 /* ShapeShape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041523AA0B7000AC97F6 /* ShapeShape.cpp */; }; + 92FF045C23AA0B7100AC97F6 /* ShapeBroadcastTo.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041623AA0B7000AC97F6 /* ShapeBroadcastTo.cpp */; }; + 92FF045D23AA0B7100AC97F6 /* ShapeCast.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041723AA0B7100AC97F6 /* ShapeCast.cpp */; }; + 92FF045E23AA0B7100AC97F6 /* ShapeTFQuantizedConv2D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF041823AA0B7100AC97F6 /* ShapeTFQuantizedConv2D.cpp */; }; + 92FF04A423AA0BFB00AC97F6 /* Interpreter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF048323AA0BF900AC97F6 /* Interpreter.cpp */; }; + 92FF04A523AA0BFB00AC97F6 /* AutoStorage.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF048423AA0BF900AC97F6 /* AutoStorage.h */; }; + 92FF04A623AA0BFB00AC97F6 /* FileLoader.hpp in Headers */ = {isa = PBXBuildFile; fileRef 
= 92FF048523AA0BFA00AC97F6 /* FileLoader.hpp */; }; + 92FF04A723AA0BFB00AC97F6 /* BackendRegister.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF048623AA0BFA00AC97F6 /* BackendRegister.cpp */; }; + 92FF04A823AA0BFB00AC97F6 /* AutoTime.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF048723AA0BFA00AC97F6 /* AutoTime.cpp */; }; + 92FF04A923AA0BFB00AC97F6 /* Schedule.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF048823AA0BFA00AC97F6 /* Schedule.hpp */; }; + 92FF04AA23AA0BFB00AC97F6 /* BufferAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF048923AA0BFA00AC97F6 /* BufferAllocator.cpp */; }; + 92FF04AB23AA0BFB00AC97F6 /* Pipeline.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF048A23AA0BFA00AC97F6 /* Pipeline.hpp */; }; + 92FF04AC23AA0BFB00AC97F6 /* BackendFactory.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF048B23AA0BFA00AC97F6 /* BackendFactory.hpp */; }; + 92FF04AD23AA0BFB00AC97F6 /* Execution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF048C23AA0BFA00AC97F6 /* Execution.hpp */; }; + 92FF04AE23AA0BFB00AC97F6 /* Backend.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF048D23AA0BFA00AC97F6 /* Backend.cpp */; }; + 92FF04AF23AA0BFB00AC97F6 /* Macro.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF048E23AA0BFA00AC97F6 /* Macro.h */; }; + 92FF04B023AA0BFB00AC97F6 /* SizeComputer.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF048F23AA0BFA00AC97F6 /* SizeComputer.cpp */; }; + 92FF04B123AA0BFB00AC97F6 /* DirectedAcyclicGraph.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF049023AA0BFA00AC97F6 /* DirectedAcyclicGraph.hpp */; }; + 92FF04B223AA0BFB00AC97F6 /* BackendFactory.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF049123AA0BFA00AC97F6 /* BackendFactory.cpp */; }; + 92FF04B323AA0BFB00AC97F6 /* Schedule.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF049223AA0BFA00AC97F6 /* Schedule.cpp */; }; + 92FF04B423AA0BFB00AC97F6 /* MNNMemoryUtils.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 92FF049323AA0BFA00AC97F6 /* MNNMemoryUtils.h */; }; + 92FF04B523AA0BFB00AC97F6 /* TensorUtils.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF049423AA0BFA00AC97F6 /* TensorUtils.hpp */; }; + 92FF04B623AA0BFB00AC97F6 /* TensorUtils.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF049523AA0BFA00AC97F6 /* TensorUtils.cpp */; }; + 92FF04B723AA0BFB00AC97F6 /* WrapExecution.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF049623AA0BFA00AC97F6 /* WrapExecution.hpp */; }; + 92FF04B823AA0BFB00AC97F6 /* MNNMemoryUtils.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF049723AA0BFB00AC97F6 /* MNNMemoryUtils.cpp */; }; + 92FF04B923AA0BFB00AC97F6 /* SizeComputer.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF049823AA0BFB00AC97F6 /* SizeComputer.hpp */; }; + 92FF04BA23AA0BFB00AC97F6 /* WrapExecution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF049923AA0BFB00AC97F6 /* WrapExecution.cpp */; }; + 92FF04BB23AA0BFB00AC97F6 /* BufferAllocator.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF049A23AA0BFB00AC97F6 /* BufferAllocator.hpp */; }; + 92FF04BC23AA0BFB00AC97F6 /* NonCopyable.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF049B23AA0BFB00AC97F6 /* NonCopyable.hpp */; }; + 92FF04BD23AA0BFB00AC97F6 /* Execution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF049C23AA0BFB00AC97F6 /* Execution.cpp */; }; + 92FF04BE23AA0BFB00AC97F6 /* FileLoader.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF049D23AA0BFB00AC97F6 /* FileLoader.cpp */; }; + 92FF04BF23AA0BFB00AC97F6 /* Concurrency.h in Headers */ = {isa = PBXBuildFile; fileRef = 92FF049E23AA0BFB00AC97F6 /* Concurrency.h */; }; + 92FF04C023AA0BFB00AC97F6 /* Tensor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF049F23AA0BFB00AC97F6 /* Tensor.cpp */; }; + 92FF04C123AA0BFB00AC97F6 /* Backend.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF04A023AA0BFB00AC97F6 /* Backend.hpp */; }; + 92FF04C223AA0BFB00AC97F6 /* Pipeline.cpp in Sources 
*/ = {isa = PBXBuildFile; fileRef = 92FF04A123AA0BFB00AC97F6 /* Pipeline.cpp */; }; + 92FF04C323AA0BFB00AC97F6 /* Session.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 92FF04A223AA0BFB00AC97F6 /* Session.hpp */; }; + 92FF04C423AA0BFB00AC97F6 /* Session.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92FF04A323AA0BFB00AC97F6 /* Session.cpp */; }; AE7BE4BD22855665002CEEA6 /* MetalOPRegister.mm in Sources */ = {isa = PBXBuildFile; fileRef = AE7BE4BC22855665002CEEA6 /* MetalOPRegister.mm */; }; - AEC3B31F211BEF710046AD86 /* MNNDefine.h in Headers */ = {isa = PBXBuildFile; fileRef = AEC3B31E211BEF710046AD86 /* MNNDefine.h */; settings = {ATTRIBUTES = (Public, ); }; }; - C422D738232634DD00FD59D0 /* ShapeConvolution3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C422D737232634DD00FD59D0 /* ShapeConvolution3D.cpp */; }; - C422D73E2326449500FD59D0 /* ShapePool3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C422D73D2326449500FD59D0 /* ShapePool3D.cpp */; }; - C422D768232F567300FD59D0 /* CPUConvolution3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C422D766232F567300FD59D0 /* CPUConvolution3D.cpp */; }; - C422D769232F567300FD59D0 /* CPUConvolution3D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = C422D767232F567300FD59D0 /* CPUConvolution3D.hpp */; }; - C422D7BA23320B2B00FD59D0 /* Convolution3D3x3.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C422D7B823320B2900FD59D0 /* Convolution3D3x3.cpp */; }; - C422D7BB23320B2B00FD59D0 /* Convolution3D3x3.hpp in Headers */ = {isa = PBXBuildFile; fileRef = C422D7B923320B2A00FD59D0 /* Convolution3D3x3.hpp */; }; - C422D7C22339D0EE00FD59D0 /* CPUElu.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C422D7C02339D0EC00FD59D0 /* CPUElu.cpp */; }; - C422D7C32339D0EE00FD59D0 /* CPUElu.hpp in Headers */ = {isa = PBXBuildFile; fileRef = C422D7C12339D0ED00FD59D0 /* CPUElu.hpp */; }; - C422D7C6233A0F0F00FD59D0 /* ConvolutionWinograd3D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C422D7C4233A0F0E00FD59D0 /* 
ConvolutionWinograd3D.cpp */; }; - C422D7C7233A0F0F00FD59D0 /* ConvolutionWinograd3D.hpp in Headers */ = {isa = PBXBuildFile; fileRef = C422D7C5233A0F0E00FD59D0 /* ConvolutionWinograd3D.hpp */; }; - C425F8742292A31F00B4682D /* MNNPowC8.S in Sources */ = {isa = PBXBuildFile; fileRef = C425F8732292A31D00B4682D /* MNNPowC8.S */; }; - C425F8762292A32B00B4682D /* MNNPowC8.S in Sources */ = {isa = PBXBuildFile; fileRef = C425F8752292A32A00B4682D /* MNNPowC8.S */; }; CE96FE7121707D58004AB400 /* MetalTensorConverter.mm in Sources */ = {isa = PBXBuildFile; fileRef = CE96FE5F21707D57004AB400 /* MetalTensorConverter.mm */; }; - CE96FE7221707D58004AB400 /* MetalUnary.hpp in Headers */ = {isa = PBXBuildFile; fileRef = CE96FE6021707D57004AB400 /* MetalUnary.hpp */; }; - CE96FE7321707D58004AB400 /* MetalSigmoid.hpp in Headers */ = {isa = PBXBuildFile; fileRef = CE96FE6121707D57004AB400 /* MetalSigmoid.hpp */; }; - CE96FE7421707D58004AB400 /* MetalTensorConverter.hpp in Headers */ = {isa = PBXBuildFile; fileRef = CE96FE6221707D57004AB400 /* MetalTensorConverter.hpp */; }; CE96FE7521707D58004AB400 /* MetalMatMul.mm in Sources */ = {isa = PBXBuildFile; fileRef = CE96FE6321707D57004AB400 /* MetalMatMul.mm */; }; - CE96FE7721707D58004AB400 /* MetalMatMul.hpp in Headers */ = {isa = PBXBuildFile; fileRef = CE96FE6521707D57004AB400 /* MetalMatMul.hpp */; }; CE96FE7821707D58004AB400 /* MetalUnary.mm in Sources */ = {isa = PBXBuildFile; fileRef = CE96FE6621707D57004AB400 /* MetalUnary.mm */; }; CE96FE7B21707D58004AB400 /* MetalUnary.metal in Sources */ = {isa = PBXBuildFile; fileRef = CE96FE6921707D58004AB400 /* MetalUnary.metal */; }; CE96FE7E21707D58004AB400 /* MetalSigmoid.mm in Sources */ = {isa = PBXBuildFile; fileRef = CE96FE6C21707D58004AB400 /* MetalSigmoid.mm */; }; CE96FE7F21707D58004AB400 /* MetalSigmoid.metal in Sources */ = {isa = PBXBuildFile; fileRef = CE96FE6D21707D58004AB400 /* MetalSigmoid.metal */; }; CE96FE8121707D58004AB400 /* MetalMatMul.metal in Sources */ = {isa = 
PBXBuildFile; fileRef = CE96FE6F21707D58004AB400 /* MetalMatMul.metal */; }; - EB288362230EAF6C00837188 /* CPUEltwiseInt8.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB288360230EAF6C00837188 /* CPUEltwiseInt8.cpp */; }; - EB288363230EAF6C00837188 /* CPUEltwiseInt8.hpp in Headers */ = {isa = PBXBuildFile; fileRef = EB288361230EAF6C00837188 /* CPUEltwiseInt8.hpp */; }; - EB288365230EB05D00837188 /* MNNScaleAddInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = EB288364230EB05C00837188 /* MNNScaleAddInt8.S */; }; - EB288367230EB06600837188 /* MNNScaleAddInt8.S in Sources */ = {isa = PBXBuildFile; fileRef = EB288366230EB06600837188 /* MNNScaleAddInt8.S */; }; - EB4925B4224A146000C512BB /* ShapeBatchMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB4925B2224A146000C512BB /* ShapeBatchMatMul.cpp */; }; - EB4925B5224A146000C512BB /* ShapeRNNSequenceGRU.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB4925B3224A146000C512BB /* ShapeRNNSequenceGRU.cpp */; }; - EB4925BE224A147E00C512BB /* CPUInstanceNorm.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB4925B6224A147E00C512BB /* CPUInstanceNorm.cpp */; }; - EB4925BF224A147E00C512BB /* CPURNNSequenceGRU.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB4925B7224A147E00C512BB /* CPURNNSequenceGRU.cpp */; }; - EB4925C0224A147E00C512BB /* CPUMoments.hpp in Headers */ = {isa = PBXBuildFile; fileRef = EB4925B8224A147E00C512BB /* CPUMoments.hpp */; }; - EB4925C1224A147E00C512BB /* CPURNNSequenceGRU.hpp in Headers */ = {isa = PBXBuildFile; fileRef = EB4925B9224A147E00C512BB /* CPURNNSequenceGRU.hpp */; }; - EB4925C2224A147E00C512BB /* CPUBatchMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB4925BA224A147E00C512BB /* CPUBatchMatMul.cpp */; }; - EB4925C3224A147E00C512BB /* CPUMoments.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB4925BB224A147E00C512BB /* CPUMoments.cpp */; }; - EB4925C4224A147E00C512BB /* CPUInstanceNorm.hpp in Headers */ = {isa = PBXBuildFile; fileRef = 
EB4925BC224A147E00C512BB /* CPUInstanceNorm.hpp */; }; - EB4925C5224A147E00C512BB /* CPUBatchMatMul.hpp in Headers */ = {isa = PBXBuildFile; fileRef = EB4925BD224A147E00C512BB /* CPUBatchMatMul.hpp */; }; - EB69637522E070E10065993C /* CPUCosineSimilarity.hpp in Headers */ = {isa = PBXBuildFile; fileRef = EB69637322E070E00065993C /* CPUCosineSimilarity.hpp */; }; - EB69637622E070E10065993C /* CPUCosineSimilarity.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB69637422E070E00065993C /* CPUCosineSimilarity.cpp */; }; - EB69637822E072600065993C /* ShapeCosineSimilarity.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EB69637722E072600065993C /* ShapeCosineSimilarity.cpp */; }; EBAFCE672231133F000D4EF4 /* QuantizedAddTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBAFCE662231133F000D4EF4 /* QuantizedAddTest.cpp */; }; - EBB38F0021E748B9005F76D7 /* ShapeShape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EC721E748B9005F76D7 /* ShapeShape.cpp */; }; - EBB38F0121E748B9005F76D7 /* ShapePriorbox.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EC821E748B9005F76D7 /* ShapePriorbox.cpp */; }; - EBB38F0221E748B9005F76D7 /* ShapeGather.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EC921E748B9005F76D7 /* ShapeGather.cpp */; }; - EBB38F0321E748B9005F76D7 /* ShapeAsString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ECA21E748B9005F76D7 /* ShapeAsString.cpp */; }; - EBB38F0421E748B9005F76D7 /* ShapeInterp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ECB21E748B9005F76D7 /* ShapeInterp.cpp */; }; - EBB38F0521E748B9005F76D7 /* ShapeCrop.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ECC21E748B9005F76D7 /* ShapeCrop.cpp */; }; - EBB38F0621E748B9005F76D7 /* ShapeMatMul.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ECD21E748B9005F76D7 /* ShapeMatMul.cpp */; }; - EBB38F0721E748B9005F76D7 /* ShapeTFQuantizedConv2D.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ECE21E748B9005F76D7 /* 
ShapeTFQuantizedConv2D.cpp */; }; - EBB38F0821E748B9005F76D7 /* ShapeReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ECF21E748B9005F76D7 /* ShapeReshape.cpp */; }; - EBB38F0921E748B9005F76D7 /* ShapeStridedSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ED021E748B9005F76D7 /* ShapeStridedSlice.cpp */; }; - EBB38F0A21E748B9005F76D7 /* ShapeQuantizedReshape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ED121E748B9005F76D7 /* ShapeQuantizedReshape.cpp */; }; - EBB38F0B21E748B9005F76D7 /* ShapePool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ED221E748B9005F76D7 /* ShapePool.cpp */; }; - EBB38F0C21E748B9005F76D7 /* ShapeInnerProduct.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ED321E748B9005F76D7 /* ShapeInnerProduct.cpp */; }; - EBB38F0D21E748B9005F76D7 /* ShapeReduction.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ED421E748B9005F76D7 /* ShapeReduction.cpp */; }; - EBB38F0E21E748B9005F76D7 /* ShapeQuantizedAvgPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ED521E748B9005F76D7 /* ShapeQuantizedAvgPool.cpp */; }; - EBB38F0F21E748B9005F76D7 /* ShapeArgMax.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ED621E748B9005F76D7 /* ShapeArgMax.cpp */; }; - EBB38F1121E748B9005F76D7 /* ShapeSliceTf.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38ED821E748B9005F76D7 /* ShapeSliceTf.cpp */; }; - EBB38F1321E748B9005F76D7 /* ShapeResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EDA21E748B9005F76D7 /* ShapeResize.cpp */; }; - EBB38F1421E748B9005F76D7 /* ShapeTranspose.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EDB21E748B9005F76D7 /* ShapeTranspose.cpp */; }; - EBB38F1521E748B9005F76D7 /* ShapeWhere.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EDC21E748B9005F76D7 /* ShapeWhere.cpp */; }; - EBB38F1621E748B9005F76D7 /* ShapeSqueeze.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EDD21E748B9005F76D7 /* ShapeSqueeze.cpp */; }; - 
EBB38F1721E748B9005F76D7 /* ShapeBinaryOp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EDE21E748B9005F76D7 /* ShapeBinaryOp.cpp */; }; - EBB38F1821E748B9005F76D7 /* ShapeNonMaxSuppressionV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EDF21E748B9005F76D7 /* ShapeNonMaxSuppressionV2.cpp */; }; - EBB38F1921E748B9005F76D7 /* ShapeGatherV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE021E748B9005F76D7 /* ShapeGatherV2.cpp */; }; - EBB38F1A21E748B9005F76D7 /* ShapeConcat.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE121E748B9005F76D7 /* ShapeConcat.cpp */; }; - EBB38F1B21E748B9005F76D7 /* ShapeCast.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE221E748B9005F76D7 /* ShapeCast.cpp */; }; - EBB38F1C21E748B9005F76D7 /* ShapeBatchToSpaceND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE321E748B9005F76D7 /* ShapeBatchToSpaceND.cpp */; }; - EBB38F1D21E748B9005F76D7 /* ShapeTile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE421E748B9005F76D7 /* ShapeTile.cpp */; }; - EBB38F1E21E748B9005F76D7 /* ShapeEltwise.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE521E748B9005F76D7 /* ShapeEltwise.cpp */; }; - EBB38F1F21E748B9005F76D7 /* ShapeConst.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE621E748B9005F76D7 /* ShapeConst.cpp */; }; - EBB38F2021E748B9005F76D7 /* ShapeDetectionOutput.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE721E748B9005F76D7 /* ShapeDetectionOutput.cpp */; }; - EBB38F2121E748B9005F76D7 /* ShapeSize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE821E748B9005F76D7 /* ShapeSize.cpp */; }; - EBB38F2221E748B9005F76D7 /* ShapeTensorConvert.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EE921E748B9005F76D7 /* ShapeTensorConvert.cpp */; }; - EBB38F2321E748B9005F76D7 /* ShapePermute.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EEA21E748B9005F76D7 /* ShapePermute.cpp */; }; - EBB38F2421E748B9005F76D7 /* ShapeExpandDims.cpp in 
Sources */ = {isa = PBXBuildFile; fileRef = EBB38EEB21E748B9005F76D7 /* ShapeExpandDims.cpp */; }; - EBB38F2521E748B9005F76D7 /* ShapeROIPooling.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EEC21E748B9005F76D7 /* ShapeROIPooling.cpp */; }; - EBB38F2A21E748B9005F76D7 /* ShapeCropAndResize.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF121E748B9005F76D7 /* ShapeCropAndResize.cpp */; }; - EBB38F2B21E748B9005F76D7 /* ShapeQuantizedMaxPool.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF221E748B9005F76D7 /* ShapeQuantizedMaxPool.cpp */; }; - EBB38F2C21E748B9005F76D7 /* ShapeRange.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF321E748B9005F76D7 /* ShapeRange.cpp */; }; - EBB38F2D21E748B9005F76D7 /* ShapeUnpack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF421E748B9005F76D7 /* ShapeUnpack.cpp */; }; - EBB38F2E21E748B9005F76D7 /* ShapeTopKV2.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF521E748B9005F76D7 /* ShapeTopKV2.cpp */; }; - EBB38F2F21E748B9005F76D7 /* ShapeFill.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF621E748B9005F76D7 /* ShapeFill.cpp */; }; - EBB38F3021E748B9005F76D7 /* ShapeProposal.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF721E748B9005F76D7 /* ShapeProposal.cpp */; }; - EBB38F3121E748B9005F76D7 /* ShapeConvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF821E748B9005F76D7 /* ShapeConvolution.cpp */; }; - EBB38F3221E748B9005F76D7 /* ShapeRank.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EF921E748B9005F76D7 /* ShapeRank.cpp */; }; - EBB38F3321E748B9005F76D7 /* ShapeLSTM.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFA21E748B9005F76D7 /* ShapeLSTM.cpp */; }; - EBB38F3421E748B9005F76D7 /* ShapeSlice.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFB21E748B9005F76D7 /* ShapeSlice.cpp */; }; - EBB38F3521E748B9005F76D7 /* ShapeReduceJoin.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFC21E748B9005F76D7 /* 
ShapeReduceJoin.cpp */; }; - EBB38F3621E748B9005F76D7 /* ShapeSpaceToBatchND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFD21E748B9005F76D7 /* ShapeSpaceToBatchND.cpp */; }; - EBB38F3721E748B9005F76D7 /* ShapePack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFE21E748B9005F76D7 /* ShapePack.cpp */; }; - EBB38F3821E748B9005F76D7 /* ShapeDeconvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFF21E748B9005F76D7 /* ShapeDeconvolution.cpp */; }; - EBD9FF12236A939700E188F5 /* ShapeDetectionPostProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBD9FF11236A939700E188F5 /* ShapeDetectionPostProcess.cpp */; }; - EBD9FF15236A93AB00E188F5 /* CPUDetectionPostProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBD9FF13236A93AB00E188F5 /* CPUDetectionPostProcess.cpp */; }; - EBD9FF16236A93AB00E188F5 /* CPUDetectionPostProcess.hpp in Headers */ = {isa = PBXBuildFile; fileRef = EBD9FF14236A93AB00E188F5 /* CPUDetectionPostProcess.hpp */; }; /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ @@ -784,480 +1374,146 @@ 0F1465B71FA18D1000F9860A /* MNN.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = MNN.framework; sourceTree = BUILT_PRODUCTS_DIR; }; 0F1465BB1FA18D1000F9860A /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 0F78AC261FCD495800205A7C /* Metal.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Metal.framework; path = System/Library/Frameworks/Metal.framework; sourceTree = SDKROOT; }; - 11EDD60622E55A09007F3793 /* CPUDepthToSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDepthToSpace.cpp; sourceTree = ""; }; - 11EDD60722E55A09007F3793 /* CPUDepthToSpace.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDepthToSpace.hpp; sourceTree = 
""; }; - 11EDD60822E55A09007F3793 /* CPUSpaceToDepth.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSpaceToDepth.cpp; sourceTree = ""; }; - 11EDD60922E55A09007F3793 /* CPUSpaceToDepth.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSpaceToDepth.hpp; sourceTree = ""; }; - 4805294B2105BADB00AA776E /* MNNForwardType.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MNNForwardType.h; sourceTree = ""; }; - 480529612105DDA400AA776E /* Interpreter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Interpreter.hpp; sourceTree = ""; }; - 48057D842330A8F900F922BE /* ShapeGatherND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeGatherND.cpp; sourceTree = ""; }; - 48057D862330A90900F922BE /* CPUGatherND.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUGatherND.hpp; sourceTree = ""; }; - 48057D872330A90900F922BE /* CPUGatherND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUGatherND.cpp; sourceTree = ""; }; - 48057D8A2330E85C00F922BE /* CPUMatrixBandPart.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUMatrixBandPart.hpp; sourceTree = ""; }; - 48057D8B2330E85C00F922BE /* CPUMatrixBandPart.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUMatrixBandPart.cpp; sourceTree = ""; }; - 48070717231A7B5000528CE5 /* CPUReverseSequence.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReverseSequence.hpp; sourceTree = ""; }; - 48070718231A7B5000528CE5 /* CPUReverseSequence.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReverseSequence.cpp; sourceTree = 
""; }; - 4807071E231E512D00528CE5 /* NeuralNetWorkOp.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = NeuralNetWorkOp.hpp; sourceTree = ""; }; - 4807071F231E512D00528CE5 /* Expr.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Expr.hpp; sourceTree = ""; }; - 48070720231E512D00528CE5 /* MathOp.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MathOp.hpp; sourceTree = ""; }; - 48070721231E512D00528CE5 /* ExprCreator.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ExprCreator.hpp; sourceTree = ""; }; - 48070722231E512D00528CE5 /* Optimizer.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Optimizer.hpp; sourceTree = ""; }; - 48070724231E512D00528CE5 /* Optimizer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Optimizer.cpp; sourceTree = ""; }; - 48070725231E512D00528CE5 /* Utils.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Utils.cpp; sourceTree = ""; }; - 48070726231E512D00528CE5 /* Solution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Solution.cpp; sourceTree = ""; }; - 48070728231E512D00528CE5 /* MergeOptimizer.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MergeOptimizer.hpp; sourceTree = ""; }; - 48070729231E512D00528CE5 /* MergeOptimizer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MergeOptimizer.cpp; sourceTree = ""; }; - 4807072A231E512D00528CE5 /* InsideExpr.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = InsideExpr.hpp; sourceTree = ""; }; - 4807072B231E512D00528CE5 /* Expr.cpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Expr.cpp; sourceTree = ""; }; - 4807072C231E512D00528CE5 /* MathOp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MathOp.cpp; sourceTree = ""; }; - 4807072D231E512D00528CE5 /* InsideExpr.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = InsideExpr.cpp; sourceTree = ""; }; - 4807072E231E512D00528CE5 /* Utils.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Utils.hpp; sourceTree = ""; }; - 4807072F231E512D00528CE5 /* Solution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Solution.hpp; sourceTree = ""; }; - 48070730231E512D00528CE5 /* NeuralNetWorkOp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = NeuralNetWorkOp.cpp; sourceTree = ""; }; - 48070743231E52E300528CE5 /* BasicOptimizer_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BasicOptimizer_generated.h; sourceTree = ""; }; - 4821FA32216F214200B910CC /* MNNSharedContext.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MNNSharedContext.h; sourceTree = ""; }; - 48265468210ABA3000B2CFEA /* AutoTime.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = AutoTime.hpp; sourceTree = ""; }; - 4826546A210AF76D00B2CFEA /* HalideRuntime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HalideRuntime.h; sourceTree = ""; }; - 4829D54E22AF5C340093E3BE /* CPUSetDiff1D.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSetDiff1D.cpp; sourceTree = ""; }; - 4829D54F22AF5C340093E3BE /* CPUSetDiff1D.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = CPUSetDiff1D.hpp; sourceTree = ""; }; - 
483CD480216B1C7B00B05BE9 /* DeconvolutionWithStride.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = DeconvolutionWithStride.cpp; sourceTree = ""; }; - 483CD481216B1C7B00B05BE9 /* DeconvolutionWithStride.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = DeconvolutionWithStride.hpp; sourceTree = ""; }; - 483CD484216B2F0400B05BE9 /* WinogradOptFunction.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = WinogradOptFunction.cpp; sourceTree = ""; }; - 483CD485216B2F0400B05BE9 /* WinogradOptFunction.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = WinogradOptFunction.hpp; sourceTree = ""; }; - 483CD488216CDDA100B05BE9 /* MNNAddC4WithStride.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNAddC4WithStride.S; sourceTree = ""; }; - 483CD48A216CE20D00B05BE9 /* MNNAddC4WithStride.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddC4WithStride.S; sourceTree = ""; }; - 483CD48C216CE3B500B05BE9 /* MNNCopyC4WithStride.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCopyC4WithStride.S; sourceTree = ""; }; - 483CD48E216CE3BB00B05BE9 /* MNNCopyC4WithStride.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCopyC4WithStride.S; sourceTree = ""; }; - 4841B5F221EAE98B002E5D66 /* SizeComputer.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = SizeComputer.hpp; sourceTree = ""; }; - 4841B5F321EAE98B002E5D66 /* Backend.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Backend.cpp; sourceTree = ""; }; - 4841B5F421EAE98B002E5D66 /* SizeComputer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SizeComputer.cpp; sourceTree = ""; }; - 4841B5F521EAE98B002E5D66 /* 
Backend.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Backend.hpp; sourceTree = ""; }; - 4841B5FA21EAE998002E5D66 /* Execution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Execution.cpp; sourceTree = ""; }; - 4841B5FB21EAE998002E5D66 /* Execution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Execution.hpp; sourceTree = ""; }; - 4841B60621EC607D002E5D66 /* CPUQuantizedConcat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedConcat.cpp; sourceTree = ""; }; - 4841B60721EC607D002E5D66 /* CPUQuantizedLogistic.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedLogistic.hpp; sourceTree = ""; }; - 4841B60821EC607D002E5D66 /* CPUDequantize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDequantize.hpp; sourceTree = ""; }; - 4841B60921EC607D002E5D66 /* CPUQuantizedConcat.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedConcat.hpp; sourceTree = ""; }; - 4841B60A21EC607D002E5D66 /* CPUQuantizedLogistic.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedLogistic.cpp; sourceTree = ""; }; - 4841B60B21EC607D002E5D66 /* CPUDequantize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDequantize.cpp; sourceTree = ""; }; - 4841B61221EC6267002E5D66 /* ShapeDequantize.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDequantize.cpp; sourceTree = ""; }; - 4843AA4C22A7E9AA00889A63 /* CPUConv2DBackPropFilter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConv2DBackPropFilter.hpp; sourceTree = ""; }; - 
4843AA4D22A7E9AA00889A63 /* CPUReluGrad.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReluGrad.cpp; sourceTree = ""; }; - 4843AA4E22A7E9AB00889A63 /* CPUPoolGrad.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPoolGrad.hpp; sourceTree = ""; }; - 4843AA4F22A7E9AB00889A63 /* CPUReluGrad.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReluGrad.hpp; sourceTree = ""; }; - 4843AA5022A7E9AB00889A63 /* CPUPoolGrad.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPoolGrad.cpp; sourceTree = ""; }; - 4843AA5122A7E9AB00889A63 /* CPUConv2DBackPropFilter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConv2DBackPropFilter.cpp; sourceTree = ""; }; - 4843AA5222A7E9AB00889A63 /* CPUSoftmaxGrad.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSoftmaxGrad.cpp; sourceTree = ""; }; - 4843AA5322A7E9AB00889A63 /* CPUSoftmaxGrad.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSoftmaxGrad.hpp; sourceTree = ""; }; - 4847D41C22C0739A0049F3CA /* ShapePadding.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePadding.cpp; sourceTree = ""; }; - 4847D41E22C07E850049F3CA /* CPUPadding.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPadding.cpp; sourceTree = ""; }; - 4847D41F22C07E850049F3CA /* CPUPadding.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = CPUPadding.hpp; sourceTree = ""; }; - 4851BE0F2122C1BC009BB0AC /* Tensor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Tensor.hpp; sourceTree = ""; }; - 485DD40B217F495400129159 /* CPUQuantizedAdd.hpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedAdd.hpp; sourceTree = ""; }; - 485DD40C217F495500129159 /* CPUQuantizedSoftmax.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedSoftmax.cpp; sourceTree = ""; }; - 485DD40E217F495500129159 /* CPUQuantizedAdd.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedAdd.cpp; sourceTree = ""; }; - 485DD40F217F495500129159 /* CPUQuantizedSoftmax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedSoftmax.hpp; sourceTree = ""; }; - 485DD416217F49C500129159 /* CPUQuanConvolutionDepthwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuanConvolutionDepthwise.hpp; sourceTree = ""; }; - 485DD417217F49C500129159 /* CPUQuanConvolutionDepthwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuanConvolutionDepthwise.cpp; sourceTree = ""; }; - 485DD422217F4C7600129159 /* CPUFixedPoint.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUFixedPoint.hpp; sourceTree = ""; }; - 485DD424218161E100129159 /* MNNConvRunForUnitDepthWiseUint8.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWiseUint8.S; sourceTree = ""; }; - 485DD4262181898C00129159 /* MNNUInt8ToInt16WithOffsetC4Common.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNUInt8ToInt16WithOffsetC4Common.S; sourceTree = ""; }; - 485DD4282181938C00129159 /* MNNQuanToDestUint8.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNQuanToDestUint8.S; sourceTree = ""; }; - 485DD42A21819FB000129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNUInt8ToInt16WithOffsetC4Fast.S; 
sourceTree = ""; }; - 485DD42C2181A68F00129159 /* MNNConvRunForLineDepthWiseUint8.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthWiseUint8.S; sourceTree = ""; }; - 485DD42E2181E94300129159 /* MNNQuanToDestUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNQuanToDestUint8.S; sourceTree = ""; }; - 485DD42F2181E94300129159 /* MNNUInt8ToInt16WithOffsetC4Common.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNUInt8ToInt16WithOffsetC4Common.S; sourceTree = ""; }; - 485DD4322182AE8000129159 /* MNNConvRunForLineDepthWiseUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthWiseUint8.S; sourceTree = ""; }; - 485DD4332182AE8100129159 /* MNNConvRunForUnitDepthWiseUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWiseUint8.S; sourceTree = ""; }; - 485DD4362182B07B00129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNUInt8ToInt16WithOffsetC4Fast.S; sourceTree = ""; }; - 486B4BB8222901D5001E73E3 /* MNNMatrixProd.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixProd.S; sourceTree = ""; }; - 486B4BBA222901E5001E73E3 /* MNNMatrixProd.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixProd.S; sourceTree = ""; }; - 486B4BC0222D4831001E73E3 /* MNNMatrixMax.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixMax.S; sourceTree = ""; }; - 486B4BC2222D4845001E73E3 /* MNNMatrixMax.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixMax.S; sourceTree = ""; }; - 486FDF3C223E495A00F487FB /* CPUBinary.hpp */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBinary.hpp; sourceTree = ""; }; - 486FDF3D223E495A00F487FB /* CPUBinary.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBinary.cpp; sourceTree = ""; }; - 486FDF3E223E495A00F487FB /* CPUUnary.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUUnary.cpp; sourceTree = ""; }; - 486FDF3F223E495B00F487FB /* CPUUnary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUUnary.hpp; sourceTree = ""; }; + 1F501EEA2397BA26004E8721 /* ImageSampler.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = ImageSampler.hpp; path = ../../include/MNN/cv/ImageSampler.hpp; sourceTree = ""; }; + 1F501EEB2397BA26004E8721 /* ImageFloatBlitter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = ImageFloatBlitter.hpp; path = ../../include/MNN/cv/ImageFloatBlitter.hpp; sourceTree = ""; }; + 1F501EEC2397BA26004E8721 /* SkNx_neon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = SkNx_neon.h; path = ../../include/MNN/cv/SkNx_neon.h; sourceTree = ""; }; + 1F501EED2397BA26004E8721 /* ImageBlitter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = ImageBlitter.hpp; path = ../../include/MNN/cv/ImageBlitter.hpp; sourceTree = ""; }; + 1F501EEE2397BA26004E8721 /* SkNx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = SkNx.h; path = ../../include/MNN/cv/SkNx.h; sourceTree = ""; }; + 1F501EF42397BA31004E8721 /* Matrix.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = Matrix.hpp; path = ../../include/MNN/math/Matrix.hpp; sourceTree = ""; }; + 1F501EF52397BA31004E8721 /* Vec4.hpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = Vec4.hpp; path = ../../include/MNN/math/Vec4.hpp; sourceTree = ""; }; + 1F501EF62397BA31004E8721 /* WingoradGenerater.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = WingoradGenerater.hpp; path = ../../include/MNN/math/WingoradGenerater.hpp; sourceTree = ""; }; + 1F501EFA2397BA49004E8721 /* MetalQuantizedReshape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalQuantizedReshape.hpp; path = ../../../include/MNN/backend/metal/MetalQuantizedReshape.hpp; sourceTree = ""; }; + 1F501EFB2397BA49004E8721 /* MetalInterp.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalInterp.hpp; path = ../../../include/MNN/backend/metal/MetalInterp.hpp; sourceTree = ""; }; + 1F501EFC2397BA49004E8721 /* MetalBatchToSpaceND.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalBatchToSpaceND.hpp; path = ../../../include/MNN/backend/metal/MetalBatchToSpaceND.hpp; sourceTree = ""; }; + 1F501EFD2397BA49004E8721 /* MNNMetalContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MNNMetalContext.h; path = ../../../include/MNN/backend/metal/MNNMetalContext.h; sourceTree = ""; }; + 1F501EFE2397BA49004E8721 /* MetalSpatialProduct.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSpatialProduct.hpp; path = ../../../include/MNN/backend/metal/MetalSpatialProduct.hpp; sourceTree = ""; }; + 1F501EFF2397BA49004E8721 /* MetalUnary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalUnary.hpp; path = ../../../include/MNN/backend/metal/MetalUnary.hpp; sourceTree = ""; }; + 1F501F002397BA49004E8721 /* MetalBackend.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; 
name = MetalBackend.hpp; path = ../../../include/MNN/backend/metal/MetalBackend.hpp; sourceTree = ""; }; + 1F501F012397BA49004E8721 /* MetalCast.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalCast.hpp; path = ../../../include/MNN/backend/metal/MetalCast.hpp; sourceTree = ""; }; + 1F501F022397BA49004E8721 /* MetalBinary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalBinary.hpp; path = ../../../include/MNN/backend/metal/MetalBinary.hpp; sourceTree = ""; }; + 1F501F032397BA49004E8721 /* MetalLSTM.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalLSTM.hpp; path = ../../../include/MNN/backend/metal/MetalLSTM.hpp; sourceTree = ""; }; + 1F501F042397BA49004E8721 /* MetalROIPooling.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalROIPooling.hpp; path = ../../../include/MNN/backend/metal/MetalROIPooling.hpp; sourceTree = ""; }; + 1F501F052397BA49004E8721 /* MetalReLU6.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalReLU6.hpp; path = ../../../include/MNN/backend/metal/MetalReLU6.hpp; sourceTree = ""; }; + 1F501F062397BA4A004E8721 /* MetalGather.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalGather.hpp; path = ../../../include/MNN/backend/metal/MetalGather.hpp; sourceTree = ""; }; + 1F501F072397BA4A004E8721 /* MetalSliceTF.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSliceTF.hpp; path = ../../../include/MNN/backend/metal/MetalSliceTF.hpp; sourceTree = ""; }; + 1F501F082397BA4A004E8721 /* MetalResize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalResize.hpp; path = ../../../include/MNN/backend/metal/MetalResize.hpp; sourceTree = ""; }; + 
1F501F092397BA4A004E8721 /* MetalConvolutionDepthwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalConvolutionDepthwise.hpp; path = ../../../include/MNN/backend/metal/MetalConvolutionDepthwise.hpp; sourceTree = ""; }; + 1F501F0A2397BA4A004E8721 /* MetalDefine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MetalDefine.h; path = ../../../include/MNN/backend/metal/MetalDefine.h; sourceTree = ""; }; + 1F501F0B2397BA4A004E8721 /* MetalSoftmax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSoftmax.hpp; path = ../../../include/MNN/backend/metal/MetalSoftmax.hpp; sourceTree = ""; }; + 1F501F0C2397BA4A004E8721 /* MetalMatMul.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalMatMul.hpp; path = ../../../include/MNN/backend/metal/MetalMatMul.hpp; sourceTree = ""; }; + 1F501F0D2397BA4A004E8721 /* MetalTensorConverter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalTensorConverter.hpp; path = ../../../include/MNN/backend/metal/MetalTensorConverter.hpp; sourceTree = ""; }; + 1F501F0E2397BA4A004E8721 /* MetalCropAndResize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalCropAndResize.hpp; path = ../../../include/MNN/backend/metal/MetalCropAndResize.hpp; sourceTree = ""; }; + 1F501F0F2397BA4A004E8721 /* MetalRank.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalRank.hpp; path = ../../../include/MNN/backend/metal/MetalRank.hpp; sourceTree = ""; }; + 1F501F102397BA4A004E8721 /* MetalTranspose.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalTranspose.hpp; path = ../../../include/MNN/backend/metal/MetalTranspose.hpp; sourceTree = ""; }; + 1F501F112397BA4A004E8721 /* 
MetalConvolutionCommon.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalConvolutionCommon.hpp; path = ../../../include/MNN/backend/metal/MetalConvolutionCommon.hpp; sourceTree = ""; }; + 1F501F122397BA4A004E8721 /* MetalQuantizedMaxPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalQuantizedMaxPool.hpp; path = ../../../include/MNN/backend/metal/MetalQuantizedMaxPool.hpp; sourceTree = ""; }; + 1F501F132397BA4B004E8721 /* MetalFill.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalFill.hpp; path = ../../../include/MNN/backend/metal/MetalFill.hpp; sourceTree = ""; }; + 1F501F142397BA4B004E8721 /* MetalSqueeze.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSqueeze.hpp; path = ../../../include/MNN/backend/metal/MetalSqueeze.hpp; sourceTree = ""; }; + 1F501F152397BA4B004E8721 /* MetalCrop.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalCrop.hpp; path = ../../../include/MNN/backend/metal/MetalCrop.hpp; sourceTree = ""; }; + 1F501F162397BA4B004E8721 /* MetalScale.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalScale.hpp; path = ../../../include/MNN/backend/metal/MetalScale.hpp; sourceTree = ""; }; + 1F501F172397BA4B004E8721 /* MetalTile.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalTile.hpp; path = ../../../include/MNN/backend/metal/MetalTile.hpp; sourceTree = ""; }; + 1F501F182397BA4B004E8721 /* MetalSlice.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSlice.hpp; path = ../../../include/MNN/backend/metal/MetalSlice.hpp; sourceTree = ""; }; + 1F501F192397BA4B004E8721 /* MetalConvolution1x1.hpp */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.cpp.h; name = MetalConvolution1x1.hpp; path = ../../../include/MNN/backend/metal/MetalConvolution1x1.hpp; sourceTree = ""; }; + 1F501F1A2397BA4B004E8721 /* MetalPooling.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalPooling.hpp; path = ../../../include/MNN/backend/metal/MetalPooling.hpp; sourceTree = ""; }; + 1F501F1B2397BA4B004E8721 /* MetalQuantizedAdd.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalQuantizedAdd.hpp; path = ../../../include/MNN/backend/metal/MetalQuantizedAdd.hpp; sourceTree = ""; }; + 1F501F1C2397BA4B004E8721 /* MetalTanH.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalTanH.hpp; path = ../../../include/MNN/backend/metal/MetalTanH.hpp; sourceTree = ""; }; + 1F501F1D2397BA4B004E8721 /* MetalTFQuantizedConv2D.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalTFQuantizedConv2D.hpp; path = ../../../include/MNN/backend/metal/MetalTFQuantizedConv2D.hpp; sourceTree = ""; }; + 1F501F1E2397BA4B004E8721 /* MetalConvolutionWinograd.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalConvolutionWinograd.hpp; path = ../../../include/MNN/backend/metal/MetalConvolutionWinograd.hpp; sourceTree = ""; }; + 1F501F1F2397BA4B004E8721 /* MetalSpaceToBatchND.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSpaceToBatchND.hpp; path = ../../../include/MNN/backend/metal/MetalSpaceToBatchND.hpp; sourceTree = ""; }; + 1F501F202397BA4B004E8721 /* MetalEltwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalEltwise.hpp; path = ../../../include/MNN/backend/metal/MetalEltwise.hpp; sourceTree = ""; }; + 1F501F212397BA4B004E8721 /* MetalReLU.hpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalReLU.hpp; path = ../../../include/MNN/backend/metal/MetalReLU.hpp; sourceTree = ""; }; + 1F501F222397BA4B004E8721 /* MetalSize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSize.hpp; path = ../../../include/MNN/backend/metal/MetalSize.hpp; sourceTree = ""; }; + 1F501F232397BA4B004E8721 /* MetalSigmoid.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSigmoid.hpp; path = ../../../include/MNN/backend/metal/MetalSigmoid.hpp; sourceTree = ""; }; + 1F501F242397BA4B004E8721 /* MetalSeLU.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalSeLU.hpp; path = ../../../include/MNN/backend/metal/MetalSeLU.hpp; sourceTree = ""; }; + 1F501F252397BA4C004E8721 /* MetalNormalize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalNormalize.hpp; path = ../../../include/MNN/backend/metal/MetalNormalize.hpp; sourceTree = ""; }; + 1F501F262397BA4C004E8721 /* MetalQuantizedSoftmax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalQuantizedSoftmax.hpp; path = ../../../include/MNN/backend/metal/MetalQuantizedSoftmax.hpp; sourceTree = ""; }; + 1F501F272397BA4C004E8721 /* MetalRange.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalRange.hpp; path = ../../../include/MNN/backend/metal/MetalRange.hpp; sourceTree = ""; }; + 1F501F282397BA4C004E8721 /* MetalDequantize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalDequantize.hpp; path = ../../../include/MNN/backend/metal/MetalDequantize.hpp; sourceTree = ""; }; + 1F501F292397BA4C004E8721 /* MetalConvolutionGEMM.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = 
MetalConvolutionGEMM.hpp; path = ../../../include/MNN/backend/metal/MetalConvolutionGEMM.hpp; sourceTree = ""; }; + 1F501F2A2397BA4C004E8721 /* MetalGatherV2.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalGatherV2.hpp; path = ../../../include/MNN/backend/metal/MetalGatherV2.hpp; sourceTree = ""; }; + 1F501F2B2397BA4C004E8721 /* MetalConvolution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalConvolution.hpp; path = ../../../include/MNN/backend/metal/MetalConvolution.hpp; sourceTree = ""; }; + 1F501F2C2397BA4C004E8721 /* MetalConcat.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalConcat.hpp; path = ../../../include/MNN/backend/metal/MetalConcat.hpp; sourceTree = ""; }; + 1F501F2D2397BA4C004E8721 /* MetalPack.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalPack.hpp; path = ../../../include/MNN/backend/metal/MetalPack.hpp; sourceTree = ""; }; + 1F501F2E2397BA4C004E8721 /* MetalPermute.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalPermute.hpp; path = ../../../include/MNN/backend/metal/MetalPermute.hpp; sourceTree = ""; }; + 1F501F2F2397BA4C004E8721 /* MetalLRN.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalLRN.hpp; path = ../../../include/MNN/backend/metal/MetalLRN.hpp; sourceTree = ""; }; + 1F501F302397BA4C004E8721 /* MetalPReLU.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalPReLU.hpp; path = ../../../include/MNN/backend/metal/MetalPReLU.hpp; sourceTree = ""; }; + 1F501F312397BA4C004E8721 /* MetalDeconvolution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalDeconvolution.hpp; path = 
../../../include/MNN/backend/metal/MetalDeconvolution.hpp; sourceTree = ""; }; + 1F501F322397BA4C004E8721 /* MetalQuantizedAvgPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalQuantizedAvgPool.hpp; path = ../../../include/MNN/backend/metal/MetalQuantizedAvgPool.hpp; sourceTree = ""; }; + 1F501F332397BA4C004E8721 /* MetalStridedSlice.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalStridedSlice.hpp; path = ../../../include/MNN/backend/metal/MetalStridedSlice.hpp; sourceTree = ""; }; + 1F501F342397BA4D004E8721 /* MetalReduction.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalReduction.hpp; path = ../../../include/MNN/backend/metal/MetalReduction.hpp; sourceTree = ""; }; + 1F501F352397BA4D004E8721 /* MetalReshape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = MetalReshape.hpp; path = ../../../include/MNN/backend/metal/MetalReshape.hpp; sourceTree = ""; }; + 1F501F722397BA5A004E8721 /* HalideRuntime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = HalideRuntime.h; path = MNN/HalideRuntime.h; sourceTree = ""; }; + 1F501F732397BA5A004E8721 /* MNNDefine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MNNDefine.h; path = MNN/MNNDefine.h; sourceTree = ""; }; + 1F501F742397BA5A004E8721 /* AutoTime.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = AutoTime.hpp; path = MNN/AutoTime.hpp; sourceTree = ""; }; + 1F501F752397BA5A004E8721 /* Interpreter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = Interpreter.hpp; path = MNN/Interpreter.hpp; sourceTree = ""; }; + 1F501F762397BA5A004E8721 /* expr */ = {isa = PBXFileReference; lastKnownFileType = folder; name = expr; path = MNN/expr; 
sourceTree = ""; }; + 1F501F772397BA5A004E8721 /* ImageProcess.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = ImageProcess.hpp; path = MNN/ImageProcess.hpp; sourceTree = ""; }; + 1F501F782397BA5A004E8721 /* ErrorCode.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = ErrorCode.hpp; path = MNN/ErrorCode.hpp; sourceTree = ""; }; + 1F501F792397BA5A004E8721 /* Rect.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Rect.h; path = MNN/Rect.h; sourceTree = ""; }; + 1F501F7A2397BA5A004E8721 /* Matrix.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Matrix.h; path = MNN/Matrix.h; sourceTree = ""; }; + 1F501F7B2397BA5A004E8721 /* Tensor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = Tensor.hpp; path = MNN/Tensor.hpp; sourceTree = ""; }; + 1F501F7C2397BA5A004E8721 /* MNNForwardType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MNNForwardType.h; path = MNN/MNNForwardType.h; sourceTree = ""; }; + 1F501F7E2397BA5B004E8721 /* MNNSharedContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MNNSharedContext.h; path = MNN/MNNSharedContext.h; sourceTree = ""; }; 486FDF44223E4B2700F487FB /* MetalBinary.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalBinary.mm; sourceTree = ""; }; 486FDF45223E4B2800F487FB /* MetalBinary.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalBinary.metal; sourceTree = ""; }; - 486FDF46223E4B2800F487FB /* MetalBinary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalBinary.hpp; sourceTree = ""; }; - 486FDF4A2241E95700F487FB /* CPURuntime.cpp */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.cpp.cpp; path = CPURuntime.cpp; sourceTree = ""; }; - 486FDF4B2241E95700F487FB /* CPURuntime.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = CPURuntime.hpp; sourceTree = ""; }; - 487970CC22C9BF4A00795502 /* CPUFloatToInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUFloatToInt8.hpp; sourceTree = ""; }; - 487970CD22C9BF4A00795502 /* CPUDepthwiseConvInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDepthwiseConvInt8.cpp; sourceTree = ""; }; - 487970CE22C9BF4A00795502 /* CPUConvInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvInt8.hpp; sourceTree = ""; }; - 487970CF22C9BF4A00795502 /* CPUDepthwiseConvInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDepthwiseConvInt8.hpp; sourceTree = ""; }; - 487970D022C9BF4A00795502 /* CPUInt8ToFloat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUInt8ToFloat.cpp; sourceTree = ""; }; - 487970D122C9BF4A00795502 /* CPUInt8ToFloat.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUInt8ToFloat.hpp; sourceTree = ""; }; - 487970D222C9BF4B00795502 /* CPUConvInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvInt8.cpp; sourceTree = ""; }; - 487970D322C9BF4B00795502 /* CPUFloatToInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUFloatToInt8.cpp; sourceTree = ""; }; - 487970DC22C9BF5E00795502 /* MNNGemmInt8AddBiasScale_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8AddBiasScale_8x4_Unit.S; sourceTree = ""; }; - 487970DD22C9BF5E00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8AddBiasScale_16x4_Unit.S; sourceTree = ""; }; - 487970DE22C9BF5E00795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNDepthWiseInt8AddBiasScaleUnit.S; sourceTree = ""; }; - 487970DF22C9BF5E00795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNLineDepthWiseInt8AddBiasScaleUnit.S; sourceTree = ""; }; - 487970E022C9BF5E00795502 /* MNNInt8ScaleToFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNInt8ScaleToFloat.S; sourceTree = ""; }; - 487970E122C9BF5E00795502 /* MNNReluInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNReluInt8.S; sourceTree = ""; }; - 487970E822C9BF7200795502 /* MNNInt8ScaleToFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNInt8ScaleToFloat.S; sourceTree = ""; }; - 487970E922C9BF7200795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNDepthWiseInt8AddBiasScaleUnit.S; sourceTree = ""; }; - 487970EA22C9BF7200795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNLineDepthWiseInt8AddBiasScaleUnit.S; sourceTree = ""; }; - 487970EB22C9BF7200795502 /* MNNReluInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNReluInt8.S; sourceTree = ""; }; - 487970EC22C9BF7200795502 /* MNNGemmInt8AddBiasScale_16x4_Unit_D4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8AddBiasScale_16x4_Unit_D4.S; sourceTree = ""; }; - 487970F222C9C07000795502 /* CPUPoolInt8.hpp */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPoolInt8.hpp; sourceTree = ""; }; - 487970F322C9C07000795502 /* CPUPoolInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPoolInt8.cpp; sourceTree = ""; }; - 487970F622C9C19F00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8AddBiasScale_16x4_Unit.S; sourceTree = ""; }; - 487970F822CE19EA00795502 /* FileLoader.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = FileLoader.hpp; sourceTree = ""; }; - 487970F922CE19EA00795502 /* FileLoader.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FileLoader.cpp; sourceTree = ""; }; - 48871459215153F900CCE0D8 /* ErrorCode.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ErrorCode.hpp; sourceTree = ""; }; - 48871464215225D600CCE0D8 /* ImageProcess.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ImageProcess.hpp; sourceTree = ""; }; - 48871478215249EA00CCE0D8 /* Matrix.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Matrix.h; sourceTree = ""; }; - 48871479215249EA00CCE0D8 /* Rect.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Rect.h; sourceTree = ""; }; - 488873AF215B639D0079B12E /* TensorUtils.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = TensorUtils.hpp; sourceTree = ""; }; - 488873B1215B639D0079B12E /* Concurrency.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Concurrency.h; sourceTree = ""; }; - 488873B5215B639D0079B12E /* AutoStorage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; 
path = AutoStorage.h; sourceTree = ""; }; - 488873B7215B639D0079B12E /* Macro.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Macro.h; sourceTree = ""; }; - 488873B9215B639D0079B12E /* MNNMemoryUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MNNMemoryUtils.h; sourceTree = ""; }; - 488873BA215B639D0079B12E /* TensorUtils.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TensorUtils.cpp; sourceTree = ""; }; - 488873BD215B639D0079B12E /* BufferAllocator.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = BufferAllocator.hpp; sourceTree = ""; }; - 488873C2215B639D0079B12E /* Tensor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Tensor.cpp; sourceTree = ""; }; - 488873C3215B639D0079B12E /* MNNMemoryUtils.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = MNNMemoryUtils.c; sourceTree = ""; }; - 488873C4215B639D0079B12E /* AutoTime.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AutoTime.cpp; sourceTree = ""; }; - 488873C6215B639D0079B12E /* NonCopyable.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = NonCopyable.hpp; sourceTree = ""; }; 488873C9215B639D0079B12E /* MetalSpatialProduct.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSpatialProduct.mm; sourceTree = ""; }; 488873CA215B639D0079B12E /* MetalReshape.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalReshape.metal; sourceTree = ""; }; 488873CB215B639D0079B12E /* MetalLRN.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalLRN.metal; sourceTree = ""; }; - 
488873CC215B639D0079B12E /* MetalLRN.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalLRN.hpp; sourceTree = ""; }; 488873CD215B639D0079B12E /* MetalPermute.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalPermute.metal; sourceTree = ""; }; - 488873CE215B639D0079B12E /* MetalReshape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalReshape.hpp; sourceTree = ""; }; 488873CF215B639D0079B12E /* MetalTanH.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalTanH.mm; sourceTree = ""; }; 488873D0215B639D0079B12E /* MetalSoftmax.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSoftmax.mm; sourceTree = ""; }; 488873D1215B639D0079B12E /* MetalReLU.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalReLU.metal; sourceTree = ""; }; - 488873D3215B639D0079B12E /* MetalROIPooling.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalROIPooling.hpp; sourceTree = ""; }; - 488873D4215B639D0079B12E /* MetalTanH.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalTanH.hpp; sourceTree = ""; }; 488873D5215B639D0079B12E /* MetalLRN.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalLRN.mm; sourceTree = ""; }; 488873D6215B639D0079B12E /* MetalNormalize.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalNormalize.mm; sourceTree = ""; }; 488873D7215B639D0079B12E /* MNNMetalContext.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MNNMetalContext.mm; sourceTree = ""; }; 488873D9215B639D0079B12E /* MetalLSTM.metal */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalLSTM.metal; sourceTree = ""; }; - 488873DA215B639D0079B12E /* MetalNormalize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalNormalize.hpp; sourceTree = ""; }; - 488873DB215B639D0079B12E /* MetalBackend.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalBackend.hpp; sourceTree = ""; }; 488873DC215B639D0079B12E /* MetalConvolution.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalConvolution.metal; sourceTree = ""; }; 488873DD215B639D0079B12E /* MetalSlice.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSlice.mm; sourceTree = ""; }; 488873DE215B639D0079B12E /* MetalEltwise.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalEltwise.mm; sourceTree = ""; }; 488873E0215B639D0079B12E /* MetalBackend.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalBackend.metal; sourceTree = ""; }; 488873E1215B639D0079B12E /* MetalConvolution.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolution.mm; sourceTree = ""; }; - 488873E2215B639D0079B12E /* MetalReLU.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalReLU.hpp; sourceTree = ""; }; - 488873E3215B639D0079B12E /* MetalEltwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalEltwise.hpp; sourceTree = ""; }; - 488873E4215B639D0079B12E /* MetalPooling.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalPooling.hpp; sourceTree = ""; }; - 488873E5215B639D0079B12E /* MetalSlice.hpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalSlice.hpp; sourceTree = ""; }; 488873E6215B639D0079B12E /* MetalConcat.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConcat.mm; sourceTree = ""; }; 488873E8215B639D0079B12E /* MetalNormalize.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalNormalize.metal; sourceTree = ""; }; 488873E9215B639D0079B12E /* MetalSoftmax.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalSoftmax.metal; sourceTree = ""; }; - 488873EA215B639D0079B12E /* MetalLSTM.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalLSTM.hpp; sourceTree = ""; }; - 488873EB215B639D0079B12E /* MetalPReLU.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalPReLU.hpp; sourceTree = ""; }; - 488873EC215B639D0079B12E /* MetalPermute.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalPermute.hpp; sourceTree = ""; }; - 488873F0215B639D0079B12E /* MetalSpatialProduct.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalSpatialProduct.hpp; sourceTree = ""; }; - 488873F1215B639D0079B12E /* MetalResize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalResize.hpp; sourceTree = ""; }; 488873F2215B639D0079B12E /* MetalDeconvolution.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalDeconvolution.mm; sourceTree = ""; }; 488873F3215B639D0079B12E /* MetalReLU.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalReLU.mm; sourceTree = ""; }; 488873F4215B639D0079B12E /* MetalPooling.mm */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.cpp.objcpp; path = MetalPooling.mm; sourceTree = ""; }; 488873F6215B639D0079B12E /* MetalScale.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalScale.metal; sourceTree = ""; }; - 488873F7215B639D0079B12E /* MetalConvolution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalConvolution.hpp; sourceTree = ""; }; 488873F8215B639D0079B12E /* MetalScale.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalScale.mm; sourceTree = ""; }; - 488873F9215B639D0079B12E /* MetalSoftmax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalSoftmax.hpp; sourceTree = ""; }; 488873FA215B639D0079B12E /* MetalReshape.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalReshape.mm; sourceTree = ""; }; 488873FB215B639D0079B12E /* MetalTanH.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalTanH.metal; sourceTree = ""; }; 488873FC215B639D0079B12E /* MetalDeconvolution.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalDeconvolution.metal; sourceTree = ""; }; - 488873FD215B639D0079B12E /* MetalDeconvolution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalDeconvolution.hpp; sourceTree = ""; }; 488873FE215B639D0079B12E /* MetalPooling.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalPooling.metal; sourceTree = ""; }; 488873FF215B639D0079B12E /* MetalInterp.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalInterp.mm; sourceTree = ""; }; 48887400215B639D0079B12E /* MetalROIPooling.metal */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.metal; path = MetalROIPooling.metal; sourceTree = ""; }; 48887401215B639D0079B12E /* MetalSpatialProduct.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalSpatialProduct.metal; sourceTree = ""; }; 48887402215B639D0079B12E /* MetalROIPooling.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalROIPooling.mm; sourceTree = ""; }; - 48887403215B639D0079B12E /* MetalInterp.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalInterp.hpp; sourceTree = ""; }; - 48887404215B639D0079B12E /* MNNMetalContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MNNMetalContext.h; sourceTree = ""; }; 48887405215B639D0079B12E /* MetalBackend.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalBackend.mm; sourceTree = ""; }; - 48887406215B639D0079B12E /* MetalScale.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalScale.hpp; sourceTree = ""; }; 48887407215B639D0079B12E /* MetalSlice.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalSlice.metal; sourceTree = ""; }; 48887408215B639D0079B12E /* MetalResize.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalResize.metal; sourceTree = ""; }; 48887409215B639D0079B12E /* MetalPReLU.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalPReLU.metal; sourceTree = ""; }; - 4888740A215B639D0079B12E /* MetalConcat.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalConcat.hpp; sourceTree = ""; }; 4888740B215B639D0079B12E /* MetalPermute.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.cpp.objcpp; path = MetalPermute.mm; sourceTree = ""; }; 4888740C215B639D0079B12E /* MetalPReLU.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalPReLU.mm; sourceTree = ""; }; 4888740D215B639D0079B12E /* MetalLSTM.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalLSTM.mm; sourceTree = ""; }; 4888740E215B639D0079B12E /* MetalResize.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalResize.mm; sourceTree = ""; }; 4888740F215B639D0079B12E /* MetalEltwise.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalEltwise.metal; sourceTree = ""; }; - 48887411215B639D0079B12E /* CPUCropAndResize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUCropAndResize.cpp; sourceTree = ""; }; - 48887412215B639D0079B12E /* CPUSelu.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSelu.cpp; sourceTree = ""; }; - 48887413215B639D0079B12E /* CPUArgMax.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUArgMax.cpp; sourceTree = ""; }; - 48887414215B639D0079B12E /* CPURange.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURange.hpp; sourceTree = ""; }; - 48887415215B639D0079B12E /* CPUConvolutionDepthwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvolutionDepthwise.hpp; sourceTree = ""; }; - 48887417215B639D0079B12E /* CPUTFQuantizedConv2D.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTFQuantizedConv2D.hpp; sourceTree = ""; }; - 48887419215B639D0079B12E /* CPUScale.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; 
path = CPUScale.cpp; sourceTree = ""; }; - 4888741A215B639D0079B12E /* CPUTensorConvert.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTensorConvert.hpp; sourceTree = ""; }; - 4888741B215B639D0079B12E /* CPUQuantizationUtils.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizationUtils.hpp; sourceTree = ""; }; - 4888741C215B639D0079B12E /* CPUSoftmax.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSoftmax.cpp; sourceTree = ""; }; - 4888741D215B639D0079B12E /* CPUTile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTile.cpp; sourceTree = ""; }; - 4888741E215B639D0079B12E /* CPUQuantizedAvgPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedAvgPool.hpp; sourceTree = ""; }; - 48887420215B639D0079B12E /* CPUConvolution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvolution.hpp; sourceTree = ""; }; - 48887421215B639D0079B12E /* CPUEltwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUEltwise.hpp; sourceTree = ""; }; - 48887422215B639D0079B12E /* CPUResize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUResize.cpp; sourceTree = ""; }; - 48887423215B639D0079B12E /* CPUCast.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUCast.cpp; sourceTree = ""; }; - 48887424215B639D0079B12E /* CPUGather.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUGather.cpp; sourceTree = ""; }; - 48887425215B639D0079B12E /* CPUAsString.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUAsString.hpp; sourceTree 
= ""; }; - 48887426215B639D0079B12E /* CPUProposal.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUProposal.cpp; sourceTree = ""; }; - 48887427215B639D0079B12E /* CPUTanh.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTanh.hpp; sourceTree = ""; }; - 48887428215B639D0079B12E /* CPUReduceJoin.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReduceJoin.hpp; sourceTree = ""; }; - 48887429215B639D0079B12E /* CPUInterp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUInterp.cpp; sourceTree = ""; }; - 4888742A215B639D0079B12E /* CPUConst.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConst.cpp; sourceTree = ""; }; - 4888742B215B639D0079B12E /* CPUQuantizedReshape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedReshape.hpp; sourceTree = ""; }; - 4888742C215B639D0079B12E /* CPUDetectionOutput.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDetectionOutput.cpp; sourceTree = ""; }; - 4888742D215B639D0079B12E /* CPUPriorbox.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPriorbox.hpp; sourceTree = ""; }; - 4888742E215B639D0079B12E /* CPUSize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSize.cpp; sourceTree = ""; }; - 4888742F215B639D0079B12E /* CPUMatMul.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUMatMul.cpp; sourceTree = ""; }; - 48887431215B639D0079B12E /* CPUPermute.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPermute.hpp; sourceTree = ""; }; - 48887432215B639D0079B12E /* 
CPUFill.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUFill.hpp; sourceTree = ""; }; - 48887433215B639D0079B12E /* CPUTranspose.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTranspose.hpp; sourceTree = ""; }; - 48887434215B639D0079B12E /* CPUSlice.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSlice.hpp; sourceTree = ""; }; - 48887435215B639D0079B12E /* CPULRN.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPULRN.hpp; sourceTree = ""; }; - 48887436215B639D0079B12E /* CPUStridedSlice.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUStridedSlice.hpp; sourceTree = ""; }; - 48887437215B639D0079B12E /* CPUWhere.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUWhere.cpp; sourceTree = ""; }; - 48887438215B639D0079B12E /* CPUTopKV2.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTopKV2.hpp; sourceTree = ""; }; - 48887439215B639D0079B12E /* CPUROIPooling.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUROIPooling.hpp; sourceTree = ""; }; - 4888743A215B639D0079B12E /* CPUUnpack.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUUnpack.hpp; sourceTree = ""; }; - 4888743B215B639D0079B12E /* CPUQuantizedMaxPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedMaxPool.cpp; sourceTree = ""; }; - 4888743C215B639D0079B12E /* CPUExpandDims.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUExpandDims.hpp; sourceTree = ""; }; - 4888743D215B639D0079B12E /* CPUReshape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.cpp.h; path = CPUReshape.hpp; sourceTree = ""; }; - 4888743E215B639D0079B12E /* CPUReduction.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReduction.hpp; sourceTree = ""; }; - 4888743F215B639D0079B12E /* CPUDeconvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDeconvolution.cpp; sourceTree = ""; }; - 48887440215B639D0079B12E /* CPURelu.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURelu.hpp; sourceTree = ""; }; - 48887441215B639D0079B12E /* CPUSigmoid.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSigmoid.hpp; sourceTree = ""; }; - 48887445215B639D0079B12E /* CPUNormalize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUNormalize.hpp; sourceTree = ""; }; - 48887446215B639D0079B12E /* CPULSTM.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPULSTM.hpp; sourceTree = ""; }; - 48887447215B639D0079B12E /* CPUPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPool.cpp; sourceTree = ""; }; - 48887448215B639D0079B12E /* CPUDeconvolutionDepthwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDeconvolutionDepthwise.hpp; sourceTree = ""; }; - 4888744B215B639D0079B12E /* CPURank.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURank.hpp; sourceTree = ""; }; - 4888744C215B639D0079B12E /* CPUSpatialProduct.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSpatialProduct.hpp; sourceTree = ""; }; - 4888744D215B639D0079B12E /* CPUShape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; 
path = CPUShape.cpp; sourceTree = ""; }; - 4888744E215B639D0079B12E /* CPUBackend.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBackend.hpp; sourceTree = ""; }; - 4888744F215B639D0079B12E /* CPUConcat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConcat.cpp; sourceTree = ""; }; - 48887450215B639D0079B12E /* CPUInnerProduct.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUInnerProduct.hpp; sourceTree = ""; }; - 48887452215B639D0079B12E /* CPUPack.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPack.hpp; sourceTree = ""; }; - 48887453215B639D0079B12E /* CPUNonMaxSuppressionV2.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUNonMaxSuppressionV2.hpp; sourceTree = ""; }; - 48887454215B639D0079B12E /* CPUSliceTf.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSliceTf.hpp; sourceTree = ""; }; - 48887455215B639D0079B12E /* CPUGatherV2.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUGatherV2.hpp; sourceTree = ""; }; - 48887457215B639D0079B12E /* CPUSqueeze.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSqueeze.hpp; sourceTree = ""; }; - 48887458215B639D0079B12E /* CPUTranspose.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTranspose.cpp; sourceTree = ""; }; - 48887459215B639D0079B12E /* CPUFill.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUFill.cpp; sourceTree = ""; }; - 4888745A215B639D0079B12E /* CPUSlice.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSlice.cpp; sourceTree = ""; }; - 
4888745B215B639D0079B12E /* CPUWhere.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUWhere.hpp; sourceTree = ""; }; - 4888745C215B639D0079B12E /* CPULRN.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPULRN.cpp; sourceTree = ""; }; - 4888745D215B639D0079B12E /* CPUStridedSlice.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUStridedSlice.cpp; sourceTree = ""; }; - 4888745E215B639D0079B12E /* CPUROIPooling.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUROIPooling.cpp; sourceTree = ""; }; - 4888745F215B639D0079B12E /* CPUTopKV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTopKV2.cpp; sourceTree = ""; }; - 48887460215B639D0079B12E /* CPUUnpack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUUnpack.cpp; sourceTree = ""; }; - 48887462215B639D0079B12E /* CPUSigmoid.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSigmoid.cpp; sourceTree = ""; }; - 48887463215B639D0079B12E /* CPUQuantizedMaxPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedMaxPool.hpp; sourceTree = ""; }; - 48887464215B639D0079B12E /* CPUReduction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReduction.cpp; sourceTree = ""; }; - 48887465215B639D0079B12E /* CPUReshape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReshape.cpp; sourceTree = ""; }; - 48887466215B639D0079B12E /* CPUDeconvolution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDeconvolution.hpp; sourceTree = ""; }; - 48887467215B639D0079B12E /* 
CPUExpandDims.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUExpandDims.cpp; sourceTree = ""; }; - 48887468215B639D0079B12E /* CPURelu.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURelu.cpp; sourceTree = ""; }; - 4888746B215B639D0079B12E /* CommonOptFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CommonOptFunction.cpp; sourceTree = ""; }; - 4888746D215B639D0079B12E /* Convolution3x3.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Convolution3x3.cpp; sourceTree = ""; }; - 4888746F215B639D0079B12E /* CommonOptFunction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonOptFunction.h; sourceTree = ""; }; - 48887470215B639D0079B12E /* ConvolutionWinograd.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionWinograd.cpp; sourceTree = ""; }; - 48887471215B639D0079B12E /* Int8FunctionsOpt.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Int8FunctionsOpt.cpp; sourceTree = ""; }; - 48887473215B639D0079B12E /* ConvOpt.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvOpt.cpp; sourceTree = ""; }; - 48887474215B639D0079B12E /* ConvolutionTiledExecutor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionTiledExecutor.hpp; sourceTree = ""; }; - 48887476215B639D0079B12E /* ConvolutionIntFactory.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionIntFactory.cpp; sourceTree = ""; }; - 48887477215B639D0079B12E /* ConvolutionGroup.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionGroup.hpp; 
sourceTree = ""; }; - 48887478215B639D0079B12E /* ConvolutionFloatFactory.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConvolutionFloatFactory.h; sourceTree = ""; }; - 4888747A215B639D0079B12E /* ConvolutionInt8Executor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionInt8Executor.cpp; sourceTree = ""; }; - 4888747B215B639D0079B12E /* ResizeFunction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ResizeFunction.h; sourceTree = ""; }; - 4888747D215B639D0079B12E /* ConvolutionIntFactory.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionIntFactory.hpp; sourceTree = ""; }; - 4888747F215B639D0079B12E /* ConvolutionGroup.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionGroup.cpp; sourceTree = ""; }; - 48887481215B639D0079B12E /* ConvolutionFloatFactory.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionFloatFactory.cpp; sourceTree = ""; }; - 48887482215B639D0079B12E /* ConvolutionInt8Executor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionInt8Executor.hpp; sourceTree = ""; }; - 48887483215B639D0079B12E /* ResizeFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ResizeFunction.cpp; sourceTree = ""; }; - 48887484215B639D0079B12E /* Convolution3x3.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Convolution3x3.hpp; sourceTree = ""; }; - 48887485215B639D0079B12E /* ConvOpt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConvOpt.h; sourceTree = ""; }; - 48887487215B639D0079B12E /* ConvolutionWinograd.hpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionWinograd.hpp; sourceTree = ""; }; - 48887489215B639D0079B12E /* Int8FunctionsOpt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Int8FunctionsOpt.h; sourceTree = ""; }; - 4888748A215B639D0079B12E /* ConvolutionTiledExecutor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionTiledExecutor.cpp; sourceTree = ""; }; - 4888748C215B639E0079B12E /* CPUNormalize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUNormalize.cpp; sourceTree = ""; }; - 4888748F215B639E0079B12E /* CPULSTM.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPULSTM.cpp; sourceTree = ""; }; - 48887491215B639E0079B12E /* CPUDeconvolutionDepthwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDeconvolutionDepthwise.cpp; sourceTree = ""; }; - 48887492215B639E0079B12E /* CPUPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPool.hpp; sourceTree = ""; }; - 48887494215B639E0079B12E /* CPUSpatialProduct.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSpatialProduct.cpp; sourceTree = ""; }; - 48887495215B639E0079B12E /* CPURank.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURank.cpp; sourceTree = ""; }; - 48887497215B639E0079B12E /* CPUShape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUShape.hpp; sourceTree = ""; }; - 48887498215B639E0079B12E /* CPUInnerProduct.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUInnerProduct.cpp; sourceTree = ""; }; - 48887499215B639E0079B12E /* CPUBackend.cpp */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBackend.cpp; sourceTree = ""; }; - 4888749A215B639E0079B12E /* CPUConcat.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConcat.hpp; sourceTree = ""; }; - 4888749B215B639E0079B12E /* CPUNonMaxSuppressionV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUNonMaxSuppressionV2.cpp; sourceTree = ""; }; - 4888749C215B639E0079B12E /* CPUSliceTf.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSliceTf.cpp; sourceTree = ""; }; - 4888749D215B639E0079B12E /* CPUPack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPack.cpp; sourceTree = ""; }; - 4888749E215B639E0079B12E /* CPUGatherV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUGatherV2.cpp; sourceTree = ""; }; - 4888749F215B639E0079B12E /* CPUSqueeze.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSqueeze.cpp; sourceTree = ""; }; - 488874A1215B639E0079B12E /* CPUArgMax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUArgMax.hpp; sourceTree = ""; }; - 488874A2215B639E0079B12E /* CPUSelu.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSelu.hpp; sourceTree = ""; }; - 488874A3215B639E0079B12E /* CPUCropAndResize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUCropAndResize.hpp; sourceTree = ""; }; - 488874A5215B639E0079B12E /* CPUConvolutionDepthwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvolutionDepthwise.cpp; sourceTree = ""; }; - 488874A6215B639E0079B12E /* CPURange.cpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURange.cpp; sourceTree = ""; }; - 488874A8215B639E0079B12E /* CPUTFQuantizedConv2D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTFQuantizedConv2D.cpp; sourceTree = ""; }; - 488874A9215B639E0079B12E /* CPUScale.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUScale.hpp; sourceTree = ""; }; - 488874AA215B639E0079B12E /* CPUTensorConvert.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTensorConvert.cpp; sourceTree = ""; }; - 488874AB215B639E0079B12E /* CPUTile.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTile.hpp; sourceTree = ""; }; - 488874AC215B639E0079B12E /* CPUSoftmax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSoftmax.hpp; sourceTree = ""; }; - 488874AE215B639E0079B12E /* MNNAsmGlobal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MNNAsmGlobal.h; sourceTree = ""; }; - 488874B0215B639E0079B12E /* MNNFloat2Int8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNFloat2Int8.S; sourceTree = ""; }; - 488874B1215B639E0079B12E /* MNNGemmFloatUnit_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatUnit_4.S; sourceTree = ""; }; - 488874B2215B639E0079B12E /* MNNConvRunForLineDepthWiseInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthWiseInt8.S; sourceTree = ""; }; - 488874B3215B639E0079B12E /* MNNGemmInt16to32_4x4_Common.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt16to32_4x4_Common.S; sourceTree = ""; }; - 488874B4215B639E0079B12E /* MNNConvRunForUnitDepthWiseInt8.S 
*/ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWiseInt8.S; sourceTree = ""; }; - 488874B5215B639E0079B12E /* MNNMinFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMinFloat.S; sourceTree = ""; }; - 488874B6215B639E0079B12E /* MNNBilinearProcC1.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBilinearProcC1.S; sourceTree = ""; }; - 488874B7215B639E0079B12E /* MNNMaxFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMaxFloat.S; sourceTree = ""; }; - 488874BB215B639E0079B12E /* MNNCubicLineC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCubicLineC4.S; sourceTree = ""; }; - 488874C2215B639E0079B12E /* MNNDeconvRunForUnitDepthWise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNDeconvRunForUnitDepthWise.S; sourceTree = ""; }; - 488874C8215B639E0079B12E /* MNNWinogradMatrixProductLeft.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNWinogradMatrixProductLeft.S; sourceTree = ""; }; - 488874CB215B639E0079B12E /* MNNConvRunForLineDepthwise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthwise.S; sourceTree = ""; }; - 488874CC215B639E0079B12E /* MNNConvSlideWindowMiddle.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvSlideWindowMiddle.S; sourceTree = ""; }; - 488874CD215B639E0079B12E /* MNNScaleBias2FloatC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleBias2FloatC4.S; sourceTree = ""; }; - 488874CF215B639E0079B12E /* MNNAddBiasRelu6.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddBiasRelu6.S; 
sourceTree = ""; }; - 488874D0215B639E0079B12E /* MNNGemmInt16to32_4x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt16to32_4x4_Unit.S; sourceTree = ""; }; - 488874D1215B639E0079B12E /* MNNScaleAndAddBias.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleAndAddBias.S; sourceTree = ""; }; - 488874D4215B639E0079B12E /* MNNGemmFloatCommon_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatCommon_4.S; sourceTree = ""; }; - 488874D5215B639E0079B12E /* MNNCoefLine.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCoefLine.S; sourceTree = ""; }; - 488874D7215B639E0079B12E /* MNNWinogradMatrixProductRight.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNWinogradMatrixProductRight.S; sourceTree = ""; }; - 488874DA215B639E0079B12E /* MNNFloat2Int8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNFloat2Int8.S; sourceTree = ""; }; - 488874DB215B639E0079B12E /* MNNGemmFloatUnit_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatUnit_4.S; sourceTree = ""; }; - 488874DC215B639E0079B12E /* MNNConvRunForLineDepthWiseInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthWiseInt8.S; sourceTree = ""; }; - 488874DD215B639E0079B12E /* MNNGemmInt16to32_4x4_Common.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt16to32_4x4_Common.S; sourceTree = ""; }; - 488874DE215B639E0079B12E /* MNNConvRunForUnitDepthWiseInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWiseInt8.S; sourceTree = ""; }; - 488874DF215B639E0079B12E /* MNNMinFloat.S */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMinFloat.S; sourceTree = ""; }; - 488874E0215B639E0079B12E /* MNNBilinearProcC1.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBilinearProcC1.S; sourceTree = ""; }; - 488874E1215B639E0079B12E /* MNNMaxFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMaxFloat.S; sourceTree = ""; }; - 488874E5215B639E0079B12E /* MNNCubicLineC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCubicLineC4.S; sourceTree = ""; }; - 488874EC215B639E0079B12E /* MNNDeconvRunForUnitDepthWise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNDeconvRunForUnitDepthWise.S; sourceTree = ""; }; - 488874F3215B639E0079B12E /* MNNWinogradMatrixProductLeft.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNWinogradMatrixProductLeft.S; sourceTree = ""; }; - 488874F7215B639E0079B12E /* MNNConvRunForLineDepthwise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthwise.S; sourceTree = ""; }; - 488874F8215B639E0079B12E /* MNNConvSlideWindowMiddle.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvSlideWindowMiddle.S; sourceTree = ""; }; - 488874F9215B639E0079B12E /* MNNScaleBias2FloatC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleBias2FloatC4.S; sourceTree = ""; }; - 488874FB215B639E0079B12E /* MNNAddBiasRelu6.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddBiasRelu6.S; sourceTree = ""; }; - 488874FC215B639E0079B12E /* MNNGemmInt16to32_4x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt16to32_4x4_Unit.S; 
sourceTree = ""; }; - 488874FD215B639E0079B12E /* MNNScaleAndAddBias.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleAndAddBias.S; sourceTree = ""; }; - 48887500215B639E0079B12E /* MNNGemmFloatCommon_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatCommon_4.S; sourceTree = ""; }; - 48887501215B639E0079B12E /* MNNCoefLine.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCoefLine.S; sourceTree = ""; }; - 48887503215B639E0079B12E /* MNNWinogradMatrixProductRight.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNWinogradMatrixProductRight.S; sourceTree = ""; }; - 48887504215B639E0079B12E /* CPUQuantizedAvgPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedAvgPool.cpp; sourceTree = ""; }; - 48887505215B639E0079B12E /* CPUConvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvolution.cpp; sourceTree = ""; }; - 48887506215B639E0079B12E /* CPUEltwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUEltwise.cpp; sourceTree = ""; }; - 48887508215B639E0079B12E /* CPUCast.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUCast.hpp; sourceTree = ""; }; - 48887509215B639E0079B12E /* CPUResize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUResize.hpp; sourceTree = ""; }; - 4888750A215B639E0079B12E /* CPUGather.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUGather.hpp; sourceTree = ""; }; - 4888750B215B639E0079B12E /* CPUAsString.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUAsString.cpp; 
sourceTree = ""; }; - 4888750C215B639E0079B12E /* CPUTanh.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTanh.cpp; sourceTree = ""; }; - 4888750D215B639E0079B12E /* CPUProposal.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUProposal.hpp; sourceTree = ""; }; - 4888750E215B639E0079B12E /* CPUInterp.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUInterp.hpp; sourceTree = ""; }; - 4888750F215B639E0079B12E /* CPUReduceJoin.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReduceJoin.cpp; sourceTree = ""; }; - 48887510215B639E0079B12E /* CPUConst.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConst.hpp; sourceTree = ""; }; - 48887511215B639E0079B12E /* CPUPriorbox.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPriorbox.cpp; sourceTree = ""; }; - 48887512215B639E0079B12E /* CPUSize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSize.hpp; sourceTree = ""; }; - 48887513215B639E0079B12E /* CPUQuantizedReshape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedReshape.cpp; sourceTree = ""; }; - 48887514215B639E0079B12E /* CPUDetectionOutput.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDetectionOutput.hpp; sourceTree = ""; }; - 48887515215B639E0079B12E /* CPUMatMul.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUMatMul.hpp; sourceTree = ""; }; - 48887517215B639E0079B12E /* CPUPermute.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPermute.cpp; sourceTree = ""; }; 48887567215B639E0079B12E /* 
WingoradGenerater.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WingoradGenerater.cpp; sourceTree = ""; }; - 48887568215B639E0079B12E /* Matrix.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Matrix.hpp; sourceTree = ""; }; - 48887569215B639E0079B12E /* WingoradGenerater.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = WingoradGenerater.hpp; sourceTree = ""; }; 4888756A215B639E0079B12E /* Matrix.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Matrix.cpp; sourceTree = ""; }; - 4888773D215CD3BF0079B12E /* MNNBlitC3ToFloatRGBA.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNBlitC3ToFloatRGBA.S; sourceTree = ""; }; - 4888773F215CD3D00079B12E /* MNNBlitC1ToFloatRGBA.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNBlitC1ToFloatRGBA.S; sourceTree = ""; }; - 48887741215CFF7B0079B12E /* MNNBlitC3ToFloatRGBA.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBlitC3ToFloatRGBA.S; sourceTree = ""; }; - 48887742215CFF7B0079B12E /* MNNBlitC1ToFloatRGBA.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBlitC1ToFloatRGBA.S; sourceTree = ""; }; - 489BFA2A230E3D1F00F6B785 /* FileLoader.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = FileLoader.hpp; sourceTree = ""; }; - 489BFA2B230E3D1F00F6B785 /* FileLoader.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FileLoader.cpp; sourceTree = ""; }; - 48A8A60121CDF55E00C2B9A7 /* MNNSamplerC1NearestOpt.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNSamplerC1NearestOpt.S; sourceTree = ""; }; - 48A8A60321CDF86F00C2B9A7 /* MNNSamplerC1NearestOpt.S */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC1NearestOpt.S; sourceTree = ""; }; - 48A8A60421CDF86F00C2B9A7 /* MNNSamplerC4NearestOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC4NearestOpt.S; sourceTree = ""; }; - 48A8A60821D101A700C2B9A7 /* ImageSampler.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ImageSampler.hpp; sourceTree = ""; }; - 48A8A60921D101A700C2B9A7 /* ImageBlitter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ImageBlitter.hpp; sourceTree = ""; }; - 48A8A60A21D101A700C2B9A7 /* ImageFloatBlitter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ImageFloatBlitter.hpp; sourceTree = ""; }; 48A8A60B21D101A700C2B9A7 /* ImageProcess.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ImageProcess.cpp; sourceTree = ""; }; 48A8A60C21D101A700C2B9A7 /* ImageSampler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ImageSampler.cpp; sourceTree = ""; }; 48A8A60D21D101A700C2B9A7 /* ImageBlitter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ImageBlitter.cpp; sourceTree = ""; }; 48A8A60E21D101A700C2B9A7 /* ImageFloatBlitter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ImageFloatBlitter.cpp; sourceTree = ""; }; - 48A8A61621D101DD00C2B9A7 /* SkNx_neon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SkNx_neon.h; sourceTree = ""; }; 48A8A61721D101DD00C2B9A7 /* Matrix_CV.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Matrix_CV.cpp; sourceTree = ""; }; - 48A8A61821D101DE00C2B9A7 /* SkNx.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SkNx.h; sourceTree = ""; }; - 48A8A61C21D20BE700C2B9A7 /* MNNNV21ToRGBUnit.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNNV21ToRGBUnit.S; sourceTree = ""; }; - 48A8A61E21D235DF00C2B9A7 /* MNNNV21ToRGBUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToRGBUnit.S; sourceTree = ""; }; - 48A8A62021D3569800C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8toFloat32_8x4_Unit.S; sourceTree = ""; }; - 48A8A62221D37FB500C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8toFloat32_8x4_Unit.S; sourceTree = ""; }; - 48A8A62421D47B5A00C2B9A7 /* OptimizedComputer.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = OptimizedComputer.hpp; sourceTree = ""; }; - 48A8A62521D47B5A00C2B9A7 /* OptimizedComputer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OptimizedComputer.cpp; sourceTree = ""; }; - 48A8A62821D5FE1D00C2B9A7 /* MNNNV21ToRGBAUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToRGBAUnit.S; sourceTree = ""; }; - 48A8A62A21D5FE3100C2B9A7 /* MNNNV21ToRGBAUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToRGBAUnit.S; sourceTree = ""; }; - 48A8A63621D8A43D00C2B9A7 /* BufferAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BufferAllocator.cpp; sourceTree = ""; }; - 48AE9E9D2211950B009DB6F4 /* StrassenMatmulComputor.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = StrassenMatmulComputor.cpp; sourceTree = ""; }; - 48AE9E9E2211950B009DB6F4 /* 
StrassenMatmulComputor.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = StrassenMatmulComputor.hpp; sourceTree = ""; }; - 48AE9EA12212B2C2009DB6F4 /* Convolution1x1Strassen.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = Convolution1x1Strassen.cpp; sourceTree = ""; }; - 48AE9EA22212B2C2009DB6F4 /* Convolution1x1Strassen.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = Convolution1x1Strassen.hpp; sourceTree = ""; }; - 48AE9EA52212D3F9009DB6F4 /* MNNMatrixSub.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNMatrixSub.S; sourceTree = ""; }; - 48AE9EA72212D403009DB6F4 /* MNNMatrixAdd.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNMatrixAdd.S; sourceTree = ""; }; - 48AE9EA92212E94F009DB6F4 /* MNNMatrixAdd.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixAdd.S; sourceTree = ""; }; - 48AE9EAA2212E94F009DB6F4 /* MNNMatrixSub.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixSub.S; sourceTree = ""; }; - 48AE9EAD22151E20009DB6F4 /* MNNStrassenMergeCFunction.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNStrassenMergeCFunction.S; sourceTree = ""; }; - 48AE9EAF221539C2009DB6F4 /* MNNStrassenMergeCFunction.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNStrassenMergeCFunction.S; sourceTree = ""; }; - 48AE9EB122154C9D009DB6F4 /* MNNGemmFloatOne_4.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatOne_4.S; sourceTree = ""; }; - 48AE9EB32215628D009DB6F4 /* MNNGemmFloatOne_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatOne_4.S; sourceTree = ""; }; - 48B904A022953DFF003116BB /* CPUSelect.cpp */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.cpp.cpp; path = CPUSelect.cpp; sourceTree = ""; }; - 48B904A122953DFF003116BB /* CPUSelect.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = CPUSelect.hpp; sourceTree = ""; }; - 48B904A422953E0F003116BB /* CPUZeroLike.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = CPUZeroLike.cpp; sourceTree = ""; }; - 48B904A522953E0F003116BB /* CPUZeroLike.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = CPUZeroLike.hpp; sourceTree = ""; }; - 48B904A8229550CF003116BB /* ShapeSelect.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSelect.cpp; sourceTree = ""; }; - 48BF218121A3E4C300AFF78E /* MNNSamplerC4BilinearOpt.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNSamplerC4BilinearOpt.S; sourceTree = ""; }; - 48BF218321A4073500AFF78E /* MNNSamplerC4BilinearOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC4BilinearOpt.S; sourceTree = ""; }; - 48BF218521A4257500AFF78E /* MNNSamplerC1BilinearOpt.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNSamplerC1BilinearOpt.S; sourceTree = ""; }; - 48BF218721A4380A00AFF78E /* MNNSamplerC1BilinearOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC1BilinearOpt.S; sourceTree = ""; }; - 48BF21BD21ABBDA300AFF78E /* MNNLoadU8AndSum.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNLoadU8AndSum.S; sourceTree = ""; }; - 48BF21C021ABC45100AFF78E /* MNNLoadU8AndSum.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNLoadU8AndSum.S; sourceTree = ""; }; - 48BF21F321CA43AE00AFF78E /* MNNSamplerC4NearestOpt.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNSamplerC4NearestOpt.S; sourceTree = ""; }; 48C054862201996200E91945 /* 
MetalConvolutionWinograd.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolutionWinograd.mm; sourceTree = ""; }; - 48C054872201996200E91945 /* MetalConvolutionWinograd.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalConvolutionWinograd.hpp; sourceTree = ""; }; - 48C054912205B91A00E91945 /* MNNPackC4.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNPackC4.S; sourceTree = ""; }; - 48C054932205B94400E91945 /* MNNUnPackC4.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNUnPackC4.S; sourceTree = ""; }; - 48C054952205B9A500E91945 /* MNNPackC4.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNPackC4.S; sourceTree = ""; }; - 48C054972205B9B400E91945 /* MNNUnPackC4.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNUnPackC4.S; sourceTree = ""; }; - 48C054992205BB8400E91945 /* MNNConvSlideWindowBorder.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNConvSlideWindowBorder.S; sourceTree = ""; }; - 48C0549B2205BC8C00E91945 /* MNNConvSlideWindowBorder.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNConvSlideWindowBorder.S; sourceTree = ""; }; - 48C0549E22081AC200E91945 /* MNNAddBias.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNAddBias.S; sourceTree = ""; }; - 48C054A022081B5B00E91945 /* MNNReluWithSlope.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNReluWithSlope.S; sourceTree = ""; }; - 48C054A222081C9B00E91945 /* MNNAddBiasRelu.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNAddBiasRelu.S; sourceTree = ""; }; - 48C054A422081CDA00E91945 /* MNNReluWithSlopeChannel.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNReluWithSlopeChannel.S; sourceTree = ""; }; - 48C054A6220A745900E91945 /* MNNAddBiasRelu.S */ = {isa 
= PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNAddBiasRelu.S; sourceTree = ""; }; - 48C054A8220A749100E91945 /* MNNAddBias.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNAddBias.S; sourceTree = ""; }; - 48C054AA220A74B200E91945 /* MNNReluWithSlope.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNReluWithSlope.S; sourceTree = ""; }; - 48C054AC220A74D800E91945 /* MNNReluWithSlopeChannel.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNReluWithSlopeChannel.S; sourceTree = ""; }; - 48C054AE220A758B00E91945 /* MNNCubicSampleC4.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNCubicSampleC4.S; sourceTree = ""; }; - 48C054B0220A762C00E91945 /* MNNConvRunForUnitDepthWise.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWise.S; sourceTree = ""; }; - 48C054B2220A7A4600E91945 /* MNNCubicSampleC4.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNCubicSampleC4.S; sourceTree = ""; }; - 48C054B4220A7A9600E91945 /* MNNConvRunForUnitDepthWise.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWise.S; sourceTree = ""; }; - 48C5E79922FBF87600EAC2A6 /* ShapeSpaceToDepth.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSpaceToDepth.cpp; sourceTree = ""; }; - 48C5E79A22FBF87600EAC2A6 /* ShapeDepthToSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDepthToSpace.cpp; sourceTree = ""; }; - 48C5E79D2306C84400EAC2A6 /* MNNGemmint8to32_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmint8to32_8x4_Unit.S; sourceTree = ""; }; - 48C5E79F2306C84D00EAC2A6 /* MNNGemmint8to32_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path 
= MNNGemmint8to32_8x4_Unit.S; sourceTree = ""; }; - 48DA297C21F1F7CF00E3BEB2 /* MNNExpC8.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNExpC8.S; sourceTree = ""; }; - 48DA297E21F2051800E3BEB2 /* MNNExpC8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNExpC8.S; sourceTree = ""; }; - 48EB45E32251AC9D006C2322 /* Vec4.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = Vec4.hpp; sourceTree = ""; }; - 48EB45E42254B9D2006C2322 /* ConvolutionDepthwise3x3.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionDepthwise3x3.cpp; sourceTree = ""; }; - 48EB45E52254B9D2006C2322 /* ConvolutionDepthwise3x3.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionDepthwise3x3.hpp; sourceTree = ""; }; - 48EB45E822559525006C2322 /* MNNConvDwF23MulTransUnit.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNConvDwF23MulTransUnit.S; sourceTree = ""; }; - 48EB45EA2255B70C006C2322 /* MNNConvDwF23SourceTransUnit.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNConvDwF23SourceTransUnit.S; sourceTree = ""; }; - 48EB45EC2255D270006C2322 /* MNNConvDwF23MulTransUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvDwF23MulTransUnit.S; sourceTree = ""; }; - 48EB45ED2255D270006C2322 /* MNNConvDwF23SourceTransUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvDwF23SourceTransUnit.S; sourceTree = ""; }; - 71E8789E2203E88500268E24 /* MNNNV21ToBGRUnit.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNNV21ToBGRUnit.S; sourceTree = ""; }; - 71E878A12203E9D200268E24 /* MNNNV21ToBGRUnit.S */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm; path = MNNNV21ToBGRUnit.S; sourceTree = ""; }; + 48FA473C23AA127A00172C3B /* 
MergeOptimizer.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MergeOptimizer.hpp; sourceTree = ""; }; + 48FA473D23AA127B00172C3B /* Executor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Executor.cpp; sourceTree = ""; }; + 48FA473E23AA127B00172C3B /* NeuralNetWorkOp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = NeuralNetWorkOp.cpp; sourceTree = ""; }; + 48FA473F23AA127B00172C3B /* Optimizer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Optimizer.cpp; sourceTree = ""; }; + 48FA474023AA127B00172C3B /* Expr.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Expr.cpp; sourceTree = ""; }; + 48FA474123AA127B00172C3B /* MathOp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MathOp.cpp; sourceTree = ""; }; + 48FA474223AA127B00172C3B /* Utils.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Utils.cpp; sourceTree = ""; }; + 48FA474323AA127B00172C3B /* Utils.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Utils.hpp; sourceTree = ""; }; + 48FA474C23AA136300172C3B /* MergeOptimizer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MergeOptimizer.cpp; sourceTree = ""; }; 9200045321EDBCF700BCE892 /* MNNTestSuite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = MNNTestSuite.h; path = ../../../test/MNNTestSuite.h; sourceTree = ""; }; 9200045521EDBCF700BCE892 /* TestUtils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = TestUtils.h; path = ../../../test/TestUtils.h; sourceTree = ""; }; 9200045721EDBCF700BCE892 /* TestUtils.mm */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.cpp.objcpp; name = TestUtils.mm; path = ../../../test/TestUtils.mm; sourceTree = ""; }; @@ -1316,95 +1572,37 @@ 9200049721EDBDF600BCE892 /* CropAndResizeTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CropAndResizeTest.cpp; sourceTree = ""; }; 920004D521EDC30E00BCE892 /* MetalDequantize.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalDequantize.mm; sourceTree = ""; }; 920004D621EDC30E00BCE892 /* MetalDequantize.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalDequantize.metal; sourceTree = ""; }; - 920004D721EDC30E00BCE892 /* MetalDequantize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalDequantize.hpp; sourceTree = ""; }; - 921722CF21DDF63A004583BF /* MNN_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MNN_generated.h; sourceTree = ""; }; - 921722D021DDF63A004583BF /* Type_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Type_generated.h; sourceTree = ""; }; - 921722D121DDF63A004583BF /* GpuLibrary_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GpuLibrary_generated.h; sourceTree = ""; }; - 921722D321DDF63A004583BF /* Tensor_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Tensor_generated.h; sourceTree = ""; }; - 921722D521DDF63A004583BF /* TFQuantizeOp_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TFQuantizeOp_generated.h; sourceTree = ""; }; 9223E10D21D327F40067544A /* MetalSqueeze.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSqueeze.mm; sourceTree = ""; }; - 9223E10E21D327F40067544A /* MetalSqueeze.hpp */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalSqueeze.hpp; sourceTree = ""; }; 9223E11621D34BE40067544A /* MetalSpaceToBatchND.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSpaceToBatchND.mm; sourceTree = ""; }; - 9223E11721D34BE40067544A /* MetalSpaceToBatchND.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalSpaceToBatchND.hpp; sourceTree = ""; }; 9223E11A21D34C260067544A /* MetalSpaceToBatchND.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalSpaceToBatchND.metal; sourceTree = ""; }; - 9223E11C21D34C6B0067544A /* CPUBatchToSpaceND.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBatchToSpaceND.hpp; sourceTree = ""; }; - 9223E11D21D34C6B0067544A /* CPUBatchToSpaceND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBatchToSpaceND.cpp; sourceTree = ""; }; - 9223E11E21D34C6B0067544A /* CPUSpaceToBatchND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSpaceToBatchND.cpp; sourceTree = ""; }; - 9223E11F21D34C6B0067544A /* CPUSpaceToBatchND.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSpaceToBatchND.hpp; sourceTree = ""; }; 9223E12721D3755F0067544A /* MetalBatchToSpaceND.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalBatchToSpaceND.mm; sourceTree = ""; }; - 9223E12821D3755F0067544A /* MetalBatchToSpaceND.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalBatchToSpaceND.hpp; sourceTree = ""; }; 9223E12B21D3756B0067544A /* MetalBatchToSpaceND.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalBatchToSpaceND.metal; sourceTree = ""; }; 92256933219D14CD00F251E2 /* MetalSliceTF.mm */ = {isa = PBXFileReference; lastKnownFileType 
= sourcecode.cpp.objcpp; path = MetalSliceTF.mm; sourceTree = ""; }; - 92256934219D14CD00F251E2 /* MetalSliceTF.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalSliceTF.hpp; sourceTree = ""; }; 92256937219D150900F251E2 /* MetalSliceTF.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalSliceTF.metal; sourceTree = ""; }; 92256945219D698100F251E2 /* MetalRank.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalRank.mm; sourceTree = ""; }; - 92256946219D698100F251E2 /* MetalRank.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalRank.hpp; sourceTree = ""; }; 92256949219D698900F251E2 /* MetalRank.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalRank.metal; sourceTree = ""; }; 9225694E219D6E0200F251E2 /* MetalRange.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalRange.mm; sourceTree = ""; }; - 9225694F219D6E0200F251E2 /* MetalRange.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalRange.hpp; sourceTree = ""; }; 92256952219D6E1000F251E2 /* MetalRange.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalRange.metal; sourceTree = ""; }; 92351C8521992AB2002CA341 /* MetalQuantizedAdd.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalQuantizedAdd.mm; sourceTree = ""; }; - 92351C8621992AB2002CA341 /* MetalQuantizedAdd.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalQuantizedAdd.hpp; sourceTree = ""; }; 92351C8921992AC6002CA341 /* MetalQuantizedAdd.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalQuantizedAdd.metal; sourceTree = ""; }; - 92369E60222544DD009D3A05 /* MetalConvolutionGEMM.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalConvolutionGEMM.hpp; 
sourceTree = ""; }; 92369E61222544DD009D3A05 /* MetalConvolutionGEMM.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolutionGEMM.mm; sourceTree = ""; }; 92369E63222544FE009D3A05 /* MetalConvolutionGEMM.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolutionGEMM.metal; sourceTree = ""; }; 923B7F8721A653AB002AFCE0 /* MetalGather.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalGather.mm; sourceTree = ""; }; - 923B7F8821A653AB002AFCE0 /* MetalGather.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalGather.hpp; sourceTree = ""; }; 923B7F8B21A653BB002AFCE0 /* MetalGather.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalGather.metal; sourceTree = ""; }; 923B7F9021A68091002AFCE0 /* MetalGatherV2.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalGatherV2.mm; sourceTree = ""; }; - 923B7F9121A68091002AFCE0 /* MetalGatherV2.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalGatherV2.hpp; sourceTree = ""; }; 923B7F9421A680A1002AFCE0 /* MetalGatherV2.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalGatherV2.metal; sourceTree = ""; }; 923B7F9921A69E2E002AFCE0 /* MetalQuantizedReshape.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalQuantizedReshape.mm; sourceTree = ""; }; - 923B7F9A21A69E2E002AFCE0 /* MetalQuantizedReshape.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalQuantizedReshape.hpp; sourceTree = ""; }; 923B7FA221A6C92F002AFCE0 /* MetalCropAndResize.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalCropAndResize.mm; sourceTree = ""; }; - 923B7FA321A6C92F002AFCE0 /* MetalCropAndResize.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = 
MetalCropAndResize.hpp; sourceTree = ""; }; 923B7FA621A6C940002AFCE0 /* MetalCropAndResize.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalCropAndResize.metal; sourceTree = ""; }; 9243106A2239FE0A0016DA25 /* MetalSize.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSize.mm; sourceTree = ""; }; - 9243106B2239FE0B0016DA25 /* MetalSize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalSize.hpp; sourceTree = ""; }; 9243106E2239FE190016DA25 /* MetalSize.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalSize.metal; sourceTree = ""; }; 924B11AB21E73B9C006B37DB /* XCTest.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = XCTest.framework; path = Platforms/iPhoneOS.platform/Developer/Library/Frameworks/XCTest.framework; sourceTree = DEVELOPER_DIR; }; - 924F131721A81C74006D46A4 /* MetalTranspose.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalTranspose.mm; sourceTree = ""; }; - 924F131821A81C74006D46A4 /* MetalTranspose.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalTranspose.hpp; sourceTree = ""; }; - 924F131B21A81C80006D46A4 /* MetalTranspose.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalTranspose.metal; sourceTree = ""; }; - 924F132021ABD470006D46A4 /* MetalQuantizedSoftmax.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalQuantizedSoftmax.mm; sourceTree = ""; }; - 924F132121ABD470006D46A4 /* MetalQuantizedSoftmax.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalQuantizedSoftmax.hpp; sourceTree = ""; }; - 924F132421ABD47F006D46A4 /* MetalQuantizedSoftmax.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = 
MetalQuantizedSoftmax.metal; sourceTree = ""; }; - 924F132621ABEA28006D46A4 /* MetalFixedPoint.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalFixedPoint.metal; sourceTree = ""; }; 925702CE21EF0F5300A2A3CA /* TensorUtilsTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = TensorUtilsTest.cpp; sourceTree = ""; }; 925702D121EF270D00A2A3CA /* BufferAllocatorTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = BufferAllocatorTest.cpp; sourceTree = ""; }; 925702F521EF604400A2A3CA /* SizeComputerTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = SizeComputerTest.cpp; sourceTree = ""; }; - 92575977219EA07F00918499 /* MetalStridedSlice.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalStridedSlice.mm; sourceTree = ""; }; - 92575978219EA07F00918499 /* MetalStridedSlice.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalStridedSlice.hpp; sourceTree = ""; }; - 9257597B219EA08400918499 /* MetalStridedSlice.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalStridedSlice.metal; sourceTree = ""; }; - 9258013C2223B77C00555D43 /* MetalConvolutionDepthwise.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolutionDepthwise.mm; sourceTree = ""; }; - 9258013D2223B77C00555D43 /* MetalConvolutionDepthwise.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalConvolutionDepthwise.hpp; sourceTree = ""; }; - 925801402223B79600555D43 /* MetalConvolutionDepthwise.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolutionDepthwise.metal; sourceTree = ""; }; - 925801422223B8D100555D43 /* MetalConvolutionCommon.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolutionCommon.mm; sourceTree = ""; }; - 
925801432223B8D100555D43 /* MetalConvolutionCommon.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalConvolutionCommon.hpp; sourceTree = ""; }; - 925A89112223951200D22428 /* MetalConvolutionActivation.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolutionActivation.metal; sourceTree = ""; }; - 925A8913222395ED00D22428 /* MetalConvolution1x1.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolution1x1.mm; sourceTree = ""; }; - 925A8914222395ED00D22428 /* MetalConvolution1x1.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalConvolution1x1.hpp; sourceTree = ""; }; - 925A89172223961F00D22428 /* MetalConvolution1x1.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolution1x1.metal; sourceTree = ""; }; - 925E87DF220447900000192E /* MetalConvolutionWinograd.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolutionWinograd.metal; sourceTree = ""; }; 925F018821FF1E0B00E648A1 /* SqueezeNetTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = SqueezeNetTest.cpp; path = ../../test/Model/SqueezeNetTest.cpp; sourceTree = SOURCE_ROOT; }; 925F018A21FF222E00E648A1 /* model */ = {isa = PBXFileReference; lastKnownFileType = folder; name = model; path = ../../resource/model; sourceTree = ""; }; 925F018C21FFF3D300E648A1 /* MobileNetTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = MobileNetTest.cpp; path = ../../test/Model/MobileNetTest.cpp; sourceTree = SOURCE_ROOT; }; - 9260B27021A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalQuantizedMaxPool.mm; sourceTree = ""; }; - 9260B27121A7C5CD00D48C97 /* MetalQuantizedMaxPool.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = 
MetalQuantizedMaxPool.hpp; sourceTree = ""; }; - 9260B27421A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalQuantizedMaxPool.metal; sourceTree = ""; }; - 9260B27621A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalQuantizedAvgPool.mm; sourceTree = ""; }; - 9260B27721A7C5EA00D48C97 /* MetalQuantizedAvgPool.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalQuantizedAvgPool.hpp; sourceTree = ""; }; - 9260B27A21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalQuantizedAvgPool.metal; sourceTree = ""; }; - 92682C4B2181729200B52B9D /* MetalTile.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalTile.mm; sourceTree = ""; }; - 92682C4C2181729200B52B9D /* MetalTile.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalTile.hpp; sourceTree = ""; }; - 92682C4F218172A300B52B9D /* MetalTile.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalTile.metal; sourceTree = ""; }; - 92682C5121819BF100B52B9D /* MetalSeLU.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSeLU.mm; sourceTree = ""; }; - 92682C5221819BF100B52B9D /* MetalSeLU.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalSeLU.hpp; sourceTree = ""; }; - 92682C5521819BFA00B52B9D /* MetalSeLU.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalSeLU.metal; sourceTree = ""; }; - 92682C5D2181A2EF00B52B9D /* MetalFill.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalFill.mm; sourceTree = ""; }; - 92682C5E2181A2EF00B52B9D /* MetalFill.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalFill.hpp; 
sourceTree = ""; }; - 92682C612181A2F900B52B9D /* MetalFill.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalFill.metal; sourceTree = ""; }; 926F5C5F1FFF3D360078EE0A /* libc.tbd */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.text-based-dylib-definition"; name = libc.tbd; path = usr/lib/libc.tbd; sourceTree = SDKROOT; }; 9273AB4B1FE7BE4D00477B22 /* Playground.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = Playground.app; sourceTree = BUILT_PRODUCTS_DIR; }; 9273AB4D1FE7BE4D00477B22 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; @@ -1412,171 +1610,510 @@ 9273AB561FE7BE4D00477B22 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; 9273AB5B1FE7BE4D00477B22 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 9273AB5C1FE7BE4D00477B22 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; - 92921A84219C24CD00B063D1 /* MetalPack.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalPack.mm; sourceTree = ""; }; - 92921A85219C24CD00B063D1 /* MetalPack.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalPack.hpp; sourceTree = ""; }; - 92921A88219C272B00B063D1 /* MetalPack.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalPack.metal; sourceTree = ""; }; - 92965EDD2175B3C300B86ABE /* MetalConcat.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConcat.metal; sourceTree = ""; }; 92A4E0FB21F05A4F000B0919 /* MemoryUtilsTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = MemoryUtilsTest.cpp; sourceTree = ""; }; 
92A4E10221F07C76000B0919 /* AutoStorageTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = AutoStorageTest.cpp; sourceTree = ""; }; - 92A6476822014A7900DDD1C4 /* MetalDefine.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MetalDefine.h; sourceTree = ""; }; - 92A8D6FE21A40695009C2201 /* MetalTFQuantizedConv2D.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalTFQuantizedConv2D.mm; sourceTree = ""; }; - 92A8D6FF21A40695009C2201 /* MetalTFQuantizedConv2D.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalTFQuantizedConv2D.hpp; sourceTree = ""; }; - 92A8D70221A406A8009C2201 /* MetalTFQuantizedConv2D.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalTFQuantizedConv2D.metal; sourceTree = ""; }; - 92A8D70721A54087009C2201 /* MetalDefine.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalDefine.metal; sourceTree = ""; }; - 92C674F722549A1600011D33 /* MetalReLU6.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalReLU6.mm; sourceTree = ""; }; - 92C674F822549A1600011D33 /* MetalReLU6.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalReLU6.hpp; sourceTree = ""; }; - 92C674FB22549A2500011D33 /* MetalReLU6.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalReLU6.metal; sourceTree = ""; }; 92C674FD22549C9900011D33 /* ReLU6Test.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = ReLU6Test.cpp; sourceTree = ""; }; - 92D765962228176500178BE5 /* TensorflowOp_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorflowOp_generated.h; sourceTree = ""; }; - 92D765972228176500178BE5 /* CaffeOp_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
CaffeOp_generated.h; sourceTree = ""; }; - 92D765982228176500178BE5 /* UserDefine_generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UserDefine_generated.h; sourceTree = ""; }; - 92D7659C2228188500178BE5 /* Pipeline.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Pipeline.cpp; sourceTree = ""; }; - 92D7659D2228188500178BE5 /* WrapExecution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WrapExecution.cpp; sourceTree = ""; }; - 92D7659F2228188500178BE5 /* Session.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Session.hpp; sourceTree = ""; }; - 92D765A02228188600178BE5 /* Schedule.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Schedule.hpp; sourceTree = ""; }; - 92D765A12228188600178BE5 /* Interpreter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Interpreter.cpp; sourceTree = ""; }; - 92D765A22228188600178BE5 /* WrapExecution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = WrapExecution.hpp; sourceTree = ""; }; - 92D765A32228188600178BE5 /* BackendFactory.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BackendFactory.cpp; sourceTree = ""; }; - 92D765A42228188600178BE5 /* BackendFactory.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = BackendFactory.hpp; sourceTree = ""; }; - 92D765A52228188600178BE5 /* Session.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Session.cpp; sourceTree = ""; }; - 92D765A72228188700178BE5 /* Schedule.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Schedule.cpp; sourceTree = ""; }; - 
92D765A82228188700178BE5 /* Pipeline.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Pipeline.hpp; sourceTree = ""; }; 92D765B8222819EF00178BE5 /* BackendTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BackendTest.cpp; sourceTree = ""; }; 92D765B9222819EF00178BE5 /* ScheduleTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ScheduleTest.cpp; sourceTree = ""; }; 92D765BA222819EF00178BE5 /* DirectedAcyclicGraphTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DirectedAcyclicGraphTest.cpp; sourceTree = ""; }; - 92D765BE22281CFF00178BE5 /* DirectedAcyclicGraph.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = DirectedAcyclicGraph.hpp; sourceTree = ""; }; 92EAC19721CB3CD60056F4C2 /* MetalCast.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalCast.mm; sourceTree = ""; }; - 92EAC19821CB3CD60056F4C2 /* MetalCast.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalCast.hpp; sourceTree = ""; }; 92EAC19B21CB3CE20056F4C2 /* MetalCast.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalCast.metal; sourceTree = ""; }; - 92EEFE3B217F0CBB00F89377 /* CPUCrop.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUCrop.hpp; sourceTree = ""; }; - 92EEFE8B217F0CBB00F89377 /* CPUCrop.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUCrop.cpp; sourceTree = ""; }; 92EEFF25217F0EEF00F89377 /* MetalCrop.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalCrop.mm; sourceTree = ""; }; - 92EEFF26217F0EEF00F89377 /* MetalCrop.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = 
MetalCrop.hpp; sourceTree = ""; }; 92EEFF29217F0F0F00F89377 /* MetalCrop.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalCrop.metal; sourceTree = ""; }; 92EEFF2E2180159600F89377 /* MetalReduction.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalReduction.mm; sourceTree = ""; }; - 92EEFF2F2180159600F89377 /* MetalReduction.hpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.h; path = MetalReduction.hpp; sourceTree = ""; }; 92EEFF32218015A300F89377 /* MetalReduction.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalReduction.metal; sourceTree = ""; }; - AE7BE47A22816FC9002CEEA6 /* ShapeMoments.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeMoments.cpp; sourceTree = ""; }; - AE7BE4B6228555A2002CEEA6 /* BackendRegister.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BackendRegister.cpp; sourceTree = ""; }; - AE7BE4B822855638002CEEA6 /* ShapeRegister.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeRegister.cpp; sourceTree = ""; }; - AE7BE4BA2285564F002CEEA6 /* CPUOPRegister.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUOPRegister.cpp; sourceTree = ""; }; + 92FF00D323AA0B4800AC97F6 /* CPUTanh.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTanh.cpp; sourceTree = ""; }; + 92FF00D423AA0B4800AC97F6 /* CPUPadding.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPadding.hpp; sourceTree = ""; }; + 92FF00D523AA0B4800AC97F6 /* CPUQuanConvolutionDepthwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuanConvolutionDepthwise.cpp; sourceTree = ""; }; + 92FF00D623AA0B4800AC97F6 /* 
CPUSqueeze.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSqueeze.hpp; sourceTree = ""; }; + 92FF00D723AA0B4800AC97F6 /* CPUPoolInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPoolInt8.cpp; sourceTree = ""; }; + 92FF00D823AA0B4800AC97F6 /* CPUDilation2D.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDilation2D.hpp; sourceTree = ""; }; + 92FF00D923AA0B4800AC97F6 /* CPUPoolGrad.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPoolGrad.hpp; sourceTree = ""; }; + 92FF00DA23AA0B4800AC97F6 /* CPUGatherV2.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUGatherV2.hpp; sourceTree = ""; }; + 92FF00DB23AA0B4800AC97F6 /* CPUInterp.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUInterp.hpp; sourceTree = ""; }; + 92FF00DC23AA0B4900AC97F6 /* CPUROIPooling.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUROIPooling.cpp; sourceTree = ""; }; + 92FF00DD23AA0B4900AC97F6 /* CPUPadding.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPadding.cpp; sourceTree = ""; }; + 92FF00DE23AA0B4900AC97F6 /* CPURNNSequenceGRU.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURNNSequenceGRU.hpp; sourceTree = ""; }; + 92FF00DF23AA0B4900AC97F6 /* CPUCropAndResize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUCropAndResize.cpp; sourceTree = ""; }; + 92FF00E023AA0B4900AC97F6 /* CPUSelect.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSelect.hpp; sourceTree = ""; }; + 92FF00E123AA0B4900AC97F6 /* CPUFloatToInt8.cpp */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUFloatToInt8.cpp; sourceTree = ""; }; + 92FF00E223AA0B4900AC97F6 /* CPUExpandDims.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUExpandDims.hpp; sourceTree = ""; }; + 92FF00E323AA0B4900AC97F6 /* CPUQuantizedAvgPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedAvgPool.cpp; sourceTree = ""; }; + 92FF00E423AA0B4900AC97F6 /* CPUProposal.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUProposal.hpp; sourceTree = ""; }; + 92FF00E523AA0B4900AC97F6 /* CPUReduceJoin.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReduceJoin.cpp; sourceTree = ""; }; + 92FF00E623AA0B4900AC97F6 /* CPUStridedSlice.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUStridedSlice.hpp; sourceTree = ""; }; + 92FF00E723AA0B4900AC97F6 /* CPUSelu.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSelu.cpp; sourceTree = ""; }; + 92FF00E823AA0B4900AC97F6 /* CPUNonMaxSuppressionV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUNonMaxSuppressionV2.cpp; sourceTree = ""; }; + 92FF00E923AA0B4900AC97F6 /* CPUReverseSequence.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReverseSequence.hpp; sourceTree = ""; }; + 92FF00EA23AA0B4900AC97F6 /* CPUSliceTf.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSliceTf.hpp; sourceTree = ""; }; + 92FF00EB23AA0B4900AC97F6 /* CPUMatrixBandPart.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUMatrixBandPart.cpp; sourceTree = ""; }; + 92FF00EC23AA0B4900AC97F6 /* 
CPUQuantizationUtils.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizationUtils.hpp; sourceTree = ""; }; + 92FF00ED23AA0B4900AC97F6 /* CPUInt8ToFloat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUInt8ToFloat.cpp; sourceTree = ""; }; + 92FF00EE23AA0B4A00AC97F6 /* CPUElu.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUElu.hpp; sourceTree = ""; }; + 92FF00EF23AA0B4A00AC97F6 /* CPUUnpack.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUUnpack.hpp; sourceTree = ""; }; + 92FF00F023AA0B4A00AC97F6 /* CPUScatterNd.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUScatterNd.hpp; sourceTree = ""; }; + 92FF00F123AA0B4A00AC97F6 /* CPUPoolInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPoolInt8.hpp; sourceTree = ""; }; + 92FF00F223AA0B4A00AC97F6 /* CPUArgMax.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUArgMax.cpp; sourceTree = ""; }; + 92FF00F323AA0B4A00AC97F6 /* CPUConvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvolution.cpp; sourceTree = ""; }; + 92FF00F423AA0B4A00AC97F6 /* CPUInt8ToFloat.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUInt8ToFloat.hpp; sourceTree = ""; }; + 92FF00F523AA0B4A00AC97F6 /* CPUUnary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUUnary.hpp; sourceTree = ""; }; + 92FF00F623AA0B4A00AC97F6 /* CPUFill.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUFill.hpp; sourceTree = ""; }; + 92FF00F723AA0B4A00AC97F6 /* CPUSpaceToBatchND.cpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSpaceToBatchND.cpp; sourceTree = ""; }; + 92FF00F823AA0B4A00AC97F6 /* CPUPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPool.hpp; sourceTree = ""; }; + 92FF00F923AA0B4A00AC97F6 /* CPUQuanConvolutionDepthwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuanConvolutionDepthwise.hpp; sourceTree = ""; }; + 92FF00FA23AA0B4A00AC97F6 /* CPUAsString.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUAsString.hpp; sourceTree = ""; }; + 92FF00FB23AA0B4A00AC97F6 /* CPUReverseSequence.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReverseSequence.cpp; sourceTree = ""; }; + 92FF00FC23AA0B4A00AC97F6 /* CPUTranspose.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTranspose.cpp; sourceTree = ""; }; + 92FF00FD23AA0B4A00AC97F6 /* CPUDeconvolutionDepthwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDeconvolutionDepthwise.cpp; sourceTree = ""; }; + 92FF00FE23AA0B4B00AC97F6 /* CPUFloatToInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUFloatToInt8.hpp; sourceTree = ""; }; + 92FF00FF23AA0B4B00AC97F6 /* CPUCosineSimilarity.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUCosineSimilarity.cpp; sourceTree = ""; }; + 92FF010023AA0B4B00AC97F6 /* CPUSoftmaxGrad.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSoftmaxGrad.hpp; sourceTree = ""; }; + 92FF010123AA0B4B00AC97F6 /* CPUSize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSize.hpp; sourceTree = ""; }; + 92FF010223AA0B4B00AC97F6 /* 
CPUPriorbox.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPriorbox.cpp; sourceTree = ""; }; + 92FF010323AA0B4B00AC97F6 /* CPUBroadcastTo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBroadcastTo.cpp; sourceTree = ""; }; + 92FF010423AA0B4B00AC97F6 /* CPUDeconvolution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDeconvolution.hpp; sourceTree = ""; }; + 92FF010523AA0B4B00AC97F6 /* CPUFixedPoint.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUFixedPoint.hpp; sourceTree = ""; }; + 92FF010623AA0B4B00AC97F6 /* CPUDequantize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDequantize.hpp; sourceTree = ""; }; + 92FF010723AA0B4B00AC97F6 /* CPUConv2DBackPropFilter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConv2DBackPropFilter.hpp; sourceTree = ""; }; + 92FF010823AA0B4B00AC97F6 /* CPUSoftmax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSoftmax.hpp; sourceTree = ""; }; + 92FF010923AA0B4B00AC97F6 /* CPUBatchToSpaceND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBatchToSpaceND.cpp; sourceTree = ""; }; + 92FF010A23AA0B4B00AC97F6 /* CPUReduction.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReduction.hpp; sourceTree = ""; }; + 92FF010B23AA0B4B00AC97F6 /* CPUWhere.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUWhere.hpp; sourceTree = ""; }; + 92FF010C23AA0B4B00AC97F6 /* CPULinSpace.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPULinSpace.hpp; sourceTree = ""; }; + 92FF010D23AA0B4C00AC97F6 /* 
CPUDetectionOutput.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDetectionOutput.cpp; sourceTree = ""; }; + 92FF010E23AA0B4C00AC97F6 /* CPULRN.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPULRN.hpp; sourceTree = ""; }; + 92FF010F23AA0B4C00AC97F6 /* CPUSpatialProduct.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSpatialProduct.hpp; sourceTree = ""; }; + 92FF011023AA0B4C00AC97F6 /* CPUConv2DBackPropFilter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConv2DBackPropFilter.cpp; sourceTree = ""; }; + 92FF011123AA0B4C00AC97F6 /* CPURange.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURange.hpp; sourceTree = ""; }; + 92FF011223AA0B4C00AC97F6 /* CPUMatMul.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUMatMul.cpp; sourceTree = ""; }; + 92FF011323AA0B4C00AC97F6 /* CPUBatchMatMul.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBatchMatMul.cpp; sourceTree = ""; }; + 92FF011423AA0B4C00AC97F6 /* CPUCast.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUCast.hpp; sourceTree = ""; }; + 92FF011523AA0B4C00AC97F6 /* CPUEltwiseInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUEltwiseInt8.hpp; sourceTree = ""; }; + 92FF011623AA0B4C00AC97F6 /* CPUTFQuantizedConv2D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTFQuantizedConv2D.cpp; sourceTree = ""; }; + 92FF011723AA0B4C00AC97F6 /* CPUSlice.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSlice.cpp; sourceTree = ""; }; + 92FF011823AA0B4C00AC97F6 /* 
CPUQuantizedMaxPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedMaxPool.cpp; sourceTree = ""; }; + 92FF011923AA0B4C00AC97F6 /* CPUScale.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUScale.hpp; sourceTree = ""; }; + 92FF011A23AA0B4C00AC97F6 /* CPUPack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPack.cpp; sourceTree = ""; }; + 92FF011B23AA0B4C00AC97F6 /* CPULRN.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPULRN.cpp; sourceTree = ""; }; + 92FF011C23AA0B4D00AC97F6 /* CPUDeconvolutionDepthwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDeconvolutionDepthwise.hpp; sourceTree = ""; }; + 92FF011D23AA0B4D00AC97F6 /* CPUReluGrad.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReluGrad.hpp; sourceTree = ""; }; + 92FF011E23AA0B4D00AC97F6 /* CPUZeroLike.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUZeroLike.hpp; sourceTree = ""; }; + 92FF011F23AA0B4D00AC97F6 /* CPUDepthwiseConvInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDepthwiseConvInt8.hpp; sourceTree = ""; }; + 92FF012023AA0B4D00AC97F6 /* CPUSize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSize.cpp; sourceTree = ""; }; + 92FF012123AA0B4D00AC97F6 /* CPUQuantizedLogistic.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedLogistic.cpp; sourceTree = ""; }; + 92FF012223AA0B4D00AC97F6 /* CPUBinary.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBinary.cpp; sourceTree = ""; }; + 92FF012323AA0B4D00AC97F6 /* 
CPUZeroLike.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUZeroLike.cpp; sourceTree = ""; }; + 92FF012423AA0B4D00AC97F6 /* CPUQuantizedMaxPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedMaxPool.hpp; sourceTree = ""; }; + 92FF012523AA0B4D00AC97F6 /* CPUSelu.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSelu.hpp; sourceTree = ""; }; + 92FF012623AA0B4D00AC97F6 /* CPUGather.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUGather.hpp; sourceTree = ""; }; + 92FF012723AA0B4D00AC97F6 /* CPUCropAndResize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUCropAndResize.hpp; sourceTree = ""; }; + 92FF012823AA0B4D00AC97F6 /* CPUSpaceToDepth.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSpaceToDepth.cpp; sourceTree = ""; }; + 92FF012923AA0B4D00AC97F6 /* CPUConst.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConst.cpp; sourceTree = ""; }; + 92FF012A23AA0B4D00AC97F6 /* CPUQuantizedSoftmax.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedSoftmax.cpp; sourceTree = ""; }; + 92FF012B23AA0B4D00AC97F6 /* CPUConvInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvInt8.cpp; sourceTree = ""; }; + 92FF012C23AA0B4D00AC97F6 /* CPUProposal.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUProposal.cpp; sourceTree = ""; }; + 92FF012D23AA0B4D00AC97F6 /* CPUConcat.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConcat.hpp; sourceTree = ""; }; + 92FF012E23AA0B4E00AC97F6 /* CPUDequantize.cpp */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDequantize.cpp; sourceTree = ""; }; + 92FF012F23AA0B4E00AC97F6 /* CPUBackend.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBackend.cpp; sourceTree = ""; }; + 92FF013023AA0B4E00AC97F6 /* CPUBroadcastTo.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBroadcastTo.hpp; sourceTree = ""; }; + 92FF013123AA0B4E00AC97F6 /* CPUInstanceNorm.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUInstanceNorm.cpp; sourceTree = ""; }; + 92FF013223AA0B4E00AC97F6 /* CPUMoments.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUMoments.hpp; sourceTree = ""; }; + 92FF013323AA0B4E00AC97F6 /* CPUTopKV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTopKV2.cpp; sourceTree = ""; }; + 92FF013423AA0B4E00AC97F6 /* CPUUnary.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUUnary.cpp; sourceTree = ""; }; + 92FF013523AA0B4E00AC97F6 /* CPUQuantizedAdd.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedAdd.hpp; sourceTree = ""; }; + 92FF013623AA0B4E00AC97F6 /* CPUWhere.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUWhere.cpp; sourceTree = ""; }; + 92FF013723AA0B4E00AC97F6 /* CPUSoftmaxGrad.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSoftmaxGrad.cpp; sourceTree = ""; }; + 92FF013923AA0B4E00AC97F6 /* CMakeLists.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = CMakeLists.txt; sourceTree = ""; }; + 92FF013B23AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNUInt8ToInt16WithOffsetC4Fast.S; sourceTree = ""; }; + 92FF013C23AA0B4E00AC97F6 /* MNNScaleAddInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleAddInt8.S; sourceTree = ""; }; + 92FF013D23AA0B4E00AC97F6 /* MNNMatrixProd.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixProd.S; sourceTree = ""; }; + 92FF013E23AA0B4E00AC97F6 /* MNNFloat2Int8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNFloat2Int8.S; sourceTree = ""; }; + 92FF013F23AA0B4E00AC97F6 /* MNNSamplerC4NearestOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC4NearestOpt.S; sourceTree = ""; }; + 92FF014023AA0B4E00AC97F6 /* MNNAddC4WithStride.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddC4WithStride.S; sourceTree = ""; }; + 92FF014123AA0B4E00AC97F6 /* MNNQuanToDestUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNQuanToDestUint8.S; sourceTree = ""; }; + 92FF014223AA0B4E00AC97F6 /* MNNLoadU8AndSum.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNLoadU8AndSum.S; sourceTree = ""; }; + 92FF014323AA0B4E00AC97F6 /* MNNCubicLineC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCubicLineC4.S; sourceTree = ""; }; + 92FF014423AA0B4E00AC97F6 /* MNNAddBiasRelu6.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddBiasRelu6.S; sourceTree = ""; }; + 92FF014523AA0B4E00AC97F6 /* MNNStrassenMergeCFunction.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNStrassenMergeCFunction.S; sourceTree = ""; }; + 92FF014623AA0B4E00AC97F6 /* MNNBlitC1ToFloatRGBA.S */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBlitC1ToFloatRGBA.S; sourceTree = ""; }; + 92FF014723AA0B4E00AC97F6 /* MNNCopyC4WithStride.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCopyC4WithStride.S; sourceTree = ""; }; + 92FF014823AA0B4E00AC97F6 /* MNNNV21ToBGRUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToBGRUnit.S; sourceTree = ""; }; + 92FF014923AA0B4E00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNLineDepthWiseInt8AddBiasScaleUnit.S; sourceTree = ""; }; + 92FF014A23AA0B4E00AC97F6 /* MNNUnPackC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNUnPackC4.S; sourceTree = ""; }; + 92FF014B23AA0B4E00AC97F6 /* MNNSamplerC1NearestOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC1NearestOpt.S; sourceTree = ""; }; + 92FF014C23AA0B4E00AC97F6 /* MNNGemmFloatCommon_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatCommon_4.S; sourceTree = ""; }; + 92FF014D23AA0B4E00AC97F6 /* MNNNV21ToRGBUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToRGBUnit.S; sourceTree = ""; }; + 92FF014E23AA0B4E00AC97F6 /* MNNPackC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNPackC4.S; sourceTree = ""; }; + 92FF014F23AA0B4E00AC97F6 /* MNNMinFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMinFloat.S; sourceTree = ""; }; + 92FF015023AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Common.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt16to32_4x4_Common.S; sourceTree = ""; }; + 
92FF015123AA0B4E00AC97F6 /* MNNMaxFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMaxFloat.S; sourceTree = ""; }; + 92FF015223AA0B4E00AC97F6 /* MNNNV21ToRGBAUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToRGBAUnit.S; sourceTree = ""; }; + 92FF015323AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt16to32_4x4_Unit.S; sourceTree = ""; }; + 92FF015423AA0B4E00AC97F6 /* MNNScaleBias2FloatC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleBias2FloatC4.S; sourceTree = ""; }; + 92FF015523AA0B4E00AC97F6 /* MNNMatrixMax.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixMax.S; sourceTree = ""; }; + 92FF015623AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthWiseInt8.S; sourceTree = ""; }; + 92FF015723AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWiseUint8.S; sourceTree = ""; }; + 92FF015823AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8AddBiasScale_8x4_Unit.S; sourceTree = ""; }; + 92FF015923AA0B4E00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8toFloat32_8x4_Unit.S; sourceTree = ""; }; + 92FF015A23AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWiseInt8.S; sourceTree = ""; }; + 92FF015B23AA0B4E00AC97F6 /* 
MNNScaleAndAddBias.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleAndAddBias.S; sourceTree = ""; }; + 92FF015C23AA0B4E00AC97F6 /* MNNReluInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNReluInt8.S; sourceTree = ""; }; + 92FF015D23AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthWiseUint8.S; sourceTree = ""; }; + 92FF015E23AA0B4E00AC97F6 /* MNNSamplerC4BilinearOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC4BilinearOpt.S; sourceTree = ""; }; + 92FF015F23AA0B4E00AC97F6 /* MNNBilinearProcC1.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBilinearProcC1.S; sourceTree = ""; }; + 92FF016023AA0B4E00AC97F6 /* MNNMatrixSub.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixSub.S; sourceTree = ""; }; + 92FF016123AA0B4E00AC97F6 /* MNNPowC8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNPowC8.S; sourceTree = ""; }; + 92FF016223AA0B4E00AC97F6 /* MNNMatrixAdd.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixAdd.S; sourceTree = ""; }; + 92FF016323AA0B4E00AC97F6 /* MNNExpC8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNExpC8.S; sourceTree = ""; }; + 92FF016423AA0B4E00AC97F6 /* MNNAddBiasRelu.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddBiasRelu.S; sourceTree = ""; }; + 92FF016523AA0B4E00AC97F6 /* MNNConvDwF23SourceTransUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvDwF23SourceTransUnit.S; sourceTree = ""; }; + 92FF016623AA0B4E00AC97F6 /* 
MNNWinogradMatrixProductLeft.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNWinogradMatrixProductLeft.S; sourceTree = ""; }; + 92FF016723AA0B4E00AC97F6 /* MNNDeconvRunForUnitDepthWise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNDeconvRunForUnitDepthWise.S; sourceTree = ""; }; + 92FF016823AA0B4E00AC97F6 /* MNNSamplerC1BilinearOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC1BilinearOpt.S; sourceTree = ""; }; + 92FF016923AA0B4E00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNDepthWiseInt8AddBiasScaleUnit.S; sourceTree = ""; }; + 92FF016A23AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8AddBiasScale_16x4_Unit.S; sourceTree = ""; }; + 92FF016B23AA0B4E00AC97F6 /* MNNGemmFloatOne_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatOne_4.S; sourceTree = ""; }; + 92FF016C23AA0B4E00AC97F6 /* MNNWinogradMatrixProductRight.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNWinogradMatrixProductRight.S; sourceTree = ""; }; + 92FF016E23AA0B4E00AC97F6 /* MNNReluWithSlopeChannel.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNReluWithSlopeChannel.S; sourceTree = ""; }; + 92FF016F23AA0B4E00AC97F6 /* MNNAddBias.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddBias.S; sourceTree = ""; }; + 92FF017023AA0B4E00AC97F6 /* MNNCubicSampleC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCubicSampleC4.S; sourceTree = ""; }; + 92FF017123AA0B4E00AC97F6 /* MNNCoefLine.S */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCoefLine.S; sourceTree = ""; }; + 92FF017223AA0B4E00AC97F6 /* MNNBlitC3ToFloatRGBA.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBlitC3ToFloatRGBA.S; sourceTree = ""; }; + 92FF017323AA0B4E00AC97F6 /* MNNConvSlideWindowMiddle.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvSlideWindowMiddle.S; sourceTree = ""; }; + 92FF017423AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNUInt8ToInt16WithOffsetC4Common.S; sourceTree = ""; }; + 92FF017523AA0B4E00AC97F6 /* MNNInt8ScaleToFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNInt8ScaleToFloat.S; sourceTree = ""; }; + 92FF017623AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWise.S; sourceTree = ""; }; + 92FF017723AA0B4E00AC97F6 /* MNNConvDwF23MulTransUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvDwF23MulTransUnit.S; sourceTree = ""; }; + 92FF017823AA0B4E00AC97F6 /* MNNConvRunForLineDepthwise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthwise.S; sourceTree = ""; }; + 92FF017923AA0B4E00AC97F6 /* MNNGemmint8to32_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmint8to32_8x4_Unit.S; sourceTree = ""; }; + 92FF017A23AA0B4E00AC97F6 /* MNNGemmFloatUnit_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatUnit_4.S; sourceTree = ""; }; + 92FF017B23AA0B4E00AC97F6 /* MNNConvSlideWindowBorder.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.asm; path = MNNConvSlideWindowBorder.S; sourceTree = ""; }; + 92FF017D23AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNUInt8ToInt16WithOffsetC4Fast.S; sourceTree = ""; }; + 92FF017E23AA0B4E00AC97F6 /* MNNScaleAddInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleAddInt8.S; sourceTree = ""; }; + 92FF017F23AA0B4E00AC97F6 /* MNNMatrixProd.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixProd.S; sourceTree = ""; }; + 92FF018023AA0B4E00AC97F6 /* MNNFloat2Int8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNFloat2Int8.S; sourceTree = ""; }; + 92FF018123AA0B4E00AC97F6 /* MNNSamplerC4NearestOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC4NearestOpt.S; sourceTree = ""; }; + 92FF018223AA0B4E00AC97F6 /* MNNAddC4WithStride.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddC4WithStride.S; sourceTree = ""; }; + 92FF018323AA0B4E00AC97F6 /* MNNQuanToDestUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNQuanToDestUint8.S; sourceTree = ""; }; + 92FF018423AA0B4E00AC97F6 /* MNNLoadU8AndSum.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNLoadU8AndSum.S; sourceTree = ""; }; + 92FF018523AA0B4E00AC97F6 /* MNNCubicLineC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCubicLineC4.S; sourceTree = ""; }; + 92FF018623AA0B4E00AC97F6 /* MNNAddBiasRelu6.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddBiasRelu6.S; sourceTree = ""; }; + 92FF018723AA0B4E00AC97F6 /* MNNStrassenMergeCFunction.S */ = {isa = PBXFileReference; fileEncoding 
= 4; lastKnownFileType = sourcecode.asm; path = MNNStrassenMergeCFunction.S; sourceTree = ""; }; + 92FF018823AA0B4E00AC97F6 /* MNNBlitC1ToFloatRGBA.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBlitC1ToFloatRGBA.S; sourceTree = ""; }; + 92FF018923AA0B4E00AC97F6 /* MNNCopyC4WithStride.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCopyC4WithStride.S; sourceTree = ""; }; + 92FF018A23AA0B4E00AC97F6 /* MNNNV21ToBGRUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToBGRUnit.S; sourceTree = ""; }; + 92FF018B23AA0B4E00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNLineDepthWiseInt8AddBiasScaleUnit.S; sourceTree = ""; }; + 92FF018C23AA0B4E00AC97F6 /* MNNUnPackC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNUnPackC4.S; sourceTree = ""; }; + 92FF018D23AA0B4E00AC97F6 /* MNNSamplerC1NearestOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC1NearestOpt.S; sourceTree = ""; }; + 92FF018E23AA0B4E00AC97F6 /* MNNGemmFloatCommon_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatCommon_4.S; sourceTree = ""; }; + 92FF018F23AA0B4E00AC97F6 /* MNNNV21ToRGBUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToRGBUnit.S; sourceTree = ""; }; + 92FF019023AA0B4E00AC97F6 /* MNNPackC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNPackC4.S; sourceTree = ""; }; + 92FF019123AA0B4E00AC97F6 /* MNNMinFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMinFloat.S; sourceTree = ""; }; + 92FF019223AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Common.S */ 
= {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt16to32_4x4_Common.S; sourceTree = ""; }; + 92FF019323AA0B4E00AC97F6 /* MNNMaxFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMaxFloat.S; sourceTree = ""; }; + 92FF019423AA0B4E00AC97F6 /* MNNNV21ToRGBAUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNNV21ToRGBAUnit.S; sourceTree = ""; }; + 92FF019523AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt16to32_4x4_Unit.S; sourceTree = ""; }; + 92FF019623AA0B4E00AC97F6 /* MNNScaleBias2FloatC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleBias2FloatC4.S; sourceTree = ""; }; + 92FF019723AA0B4E00AC97F6 /* MNNMatrixMax.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixMax.S; sourceTree = ""; }; + 92FF019823AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthWiseInt8.S; sourceTree = ""; }; + 92FF019923AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWiseUint8.S; sourceTree = ""; }; + 92FF019A23AA0B4E00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8toFloat32_8x4_Unit.S; sourceTree = ""; }; + 92FF019B23AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWiseInt8.S; sourceTree = ""; }; + 92FF019C23AA0B4E00AC97F6 /* MNNScaleAndAddBias.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.asm; path = MNNScaleAndAddBias.S; sourceTree = ""; }; + 92FF019D23AA0B4E00AC97F6 /* MNNReluInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNReluInt8.S; sourceTree = ""; }; + 92FF019E23AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthWiseUint8.S; sourceTree = ""; }; + 92FF019F23AA0B4E00AC97F6 /* MNNSamplerC4BilinearOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC4BilinearOpt.S; sourceTree = ""; }; + 92FF01A023AA0B4E00AC97F6 /* MNNBilinearProcC1.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBilinearProcC1.S; sourceTree = ""; }; + 92FF01A123AA0B4E00AC97F6 /* MNNMatrixSub.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixSub.S; sourceTree = ""; }; + 92FF01A223AA0B4E00AC97F6 /* MNNPowC8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNPowC8.S; sourceTree = ""; }; + 92FF01A323AA0B4E00AC97F6 /* MNNMatrixAdd.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNMatrixAdd.S; sourceTree = ""; }; + 92FF01A423AA0B4E00AC97F6 /* MNNExpC8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNExpC8.S; sourceTree = ""; }; + 92FF01A523AA0B4E00AC97F6 /* MNNAddBiasRelu.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddBiasRelu.S; sourceTree = ""; }; + 92FF01A623AA0B4E00AC97F6 /* MNNConvDwF23SourceTransUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvDwF23SourceTransUnit.S; sourceTree = ""; }; + 92FF01A723AA0B4E00AC97F6 /* MNNWinogradMatrixProductLeft.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.asm; path = MNNWinogradMatrixProductLeft.S; sourceTree = ""; }; + 92FF01A823AA0B4E00AC97F6 /* MNNDeconvRunForUnitDepthWise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNDeconvRunForUnitDepthWise.S; sourceTree = ""; }; + 92FF01A923AA0B4E00AC97F6 /* MNNSamplerC1BilinearOpt.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNSamplerC1BilinearOpt.S; sourceTree = ""; }; + 92FF01AA23AA0B4E00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNDepthWiseInt8AddBiasScaleUnit.S; sourceTree = ""; }; + 92FF01AB23AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmInt8AddBiasScale_16x4_Unit.S; sourceTree = ""; }; + 92FF01AC23AA0B4E00AC97F6 /* MNNGemmFloatOne_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatOne_4.S; sourceTree = ""; }; + 92FF01AD23AA0B4E00AC97F6 /* MNNWinogradMatrixProductRight.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNWinogradMatrixProductRight.S; sourceTree = ""; }; + 92FF01AF23AA0B4E00AC97F6 /* MNNReluWithSlopeChannel.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNReluWithSlopeChannel.S; sourceTree = ""; }; + 92FF01B023AA0B4E00AC97F6 /* MNNAddBias.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNAddBias.S; sourceTree = ""; }; + 92FF01B123AA0B4E00AC97F6 /* MNNCubicSampleC4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCubicSampleC4.S; sourceTree = ""; }; + 92FF01B223AA0B4E00AC97F6 /* MNNCoefLine.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNCoefLine.S; sourceTree = ""; }; 
+ 92FF01B323AA0B4E00AC97F6 /* MNNBlitC3ToFloatRGBA.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNBlitC3ToFloatRGBA.S; sourceTree = ""; }; + 92FF01B423AA0B4E00AC97F6 /* MNNConvSlideWindowMiddle.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvSlideWindowMiddle.S; sourceTree = ""; }; + 92FF01B523AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNUInt8ToInt16WithOffsetC4Common.S; sourceTree = ""; }; + 92FF01B623AA0B4E00AC97F6 /* MNNInt8ScaleToFloat.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNInt8ScaleToFloat.S; sourceTree = ""; }; + 92FF01B723AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForUnitDepthWise.S; sourceTree = ""; }; + 92FF01B823AA0B4E00AC97F6 /* MNNConvDwF23MulTransUnit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvDwF23MulTransUnit.S; sourceTree = ""; }; + 92FF01B923AA0B4E00AC97F6 /* MNNConvRunForLineDepthwise.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvRunForLineDepthwise.S; sourceTree = ""; }; + 92FF01BA23AA0B4E00AC97F6 /* MNNGemmint8to32_8x4_Unit.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmint8to32_8x4_Unit.S; sourceTree = ""; }; + 92FF01BB23AA0B4E00AC97F6 /* MNNGemmFloatUnit_4.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNGemmFloatUnit_4.S; sourceTree = ""; }; + 92FF01BC23AA0B4E00AC97F6 /* MNNConvSlideWindowBorder.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNConvSlideWindowBorder.S; sourceTree = ""; }; + 92FF01BD23AA0B4E00AC97F6 /* 
MNNAsmGlobal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MNNAsmGlobal.h; sourceTree = ""; }; + 92FF01BE23AA0B4E00AC97F6 /* CPUReluGrad.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReluGrad.cpp; sourceTree = ""; }; + 92FF01BF23AA0B4E00AC97F6 /* CPUDilation2D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDilation2D.cpp; sourceTree = ""; }; + 92FF01C023AA0B4E00AC97F6 /* CPUArgMax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUArgMax.hpp; sourceTree = ""; }; + 92FF01C123AA0B4E00AC97F6 /* CPUShape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUShape.cpp; sourceTree = ""; }; + 92FF01C223AA0B4E00AC97F6 /* CPURank.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURank.hpp; sourceTree = ""; }; + 92FF01C323AA0B4F00AC97F6 /* CPUReduction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReduction.cpp; sourceTree = ""; }; + 92FF01C423AA0B4F00AC97F6 /* CPUStridedSlice.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUStridedSlice.cpp; sourceTree = ""; }; + 92FF01C523AA0B4F00AC97F6 /* CPUGatherND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUGatherND.cpp; sourceTree = ""; }; + 92FF01C623AA0B4F00AC97F6 /* CPUQuantizedAvgPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedAvgPool.hpp; sourceTree = ""; }; + 92FF01C723AA0B4F00AC97F6 /* CPUGatherND.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUGatherND.hpp; sourceTree = ""; }; + 92FF01C823AA0B4F00AC97F6 /* CPUConvolution3D.hpp */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvolution3D.hpp; sourceTree = ""; }; + 92FF01C923AA0B4F00AC97F6 /* CPUSpaceToDepth.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSpaceToDepth.hpp; sourceTree = ""; }; + 92FF01CA23AA0B4F00AC97F6 /* CPUSpatialProduct.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSpatialProduct.cpp; sourceTree = ""; }; + 92FF01CB23AA0B4F00AC97F6 /* CPUTanh.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTanh.hpp; sourceTree = ""; }; + 92FF01CC23AA0B4F00AC97F6 /* CPUTile.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTile.hpp; sourceTree = ""; }; + 92FF01CD23AA0B4F00AC97F6 /* CPUSetDiff1D.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSetDiff1D.hpp; sourceTree = ""; }; + 92FF01CE23AA0B4F00AC97F6 /* CPUCast.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUCast.cpp; sourceTree = ""; }; + 92FF01CF23AA0B4F00AC97F6 /* CPUDepthToSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDepthToSpace.cpp; sourceTree = ""; }; + 92FF01D023AA0B4F00AC97F6 /* CPUSliceTf.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSliceTf.cpp; sourceTree = ""; }; + 92FF01D123AA0B4F00AC97F6 /* CPUOneHot.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUOneHot.hpp; sourceTree = ""; }; + 92FF01D223AA0B4F00AC97F6 /* CPUCrop.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUCrop.cpp; sourceTree = ""; }; + 92FF01D323AA0B4F00AC97F6 /* CPUThreshold.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.cpp.h; path = CPUThreshold.hpp; sourceTree = ""; }; + 92FF01D423AA0B5000AC97F6 /* CPUScatterNd.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUScatterNd.cpp; sourceTree = ""; }; + 92FF01D523AA0B5000AC97F6 /* CPUSelect.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSelect.cpp; sourceTree = ""; }; + 92FF01D623AA0B5000AC97F6 /* CPUElu.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUElu.cpp; sourceTree = ""; }; + 92FF01D723AA0B5000AC97F6 /* CPUConvolution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvolution.hpp; sourceTree = ""; }; + 92FF01D823AA0B5000AC97F6 /* CPUOneHot.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUOneHot.cpp; sourceTree = ""; }; + 92FF01D923AA0B5000AC97F6 /* CPUTFQuantizedConv2D.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTFQuantizedConv2D.hpp; sourceTree = ""; }; + 92FF01DA23AA0B5000AC97F6 /* CPUAsString.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUAsString.cpp; sourceTree = ""; }; + 92FF01DB23AA0B5000AC97F6 /* CPUDetectionPostProcess.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDetectionPostProcess.hpp; sourceTree = ""; }; + 92FF01DC23AA0B5000AC97F6 /* CPURelu.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURelu.hpp; sourceTree = ""; }; + 92FF01DD23AA0B5000AC97F6 /* CPUPermute.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPermute.cpp; sourceTree = ""; }; + 92FF01DE23AA0B5000AC97F6 /* CPUQuantizedReshape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; 
path = CPUQuantizedReshape.cpp; sourceTree = ""; }; + 92FF01DF23AA0B5000AC97F6 /* CPUConcat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConcat.cpp; sourceTree = ""; }; + 92FF01E023AA0B5000AC97F6 /* CPUShape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUShape.hpp; sourceTree = ""; }; + 92FF01E123AA0B5000AC97F6 /* CPUInnerProduct.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUInnerProduct.cpp; sourceTree = ""; }; + 92FF01E223AA0B5000AC97F6 /* CPUQuantizedConcat.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedConcat.hpp; sourceTree = ""; }; + 92FF01E323AA0B5000AC97F6 /* CPUFill.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUFill.cpp; sourceTree = ""; }; + 92FF01E423AA0B5100AC97F6 /* CPUScale.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUScale.cpp; sourceTree = ""; }; + 92FF01E523AA0B5100AC97F6 /* CPUUnravelIndex.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUUnravelIndex.cpp; sourceTree = ""; }; + 92FF01E623AA0B5100AC97F6 /* CPUResize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUResize.hpp; sourceTree = ""; }; + 92FF01E723AA0B5100AC97F6 /* CPUDetectionOutput.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDetectionOutput.hpp; sourceTree = ""; }; + 92FF01E823AA0B5100AC97F6 /* CPURuntime.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURuntime.hpp; sourceTree = ""; }; + 92FF01E923AA0B5100AC97F6 /* CPUPermute.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPermute.hpp; 
sourceTree = ""; }; + 92FF01EA23AA0B5100AC97F6 /* CPUInnerProduct.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUInnerProduct.hpp; sourceTree = ""; }; + 92FF01EB23AA0B5100AC97F6 /* CPURNNSequenceGRU.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURNNSequenceGRU.cpp; sourceTree = ""; }; + 92FF01EC23AA0B5100AC97F6 /* CPUResize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUResize.cpp; sourceTree = ""; }; + 92FF01ED23AA0B5100AC97F6 /* CPUConst.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConst.hpp; sourceTree = ""; }; + 92FF01EE23AA0B5100AC97F6 /* CPULSTM.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPULSTM.cpp; sourceTree = ""; }; + 92FF01EF23AA0B5100AC97F6 /* CPUQuantizedSoftmax.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedSoftmax.hpp; sourceTree = ""; }; + 92FF01F023AA0B5200AC97F6 /* CPURuntime.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURuntime.cpp; sourceTree = ""; }; + 92FF01F123AA0B5200AC97F6 /* CPUPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPool.cpp; sourceTree = ""; }; + 92FF01F223AA0B5200AC97F6 /* CPUBatchToSpaceND.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBatchToSpaceND.hpp; sourceTree = ""; }; + 92FF01F323AA0B5200AC97F6 /* CPUExpandDims.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUExpandDims.cpp; sourceTree = ""; }; + 92FF01F423AA0B5200AC97F6 /* CPUEltwiseInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUEltwiseInt8.cpp; sourceTree = ""; }; 
+ 92FF01F523AA0B5200AC97F6 /* CPUConvolution3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvolution3D.cpp; sourceTree = ""; }; + 92FF01F623AA0B5200AC97F6 /* CPUUnpack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUUnpack.cpp; sourceTree = ""; }; + 92FF01F723AA0B5200AC97F6 /* CPUBinary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBinary.hpp; sourceTree = ""; }; + 92FF01F823AA0B5200AC97F6 /* CPUConvolutionDepthwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvolutionDepthwise.cpp; sourceTree = ""; }; + 92FF01F923AA0B5200AC97F6 /* CPUROIPooling.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUROIPooling.hpp; sourceTree = ""; }; + 92FF01FA23AA0B5200AC97F6 /* CPUInstanceNorm.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUInstanceNorm.hpp; sourceTree = ""; }; + 92FF01FB23AA0B5200AC97F6 /* CPUSigmoid.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSigmoid.cpp; sourceTree = ""; }; + 92FF01FC23AA0B5200AC97F6 /* CPUQuantizedConcat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedConcat.cpp; sourceTree = ""; }; + 92FF01FD23AA0B5200AC97F6 /* CPULSTM.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPULSTM.hpp; sourceTree = ""; }; + 92FF01FE23AA0B5200AC97F6 /* CPURelu.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURelu.cpp; sourceTree = ""; }; + 92FF01FF23AA0B5200AC97F6 /* CPUDetectionPostProcess.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDetectionPostProcess.cpp; sourceTree 
= ""; }; + 92FF020023AA0B5300AC97F6 /* CPUSlice.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSlice.hpp; sourceTree = ""; }; + 92FF020123AA0B5300AC97F6 /* CPUPoolGrad.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUPoolGrad.cpp; sourceTree = ""; }; + 92FF020223AA0B5300AC97F6 /* CPUNormalize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUNormalize.cpp; sourceTree = ""; }; + 92FF020323AA0B5300AC97F6 /* CPUSetDiff1D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSetDiff1D.cpp; sourceTree = ""; }; + 92FF020423AA0B5300AC97F6 /* CPUSoftmax.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSoftmax.cpp; sourceTree = ""; }; + 92FF020523AA0B5300AC97F6 /* CPUBatchMatMul.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBatchMatMul.hpp; sourceTree = ""; }; + 92FF020623AA0B5300AC97F6 /* CPUMoments.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUMoments.cpp; sourceTree = ""; }; + 92FF020723AA0B5300AC97F6 /* CPULinSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPULinSpace.cpp; sourceTree = ""; }; + 92FF020823AA0B5300AC97F6 /* CPUTensorConvert.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTensorConvert.hpp; sourceTree = ""; }; + 92FF020923AA0B5300AC97F6 /* CPUQuantizedLogistic.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedLogistic.hpp; sourceTree = ""; }; + 92FF020A23AA0B5300AC97F6 /* CPUSigmoid.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSigmoid.hpp; sourceTree = ""; }; + 
92FF020B23AA0B5300AC97F6 /* CPURange.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURange.cpp; sourceTree = ""; }; + 92FF020C23AA0B5500AC97F6 /* CPUUnravelIndex.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUUnravelIndex.hpp; sourceTree = ""; }; + 92FF020D23AA0B5500AC97F6 /* CPUEltwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUEltwise.hpp; sourceTree = ""; }; + 92FF020E23AA0B5500AC97F6 /* CPUMatrixBandPart.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUMatrixBandPart.hpp; sourceTree = ""; }; + 92FF020F23AA0B5500AC97F6 /* CPUQuantizedReshape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUQuantizedReshape.hpp; sourceTree = ""; }; + 92FF021023AA0B5500AC97F6 /* CPUPriorbox.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPriorbox.hpp; sourceTree = ""; }; + 92FF021123AA0B5600AC97F6 /* CPUGather.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUGather.cpp; sourceTree = ""; }; + 92FF021223AA0B5600AC97F6 /* CPUBackend.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBackend.hpp; sourceTree = ""; }; + 92FF021323AA0B5600AC97F6 /* CPUDeconvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDeconvolution.cpp; sourceTree = ""; }; + 92FF021423AA0B5600AC97F6 /* CPUQuantizedAdd.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUQuantizedAdd.cpp; sourceTree = ""; }; + 92FF021523AA0B5600AC97F6 /* CPUSpaceToBatchND.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUSpaceToBatchND.hpp; sourceTree = ""; }; + 
92FF021623AA0B5600AC97F6 /* CPUSqueeze.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUSqueeze.cpp; sourceTree = ""; }; + 92FF021723AA0B5600AC97F6 /* CPUDepthwiseConvInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDepthwiseConvInt8.cpp; sourceTree = ""; }; + 92FF021823AA0B5600AC97F6 /* CPUCrop.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUCrop.hpp; sourceTree = ""; }; + 92FF021923AA0B5600AC97F6 /* CPUThreshold.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUThreshold.cpp; sourceTree = ""; }; + 92FF021A23AA0B5600AC97F6 /* CPUReshape.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReshape.hpp; sourceTree = ""; }; + 92FF021C23AA0B5600AC97F6 /* Convolution1x1Strassen.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Convolution1x1Strassen.hpp; sourceTree = ""; }; + 92FF021D23AA0B5600AC97F6 /* CommonOptFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CommonOptFunction.cpp; sourceTree = ""; }; + 92FF021E23AA0B5600AC97F6 /* Convolution3D3x3.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Convolution3D3x3.cpp; sourceTree = ""; }; + 92FF021F23AA0B5600AC97F6 /* StrassenMatmulComputor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = StrassenMatmulComputor.cpp; sourceTree = ""; }; + 92FF022023AA0B5600AC97F6 /* Convolution3x3.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Convolution3x3.cpp; sourceTree = ""; }; + 92FF022123AA0B5600AC97F6 /* CommonOptFunction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
CommonOptFunction.h; sourceTree = ""; }; + 92FF022223AA0B5600AC97F6 /* ConvolutionWinograd.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionWinograd.cpp; sourceTree = ""; }; + 92FF022323AA0B5600AC97F6 /* Int8FunctionsOpt.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Int8FunctionsOpt.cpp; sourceTree = ""; }; + 92FF022423AA0B5600AC97F6 /* ConvolutionWinograd3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionWinograd3D.cpp; sourceTree = ""; }; + 92FF022523AA0B5600AC97F6 /* ConvOpt.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvOpt.cpp; sourceTree = ""; }; + 92FF022623AA0B5600AC97F6 /* OptimizedComputer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OptimizedComputer.cpp; sourceTree = ""; }; + 92FF022723AA0B5600AC97F6 /* DeconvolutionWithStride.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = DeconvolutionWithStride.hpp; sourceTree = ""; }; + 92FF022823AA0B5600AC97F6 /* ConvolutionTiledExecutor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionTiledExecutor.hpp; sourceTree = ""; }; + 92FF022923AA0B5600AC97F6 /* ConvolutionIntFactory.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionIntFactory.cpp; sourceTree = ""; }; + 92FF022A23AA0B5600AC97F6 /* WinogradOptFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WinogradOptFunction.cpp; sourceTree = ""; }; + 92FF022B23AA0B5600AC97F6 /* ConvolutionGroup.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionGroup.hpp; sourceTree = ""; }; + 92FF022C23AA0B5600AC97F6 /* 
ConvolutionFloatFactory.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConvolutionFloatFactory.h; sourceTree = ""; }; + 92FF022D23AA0B5600AC97F6 /* ConvolutionInt8Executor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionInt8Executor.cpp; sourceTree = ""; }; + 92FF022E23AA0B5600AC97F6 /* ResizeFunction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ResizeFunction.h; sourceTree = ""; }; + 92FF022F23AA0B5600AC97F6 /* ConvolutionDepthwise3x3.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionDepthwise3x3.cpp; sourceTree = ""; }; + 92FF023023AA0B5600AC97F6 /* ConvolutionIntFactory.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionIntFactory.hpp; sourceTree = ""; }; + 92FF023123AA0B5600AC97F6 /* WinogradOptFunction.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = WinogradOptFunction.hpp; sourceTree = ""; }; + 92FF023223AA0B5600AC97F6 /* ConvolutionGroup.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionGroup.cpp; sourceTree = ""; }; + 92FF023323AA0B5600AC97F6 /* ConvolutionFloatFactory.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionFloatFactory.cpp; sourceTree = ""; }; + 92FF023423AA0B5600AC97F6 /* ConvolutionInt8Executor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionInt8Executor.hpp; sourceTree = ""; }; + 92FF023523AA0B5600AC97F6 /* ConvolutionDepthwise3x3.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionDepthwise3x3.hpp; sourceTree = ""; }; + 92FF023623AA0B5600AC97F6 /* Convolution1x1Strassen.cpp */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Convolution1x1Strassen.cpp; sourceTree = ""; }; + 92FF023723AA0B5600AC97F6 /* ResizeFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ResizeFunction.cpp; sourceTree = ""; }; + 92FF023823AA0B5600AC97F6 /* StrassenMatmulComputor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = StrassenMatmulComputor.hpp; sourceTree = ""; }; + 92FF023923AA0B5600AC97F6 /* Convolution3x3.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Convolution3x3.hpp; sourceTree = ""; }; + 92FF023A23AA0B5600AC97F6 /* Convolution3D3x3.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Convolution3D3x3.hpp; sourceTree = ""; }; + 92FF023B23AA0B5600AC97F6 /* ConvOpt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConvOpt.h; sourceTree = ""; }; + 92FF023C23AA0B5600AC97F6 /* ConvolutionWinograd.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionWinograd.hpp; sourceTree = ""; }; + 92FF023D23AA0B5600AC97F6 /* ConvolutionWinograd3D.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionWinograd3D.hpp; sourceTree = ""; }; + 92FF023E23AA0B5600AC97F6 /* OptimizedComputer.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = OptimizedComputer.hpp; sourceTree = ""; }; + 92FF023F23AA0B5600AC97F6 /* Int8FunctionsOpt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Int8FunctionsOpt.h; sourceTree = ""; }; + 92FF024023AA0B5600AC97F6 /* DeconvolutionWithStride.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DeconvolutionWithStride.cpp; sourceTree = ""; 
}; + 92FF024123AA0B5600AC97F6 /* ConvolutionTiledExecutor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionTiledExecutor.cpp; sourceTree = ""; }; + 92FF024223AA0B5600AC97F6 /* CPUPack.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUPack.hpp; sourceTree = ""; }; + 92FF024323AA0B5600AC97F6 /* CPURank.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURank.cpp; sourceTree = ""; }; + 92FF024423AA0B5600AC97F6 /* CPUTile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTile.cpp; sourceTree = ""; }; + 92FF024523AA0B5700AC97F6 /* CPUEltwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUEltwise.cpp; sourceTree = ""; }; + 92FF024623AA0B5700AC97F6 /* CPUInterp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUInterp.cpp; sourceTree = ""; }; + 92FF024723AA0B5700AC97F6 /* CPUReduceJoin.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUReduceJoin.hpp; sourceTree = ""; }; + 92FF024823AA0B5700AC97F6 /* CPUNonMaxSuppressionV2.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUNonMaxSuppressionV2.hpp; sourceTree = ""; }; + 92FF024923AA0B5700AC97F6 /* CPUTranspose.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTranspose.hpp; sourceTree = ""; }; + 92FF024A23AA0B5700AC97F6 /* CPUNormalize.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUNormalize.hpp; sourceTree = ""; }; + 92FF024B23AA0B5700AC97F6 /* CPUMatMul.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUMatMul.hpp; sourceTree = ""; }; + 92FF024C23AA0B5700AC97F6 
/* CPUConvolutionDepthwise.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvolutionDepthwise.hpp; sourceTree = ""; }; + 92FF024D23AA0B5700AC97F6 /* CPUGatherV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUGatherV2.cpp; sourceTree = ""; }; + 92FF024E23AA0B5700AC97F6 /* CPUDepthToSpace.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDepthToSpace.hpp; sourceTree = ""; }; + 92FF024F23AA0B5900AC97F6 /* CPUConvInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvInt8.hpp; sourceTree = ""; }; + 92FF025023AA0B5900AC97F6 /* CPUOPRegister.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUOPRegister.cpp; sourceTree = ""; }; + 92FF025123AA0B5900AC97F6 /* CPUCosineSimilarity.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUCosineSimilarity.hpp; sourceTree = ""; }; + 92FF025223AA0B5900AC97F6 /* CPUTensorConvert.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUTensorConvert.cpp; sourceTree = ""; }; + 92FF025323AA0B5900AC97F6 /* CPUTopKV2.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUTopKV2.hpp; sourceTree = ""; }; + 92FF025423AA0B5900AC97F6 /* CPUReshape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUReshape.cpp; sourceTree = ""; }; + 92FF03D323AA0B6C00AC97F6 /* ShapeQuantizedMaxPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeQuantizedMaxPool.cpp; sourceTree = ""; }; + 92FF03D423AA0B6C00AC97F6 /* ShapeFill.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeFill.cpp; sourceTree = ""; }; + 
92FF03D523AA0B6C00AC97F6 /* ShapeUnpack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeUnpack.cpp; sourceTree = ""; }; + 92FF03D623AA0B6C00AC97F6 /* ShapeNonMaxSuppressionV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeNonMaxSuppressionV2.cpp; sourceTree = ""; }; + 92FF03D723AA0B6C00AC97F6 /* ShapePool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePool.cpp; sourceTree = ""; }; + 92FF03D823AA0B6C00AC97F6 /* ShapeRange.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeRange.cpp; sourceTree = ""; }; + 92FF03D923AA0B6C00AC97F6 /* ShapeRank.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeRank.cpp; sourceTree = ""; }; + 92FF03DA23AA0B6D00AC97F6 /* ShapePack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePack.cpp; sourceTree = ""; }; + 92FF03DB23AA0B6D00AC97F6 /* ShapeDeconvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDeconvolution.cpp; sourceTree = ""; }; + 92FF03DC23AA0B6D00AC97F6 /* ShapeConcat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeConcat.cpp; sourceTree = ""; }; + 92FF03DD23AA0B6D00AC97F6 /* ShapeScatterNd.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeScatterNd.cpp; sourceTree = ""; }; + 92FF03DE23AA0B6D00AC97F6 /* ShapeROIPooling.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeROIPooling.cpp; sourceTree = ""; }; + 92FF03DF23AA0B6D00AC97F6 /* ShapeSize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSize.cpp; sourceTree = ""; }; + 
92FF03E023AA0B6D00AC97F6 /* ShapeCosineSimilarity.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeCosineSimilarity.cpp; sourceTree = ""; }; + 92FF03E123AA0B6D00AC97F6 /* ShapeMatMul.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeMatMul.cpp; sourceTree = ""; }; + 92FF03E223AA0B6D00AC97F6 /* ShapeInterp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeInterp.cpp; sourceTree = ""; }; + 92FF03E323AA0B6D00AC97F6 /* ShapeLinSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeLinSpace.cpp; sourceTree = ""; }; + 92FF03E423AA0B6D00AC97F6 /* ShapeBatchMatMul.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeBatchMatMul.cpp; sourceTree = ""; }; + 92FF03E523AA0B6D00AC97F6 /* ShapeOneHot.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeOneHot.cpp; sourceTree = ""; }; + 92FF03E623AA0B6D00AC97F6 /* ShapeReduceJoin.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeReduceJoin.cpp; sourceTree = ""; }; + 92FF03E723AA0B6D00AC97F6 /* ShapePadding.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePadding.cpp; sourceTree = ""; }; + 92FF03E823AA0B6D00AC97F6 /* ShapeProposal.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeProposal.cpp; sourceTree = ""; }; + 92FF03E923AA0B6D00AC97F6 /* ShapeSliceTf.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSliceTf.cpp; sourceTree = ""; }; + 92FF03EA23AA0B6D00AC97F6 /* ShapeQuantizedAvgPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = 
ShapeQuantizedAvgPool.cpp; sourceTree = ""; }; + 92FF03EB23AA0B6E00AC97F6 /* ShapeSpaceToBatchND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSpaceToBatchND.cpp; sourceTree = ""; }; + 92FF03EC23AA0B6E00AC97F6 /* ShapeWhere.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeWhere.cpp; sourceTree = ""; }; + 92FF03ED23AA0B6E00AC97F6 /* ShapeCrop.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeCrop.cpp; sourceTree = ""; }; + 92FF03EE23AA0B6E00AC97F6 /* ShapeStridedSlice.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeStridedSlice.cpp; sourceTree = ""; }; + 92FF03EF23AA0B6E00AC97F6 /* ShapeConvolution3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeConvolution3D.cpp; sourceTree = ""; }; + 92FF03F023AA0B6E00AC97F6 /* ShapeSelect.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSelect.cpp; sourceTree = ""; }; + 92FF03F123AA0B6E00AC97F6 /* ShapeDetectionOutput.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDetectionOutput.cpp; sourceTree = ""; }; + 92FF03F223AA0B6E00AC97F6 /* ShapeUnravelIndex.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeUnravelIndex.cpp; sourceTree = ""; }; + 92FF03F323AA0B6E00AC97F6 /* ShapeDequantize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDequantize.cpp; sourceTree = ""; }; + 92FF03F423AA0B6E00AC97F6 /* ShapePermute.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePermute.cpp; sourceTree = ""; }; + 92FF03F523AA0B6E00AC97F6 /* ShapeDetectionPostProcess.cpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDetectionPostProcess.cpp; sourceTree = ""; }; + 92FF03F623AA0B6E00AC97F6 /* ShapeExpandDims.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeExpandDims.cpp; sourceTree = ""; }; + 92FF03F723AA0B6E00AC97F6 /* ShapeGatherV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeGatherV2.cpp; sourceTree = ""; }; + 92FF03F823AA0B6E00AC97F6 /* ShapeBatchToSpaceND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeBatchToSpaceND.cpp; sourceTree = ""; }; + 92FF03F923AA0B6F00AC97F6 /* ShapeTensorConvert.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTensorConvert.cpp; sourceTree = ""; }; + 92FF03FA23AA0B6F00AC97F6 /* ShapeSlice.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSlice.cpp; sourceTree = ""; }; + 92FF03FB23AA0B6F00AC97F6 /* ShapeMoments.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeMoments.cpp; sourceTree = ""; }; + 92FF03FC23AA0B6F00AC97F6 /* ShapeQuantizedReshape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeQuantizedReshape.cpp; sourceTree = ""; }; + 92FF03FD23AA0B6F00AC97F6 /* ShapeTopKV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTopKV2.cpp; sourceTree = ""; }; + 92FF03FE23AA0B6F00AC97F6 /* ShapeLSTM.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeLSTM.cpp; sourceTree = ""; }; + 92FF03FF23AA0B6F00AC97F6 /* ShapeSpaceToDepth.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSpaceToDepth.cpp; sourceTree = ""; }; + 92FF040023AA0B6F00AC97F6 /* 
ShapeInnerProduct.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeInnerProduct.cpp; sourceTree = ""; }; + 92FF040123AA0B6F00AC97F6 /* ShapeSqueeze.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSqueeze.cpp; sourceTree = ""; }; + 92FF040223AA0B6F00AC97F6 /* ShapeGather.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeGather.cpp; sourceTree = ""; }; + 92FF040323AA0B6F00AC97F6 /* ShapeGatherND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeGatherND.cpp; sourceTree = ""; }; + 92FF040423AA0B6F00AC97F6 /* ShapeConvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeConvolution.cpp; sourceTree = ""; }; + 92FF040523AA0B6F00AC97F6 /* ShapeTile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTile.cpp; sourceTree = ""; }; + 92FF040623AA0B6F00AC97F6 /* ShapePool3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePool3D.cpp; sourceTree = ""; }; + 92FF040723AA0B6F00AC97F6 /* ShapeConst.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeConst.cpp; sourceTree = ""; }; + 92FF040823AA0B7000AC97F6 /* ShapeEltwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeEltwise.cpp; sourceTree = ""; }; + 92FF040923AA0B7000AC97F6 /* ShapeDepthToSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDepthToSpace.cpp; sourceTree = ""; }; + 92FF040A23AA0B7000AC97F6 /* ShapeCropAndResize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeCropAndResize.cpp; sourceTree = ""; }; + 
92FF040B23AA0B7000AC97F6 /* ShapeArgMax.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeArgMax.cpp; sourceTree = ""; }; + 92FF040C23AA0B7000AC97F6 /* ShapeResize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeResize.cpp; sourceTree = ""; }; + 92FF040D23AA0B7000AC97F6 /* ShapeAsString.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeAsString.cpp; sourceTree = ""; }; + 92FF040E23AA0B7000AC97F6 /* ShapeRNNSequenceGRU.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeRNNSequenceGRU.cpp; sourceTree = ""; }; + 92FF040F23AA0B7000AC97F6 /* ShapePriorbox.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePriorbox.cpp; sourceTree = ""; }; + 92FF041023AA0B7000AC97F6 /* ShapeReshape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeReshape.cpp; sourceTree = ""; }; + 92FF041123AA0B7000AC97F6 /* ShapeTranspose.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTranspose.cpp; sourceTree = ""; }; + 92FF041223AA0B7000AC97F6 /* ShapeReduction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeReduction.cpp; sourceTree = ""; }; + 92FF041323AA0B7000AC97F6 /* ShapeRegister.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeRegister.cpp; sourceTree = ""; }; + 92FF041423AA0B7000AC97F6 /* ShapeBinaryOp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeBinaryOp.cpp; sourceTree = ""; }; + 92FF041523AA0B7000AC97F6 /* ShapeShape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeShape.cpp; sourceTree = ""; }; 
+ 92FF041623AA0B7000AC97F6 /* ShapeBroadcastTo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeBroadcastTo.cpp; sourceTree = ""; }; + 92FF041723AA0B7100AC97F6 /* ShapeCast.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeCast.cpp; sourceTree = ""; }; + 92FF041823AA0B7100AC97F6 /* ShapeTFQuantizedConv2D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTFQuantizedConv2D.cpp; sourceTree = ""; }; + 92FF048323AA0BF900AC97F6 /* Interpreter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Interpreter.cpp; sourceTree = ""; }; + 92FF048423AA0BF900AC97F6 /* AutoStorage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AutoStorage.h; sourceTree = ""; }; + 92FF048523AA0BFA00AC97F6 /* FileLoader.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = FileLoader.hpp; sourceTree = ""; }; + 92FF048623AA0BFA00AC97F6 /* BackendRegister.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BackendRegister.cpp; sourceTree = ""; }; + 92FF048723AA0BFA00AC97F6 /* AutoTime.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AutoTime.cpp; sourceTree = ""; }; + 92FF048823AA0BFA00AC97F6 /* Schedule.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Schedule.hpp; sourceTree = ""; }; + 92FF048923AA0BFA00AC97F6 /* BufferAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BufferAllocator.cpp; sourceTree = ""; }; + 92FF048A23AA0BFA00AC97F6 /* Pipeline.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Pipeline.hpp; sourceTree = ""; }; + 92FF048B23AA0BFA00AC97F6 /* 
BackendFactory.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = BackendFactory.hpp; sourceTree = ""; }; + 92FF048C23AA0BFA00AC97F6 /* Execution.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Execution.hpp; sourceTree = ""; }; + 92FF048D23AA0BFA00AC97F6 /* Backend.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Backend.cpp; sourceTree = ""; }; + 92FF048E23AA0BFA00AC97F6 /* Macro.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Macro.h; sourceTree = ""; }; + 92FF048F23AA0BFA00AC97F6 /* SizeComputer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SizeComputer.cpp; sourceTree = ""; }; + 92FF049023AA0BFA00AC97F6 /* DirectedAcyclicGraph.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = DirectedAcyclicGraph.hpp; sourceTree = ""; }; + 92FF049123AA0BFA00AC97F6 /* BackendFactory.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BackendFactory.cpp; sourceTree = ""; }; + 92FF049223AA0BFA00AC97F6 /* Schedule.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Schedule.cpp; sourceTree = ""; }; + 92FF049323AA0BFA00AC97F6 /* MNNMemoryUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MNNMemoryUtils.h; sourceTree = ""; }; + 92FF049423AA0BFA00AC97F6 /* TensorUtils.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = TensorUtils.hpp; sourceTree = ""; }; + 92FF049523AA0BFA00AC97F6 /* TensorUtils.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TensorUtils.cpp; sourceTree = ""; }; + 92FF049623AA0BFA00AC97F6 /* WrapExecution.hpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = WrapExecution.hpp; sourceTree = ""; }; + 92FF049723AA0BFB00AC97F6 /* MNNMemoryUtils.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNMemoryUtils.cpp; sourceTree = ""; }; + 92FF049823AA0BFB00AC97F6 /* SizeComputer.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = SizeComputer.hpp; sourceTree = ""; }; + 92FF049923AA0BFB00AC97F6 /* WrapExecution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WrapExecution.cpp; sourceTree = ""; }; + 92FF049A23AA0BFB00AC97F6 /* BufferAllocator.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = BufferAllocator.hpp; sourceTree = ""; }; + 92FF049B23AA0BFB00AC97F6 /* NonCopyable.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = NonCopyable.hpp; sourceTree = ""; }; + 92FF049C23AA0BFB00AC97F6 /* Execution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Execution.cpp; sourceTree = ""; }; + 92FF049D23AA0BFB00AC97F6 /* FileLoader.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FileLoader.cpp; sourceTree = ""; }; + 92FF049E23AA0BFB00AC97F6 /* Concurrency.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Concurrency.h; sourceTree = ""; }; + 92FF049F23AA0BFB00AC97F6 /* Tensor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Tensor.cpp; sourceTree = ""; }; + 92FF04A023AA0BFB00AC97F6 /* Backend.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Backend.hpp; sourceTree = ""; }; + 92FF04A123AA0BFB00AC97F6 /* Pipeline.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = 
Pipeline.cpp; sourceTree = ""; }; + 92FF04A223AA0BFB00AC97F6 /* Session.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Session.hpp; sourceTree = ""; }; + 92FF04A323AA0BFB00AC97F6 /* Session.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Session.cpp; sourceTree = ""; }; AE7BE4BC22855665002CEEA6 /* MetalOPRegister.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalOPRegister.mm; sourceTree = ""; }; - AEC3B31E211BEF710046AD86 /* MNNDefine.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MNNDefine.h; sourceTree = ""; }; - C422D737232634DD00FD59D0 /* ShapeConvolution3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeConvolution3D.cpp; sourceTree = ""; }; - C422D73D2326449500FD59D0 /* ShapePool3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePool3D.cpp; sourceTree = ""; }; - C422D766232F567300FD59D0 /* CPUConvolution3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvolution3D.cpp; sourceTree = ""; }; - C422D767232F567300FD59D0 /* CPUConvolution3D.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvolution3D.hpp; sourceTree = ""; }; - C422D7B823320B2900FD59D0 /* Convolution3D3x3.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Convolution3D3x3.cpp; sourceTree = ""; }; - C422D7B923320B2A00FD59D0 /* Convolution3D3x3.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = Convolution3D3x3.hpp; sourceTree = ""; }; - C422D7C02339D0EC00FD59D0 /* CPUElu.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUElu.cpp; sourceTree = ""; }; - 
C422D7C12339D0ED00FD59D0 /* CPUElu.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUElu.hpp; sourceTree = ""; }; - C422D7C4233A0F0E00FD59D0 /* ConvolutionWinograd3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvolutionWinograd3D.cpp; sourceTree = ""; }; - C422D7C5233A0F0E00FD59D0 /* ConvolutionWinograd3D.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ConvolutionWinograd3D.hpp; sourceTree = ""; }; - C425F8732292A31D00B4682D /* MNNPowC8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNPowC8.S; sourceTree = ""; }; - C425F8752292A32A00B4682D /* MNNPowC8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNPowC8.S; sourceTree = ""; }; - C49FCD74231D1BE200F358B6 /* CommonOptFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CommonOptFunction.cpp; sourceTree = ""; }; - C49FCD75231D1BE200F358B6 /* MNNConvSlideWindowMiddle.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNConvSlideWindowMiddle.cpp; sourceTree = ""; }; - C49FCD76231D1BE200F358B6 /* MNNGemmFloatCommon_4.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNGemmFloatCommon_4.cpp; sourceTree = ""; }; - C49FCD77231D1BE200F358B6 /* FunctionSummary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = FunctionSummary.hpp; sourceTree = ""; }; - C49FCD78231D1BE200F358B6 /* MNNConvSlideWindowBorder.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNConvSlideWindowBorder.cpp; sourceTree = ""; }; - C49FCD79231D1BE200F358B6 /* MNNMatrixAdd.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; 
path = MNNMatrixAdd.cpp; sourceTree = ""; }; - C49FCD7A231D1BE200F358B6 /* MNNMatrixSub.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNMatrixSub.cpp; sourceTree = ""; }; - C49FCD7B231D1BE200F358B6 /* DispatchHelper.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = DispatchHelper.hpp; sourceTree = ""; }; - C49FCD7D231D1BE200F358B6 /* CommonOptFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CommonOptFunction.cpp; sourceTree = ""; }; - C49FCD7E231D1BE200F358B6 /* MNNConvSlideWindowMiddle.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNConvSlideWindowMiddle.cpp; sourceTree = ""; }; - C49FCD7F231D1BE200F358B6 /* MNNGemmFloatCommon_4.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNGemmFloatCommon_4.cpp; sourceTree = ""; }; - C49FCD80231D1BE200F358B6 /* FunctionSummary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = FunctionSummary.hpp; sourceTree = ""; }; - C49FCD81231D1BE200F358B6 /* MNNMatrixAdd.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNMatrixAdd.cpp; sourceTree = ""; }; - C49FCD82231D1BE200F358B6 /* MNNMatrixSub.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MNNMatrixSub.cpp; sourceTree = ""; }; - C49FCD83231D1BE200F358B6 /* FunctionDispatcher.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FunctionDispatcher.cpp; sourceTree = ""; }; - C49FCD84231D1BE200F358B6 /* DispatchHelper.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DispatchHelper.cpp; sourceTree = ""; }; - C49FCD85231D1BE200F358B6 /* CPUConvolution3D.hpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUConvolution3D.hpp; sourceTree = ""; }; - C49FCD86231D1BE200F358B6 /* ThreadPool.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = ThreadPool.hpp; sourceTree = ""; }; - C49FCD87231D1BE200F358B6 /* CPUConvolution3D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUConvolution3D.cpp; sourceTree = ""; }; - C49FCD88231D1BE200F358B6 /* ThreadPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ThreadPool.cpp; sourceTree = ""; }; CE96FE5F21707D57004AB400 /* MetalTensorConverter.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalTensorConverter.mm; sourceTree = ""; }; - CE96FE6021707D57004AB400 /* MetalUnary.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalUnary.hpp; sourceTree = ""; }; - CE96FE6121707D57004AB400 /* MetalSigmoid.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalSigmoid.hpp; sourceTree = ""; }; - CE96FE6221707D57004AB400 /* MetalTensorConverter.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalTensorConverter.hpp; sourceTree = ""; }; CE96FE6321707D57004AB400 /* MetalMatMul.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalMatMul.mm; sourceTree = ""; }; - CE96FE6521707D57004AB400 /* MetalMatMul.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = MetalMatMul.hpp; sourceTree = ""; }; CE96FE6621707D57004AB400 /* MetalUnary.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalUnary.mm; sourceTree = ""; }; CE96FE6921707D58004AB400 /* MetalUnary.metal */ = {isa = PBXFileReference; fileEncoding 
= 4; lastKnownFileType = sourcecode.metal; path = MetalUnary.metal; sourceTree = ""; }; CE96FE6C21707D58004AB400 /* MetalSigmoid.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSigmoid.mm; sourceTree = ""; }; CE96FE6D21707D58004AB400 /* MetalSigmoid.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalSigmoid.metal; sourceTree = ""; }; CE96FE6F21707D58004AB400 /* MetalMatMul.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalMatMul.metal; sourceTree = ""; }; - EB288360230EAF6C00837188 /* CPUEltwiseInt8.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUEltwiseInt8.cpp; sourceTree = ""; }; - EB288361230EAF6C00837188 /* CPUEltwiseInt8.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUEltwiseInt8.hpp; sourceTree = ""; }; - EB288364230EB05C00837188 /* MNNScaleAddInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleAddInt8.S; sourceTree = ""; }; - EB288366230EB06600837188 /* MNNScaleAddInt8.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = MNNScaleAddInt8.S; sourceTree = ""; }; - EB4925B2224A146000C512BB /* ShapeBatchMatMul.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeBatchMatMul.cpp; sourceTree = ""; }; - EB4925B3224A146000C512BB /* ShapeRNNSequenceGRU.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeRNNSequenceGRU.cpp; sourceTree = ""; }; - EB4925B6224A147E00C512BB /* CPUInstanceNorm.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUInstanceNorm.cpp; sourceTree = ""; }; - EB4925B7224A147E00C512BB /* CPURNNSequenceGRU.cpp */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPURNNSequenceGRU.cpp; sourceTree = ""; }; - EB4925B8224A147E00C512BB /* CPUMoments.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUMoments.hpp; sourceTree = ""; }; - EB4925B9224A147E00C512BB /* CPURNNSequenceGRU.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPURNNSequenceGRU.hpp; sourceTree = ""; }; - EB4925BA224A147E00C512BB /* CPUBatchMatMul.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUBatchMatMul.cpp; sourceTree = ""; }; - EB4925BB224A147E00C512BB /* CPUMoments.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUMoments.cpp; sourceTree = ""; }; - EB4925BC224A147E00C512BB /* CPUInstanceNorm.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUInstanceNorm.hpp; sourceTree = ""; }; - EB4925BD224A147E00C512BB /* CPUBatchMatMul.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUBatchMatMul.hpp; sourceTree = ""; }; - EB69637322E070E00065993C /* CPUCosineSimilarity.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUCosineSimilarity.hpp; sourceTree = ""; }; - EB69637422E070E00065993C /* CPUCosineSimilarity.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUCosineSimilarity.cpp; sourceTree = ""; }; - EB69637722E072600065993C /* ShapeCosineSimilarity.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeCosineSimilarity.cpp; sourceTree = ""; }; EBAFCE662231133F000D4EF4 /* QuantizedAddTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = QuantizedAddTest.cpp; sourceTree = ""; }; - EBB38EC721E748B9005F76D7 /* 
ShapeShape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeShape.cpp; sourceTree = ""; }; - EBB38EC821E748B9005F76D7 /* ShapePriorbox.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePriorbox.cpp; sourceTree = ""; }; - EBB38EC921E748B9005F76D7 /* ShapeGather.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeGather.cpp; sourceTree = ""; }; - EBB38ECA21E748B9005F76D7 /* ShapeAsString.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeAsString.cpp; sourceTree = ""; }; - EBB38ECB21E748B9005F76D7 /* ShapeInterp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeInterp.cpp; sourceTree = ""; }; - EBB38ECC21E748B9005F76D7 /* ShapeCrop.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeCrop.cpp; sourceTree = ""; }; - EBB38ECD21E748B9005F76D7 /* ShapeMatMul.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeMatMul.cpp; sourceTree = ""; }; - EBB38ECE21E748B9005F76D7 /* ShapeTFQuantizedConv2D.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTFQuantizedConv2D.cpp; sourceTree = ""; }; - EBB38ECF21E748B9005F76D7 /* ShapeReshape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeReshape.cpp; sourceTree = ""; }; - EBB38ED021E748B9005F76D7 /* ShapeStridedSlice.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeStridedSlice.cpp; sourceTree = ""; }; - EBB38ED121E748B9005F76D7 /* ShapeQuantizedReshape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeQuantizedReshape.cpp; sourceTree = ""; }; - 
EBB38ED221E748B9005F76D7 /* ShapePool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePool.cpp; sourceTree = ""; }; - EBB38ED321E748B9005F76D7 /* ShapeInnerProduct.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeInnerProduct.cpp; sourceTree = ""; }; - EBB38ED421E748B9005F76D7 /* ShapeReduction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeReduction.cpp; sourceTree = ""; }; - EBB38ED521E748B9005F76D7 /* ShapeQuantizedAvgPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeQuantizedAvgPool.cpp; sourceTree = ""; }; - EBB38ED621E748B9005F76D7 /* ShapeArgMax.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeArgMax.cpp; sourceTree = ""; }; - EBB38ED821E748B9005F76D7 /* ShapeSliceTf.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSliceTf.cpp; sourceTree = ""; }; - EBB38EDA21E748B9005F76D7 /* ShapeResize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeResize.cpp; sourceTree = ""; }; - EBB38EDB21E748B9005F76D7 /* ShapeTranspose.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTranspose.cpp; sourceTree = ""; }; - EBB38EDC21E748B9005F76D7 /* ShapeWhere.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeWhere.cpp; sourceTree = ""; }; - EBB38EDD21E748B9005F76D7 /* ShapeSqueeze.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSqueeze.cpp; sourceTree = ""; }; - EBB38EDE21E748B9005F76D7 /* ShapeBinaryOp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeBinaryOp.cpp; sourceTree = ""; 
}; - EBB38EDF21E748B9005F76D7 /* ShapeNonMaxSuppressionV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeNonMaxSuppressionV2.cpp; sourceTree = ""; }; - EBB38EE021E748B9005F76D7 /* ShapeGatherV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeGatherV2.cpp; sourceTree = ""; }; - EBB38EE121E748B9005F76D7 /* ShapeConcat.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeConcat.cpp; sourceTree = ""; }; - EBB38EE221E748B9005F76D7 /* ShapeCast.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeCast.cpp; sourceTree = ""; }; - EBB38EE321E748B9005F76D7 /* ShapeBatchToSpaceND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeBatchToSpaceND.cpp; sourceTree = ""; }; - EBB38EE421E748B9005F76D7 /* ShapeTile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTile.cpp; sourceTree = ""; }; - EBB38EE521E748B9005F76D7 /* ShapeEltwise.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeEltwise.cpp; sourceTree = ""; }; - EBB38EE621E748B9005F76D7 /* ShapeConst.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeConst.cpp; sourceTree = ""; }; - EBB38EE721E748B9005F76D7 /* ShapeDetectionOutput.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDetectionOutput.cpp; sourceTree = ""; }; - EBB38EE821E748B9005F76D7 /* ShapeSize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSize.cpp; sourceTree = ""; }; - EBB38EE921E748B9005F76D7 /* ShapeTensorConvert.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = 
ShapeTensorConvert.cpp; sourceTree = ""; }; - EBB38EEA21E748B9005F76D7 /* ShapePermute.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePermute.cpp; sourceTree = ""; }; - EBB38EEB21E748B9005F76D7 /* ShapeExpandDims.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeExpandDims.cpp; sourceTree = ""; }; - EBB38EEC21E748B9005F76D7 /* ShapeROIPooling.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeROIPooling.cpp; sourceTree = ""; }; - EBB38EF121E748B9005F76D7 /* ShapeCropAndResize.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeCropAndResize.cpp; sourceTree = ""; }; - EBB38EF221E748B9005F76D7 /* ShapeQuantizedMaxPool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeQuantizedMaxPool.cpp; sourceTree = ""; }; - EBB38EF321E748B9005F76D7 /* ShapeRange.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeRange.cpp; sourceTree = ""; }; - EBB38EF421E748B9005F76D7 /* ShapeUnpack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeUnpack.cpp; sourceTree = ""; }; - EBB38EF521E748B9005F76D7 /* ShapeTopKV2.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeTopKV2.cpp; sourceTree = ""; }; - EBB38EF621E748B9005F76D7 /* ShapeFill.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeFill.cpp; sourceTree = ""; }; - EBB38EF721E748B9005F76D7 /* ShapeProposal.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeProposal.cpp; sourceTree = ""; }; - EBB38EF821E748B9005F76D7 /* ShapeConvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.cpp.cpp; path = ShapeConvolution.cpp; sourceTree = ""; }; - EBB38EF921E748B9005F76D7 /* ShapeRank.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeRank.cpp; sourceTree = ""; }; - EBB38EFA21E748B9005F76D7 /* ShapeLSTM.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeLSTM.cpp; sourceTree = ""; }; - EBB38EFB21E748B9005F76D7 /* ShapeSlice.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSlice.cpp; sourceTree = ""; }; - EBB38EFC21E748B9005F76D7 /* ShapeReduceJoin.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeReduceJoin.cpp; sourceTree = ""; }; - EBB38EFD21E748B9005F76D7 /* ShapeSpaceToBatchND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSpaceToBatchND.cpp; sourceTree = ""; }; - EBB38EFE21E748B9005F76D7 /* ShapePack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePack.cpp; sourceTree = ""; }; - EBB38EFF21E748B9005F76D7 /* ShapeDeconvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDeconvolution.cpp; sourceTree = ""; }; - EBD9FF11236A939700E188F5 /* ShapeDetectionPostProcess.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDetectionPostProcess.cpp; sourceTree = ""; }; - EBD9FF13236A93AB00E188F5 /* CPUDetectionPostProcess.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDetectionPostProcess.cpp; sourceTree = ""; }; - EBD9FF14236A93AB00E188F5 /* CPUDetectionPostProcess.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDetectionPostProcess.hpp; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin 
PBXFrameworksBuildPhase section */ @@ -1606,7 +2143,6 @@ 9273AB4C1FE7BE4D00477B22 /* Playground */, 0F1465B81FA18D1000F9860A /* Products */, 0F78AC251FCD495800205A7C /* Frameworks */, - 489BFA2E230E612400F6B785 /* Recovered References */, ); sourceTree = ""; wrapsLines = 0; @@ -1623,8 +2159,7 @@ 0F1465B91FA18D1000F9860A /* MNN */ = { isa = PBXGroup; children = ( - 4807071B231E512D00528CE5 /* express */, - 921722CC21DDF63A004583BF /* schema */, + 48593FB423A89B2F0069452A /* express */, 488873A8215B639D0079B12E /* source */, 48027FAA210571FB001E5982 /* include */, 0F1465BB1FA18D1000F9860A /* Info.plist */, @@ -1645,71 +2180,40 @@ 48027FAA210571FB001E5982 /* include */ = { isa = PBXGroup; children = ( - AEC3B31E211BEF710046AD86 /* MNNDefine.h */, - 4805294B2105BADB00AA776E /* MNNForwardType.h */, - 4821FA32216F214200B910CC /* MNNSharedContext.h */, - 48265468210ABA3000B2CFEA /* AutoTime.hpp */, - 48871459215153F900CCE0D8 /* ErrorCode.hpp */, - 4826546A210AF76D00B2CFEA /* HalideRuntime.h */, - 48871464215225D600CCE0D8 /* ImageProcess.hpp */, - 48871478215249EA00CCE0D8 /* Matrix.h */, - 480529612105DDA400AA776E /* Interpreter.hpp */, - 48871479215249EA00CCE0D8 /* Rect.h */, - 4851BE0F2122C1BC009BB0AC /* Tensor.hpp */, + 1F501F742397BA5A004E8721 /* AutoTime.hpp */, + 1F501F782397BA5A004E8721 /* ErrorCode.hpp */, + 1F501F762397BA5A004E8721 /* expr */, + 1F501F722397BA5A004E8721 /* HalideRuntime.h */, + 1F501F772397BA5A004E8721 /* ImageProcess.hpp */, + 1F501F752397BA5A004E8721 /* Interpreter.hpp */, + 1F501F7A2397BA5A004E8721 /* Matrix.h */, + 1F501F732397BA5A004E8721 /* MNNDefine.h */, + 1F501F7C2397BA5A004E8721 /* MNNForwardType.h */, + 1F501F7E2397BA5B004E8721 /* MNNSharedContext.h */, + 1F501F792397BA5A004E8721 /* Rect.h */, + 1F501F7B2397BA5A004E8721 /* Tensor.hpp */, ); name = include; path = ../../../include; sourceTree = ""; }; - 4807071B231E512D00528CE5 /* express */ = { + 48593FB423A89B2F0069452A /* express */ = { isa = PBXGroup; children = ( - 
4807071D231E512D00528CE5 /* include */, - 48070723231E512D00528CE5 /* source */, + 48FA474C23AA136300172C3B /* MergeOptimizer.cpp */, + 48FA473D23AA127B00172C3B /* Executor.cpp */, + 48FA474023AA127B00172C3B /* Expr.cpp */, + 48FA474123AA127B00172C3B /* MathOp.cpp */, + 48FA473C23AA127A00172C3B /* MergeOptimizer.hpp */, + 48FA473E23AA127B00172C3B /* NeuralNetWorkOp.cpp */, + 48FA473F23AA127B00172C3B /* Optimizer.cpp */, + 48FA474223AA127B00172C3B /* Utils.cpp */, + 48FA474323AA127B00172C3B /* Utils.hpp */, ); name = express; path = ../../../express; sourceTree = ""; }; - 4807071D231E512D00528CE5 /* include */ = { - isa = PBXGroup; - children = ( - 4807071E231E512D00528CE5 /* NeuralNetWorkOp.hpp */, - 4807071F231E512D00528CE5 /* Expr.hpp */, - 48070720231E512D00528CE5 /* MathOp.hpp */, - 48070721231E512D00528CE5 /* ExprCreator.hpp */, - 48070722231E512D00528CE5 /* Optimizer.hpp */, - ); - path = include; - sourceTree = ""; - }; - 48070723231E512D00528CE5 /* source */ = { - isa = PBXGroup; - children = ( - 48070724231E512D00528CE5 /* Optimizer.cpp */, - 48070725231E512D00528CE5 /* Utils.cpp */, - 48070726231E512D00528CE5 /* Solution.cpp */, - 48070727231E512D00528CE5 /* optimizer */, - 4807072A231E512D00528CE5 /* InsideExpr.hpp */, - 4807072B231E512D00528CE5 /* Expr.cpp */, - 4807072C231E512D00528CE5 /* MathOp.cpp */, - 4807072D231E512D00528CE5 /* InsideExpr.cpp */, - 4807072E231E512D00528CE5 /* Utils.hpp */, - 4807072F231E512D00528CE5 /* Solution.hpp */, - 48070730231E512D00528CE5 /* NeuralNetWorkOp.cpp */, - ); - path = source; - sourceTree = ""; - }; - 48070727231E512D00528CE5 /* optimizer */ = { - isa = PBXGroup; - children = ( - 48070728231E512D00528CE5 /* MergeOptimizer.hpp */, - 48070729231E512D00528CE5 /* MergeOptimizer.cpp */, - ); - path = optimizer; - sourceTree = ""; - }; 488873A8215B639D0079B12E /* source */ = { isa = PBXGroup; children = ( @@ -1727,39 +2231,39 @@ 488873AC215B639D0079B12E /* core */ = { isa = PBXGroup; children = ( - 
489BFA2B230E3D1F00F6B785 /* FileLoader.cpp */, - 489BFA2A230E3D1F00F6B785 /* FileLoader.hpp */, - AE7BE4B6228555A2002CEEA6 /* BackendRegister.cpp */, - 488873C3215B639D0079B12E /* MNNMemoryUtils.c */, - 488873B9215B639D0079B12E /* MNNMemoryUtils.h */, - 488873B5215B639D0079B12E /* AutoStorage.h */, - 488873C4215B639D0079B12E /* AutoTime.cpp */, - 4841B5F321EAE98B002E5D66 /* Backend.cpp */, - 4841B5F521EAE98B002E5D66 /* Backend.hpp */, - 92D765A32228188600178BE5 /* BackendFactory.cpp */, - 92D765A42228188600178BE5 /* BackendFactory.hpp */, - 48A8A63621D8A43D00C2B9A7 /* BufferAllocator.cpp */, - 488873BD215B639D0079B12E /* BufferAllocator.hpp */, - 488873B1215B639D0079B12E /* Concurrency.h */, - 92D765BE22281CFF00178BE5 /* DirectedAcyclicGraph.hpp */, - 4841B5FA21EAE998002E5D66 /* Execution.cpp */, - 4841B5FB21EAE998002E5D66 /* Execution.hpp */, - 488873B7215B639D0079B12E /* Macro.h */, - 92D765A12228188600178BE5 /* Interpreter.cpp */, - 488873C6215B639D0079B12E /* NonCopyable.hpp */, - 92D7659C2228188500178BE5 /* Pipeline.cpp */, - 92D765A82228188700178BE5 /* Pipeline.hpp */, - 92D765A72228188700178BE5 /* Schedule.cpp */, - 92D765A02228188600178BE5 /* Schedule.hpp */, - 92D765A52228188600178BE5 /* Session.cpp */, - 92D7659F2228188500178BE5 /* Session.hpp */, - 4841B5F421EAE98B002E5D66 /* SizeComputer.cpp */, - 4841B5F221EAE98B002E5D66 /* SizeComputer.hpp */, - 488873C2215B639D0079B12E /* Tensor.cpp */, - 488873BA215B639D0079B12E /* TensorUtils.cpp */, - 488873AF215B639D0079B12E /* TensorUtils.hpp */, - 92D7659D2228188500178BE5 /* WrapExecution.cpp */, - 92D765A22228188600178BE5 /* WrapExecution.hpp */, + 92FF048423AA0BF900AC97F6 /* AutoStorage.h */, + 92FF048723AA0BFA00AC97F6 /* AutoTime.cpp */, + 92FF048D23AA0BFA00AC97F6 /* Backend.cpp */, + 92FF04A023AA0BFB00AC97F6 /* Backend.hpp */, + 92FF049123AA0BFA00AC97F6 /* BackendFactory.cpp */, + 92FF048B23AA0BFA00AC97F6 /* BackendFactory.hpp */, + 92FF048623AA0BFA00AC97F6 /* BackendRegister.cpp */, + 
92FF048923AA0BFA00AC97F6 /* BufferAllocator.cpp */, + 92FF049A23AA0BFB00AC97F6 /* BufferAllocator.hpp */, + 92FF049E23AA0BFB00AC97F6 /* Concurrency.h */, + 92FF049023AA0BFA00AC97F6 /* DirectedAcyclicGraph.hpp */, + 92FF049C23AA0BFB00AC97F6 /* Execution.cpp */, + 92FF048C23AA0BFA00AC97F6 /* Execution.hpp */, + 92FF049D23AA0BFB00AC97F6 /* FileLoader.cpp */, + 92FF048523AA0BFA00AC97F6 /* FileLoader.hpp */, + 92FF048323AA0BF900AC97F6 /* Interpreter.cpp */, + 92FF048E23AA0BFA00AC97F6 /* Macro.h */, + 92FF049723AA0BFB00AC97F6 /* MNNMemoryUtils.cpp */, + 92FF049323AA0BFA00AC97F6 /* MNNMemoryUtils.h */, + 92FF049B23AA0BFB00AC97F6 /* NonCopyable.hpp */, + 92FF04A123AA0BFB00AC97F6 /* Pipeline.cpp */, + 92FF048A23AA0BFA00AC97F6 /* Pipeline.hpp */, + 92FF049223AA0BFA00AC97F6 /* Schedule.cpp */, + 92FF048823AA0BFA00AC97F6 /* Schedule.hpp */, + 92FF04A323AA0BFB00AC97F6 /* Session.cpp */, + 92FF04A223AA0BFB00AC97F6 /* Session.hpp */, + 92FF048F23AA0BFA00AC97F6 /* SizeComputer.cpp */, + 92FF049823AA0BFB00AC97F6 /* SizeComputer.hpp */, + 92FF049F23AA0BFB00AC97F6 /* Tensor.cpp */, + 92FF049523AA0BFA00AC97F6 /* TensorUtils.cpp */, + 92FF049423AA0BFA00AC97F6 /* TensorUtils.hpp */, + 92FF049923AA0BFB00AC97F6 /* WrapExecution.cpp */, + 92FF049623AA0BFA00AC97F6 /* WrapExecution.hpp */, ); path = core; sourceTree = ""; @@ -1767,181 +2271,181 @@ 488873C8215B639D0079B12E /* metal */ = { isa = PBXGroup; children = ( - AE7BE4BC22855665002CEEA6 /* MetalOPRegister.mm */, - 488873DB215B639D0079B12E /* MetalBackend.hpp */, + 1F501F002397BA49004E8721 /* MetalBackend.hpp */, 488873E0215B639D0079B12E /* MetalBackend.metal */, 48887405215B639D0079B12E /* MetalBackend.mm */, - 9223E12821D3755F0067544A /* MetalBatchToSpaceND.hpp */, + 1F501EFC2397BA49004E8721 /* MetalBatchToSpaceND.hpp */, 9223E12B21D3756B0067544A /* MetalBatchToSpaceND.metal */, 9223E12721D3755F0067544A /* MetalBatchToSpaceND.mm */, - 486FDF46223E4B2800F487FB /* MetalBinary.hpp */, + 1F501F022397BA49004E8721 /* MetalBinary.hpp */, 
486FDF45223E4B2800F487FB /* MetalBinary.metal */, 486FDF44223E4B2700F487FB /* MetalBinary.mm */, - 92EAC19821CB3CD60056F4C2 /* MetalCast.hpp */, + 1F501F012397BA49004E8721 /* MetalCast.hpp */, 92EAC19B21CB3CE20056F4C2 /* MetalCast.metal */, 92EAC19721CB3CD60056F4C2 /* MetalCast.mm */, - 4888740A215B639D0079B12E /* MetalConcat.hpp */, + 1F501F2C2397BA4C004E8721 /* MetalConcat.hpp */, 92965EDD2175B3C300B86ABE /* MetalConcat.metal */, 488873E6215B639D0079B12E /* MetalConcat.mm */, - 488873F7215B639D0079B12E /* MetalConvolution.hpp */, + 1F501F2B2397BA4C004E8721 /* MetalConvolution.hpp */, 488873DC215B639D0079B12E /* MetalConvolution.metal */, 488873E1215B639D0079B12E /* MetalConvolution.mm */, - 925A8914222395ED00D22428 /* MetalConvolution1x1.hpp */, + 1F501F192397BA4B004E8721 /* MetalConvolution1x1.hpp */, 925A89172223961F00D22428 /* MetalConvolution1x1.metal */, 925A8913222395ED00D22428 /* MetalConvolution1x1.mm */, 925A89112223951200D22428 /* MetalConvolutionActivation.metal */, - 925801432223B8D100555D43 /* MetalConvolutionCommon.hpp */, + 1F501F112397BA4A004E8721 /* MetalConvolutionCommon.hpp */, 925801422223B8D100555D43 /* MetalConvolutionCommon.mm */, - 9258013D2223B77C00555D43 /* MetalConvolutionDepthwise.hpp */, + 1F501F092397BA4A004E8721 /* MetalConvolutionDepthwise.hpp */, 925801402223B79600555D43 /* MetalConvolutionDepthwise.metal */, 9258013C2223B77C00555D43 /* MetalConvolutionDepthwise.mm */, - 92369E60222544DD009D3A05 /* MetalConvolutionGEMM.hpp */, + 1F501F292397BA4C004E8721 /* MetalConvolutionGEMM.hpp */, 92369E63222544FE009D3A05 /* MetalConvolutionGEMM.metal */, 92369E61222544DD009D3A05 /* MetalConvolutionGEMM.mm */, - 48C054872201996200E91945 /* MetalConvolutionWinograd.hpp */, + 1F501F1E2397BA4B004E8721 /* MetalConvolutionWinograd.hpp */, 925E87DF220447900000192E /* MetalConvolutionWinograd.metal */, 48C054862201996200E91945 /* MetalConvolutionWinograd.mm */, - 92EEFF26217F0EEF00F89377 /* MetalCrop.hpp */, + 1F501F152397BA4B004E8721 /* 
MetalCrop.hpp */, 92EEFF29217F0F0F00F89377 /* MetalCrop.metal */, 92EEFF25217F0EEF00F89377 /* MetalCrop.mm */, - 923B7FA321A6C92F002AFCE0 /* MetalCropAndResize.hpp */, + 1F501F0E2397BA4A004E8721 /* MetalCropAndResize.hpp */, 923B7FA621A6C940002AFCE0 /* MetalCropAndResize.metal */, 923B7FA221A6C92F002AFCE0 /* MetalCropAndResize.mm */, - 488873FD215B639D0079B12E /* MetalDeconvolution.hpp */, + 1F501F312397BA4C004E8721 /* MetalDeconvolution.hpp */, 488873FC215B639D0079B12E /* MetalDeconvolution.metal */, 488873F2215B639D0079B12E /* MetalDeconvolution.mm */, - 92A6476822014A7900DDD1C4 /* MetalDefine.h */, + 1F501F0A2397BA4A004E8721 /* MetalDefine.h */, 92A8D70721A54087009C2201 /* MetalDefine.metal */, - 920004D721EDC30E00BCE892 /* MetalDequantize.hpp */, + 1F501F282397BA4C004E8721 /* MetalDequantize.hpp */, 920004D621EDC30E00BCE892 /* MetalDequantize.metal */, 920004D521EDC30E00BCE892 /* MetalDequantize.mm */, - 488873E3215B639D0079B12E /* MetalEltwise.hpp */, + 1F501F202397BA4B004E8721 /* MetalEltwise.hpp */, 4888740F215B639D0079B12E /* MetalEltwise.metal */, 488873DE215B639D0079B12E /* MetalEltwise.mm */, - 92682C5E2181A2EF00B52B9D /* MetalFill.hpp */, + 1F501F132397BA4B004E8721 /* MetalFill.hpp */, 92682C612181A2F900B52B9D /* MetalFill.metal */, 92682C5D2181A2EF00B52B9D /* MetalFill.mm */, 924F132621ABEA28006D46A4 /* MetalFixedPoint.metal */, - 923B7F8821A653AB002AFCE0 /* MetalGather.hpp */, + 1F501F062397BA4A004E8721 /* MetalGather.hpp */, 923B7F8B21A653BB002AFCE0 /* MetalGather.metal */, 923B7F8721A653AB002AFCE0 /* MetalGather.mm */, - 923B7F9121A68091002AFCE0 /* MetalGatherV2.hpp */, + 1F501F2A2397BA4C004E8721 /* MetalGatherV2.hpp */, 923B7F9421A680A1002AFCE0 /* MetalGatherV2.metal */, 923B7F9021A68091002AFCE0 /* MetalGatherV2.mm */, - 48887403215B639D0079B12E /* MetalInterp.hpp */, + 1F501EFB2397BA49004E8721 /* MetalInterp.hpp */, 488873FF215B639D0079B12E /* MetalInterp.mm */, - 488873CC215B639D0079B12E /* MetalLRN.hpp */, + 1F501F2F2397BA4C004E8721 /* 
MetalLRN.hpp */, 488873CB215B639D0079B12E /* MetalLRN.metal */, 488873D5215B639D0079B12E /* MetalLRN.mm */, - 488873EA215B639D0079B12E /* MetalLSTM.hpp */, + 1F501F032397BA49004E8721 /* MetalLSTM.hpp */, 488873D9215B639D0079B12E /* MetalLSTM.metal */, 4888740D215B639D0079B12E /* MetalLSTM.mm */, - CE96FE6521707D57004AB400 /* MetalMatMul.hpp */, + 1F501F0C2397BA4A004E8721 /* MetalMatMul.hpp */, CE96FE6F21707D58004AB400 /* MetalMatMul.metal */, CE96FE6321707D57004AB400 /* MetalMatMul.mm */, - 488873DA215B639D0079B12E /* MetalNormalize.hpp */, + 1F501F252397BA4C004E8721 /* MetalNormalize.hpp */, 488873E8215B639D0079B12E /* MetalNormalize.metal */, 488873D6215B639D0079B12E /* MetalNormalize.mm */, - 92921A85219C24CD00B063D1 /* MetalPack.hpp */, + AE7BE4BC22855665002CEEA6 /* MetalOPRegister.mm */, + 1F501F2D2397BA4C004E8721 /* MetalPack.hpp */, 92921A88219C272B00B063D1 /* MetalPack.metal */, 92921A84219C24CD00B063D1 /* MetalPack.mm */, - 488873EC215B639D0079B12E /* MetalPermute.hpp */, + 1F501F2E2397BA4C004E8721 /* MetalPermute.hpp */, 488873CD215B639D0079B12E /* MetalPermute.metal */, 4888740B215B639D0079B12E /* MetalPermute.mm */, - 488873E4215B639D0079B12E /* MetalPooling.hpp */, + 1F501F1A2397BA4B004E8721 /* MetalPooling.hpp */, 488873FE215B639D0079B12E /* MetalPooling.metal */, 488873F4215B639D0079B12E /* MetalPooling.mm */, - 488873EB215B639D0079B12E /* MetalPReLU.hpp */, + 1F501F302397BA4C004E8721 /* MetalPReLU.hpp */, 48887409215B639D0079B12E /* MetalPReLU.metal */, 4888740C215B639D0079B12E /* MetalPReLU.mm */, - 92351C8621992AB2002CA341 /* MetalQuantizedAdd.hpp */, + 1F501F1B2397BA4B004E8721 /* MetalQuantizedAdd.hpp */, 92351C8921992AC6002CA341 /* MetalQuantizedAdd.metal */, 92351C8521992AB2002CA341 /* MetalQuantizedAdd.mm */, - 9260B27721A7C5EA00D48C97 /* MetalQuantizedAvgPool.hpp */, + 1F501F322397BA4C004E8721 /* MetalQuantizedAvgPool.hpp */, 9260B27A21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal */, 9260B27621A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm */, - 
9260B27121A7C5CD00D48C97 /* MetalQuantizedMaxPool.hpp */, + 1F501F122397BA4A004E8721 /* MetalQuantizedMaxPool.hpp */, 9260B27421A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal */, 9260B27021A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm */, - 923B7F9A21A69E2E002AFCE0 /* MetalQuantizedReshape.hpp */, + 1F501EFA2397BA49004E8721 /* MetalQuantizedReshape.hpp */, 923B7F9921A69E2E002AFCE0 /* MetalQuantizedReshape.mm */, - 924F132121ABD470006D46A4 /* MetalQuantizedSoftmax.hpp */, + 1F501F262397BA4C004E8721 /* MetalQuantizedSoftmax.hpp */, 924F132421ABD47F006D46A4 /* MetalQuantizedSoftmax.metal */, 924F132021ABD470006D46A4 /* MetalQuantizedSoftmax.mm */, - 9225694F219D6E0200F251E2 /* MetalRange.hpp */, + 1F501F272397BA4C004E8721 /* MetalRange.hpp */, 92256952219D6E1000F251E2 /* MetalRange.metal */, 9225694E219D6E0200F251E2 /* MetalRange.mm */, - 92256946219D698100F251E2 /* MetalRank.hpp */, + 1F501F0F2397BA4A004E8721 /* MetalRank.hpp */, 92256949219D698900F251E2 /* MetalRank.metal */, 92256945219D698100F251E2 /* MetalRank.mm */, - 92EEFF2F2180159600F89377 /* MetalReduction.hpp */, + 1F501F342397BA4D004E8721 /* MetalReduction.hpp */, 92EEFF32218015A300F89377 /* MetalReduction.metal */, 92EEFF2E2180159600F89377 /* MetalReduction.mm */, - 488873E2215B639D0079B12E /* MetalReLU.hpp */, + 1F501F212397BA4B004E8721 /* MetalReLU.hpp */, 488873D1215B639D0079B12E /* MetalReLU.metal */, 488873F3215B639D0079B12E /* MetalReLU.mm */, - 92C674F822549A1600011D33 /* MetalReLU6.hpp */, + 1F501F052397BA49004E8721 /* MetalReLU6.hpp */, 92C674FB22549A2500011D33 /* MetalReLU6.metal */, 92C674F722549A1600011D33 /* MetalReLU6.mm */, - 488873CE215B639D0079B12E /* MetalReshape.hpp */, + 1F501F352397BA4D004E8721 /* MetalReshape.hpp */, 488873CA215B639D0079B12E /* MetalReshape.metal */, 488873FA215B639D0079B12E /* MetalReshape.mm */, - 488873F1215B639D0079B12E /* MetalResize.hpp */, + 1F501F082397BA4A004E8721 /* MetalResize.hpp */, 48887408215B639D0079B12E /* MetalResize.metal */, 4888740E215B639D0079B12E 
/* MetalResize.mm */, - 488873D3215B639D0079B12E /* MetalROIPooling.hpp */, + 1F501F042397BA49004E8721 /* MetalROIPooling.hpp */, 48887400215B639D0079B12E /* MetalROIPooling.metal */, 48887402215B639D0079B12E /* MetalROIPooling.mm */, - 48887406215B639D0079B12E /* MetalScale.hpp */, + 1F501F162397BA4B004E8721 /* MetalScale.hpp */, 488873F6215B639D0079B12E /* MetalScale.metal */, 488873F8215B639D0079B12E /* MetalScale.mm */, - 92682C5221819BF100B52B9D /* MetalSeLU.hpp */, + 1F501F242397BA4B004E8721 /* MetalSeLU.hpp */, 92682C5521819BFA00B52B9D /* MetalSeLU.metal */, 92682C5121819BF100B52B9D /* MetalSeLU.mm */, - CE96FE6121707D57004AB400 /* MetalSigmoid.hpp */, + 1F501F232397BA4B004E8721 /* MetalSigmoid.hpp */, CE96FE6D21707D58004AB400 /* MetalSigmoid.metal */, CE96FE6C21707D58004AB400 /* MetalSigmoid.mm */, - 9243106B2239FE0B0016DA25 /* MetalSize.hpp */, + 1F501F222397BA4B004E8721 /* MetalSize.hpp */, 9243106E2239FE190016DA25 /* MetalSize.metal */, 9243106A2239FE0A0016DA25 /* MetalSize.mm */, - 488873E5215B639D0079B12E /* MetalSlice.hpp */, + 1F501F182397BA4B004E8721 /* MetalSlice.hpp */, 48887407215B639D0079B12E /* MetalSlice.metal */, 488873DD215B639D0079B12E /* MetalSlice.mm */, - 92256934219D14CD00F251E2 /* MetalSliceTF.hpp */, + 1F501F072397BA4A004E8721 /* MetalSliceTF.hpp */, 92256937219D150900F251E2 /* MetalSliceTF.metal */, 92256933219D14CD00F251E2 /* MetalSliceTF.mm */, - 488873F9215B639D0079B12E /* MetalSoftmax.hpp */, + 1F501F0B2397BA4A004E8721 /* MetalSoftmax.hpp */, 488873E9215B639D0079B12E /* MetalSoftmax.metal */, 488873D0215B639D0079B12E /* MetalSoftmax.mm */, - 9223E11721D34BE40067544A /* MetalSpaceToBatchND.hpp */, + 1F501F1F2397BA4B004E8721 /* MetalSpaceToBatchND.hpp */, 9223E11A21D34C260067544A /* MetalSpaceToBatchND.metal */, 9223E11621D34BE40067544A /* MetalSpaceToBatchND.mm */, - 488873F0215B639D0079B12E /* MetalSpatialProduct.hpp */, + 1F501EFE2397BA49004E8721 /* MetalSpatialProduct.hpp */, 48887401215B639D0079B12E /* 
MetalSpatialProduct.metal */, 488873C9215B639D0079B12E /* MetalSpatialProduct.mm */, - 9223E10E21D327F40067544A /* MetalSqueeze.hpp */, + 1F501F142397BA4B004E8721 /* MetalSqueeze.hpp */, 9223E10D21D327F40067544A /* MetalSqueeze.mm */, - 92575978219EA07F00918499 /* MetalStridedSlice.hpp */, + 1F501F332397BA4C004E8721 /* MetalStridedSlice.hpp */, 9257597B219EA08400918499 /* MetalStridedSlice.metal */, 92575977219EA07F00918499 /* MetalStridedSlice.mm */, - 488873D4215B639D0079B12E /* MetalTanH.hpp */, + 1F501F1C2397BA4B004E8721 /* MetalTanH.hpp */, 488873FB215B639D0079B12E /* MetalTanH.metal */, 488873CF215B639D0079B12E /* MetalTanH.mm */, - CE96FE6221707D57004AB400 /* MetalTensorConverter.hpp */, + 1F501F0D2397BA4A004E8721 /* MetalTensorConverter.hpp */, CE96FE5F21707D57004AB400 /* MetalTensorConverter.mm */, - 92A8D6FF21A40695009C2201 /* MetalTFQuantizedConv2D.hpp */, + 1F501F1D2397BA4B004E8721 /* MetalTFQuantizedConv2D.hpp */, 92A8D70221A406A8009C2201 /* MetalTFQuantizedConv2D.metal */, 92A8D6FE21A40695009C2201 /* MetalTFQuantizedConv2D.mm */, - 92682C4C2181729200B52B9D /* MetalTile.hpp */, + 1F501F172397BA4B004E8721 /* MetalTile.hpp */, 92682C4F218172A300B52B9D /* MetalTile.metal */, 92682C4B2181729200B52B9D /* MetalTile.mm */, - 924F131821A81C74006D46A4 /* MetalTranspose.hpp */, + 1F501F102397BA4A004E8721 /* MetalTranspose.hpp */, 924F131B21A81C80006D46A4 /* MetalTranspose.metal */, 924F131721A81C74006D46A4 /* MetalTranspose.mm */, - CE96FE6021707D57004AB400 /* MetalUnary.hpp */, + 1F501EFF2397BA49004E8721 /* MetalUnary.hpp */, CE96FE6921707D58004AB400 /* MetalUnary.metal */, CE96FE6621707D57004AB400 /* MetalUnary.mm */, - 48887404215B639D0079B12E /* MNNMetalContext.h */, + 1F501EFD2397BA49004E8721 /* MNNMetalContext.h */, 488873D7215B639D0079B12E /* MNNMetalContext.mm */, ); name = metal; @@ -1951,465 +2455,251 @@ 48887410215B639D0079B12E /* cpu */ = { isa = PBXGroup; children = ( - EBD9FF13236A93AB00E188F5 /* CPUDetectionPostProcess.cpp */, - 
EBD9FF14236A93AB00E188F5 /* CPUDetectionPostProcess.hpp */, - 48057D8B2330E85C00F922BE /* CPUMatrixBandPart.cpp */, - 48057D8A2330E85C00F922BE /* CPUMatrixBandPart.hpp */, - 48057D872330A90900F922BE /* CPUGatherND.cpp */, - 48057D862330A90900F922BE /* CPUGatherND.hpp */, - C422D7C02339D0EC00FD59D0 /* CPUElu.cpp */, - C422D7C12339D0ED00FD59D0 /* CPUElu.hpp */, - C422D766232F567300FD59D0 /* CPUConvolution3D.cpp */, - C422D767232F567300FD59D0 /* CPUConvolution3D.hpp */, - 48070718231A7B5000528CE5 /* CPUReverseSequence.cpp */, - 48070717231A7B5000528CE5 /* CPUReverseSequence.hpp */, - EB288360230EAF6C00837188 /* CPUEltwiseInt8.cpp */, - EB288361230EAF6C00837188 /* CPUEltwiseInt8.hpp */, - 11EDD60622E55A09007F3793 /* CPUDepthToSpace.cpp */, - 11EDD60722E55A09007F3793 /* CPUDepthToSpace.hpp */, - 11EDD60822E55A09007F3793 /* CPUSpaceToDepth.cpp */, - 11EDD60922E55A09007F3793 /* CPUSpaceToDepth.hpp */, - EB69637422E070E00065993C /* CPUCosineSimilarity.cpp */, - EB69637322E070E00065993C /* CPUCosineSimilarity.hpp */, - 487970F322C9C07000795502 /* CPUPoolInt8.cpp */, - 487970F222C9C07000795502 /* CPUPoolInt8.hpp */, - 487970D222C9BF4B00795502 /* CPUConvInt8.cpp */, - 487970CE22C9BF4A00795502 /* CPUConvInt8.hpp */, - 487970CD22C9BF4A00795502 /* CPUDepthwiseConvInt8.cpp */, - 487970CF22C9BF4A00795502 /* CPUDepthwiseConvInt8.hpp */, - 487970D322C9BF4B00795502 /* CPUFloatToInt8.cpp */, - 487970CC22C9BF4A00795502 /* CPUFloatToInt8.hpp */, - 487970D022C9BF4A00795502 /* CPUInt8ToFloat.cpp */, - 487970D122C9BF4A00795502 /* CPUInt8ToFloat.hpp */, - 4843AA5122A7E9AB00889A63 /* CPUConv2DBackPropFilter.cpp */, - 4843AA4C22A7E9AA00889A63 /* CPUConv2DBackPropFilter.hpp */, - 4843AA5022A7E9AB00889A63 /* CPUPoolGrad.cpp */, - 4843AA4E22A7E9AB00889A63 /* CPUPoolGrad.hpp */, - 4843AA4D22A7E9AA00889A63 /* CPUReluGrad.cpp */, - 4843AA4F22A7E9AB00889A63 /* CPUReluGrad.hpp */, - 4843AA5222A7E9AB00889A63 /* CPUSoftmaxGrad.cpp */, - 4843AA5322A7E9AB00889A63 /* CPUSoftmaxGrad.hpp */, - 
AE7BE4BA2285564F002CEEA6 /* CPUOPRegister.cpp */, - EB4925BA224A147E00C512BB /* CPUBatchMatMul.cpp */, - EB4925BD224A147E00C512BB /* CPUBatchMatMul.hpp */, - EB4925B6224A147E00C512BB /* CPUInstanceNorm.cpp */, - EB4925BC224A147E00C512BB /* CPUInstanceNorm.hpp */, - EB4925BB224A147E00C512BB /* CPUMoments.cpp */, - EB4925B8224A147E00C512BB /* CPUMoments.hpp */, - EB4925B7224A147E00C512BB /* CPURNNSequenceGRU.cpp */, - EB4925B9224A147E00C512BB /* CPURNNSequenceGRU.hpp */, - 486FDF3D223E495A00F487FB /* CPUBinary.cpp */, - 486FDF3C223E495A00F487FB /* CPUBinary.hpp */, - 486FDF3E223E495A00F487FB /* CPUUnary.cpp */, - 486FDF3F223E495B00F487FB /* CPUUnary.hpp */, - 488874AD215B639E0079B12E /* arm */, - 48887469215B639D0079B12E /* compute */, - 48887413215B639D0079B12E /* CPUArgMax.cpp */, - 488874A1215B639E0079B12E /* CPUArgMax.hpp */, - 4888750B215B639E0079B12E /* CPUAsString.cpp */, - 48887425215B639D0079B12E /* CPUAsString.hpp */, - 48887499215B639E0079B12E /* CPUBackend.cpp */, - 4888744E215B639D0079B12E /* CPUBackend.hpp */, - 9223E11D21D34C6B0067544A /* CPUBatchToSpaceND.cpp */, - 9223E11C21D34C6B0067544A /* CPUBatchToSpaceND.hpp */, - 48887423215B639D0079B12E /* CPUCast.cpp */, - 48887508215B639E0079B12E /* CPUCast.hpp */, - 4888744F215B639D0079B12E /* CPUConcat.cpp */, - 4888749A215B639E0079B12E /* CPUConcat.hpp */, - 4888742A215B639D0079B12E /* CPUConst.cpp */, - 48887510215B639E0079B12E /* CPUConst.hpp */, - 48887505215B639E0079B12E /* CPUConvolution.cpp */, - 48887420215B639D0079B12E /* CPUConvolution.hpp */, - 488874A5215B639E0079B12E /* CPUConvolutionDepthwise.cpp */, - 48887415215B639D0079B12E /* CPUConvolutionDepthwise.hpp */, - 92EEFE8B217F0CBB00F89377 /* CPUCrop.cpp */, - 92EEFE3B217F0CBB00F89377 /* CPUCrop.hpp */, - 48887411215B639D0079B12E /* CPUCropAndResize.cpp */, - 488874A3215B639E0079B12E /* CPUCropAndResize.hpp */, - 4888743F215B639D0079B12E /* CPUDeconvolution.cpp */, - 48887466215B639D0079B12E /* CPUDeconvolution.hpp */, - 
48887491215B639E0079B12E /* CPUDeconvolutionDepthwise.cpp */, - 48887448215B639D0079B12E /* CPUDeconvolutionDepthwise.hpp */, - 4841B60B21EC607D002E5D66 /* CPUDequantize.cpp */, - 4841B60821EC607D002E5D66 /* CPUDequantize.hpp */, - 4888742C215B639D0079B12E /* CPUDetectionOutput.cpp */, - 48887514215B639E0079B12E /* CPUDetectionOutput.hpp */, - 48887506215B639E0079B12E /* CPUEltwise.cpp */, - 48887421215B639D0079B12E /* CPUEltwise.hpp */, - 48887467215B639D0079B12E /* CPUExpandDims.cpp */, - 4888743C215B639D0079B12E /* CPUExpandDims.hpp */, - 48887459215B639D0079B12E /* CPUFill.cpp */, - 48887432215B639D0079B12E /* CPUFill.hpp */, - 485DD422217F4C7600129159 /* CPUFixedPoint.hpp */, - 48887424215B639D0079B12E /* CPUGather.cpp */, - 4888750A215B639E0079B12E /* CPUGather.hpp */, - 4888749E215B639E0079B12E /* CPUGatherV2.cpp */, - 48887455215B639D0079B12E /* CPUGatherV2.hpp */, - 48887498215B639E0079B12E /* CPUInnerProduct.cpp */, - 48887450215B639D0079B12E /* CPUInnerProduct.hpp */, - 48887429215B639D0079B12E /* CPUInterp.cpp */, - 4888750E215B639E0079B12E /* CPUInterp.hpp */, - 4888745C215B639D0079B12E /* CPULRN.cpp */, - 48887435215B639D0079B12E /* CPULRN.hpp */, - 4888748F215B639E0079B12E /* CPULSTM.cpp */, - 48887446215B639D0079B12E /* CPULSTM.hpp */, - 4888742F215B639D0079B12E /* CPUMatMul.cpp */, - 48887515215B639E0079B12E /* CPUMatMul.hpp */, - 4888749B215B639E0079B12E /* CPUNonMaxSuppressionV2.cpp */, - 48887453215B639D0079B12E /* CPUNonMaxSuppressionV2.hpp */, - 4888748C215B639E0079B12E /* CPUNormalize.cpp */, - 48887445215B639D0079B12E /* CPUNormalize.hpp */, - 4888749D215B639E0079B12E /* CPUPack.cpp */, - 48887452215B639D0079B12E /* CPUPack.hpp */, - 48887517215B639E0079B12E /* CPUPermute.cpp */, - 48887431215B639D0079B12E /* CPUPermute.hpp */, - 48887447215B639D0079B12E /* CPUPool.cpp */, - 48887492215B639E0079B12E /* CPUPool.hpp */, - 48887511215B639E0079B12E /* CPUPriorbox.cpp */, - 4888742D215B639D0079B12E /* CPUPriorbox.hpp */, - 
48887426215B639D0079B12E /* CPUProposal.cpp */, - 4888750D215B639E0079B12E /* CPUProposal.hpp */, - 485DD417217F49C500129159 /* CPUQuanConvolutionDepthwise.cpp */, - 485DD416217F49C500129159 /* CPUQuanConvolutionDepthwise.hpp */, - 4888741B215B639D0079B12E /* CPUQuantizationUtils.hpp */, - 485DD40E217F495500129159 /* CPUQuantizedAdd.cpp */, - 485DD40B217F495400129159 /* CPUQuantizedAdd.hpp */, - 48887504215B639E0079B12E /* CPUQuantizedAvgPool.cpp */, - 4888741E215B639D0079B12E /* CPUQuantizedAvgPool.hpp */, - 4841B60621EC607D002E5D66 /* CPUQuantizedConcat.cpp */, - 4841B60921EC607D002E5D66 /* CPUQuantizedConcat.hpp */, - 4841B60A21EC607D002E5D66 /* CPUQuantizedLogistic.cpp */, - 4841B60721EC607D002E5D66 /* CPUQuantizedLogistic.hpp */, - 4888743B215B639D0079B12E /* CPUQuantizedMaxPool.cpp */, - 48887463215B639D0079B12E /* CPUQuantizedMaxPool.hpp */, - 48887513215B639E0079B12E /* CPUQuantizedReshape.cpp */, - 4888742B215B639D0079B12E /* CPUQuantizedReshape.hpp */, - 485DD40C217F495500129159 /* CPUQuantizedSoftmax.cpp */, - 485DD40F217F495500129159 /* CPUQuantizedSoftmax.hpp */, - 488874A6215B639E0079B12E /* CPURange.cpp */, - 48887414215B639D0079B12E /* CPURange.hpp */, - 48887495215B639E0079B12E /* CPURank.cpp */, - 4888744B215B639D0079B12E /* CPURank.hpp */, - 4888750F215B639E0079B12E /* CPUReduceJoin.cpp */, - 48887428215B639D0079B12E /* CPUReduceJoin.hpp */, - 48887464215B639D0079B12E /* CPUReduction.cpp */, - 4888743E215B639D0079B12E /* CPUReduction.hpp */, - 48887468215B639D0079B12E /* CPURelu.cpp */, - 48887440215B639D0079B12E /* CPURelu.hpp */, - 48887465215B639D0079B12E /* CPUReshape.cpp */, - 4888743D215B639D0079B12E /* CPUReshape.hpp */, - 48887422215B639D0079B12E /* CPUResize.cpp */, - 48887509215B639E0079B12E /* CPUResize.hpp */, - 4888745E215B639D0079B12E /* CPUROIPooling.cpp */, - 48887439215B639D0079B12E /* CPUROIPooling.hpp */, - 48887419215B639D0079B12E /* CPUScale.cpp */, - 488874A9215B639E0079B12E /* CPUScale.hpp */, - 48887412215B639D0079B12E /* 
CPUSelu.cpp */, - 488874A2215B639E0079B12E /* CPUSelu.hpp */, - 4888744D215B639D0079B12E /* CPUShape.cpp */, - 48887497215B639E0079B12E /* CPUShape.hpp */, - 48887462215B639D0079B12E /* CPUSigmoid.cpp */, - 48887441215B639D0079B12E /* CPUSigmoid.hpp */, - 4888742E215B639D0079B12E /* CPUSize.cpp */, - 48887512215B639E0079B12E /* CPUSize.hpp */, - 4888745A215B639D0079B12E /* CPUSlice.cpp */, - 48887434215B639D0079B12E /* CPUSlice.hpp */, - 4888749C215B639E0079B12E /* CPUSliceTf.cpp */, - 48887454215B639D0079B12E /* CPUSliceTf.hpp */, - 4888741C215B639D0079B12E /* CPUSoftmax.cpp */, - 488874AC215B639E0079B12E /* CPUSoftmax.hpp */, - 9223E11E21D34C6B0067544A /* CPUSpaceToBatchND.cpp */, - 9223E11F21D34C6B0067544A /* CPUSpaceToBatchND.hpp */, - 48887494215B639E0079B12E /* CPUSpatialProduct.cpp */, - 4888744C215B639D0079B12E /* CPUSpatialProduct.hpp */, - 4888749F215B639E0079B12E /* CPUSqueeze.cpp */, - 48887457215B639D0079B12E /* CPUSqueeze.hpp */, - 4888745D215B639D0079B12E /* CPUStridedSlice.cpp */, - 48887436215B639D0079B12E /* CPUStridedSlice.hpp */, - 4888750C215B639E0079B12E /* CPUTanh.cpp */, - 48887427215B639D0079B12E /* CPUTanh.hpp */, - 488874AA215B639E0079B12E /* CPUTensorConvert.cpp */, - 4888741A215B639D0079B12E /* CPUTensorConvert.hpp */, - 488874A8215B639E0079B12E /* CPUTFQuantizedConv2D.cpp */, - 48887417215B639D0079B12E /* CPUTFQuantizedConv2D.hpp */, - 4888741D215B639D0079B12E /* CPUTile.cpp */, - 488874AB215B639E0079B12E /* CPUTile.hpp */, - 4888745F215B639D0079B12E /* CPUTopKV2.cpp */, - 48887438215B639D0079B12E /* CPUTopKV2.hpp */, - 48887458215B639D0079B12E /* CPUTranspose.cpp */, - 48887433215B639D0079B12E /* CPUTranspose.hpp */, - 48887460215B639D0079B12E /* CPUUnpack.cpp */, - 4888743A215B639D0079B12E /* CPUUnpack.hpp */, - 48887437215B639D0079B12E /* CPUWhere.cpp */, - 4888745B215B639D0079B12E /* CPUWhere.hpp */, - 486FDF4A2241E95700F487FB /* CPURuntime.cpp */, - 486FDF4B2241E95700F487FB /* CPURuntime.hpp */, - 48B904A022953DFF003116BB /* 
CPUSelect.cpp */, - 48B904A122953DFF003116BB /* CPUSelect.hpp */, - 48B904A422953E0F003116BB /* CPUZeroLike.cpp */, - 48B904A522953E0F003116BB /* CPUZeroLike.hpp */, - 4829D54E22AF5C340093E3BE /* CPUSetDiff1D.cpp */, - 4829D54F22AF5C340093E3BE /* CPUSetDiff1D.hpp */, - 4847D41E22C07E850049F3CA /* CPUPadding.cpp */, - 4847D41F22C07E850049F3CA /* CPUPadding.hpp */, + 92FF013823AA0B4E00AC97F6 /* arm */, + 92FF021B23AA0B5600AC97F6 /* compute */, + 92FF00F223AA0B4A00AC97F6 /* CPUArgMax.cpp */, + 92FF01C023AA0B4E00AC97F6 /* CPUArgMax.hpp */, + 92FF01DA23AA0B5000AC97F6 /* CPUAsString.cpp */, + 92FF00FA23AA0B4A00AC97F6 /* CPUAsString.hpp */, + 92FF012F23AA0B4E00AC97F6 /* CPUBackend.cpp */, + 92FF021223AA0B5600AC97F6 /* CPUBackend.hpp */, + 92FF011323AA0B4C00AC97F6 /* CPUBatchMatMul.cpp */, + 92FF020523AA0B5300AC97F6 /* CPUBatchMatMul.hpp */, + 92FF010923AA0B4B00AC97F6 /* CPUBatchToSpaceND.cpp */, + 92FF01F223AA0B5200AC97F6 /* CPUBatchToSpaceND.hpp */, + 92FF012223AA0B4D00AC97F6 /* CPUBinary.cpp */, + 92FF01F723AA0B5200AC97F6 /* CPUBinary.hpp */, + 92FF010323AA0B4B00AC97F6 /* CPUBroadcastTo.cpp */, + 92FF013023AA0B4E00AC97F6 /* CPUBroadcastTo.hpp */, + 92FF01CE23AA0B4F00AC97F6 /* CPUCast.cpp */, + 92FF011423AA0B4C00AC97F6 /* CPUCast.hpp */, + 92FF01DF23AA0B5000AC97F6 /* CPUConcat.cpp */, + 92FF012D23AA0B4D00AC97F6 /* CPUConcat.hpp */, + 92FF012923AA0B4D00AC97F6 /* CPUConst.cpp */, + 92FF01ED23AA0B5100AC97F6 /* CPUConst.hpp */, + 92FF011023AA0B4C00AC97F6 /* CPUConv2DBackPropFilter.cpp */, + 92FF010723AA0B4B00AC97F6 /* CPUConv2DBackPropFilter.hpp */, + 92FF012B23AA0B4D00AC97F6 /* CPUConvInt8.cpp */, + 92FF024F23AA0B5900AC97F6 /* CPUConvInt8.hpp */, + 92FF00F323AA0B4A00AC97F6 /* CPUConvolution.cpp */, + 92FF01D723AA0B5000AC97F6 /* CPUConvolution.hpp */, + 92FF01F523AA0B5200AC97F6 /* CPUConvolution3D.cpp */, + 92FF01C823AA0B4F00AC97F6 /* CPUConvolution3D.hpp */, + 92FF01F823AA0B5200AC97F6 /* CPUConvolutionDepthwise.cpp */, + 92FF024C23AA0B5700AC97F6 /* 
CPUConvolutionDepthwise.hpp */, + 92FF00FF23AA0B4B00AC97F6 /* CPUCosineSimilarity.cpp */, + 92FF025123AA0B5900AC97F6 /* CPUCosineSimilarity.hpp */, + 92FF01D223AA0B4F00AC97F6 /* CPUCrop.cpp */, + 92FF021823AA0B5600AC97F6 /* CPUCrop.hpp */, + 92FF00DF23AA0B4900AC97F6 /* CPUCropAndResize.cpp */, + 92FF012723AA0B4D00AC97F6 /* CPUCropAndResize.hpp */, + 92FF021323AA0B5600AC97F6 /* CPUDeconvolution.cpp */, + 92FF010423AA0B4B00AC97F6 /* CPUDeconvolution.hpp */, + 92FF00FD23AA0B4A00AC97F6 /* CPUDeconvolutionDepthwise.cpp */, + 92FF011C23AA0B4D00AC97F6 /* CPUDeconvolutionDepthwise.hpp */, + 92FF01CF23AA0B4F00AC97F6 /* CPUDepthToSpace.cpp */, + 92FF024E23AA0B5700AC97F6 /* CPUDepthToSpace.hpp */, + 92FF021723AA0B5600AC97F6 /* CPUDepthwiseConvInt8.cpp */, + 92FF011F23AA0B4D00AC97F6 /* CPUDepthwiseConvInt8.hpp */, + 92FF012E23AA0B4E00AC97F6 /* CPUDequantize.cpp */, + 92FF010623AA0B4B00AC97F6 /* CPUDequantize.hpp */, + 92FF010D23AA0B4C00AC97F6 /* CPUDetectionOutput.cpp */, + 92FF01E723AA0B5100AC97F6 /* CPUDetectionOutput.hpp */, + 92FF01FF23AA0B5200AC97F6 /* CPUDetectionPostProcess.cpp */, + 92FF01DB23AA0B5000AC97F6 /* CPUDetectionPostProcess.hpp */, + 92FF01BF23AA0B4E00AC97F6 /* CPUDilation2D.cpp */, + 92FF00D823AA0B4800AC97F6 /* CPUDilation2D.hpp */, + 92FF024523AA0B5700AC97F6 /* CPUEltwise.cpp */, + 92FF020D23AA0B5500AC97F6 /* CPUEltwise.hpp */, + 92FF01F423AA0B5200AC97F6 /* CPUEltwiseInt8.cpp */, + 92FF011523AA0B4C00AC97F6 /* CPUEltwiseInt8.hpp */, + 92FF01D623AA0B5000AC97F6 /* CPUElu.cpp */, + 92FF00EE23AA0B4A00AC97F6 /* CPUElu.hpp */, + 92FF01F323AA0B5200AC97F6 /* CPUExpandDims.cpp */, + 92FF00E223AA0B4900AC97F6 /* CPUExpandDims.hpp */, + 92FF01E323AA0B5000AC97F6 /* CPUFill.cpp */, + 92FF00F623AA0B4A00AC97F6 /* CPUFill.hpp */, + 92FF010523AA0B4B00AC97F6 /* CPUFixedPoint.hpp */, + 92FF00E123AA0B4900AC97F6 /* CPUFloatToInt8.cpp */, + 92FF00FE23AA0B4B00AC97F6 /* CPUFloatToInt8.hpp */, + 92FF021123AA0B5600AC97F6 /* CPUGather.cpp */, + 92FF012623AA0B4D00AC97F6 /* CPUGather.hpp 
*/, + 92FF01C523AA0B4F00AC97F6 /* CPUGatherND.cpp */, + 92FF01C723AA0B4F00AC97F6 /* CPUGatherND.hpp */, + 92FF024D23AA0B5700AC97F6 /* CPUGatherV2.cpp */, + 92FF00DA23AA0B4800AC97F6 /* CPUGatherV2.hpp */, + 92FF01E123AA0B5000AC97F6 /* CPUInnerProduct.cpp */, + 92FF01EA23AA0B5100AC97F6 /* CPUInnerProduct.hpp */, + 92FF013123AA0B4E00AC97F6 /* CPUInstanceNorm.cpp */, + 92FF01FA23AA0B5200AC97F6 /* CPUInstanceNorm.hpp */, + 92FF00ED23AA0B4900AC97F6 /* CPUInt8ToFloat.cpp */, + 92FF00F423AA0B4A00AC97F6 /* CPUInt8ToFloat.hpp */, + 92FF024623AA0B5700AC97F6 /* CPUInterp.cpp */, + 92FF00DB23AA0B4800AC97F6 /* CPUInterp.hpp */, + 92FF020723AA0B5300AC97F6 /* CPULinSpace.cpp */, + 92FF010C23AA0B4B00AC97F6 /* CPULinSpace.hpp */, + 92FF011B23AA0B4C00AC97F6 /* CPULRN.cpp */, + 92FF010E23AA0B4C00AC97F6 /* CPULRN.hpp */, + 92FF01EE23AA0B5100AC97F6 /* CPULSTM.cpp */, + 92FF01FD23AA0B5200AC97F6 /* CPULSTM.hpp */, + 92FF011223AA0B4C00AC97F6 /* CPUMatMul.cpp */, + 92FF024B23AA0B5700AC97F6 /* CPUMatMul.hpp */, + 92FF00EB23AA0B4900AC97F6 /* CPUMatrixBandPart.cpp */, + 92FF020E23AA0B5500AC97F6 /* CPUMatrixBandPart.hpp */, + 92FF020623AA0B5300AC97F6 /* CPUMoments.cpp */, + 92FF013223AA0B4E00AC97F6 /* CPUMoments.hpp */, + 92FF00E823AA0B4900AC97F6 /* CPUNonMaxSuppressionV2.cpp */, + 92FF024823AA0B5700AC97F6 /* CPUNonMaxSuppressionV2.hpp */, + 92FF020223AA0B5300AC97F6 /* CPUNormalize.cpp */, + 92FF024A23AA0B5700AC97F6 /* CPUNormalize.hpp */, + 92FF01D823AA0B5000AC97F6 /* CPUOneHot.cpp */, + 92FF01D123AA0B4F00AC97F6 /* CPUOneHot.hpp */, + 92FF025023AA0B5900AC97F6 /* CPUOPRegister.cpp */, + 92FF011A23AA0B4C00AC97F6 /* CPUPack.cpp */, + 92FF024223AA0B5600AC97F6 /* CPUPack.hpp */, + 92FF00DD23AA0B4900AC97F6 /* CPUPadding.cpp */, + 92FF00D423AA0B4800AC97F6 /* CPUPadding.hpp */, + 92FF01DD23AA0B5000AC97F6 /* CPUPermute.cpp */, + 92FF01E923AA0B5100AC97F6 /* CPUPermute.hpp */, + 92FF01F123AA0B5200AC97F6 /* CPUPool.cpp */, + 92FF00F823AA0B4A00AC97F6 /* CPUPool.hpp */, + 92FF020123AA0B5300AC97F6 /* 
CPUPoolGrad.cpp */, + 92FF00D923AA0B4800AC97F6 /* CPUPoolGrad.hpp */, + 92FF00D723AA0B4800AC97F6 /* CPUPoolInt8.cpp */, + 92FF00F123AA0B4A00AC97F6 /* CPUPoolInt8.hpp */, + 92FF010223AA0B4B00AC97F6 /* CPUPriorbox.cpp */, + 92FF021023AA0B5500AC97F6 /* CPUPriorbox.hpp */, + 92FF012C23AA0B4D00AC97F6 /* CPUProposal.cpp */, + 92FF00E423AA0B4900AC97F6 /* CPUProposal.hpp */, + 92FF00D523AA0B4800AC97F6 /* CPUQuanConvolutionDepthwise.cpp */, + 92FF00F923AA0B4A00AC97F6 /* CPUQuanConvolutionDepthwise.hpp */, + 92FF00EC23AA0B4900AC97F6 /* CPUQuantizationUtils.hpp */, + 92FF021423AA0B5600AC97F6 /* CPUQuantizedAdd.cpp */, + 92FF013523AA0B4E00AC97F6 /* CPUQuantizedAdd.hpp */, + 92FF00E323AA0B4900AC97F6 /* CPUQuantizedAvgPool.cpp */, + 92FF01C623AA0B4F00AC97F6 /* CPUQuantizedAvgPool.hpp */, + 92FF01FC23AA0B5200AC97F6 /* CPUQuantizedConcat.cpp */, + 92FF01E223AA0B5000AC97F6 /* CPUQuantizedConcat.hpp */, + 92FF012123AA0B4D00AC97F6 /* CPUQuantizedLogistic.cpp */, + 92FF020923AA0B5300AC97F6 /* CPUQuantizedLogistic.hpp */, + 92FF011823AA0B4C00AC97F6 /* CPUQuantizedMaxPool.cpp */, + 92FF012423AA0B4D00AC97F6 /* CPUQuantizedMaxPool.hpp */, + 92FF01DE23AA0B5000AC97F6 /* CPUQuantizedReshape.cpp */, + 92FF020F23AA0B5500AC97F6 /* CPUQuantizedReshape.hpp */, + 92FF012A23AA0B4D00AC97F6 /* CPUQuantizedSoftmax.cpp */, + 92FF01EF23AA0B5100AC97F6 /* CPUQuantizedSoftmax.hpp */, + 92FF020B23AA0B5300AC97F6 /* CPURange.cpp */, + 92FF011123AA0B4C00AC97F6 /* CPURange.hpp */, + 92FF024323AA0B5600AC97F6 /* CPURank.cpp */, + 92FF01C223AA0B4E00AC97F6 /* CPURank.hpp */, + 92FF00E523AA0B4900AC97F6 /* CPUReduceJoin.cpp */, + 92FF024723AA0B5700AC97F6 /* CPUReduceJoin.hpp */, + 92FF01C323AA0B4F00AC97F6 /* CPUReduction.cpp */, + 92FF010A23AA0B4B00AC97F6 /* CPUReduction.hpp */, + 92FF01FE23AA0B5200AC97F6 /* CPURelu.cpp */, + 92FF01DC23AA0B5000AC97F6 /* CPURelu.hpp */, + 92FF01BE23AA0B4E00AC97F6 /* CPUReluGrad.cpp */, + 92FF011D23AA0B4D00AC97F6 /* CPUReluGrad.hpp */, + 92FF025423AA0B5900AC97F6 /* CPUReshape.cpp */, + 
92FF021A23AA0B5600AC97F6 /* CPUReshape.hpp */, + 92FF01EC23AA0B5100AC97F6 /* CPUResize.cpp */, + 92FF01E623AA0B5100AC97F6 /* CPUResize.hpp */, + 92FF00FB23AA0B4A00AC97F6 /* CPUReverseSequence.cpp */, + 92FF00E923AA0B4900AC97F6 /* CPUReverseSequence.hpp */, + 92FF01EB23AA0B5100AC97F6 /* CPURNNSequenceGRU.cpp */, + 92FF00DE23AA0B4900AC97F6 /* CPURNNSequenceGRU.hpp */, + 92FF00DC23AA0B4900AC97F6 /* CPUROIPooling.cpp */, + 92FF01F923AA0B5200AC97F6 /* CPUROIPooling.hpp */, + 92FF01F023AA0B5200AC97F6 /* CPURuntime.cpp */, + 92FF01E823AA0B5100AC97F6 /* CPURuntime.hpp */, + 92FF01E423AA0B5100AC97F6 /* CPUScale.cpp */, + 92FF011923AA0B4C00AC97F6 /* CPUScale.hpp */, + 92FF01D423AA0B5000AC97F6 /* CPUScatterNd.cpp */, + 92FF00F023AA0B4A00AC97F6 /* CPUScatterNd.hpp */, + 92FF01D523AA0B5000AC97F6 /* CPUSelect.cpp */, + 92FF00E023AA0B4900AC97F6 /* CPUSelect.hpp */, + 92FF00E723AA0B4900AC97F6 /* CPUSelu.cpp */, + 92FF012523AA0B4D00AC97F6 /* CPUSelu.hpp */, + 92FF020323AA0B5300AC97F6 /* CPUSetDiff1D.cpp */, + 92FF01CD23AA0B4F00AC97F6 /* CPUSetDiff1D.hpp */, + 92FF01C123AA0B4E00AC97F6 /* CPUShape.cpp */, + 92FF01E023AA0B5000AC97F6 /* CPUShape.hpp */, + 92FF01FB23AA0B5200AC97F6 /* CPUSigmoid.cpp */, + 92FF020A23AA0B5300AC97F6 /* CPUSigmoid.hpp */, + 92FF012023AA0B4D00AC97F6 /* CPUSize.cpp */, + 92FF010123AA0B4B00AC97F6 /* CPUSize.hpp */, + 92FF011723AA0B4C00AC97F6 /* CPUSlice.cpp */, + 92FF020023AA0B5300AC97F6 /* CPUSlice.hpp */, + 92FF01D023AA0B4F00AC97F6 /* CPUSliceTf.cpp */, + 92FF00EA23AA0B4900AC97F6 /* CPUSliceTf.hpp */, + 92FF020423AA0B5300AC97F6 /* CPUSoftmax.cpp */, + 92FF010823AA0B4B00AC97F6 /* CPUSoftmax.hpp */, + 92FF013723AA0B4E00AC97F6 /* CPUSoftmaxGrad.cpp */, + 92FF010023AA0B4B00AC97F6 /* CPUSoftmaxGrad.hpp */, + 92FF00F723AA0B4A00AC97F6 /* CPUSpaceToBatchND.cpp */, + 92FF021523AA0B5600AC97F6 /* CPUSpaceToBatchND.hpp */, + 92FF012823AA0B4D00AC97F6 /* CPUSpaceToDepth.cpp */, + 92FF01C923AA0B4F00AC97F6 /* CPUSpaceToDepth.hpp */, + 92FF01CA23AA0B4F00AC97F6 /* 
CPUSpatialProduct.cpp */, + 92FF010F23AA0B4C00AC97F6 /* CPUSpatialProduct.hpp */, + 92FF021623AA0B5600AC97F6 /* CPUSqueeze.cpp */, + 92FF00D623AA0B4800AC97F6 /* CPUSqueeze.hpp */, + 92FF01C423AA0B4F00AC97F6 /* CPUStridedSlice.cpp */, + 92FF00E623AA0B4900AC97F6 /* CPUStridedSlice.hpp */, + 92FF00D323AA0B4800AC97F6 /* CPUTanh.cpp */, + 92FF01CB23AA0B4F00AC97F6 /* CPUTanh.hpp */, + 92FF025223AA0B5900AC97F6 /* CPUTensorConvert.cpp */, + 92FF020823AA0B5300AC97F6 /* CPUTensorConvert.hpp */, + 92FF011623AA0B4C00AC97F6 /* CPUTFQuantizedConv2D.cpp */, + 92FF01D923AA0B5000AC97F6 /* CPUTFQuantizedConv2D.hpp */, + 92FF021923AA0B5600AC97F6 /* CPUThreshold.cpp */, + 92FF01D323AA0B4F00AC97F6 /* CPUThreshold.hpp */, + 92FF024423AA0B5600AC97F6 /* CPUTile.cpp */, + 92FF01CC23AA0B4F00AC97F6 /* CPUTile.hpp */, + 92FF013323AA0B4E00AC97F6 /* CPUTopKV2.cpp */, + 92FF025323AA0B5900AC97F6 /* CPUTopKV2.hpp */, + 92FF00FC23AA0B4A00AC97F6 /* CPUTranspose.cpp */, + 92FF024923AA0B5700AC97F6 /* CPUTranspose.hpp */, + 92FF013423AA0B4E00AC97F6 /* CPUUnary.cpp */, + 92FF00F523AA0B4A00AC97F6 /* CPUUnary.hpp */, + 92FF01F623AA0B5200AC97F6 /* CPUUnpack.cpp */, + 92FF00EF23AA0B4A00AC97F6 /* CPUUnpack.hpp */, + 92FF01E523AA0B5100AC97F6 /* CPUUnravelIndex.cpp */, + 92FF020C23AA0B5500AC97F6 /* CPUUnravelIndex.hpp */, + 92FF013623AA0B4E00AC97F6 /* CPUWhere.cpp */, + 92FF010B23AA0B4B00AC97F6 /* CPUWhere.hpp */, + 92FF012323AA0B4D00AC97F6 /* CPUZeroLike.cpp */, + 92FF011E23AA0B4D00AC97F6 /* CPUZeroLike.hpp */, ); name = cpu; path = backend/cpu; sourceTree = ""; }; - 48887469215B639D0079B12E /* compute */ = { - isa = PBXGroup; - children = ( - C422D7C4233A0F0E00FD59D0 /* ConvolutionWinograd3D.cpp */, - C422D7C5233A0F0E00FD59D0 /* ConvolutionWinograd3D.hpp */, - C422D7B823320B2900FD59D0 /* Convolution3D3x3.cpp */, - C422D7B923320B2A00FD59D0 /* Convolution3D3x3.hpp */, - 48A8A62521D47B5A00C2B9A7 /* OptimizedComputer.cpp */, - 48A8A62421D47B5A00C2B9A7 /* OptimizedComputer.hpp */, - 4888746B215B639D0079B12E /* 
CommonOptFunction.cpp */, - 4888746D215B639D0079B12E /* Convolution3x3.cpp */, - 4888746F215B639D0079B12E /* CommonOptFunction.h */, - 48887470215B639D0079B12E /* ConvolutionWinograd.cpp */, - 48887471215B639D0079B12E /* Int8FunctionsOpt.cpp */, - 48887473215B639D0079B12E /* ConvOpt.cpp */, - 48887474215B639D0079B12E /* ConvolutionTiledExecutor.hpp */, - 48887476215B639D0079B12E /* ConvolutionIntFactory.cpp */, - 48887477215B639D0079B12E /* ConvolutionGroup.hpp */, - 48887478215B639D0079B12E /* ConvolutionFloatFactory.h */, - 4888747A215B639D0079B12E /* ConvolutionInt8Executor.cpp */, - 4888747B215B639D0079B12E /* ResizeFunction.h */, - 4888747D215B639D0079B12E /* ConvolutionIntFactory.hpp */, - 4888747F215B639D0079B12E /* ConvolutionGroup.cpp */, - 48887481215B639D0079B12E /* ConvolutionFloatFactory.cpp */, - 48887482215B639D0079B12E /* ConvolutionInt8Executor.hpp */, - 48887483215B639D0079B12E /* ResizeFunction.cpp */, - 48887484215B639D0079B12E /* Convolution3x3.hpp */, - 48887485215B639D0079B12E /* ConvOpt.h */, - 48887487215B639D0079B12E /* ConvolutionWinograd.hpp */, - 48887489215B639D0079B12E /* Int8FunctionsOpt.h */, - 4888748A215B639D0079B12E /* ConvolutionTiledExecutor.cpp */, - 483CD480216B1C7B00B05BE9 /* DeconvolutionWithStride.cpp */, - 483CD481216B1C7B00B05BE9 /* DeconvolutionWithStride.hpp */, - 483CD484216B2F0400B05BE9 /* WinogradOptFunction.cpp */, - 483CD485216B2F0400B05BE9 /* WinogradOptFunction.hpp */, - 48AE9E9D2211950B009DB6F4 /* StrassenMatmulComputor.cpp */, - 48AE9E9E2211950B009DB6F4 /* StrassenMatmulComputor.hpp */, - 48AE9EA12212B2C2009DB6F4 /* Convolution1x1Strassen.cpp */, - 48AE9EA22212B2C2009DB6F4 /* Convolution1x1Strassen.hpp */, - 48EB45E42254B9D2006C2322 /* ConvolutionDepthwise3x3.cpp */, - 48EB45E52254B9D2006C2322 /* ConvolutionDepthwise3x3.hpp */, - ); - path = compute; - sourceTree = ""; - }; - 488874AD215B639E0079B12E /* arm */ = { - isa = PBXGroup; - children = ( - 488874AE215B639E0079B12E /* MNNAsmGlobal.h */, - 
488874AF215B639E0079B12E /* arm32 */, - 488874D9215B639E0079B12E /* arm64 */, - ); - path = arm; - sourceTree = ""; - }; - 488874AF215B639E0079B12E /* arm32 */ = { - isa = PBXGroup; - children = ( - EB288364230EB05C00837188 /* MNNScaleAddInt8.S */, - 48C5E79D2306C84400EAC2A6 /* MNNGemmint8to32_8x4_Unit.S */, - 487970DE22C9BF5E00795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S */, - 487970DC22C9BF5E00795502 /* MNNGemmInt8AddBiasScale_8x4_Unit.S */, - 487970DD22C9BF5E00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */, - 487970E022C9BF5E00795502 /* MNNInt8ScaleToFloat.S */, - 487970DF22C9BF5E00795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */, - 487970E122C9BF5E00795502 /* MNNReluInt8.S */, - C425F8732292A31D00B4682D /* MNNPowC8.S */, - 486B4BC0222D4831001E73E3 /* MNNMatrixMax.S */, - 486B4BB8222901D5001E73E3 /* MNNMatrixProd.S */, - 48A8A62A21D5FE3100C2B9A7 /* MNNNV21ToRGBAUnit.S */, - 48A8A62021D3569800C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S */, - 483CD48C216CE3B500B05BE9 /* MNNCopyC4WithStride.S */, - 488874B0215B639E0079B12E /* MNNFloat2Int8.S */, - 488874B1215B639E0079B12E /* MNNGemmFloatUnit_4.S */, - 488874B2215B639E0079B12E /* MNNConvRunForLineDepthWiseInt8.S */, - 488874B3215B639E0079B12E /* MNNGemmInt16to32_4x4_Common.S */, - 488874B4215B639E0079B12E /* MNNConvRunForUnitDepthWiseInt8.S */, - 488874B5215B639E0079B12E /* MNNMinFloat.S */, - 488874B6215B639E0079B12E /* MNNBilinearProcC1.S */, - 488874B7215B639E0079B12E /* MNNMaxFloat.S */, - 488874BB215B639E0079B12E /* MNNCubicLineC4.S */, - 488874C2215B639E0079B12E /* MNNDeconvRunForUnitDepthWise.S */, - 488874C8215B639E0079B12E /* MNNWinogradMatrixProductLeft.S */, - 488874CB215B639E0079B12E /* MNNConvRunForLineDepthwise.S */, - 488874CC215B639E0079B12E /* MNNConvSlideWindowMiddle.S */, - 488874CD215B639E0079B12E /* MNNScaleBias2FloatC4.S */, - 488874CF215B639E0079B12E /* MNNAddBiasRelu6.S */, - 488874D0215B639E0079B12E /* MNNGemmInt16to32_4x4_Unit.S */, - 488874D1215B639E0079B12E /* MNNScaleAndAddBias.S */, 
- 488874D4215B639E0079B12E /* MNNGemmFloatCommon_4.S */, - 488874D5215B639E0079B12E /* MNNCoefLine.S */, - 488874D7215B639E0079B12E /* MNNWinogradMatrixProductRight.S */, - 4888773D215CD3BF0079B12E /* MNNBlitC3ToFloatRGBA.S */, - 4888773F215CD3D00079B12E /* MNNBlitC1ToFloatRGBA.S */, - 483CD488216CDDA100B05BE9 /* MNNAddC4WithStride.S */, - 485DD424218161E100129159 /* MNNConvRunForUnitDepthWiseUint8.S */, - 485DD4262181898C00129159 /* MNNUInt8ToInt16WithOffsetC4Common.S */, - 485DD4282181938C00129159 /* MNNQuanToDestUint8.S */, - 485DD42A21819FB000129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S */, - 485DD42C2181A68F00129159 /* MNNConvRunForLineDepthWiseUint8.S */, - 48BF218121A3E4C300AFF78E /* MNNSamplerC4BilinearOpt.S */, - 48BF218521A4257500AFF78E /* MNNSamplerC1BilinearOpt.S */, - 48BF21BD21ABBDA300AFF78E /* MNNLoadU8AndSum.S */, - 48BF21F321CA43AE00AFF78E /* MNNSamplerC4NearestOpt.S */, - 48A8A60121CDF55E00C2B9A7 /* MNNSamplerC1NearestOpt.S */, - 48A8A61C21D20BE700C2B9A7 /* MNNNV21ToRGBUnit.S */, - 48DA297C21F1F7CF00E3BEB2 /* MNNExpC8.S */, - 71E8789E2203E88500268E24 /* MNNNV21ToBGRUnit.S */, - 48C054912205B91A00E91945 /* MNNPackC4.S */, - 48C054932205B94400E91945 /* MNNUnPackC4.S */, - 48C054992205BB8400E91945 /* MNNConvSlideWindowBorder.S */, - 48C0549E22081AC200E91945 /* MNNAddBias.S */, - 48C054A022081B5B00E91945 /* MNNReluWithSlope.S */, - 48C054A222081C9B00E91945 /* MNNAddBiasRelu.S */, - 48C054A422081CDA00E91945 /* MNNReluWithSlopeChannel.S */, - 48C054AE220A758B00E91945 /* MNNCubicSampleC4.S */, - 48C054B0220A762C00E91945 /* MNNConvRunForUnitDepthWise.S */, - 48AE9EA52212D3F9009DB6F4 /* MNNMatrixSub.S */, - 48AE9EA72212D403009DB6F4 /* MNNMatrixAdd.S */, - 48AE9EAD22151E20009DB6F4 /* MNNStrassenMergeCFunction.S */, - 48AE9EB122154C9D009DB6F4 /* MNNGemmFloatOne_4.S */, - 48EB45E822559525006C2322 /* MNNConvDwF23MulTransUnit.S */, - 48EB45EA2255B70C006C2322 /* MNNConvDwF23SourceTransUnit.S */, - ); - path = arm32; - sourceTree = ""; - }; - 
488874D9215B639E0079B12E /* arm64 */ = { - isa = PBXGroup; - children = ( - EB288366230EB06600837188 /* MNNScaleAddInt8.S */, - 48C5E79F2306C84D00EAC2A6 /* MNNGemmint8to32_8x4_Unit.S */, - 487970F622C9C19F00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */, - 487970E922C9BF7200795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S */, - 487970EC22C9BF7200795502 /* MNNGemmInt8AddBiasScale_16x4_Unit_D4.S */, - 487970E822C9BF7200795502 /* MNNInt8ScaleToFloat.S */, - 487970EA22C9BF7200795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */, - 487970EB22C9BF7200795502 /* MNNReluInt8.S */, - C425F8752292A32A00B4682D /* MNNPowC8.S */, - 48EB45EC2255D270006C2322 /* MNNConvDwF23MulTransUnit.S */, - 48EB45ED2255D270006C2322 /* MNNConvDwF23SourceTransUnit.S */, - 486B4BC2222D4845001E73E3 /* MNNMatrixMax.S */, - 486B4BBA222901E5001E73E3 /* MNNMatrixProd.S */, - 48AE9EB32215628D009DB6F4 /* MNNGemmFloatOne_4.S */, - 48AE9EAF221539C2009DB6F4 /* MNNStrassenMergeCFunction.S */, - 48AE9EA92212E94F009DB6F4 /* MNNMatrixAdd.S */, - 48AE9EAA2212E94F009DB6F4 /* MNNMatrixSub.S */, - 48DA297E21F2051800E3BEB2 /* MNNExpC8.S */, - 48A8A62821D5FE1D00C2B9A7 /* MNNNV21ToRGBAUnit.S */, - 48A8A62221D37FB500C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S */, - 48A8A61E21D235DF00C2B9A7 /* MNNNV21ToRGBUnit.S */, - 48A8A60321CDF86F00C2B9A7 /* MNNSamplerC1NearestOpt.S */, - 48A8A60421CDF86F00C2B9A7 /* MNNSamplerC4NearestOpt.S */, - 48BF21C021ABC45100AFF78E /* MNNLoadU8AndSum.S */, - 48BF218721A4380A00AFF78E /* MNNSamplerC1BilinearOpt.S */, - 48BF218321A4073500AFF78E /* MNNSamplerC4BilinearOpt.S */, - 485DD4362182B07B00129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S */, - 485DD4322182AE8000129159 /* MNNConvRunForLineDepthWiseUint8.S */, - 485DD4332182AE8100129159 /* MNNConvRunForUnitDepthWiseUint8.S */, - 485DD42E2181E94300129159 /* MNNQuanToDestUint8.S */, - 485DD42F2181E94300129159 /* MNNUInt8ToInt16WithOffsetC4Common.S */, - 483CD48E216CE3BB00B05BE9 /* MNNCopyC4WithStride.S */, - 483CD48A216CE20D00B05BE9 /* 
MNNAddC4WithStride.S */, - 48887742215CFF7B0079B12E /* MNNBlitC1ToFloatRGBA.S */, - 48887741215CFF7B0079B12E /* MNNBlitC3ToFloatRGBA.S */, - 488874DA215B639E0079B12E /* MNNFloat2Int8.S */, - 488874DB215B639E0079B12E /* MNNGemmFloatUnit_4.S */, - 488874DC215B639E0079B12E /* MNNConvRunForLineDepthWiseInt8.S */, - 488874DD215B639E0079B12E /* MNNGemmInt16to32_4x4_Common.S */, - 488874DE215B639E0079B12E /* MNNConvRunForUnitDepthWiseInt8.S */, - 488874DF215B639E0079B12E /* MNNMinFloat.S */, - 488874E0215B639E0079B12E /* MNNBilinearProcC1.S */, - 488874E1215B639E0079B12E /* MNNMaxFloat.S */, - 488874E5215B639E0079B12E /* MNNCubicLineC4.S */, - 488874EC215B639E0079B12E /* MNNDeconvRunForUnitDepthWise.S */, - 488874F3215B639E0079B12E /* MNNWinogradMatrixProductLeft.S */, - 488874F7215B639E0079B12E /* MNNConvRunForLineDepthwise.S */, - 488874F8215B639E0079B12E /* MNNConvSlideWindowMiddle.S */, - 488874F9215B639E0079B12E /* MNNScaleBias2FloatC4.S */, - 488874FB215B639E0079B12E /* MNNAddBiasRelu6.S */, - 488874FC215B639E0079B12E /* MNNGemmInt16to32_4x4_Unit.S */, - 488874FD215B639E0079B12E /* MNNScaleAndAddBias.S */, - 48887500215B639E0079B12E /* MNNGemmFloatCommon_4.S */, - 48887501215B639E0079B12E /* MNNCoefLine.S */, - 48887503215B639E0079B12E /* MNNWinogradMatrixProductRight.S */, - 71E878A12203E9D200268E24 /* MNNNV21ToBGRUnit.S */, - 48C054952205B9A500E91945 /* MNNPackC4.S */, - 48C054972205B9B400E91945 /* MNNUnPackC4.S */, - 48C0549B2205BC8C00E91945 /* MNNConvSlideWindowBorder.S */, - 48C054A6220A745900E91945 /* MNNAddBiasRelu.S */, - 48C054A8220A749100E91945 /* MNNAddBias.S */, - 48C054AA220A74B200E91945 /* MNNReluWithSlope.S */, - 48C054AC220A74D800E91945 /* MNNReluWithSlopeChannel.S */, - 48C054B2220A7A4600E91945 /* MNNCubicSampleC4.S */, - 48C054B4220A7A9600E91945 /* MNNConvRunForUnitDepthWise.S */, - ); - path = arm64; - sourceTree = ""; - }; 48887566215B639E0079B12E /* math */ = { isa = PBXGroup; children = ( 4888756A215B639E0079B12E /* Matrix.cpp */, - 
48887568215B639E0079B12E /* Matrix.hpp */, + 1F501EF42397BA31004E8721 /* Matrix.hpp */, + 1F501EF52397BA31004E8721 /* Vec4.hpp */, 48887567215B639E0079B12E /* WingoradGenerater.cpp */, - 48887569215B639E0079B12E /* WingoradGenerater.hpp */, - 48EB45E32251AC9D006C2322 /* Vec4.hpp */, + 1F501EF62397BA31004E8721 /* WingoradGenerater.hpp */, ); path = math; sourceTree = ""; }; - 489BFA2E230E612400F6B785 /* Recovered References */ = { - isa = PBXGroup; - children = ( - 487970F922CE19EA00795502 /* FileLoader.cpp */, - 487970F822CE19EA00795502 /* FileLoader.hpp */, - C49FCD7E231D1BE200F358B6 /* MNNConvSlideWindowMiddle.cpp */, - C49FCD84231D1BE200F358B6 /* DispatchHelper.cpp */, - C49FCD82231D1BE200F358B6 /* MNNMatrixSub.cpp */, - C49FCD7F231D1BE200F358B6 /* MNNGemmFloatCommon_4.cpp */, - C49FCD81231D1BE200F358B6 /* MNNMatrixAdd.cpp */, - C49FCD7D231D1BE200F358B6 /* CommonOptFunction.cpp */, - C49FCD74231D1BE200F358B6 /* CommonOptFunction.cpp */, - C49FCD76231D1BE200F358B6 /* MNNGemmFloatCommon_4.cpp */, - C49FCD88231D1BE200F358B6 /* ThreadPool.cpp */, - C49FCD79231D1BE200F358B6 /* MNNMatrixAdd.cpp */, - C49FCD87231D1BE200F358B6 /* CPUConvolution3D.cpp */, - C49FCD78231D1BE200F358B6 /* MNNConvSlideWindowBorder.cpp */, - C49FCD83231D1BE200F358B6 /* FunctionDispatcher.cpp */, - C49FCD7A231D1BE200F358B6 /* MNNMatrixSub.cpp */, - C49FCD75231D1BE200F358B6 /* MNNConvSlideWindowMiddle.cpp */, - C49FCD77231D1BE200F358B6 /* FunctionSummary.hpp */, - C49FCD80231D1BE200F358B6 /* FunctionSummary.hpp */, - C49FCD7B231D1BE200F358B6 /* DispatchHelper.hpp */, - C49FCD86231D1BE200F358B6 /* ThreadPool.hpp */, - C49FCD85231D1BE200F358B6 /* CPUConvolution3D.hpp */, - ); - name = "Recovered References"; - sourceTree = ""; - }; 48A8A60721D101A700C2B9A7 /* cv */ = { isa = PBXGroup; children = ( 48A8A60D21D101A700C2B9A7 /* ImageBlitter.cpp */, - 48A8A60921D101A700C2B9A7 /* ImageBlitter.hpp */, + 1F501EED2397BA26004E8721 /* ImageBlitter.hpp */, 48A8A60E21D101A700C2B9A7 /* ImageFloatBlitter.cpp 
*/, - 48A8A60A21D101A700C2B9A7 /* ImageFloatBlitter.hpp */, + 1F501EEB2397BA26004E8721 /* ImageFloatBlitter.hpp */, 48A8A60B21D101A700C2B9A7 /* ImageProcess.cpp */, 48A8A60C21D101A700C2B9A7 /* ImageSampler.cpp */, - 48A8A60821D101A700C2B9A7 /* ImageSampler.hpp */, + 1F501EEA2397BA26004E8721 /* ImageSampler.hpp */, 48A8A61721D101DD00C2B9A7 /* Matrix_CV.cpp */, - 48A8A61621D101DD00C2B9A7 /* SkNx_neon.h */, - 48A8A61821D101DE00C2B9A7 /* SkNx.h */, + 1F501EEC2397BA26004E8721 /* SkNx_neon.h */, + 1F501EEE2397BA26004E8721 /* SkNx.h */, ); path = cv; sourceTree = ""; @@ -2515,31 +2805,6 @@ path = ../../../test/op; sourceTree = ""; }; - 921722CC21DDF63A004583BF /* schema */ = { - isa = PBXGroup; - children = ( - 921722CE21DDF63A004583BF /* current */, - ); - name = schema; - path = ../../../schema; - sourceTree = ""; - }; - 921722CE21DDF63A004583BF /* current */ = { - isa = PBXGroup; - children = ( - 48070743231E52E300528CE5 /* BasicOptimizer_generated.h */, - 921722CF21DDF63A004583BF /* MNN_generated.h */, - 92D765972228176500178BE5 /* CaffeOp_generated.h */, - 921722D121DDF63A004583BF /* GpuLibrary_generated.h */, - 921722D321DDF63A004583BF /* Tensor_generated.h */, - 92D765962228176500178BE5 /* TensorflowOp_generated.h */, - 921722D521DDF63A004583BF /* TFQuantizeOp_generated.h */, - 921722D021DDF63A004583BF /* Type_generated.h */, - 92D765982228176500178BE5 /* UserDefine_generated.h */, - ); - path = current; - sourceTree = ""; - }; 925F018721FF1DF400E648A1 /* model */ = { isa = PBXGroup; children = ( @@ -2564,74 +2829,276 @@ path = Playground; sourceTree = ""; }; + 92FF013823AA0B4E00AC97F6 /* arm */ = { + isa = PBXGroup; + children = ( + 92FF013923AA0B4E00AC97F6 /* CMakeLists.txt */, + 92FF013A23AA0B4E00AC97F6 /* arm32 */, + 92FF017C23AA0B4E00AC97F6 /* arm64 */, + 92FF01BD23AA0B4E00AC97F6 /* MNNAsmGlobal.h */, + ); + path = arm; + sourceTree = ""; + }; + 92FF013A23AA0B4E00AC97F6 /* arm32 */ = { + isa = PBXGroup; + children = ( + 92FF013B23AA0B4E00AC97F6 /* 
MNNUInt8ToInt16WithOffsetC4Fast.S */, + 92FF013C23AA0B4E00AC97F6 /* MNNScaleAddInt8.S */, + 92FF013D23AA0B4E00AC97F6 /* MNNMatrixProd.S */, + 92FF013E23AA0B4E00AC97F6 /* MNNFloat2Int8.S */, + 92FF013F23AA0B4E00AC97F6 /* MNNSamplerC4NearestOpt.S */, + 92FF014023AA0B4E00AC97F6 /* MNNAddC4WithStride.S */, + 92FF014123AA0B4E00AC97F6 /* MNNQuanToDestUint8.S */, + 92FF014223AA0B4E00AC97F6 /* MNNLoadU8AndSum.S */, + 92FF014323AA0B4E00AC97F6 /* MNNCubicLineC4.S */, + 92FF014423AA0B4E00AC97F6 /* MNNAddBiasRelu6.S */, + 92FF014523AA0B4E00AC97F6 /* MNNStrassenMergeCFunction.S */, + 92FF014623AA0B4E00AC97F6 /* MNNBlitC1ToFloatRGBA.S */, + 92FF014723AA0B4E00AC97F6 /* MNNCopyC4WithStride.S */, + 92FF014823AA0B4E00AC97F6 /* MNNNV21ToBGRUnit.S */, + 92FF014923AA0B4E00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */, + 92FF014A23AA0B4E00AC97F6 /* MNNUnPackC4.S */, + 92FF014B23AA0B4E00AC97F6 /* MNNSamplerC1NearestOpt.S */, + 92FF014C23AA0B4E00AC97F6 /* MNNGemmFloatCommon_4.S */, + 92FF014D23AA0B4E00AC97F6 /* MNNNV21ToRGBUnit.S */, + 92FF014E23AA0B4E00AC97F6 /* MNNPackC4.S */, + 92FF014F23AA0B4E00AC97F6 /* MNNMinFloat.S */, + 92FF015023AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Common.S */, + 92FF015123AA0B4E00AC97F6 /* MNNMaxFloat.S */, + 92FF015223AA0B4E00AC97F6 /* MNNNV21ToRGBAUnit.S */, + 92FF015323AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S */, + 92FF015423AA0B4E00AC97F6 /* MNNScaleBias2FloatC4.S */, + 92FF015523AA0B4E00AC97F6 /* MNNMatrixMax.S */, + 92FF015623AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S */, + 92FF015723AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S */, + 92FF015823AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_8x4_Unit.S */, + 92FF015923AA0B4E00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S */, + 92FF015A23AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S */, + 92FF015B23AA0B4E00AC97F6 /* MNNScaleAndAddBias.S */, + 92FF015C23AA0B4E00AC97F6 /* MNNReluInt8.S */, + 92FF015D23AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S */, + 92FF015E23AA0B4E00AC97F6 /* 
MNNSamplerC4BilinearOpt.S */, + 92FF015F23AA0B4E00AC97F6 /* MNNBilinearProcC1.S */, + 92FF016023AA0B4E00AC97F6 /* MNNMatrixSub.S */, + 92FF016123AA0B4E00AC97F6 /* MNNPowC8.S */, + 92FF016223AA0B4E00AC97F6 /* MNNMatrixAdd.S */, + 92FF016323AA0B4E00AC97F6 /* MNNExpC8.S */, + 92FF016423AA0B4E00AC97F6 /* MNNAddBiasRelu.S */, + 92FF016523AA0B4E00AC97F6 /* MNNConvDwF23SourceTransUnit.S */, + 92FF016623AA0B4E00AC97F6 /* MNNWinogradMatrixProductLeft.S */, + 92FF016723AA0B4E00AC97F6 /* MNNDeconvRunForUnitDepthWise.S */, + 92FF016823AA0B4E00AC97F6 /* MNNSamplerC1BilinearOpt.S */, + 92FF016923AA0B4E00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S */, + 92FF016A23AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */, + 92FF016B23AA0B4E00AC97F6 /* MNNGemmFloatOne_4.S */, + 92FF016C23AA0B4E00AC97F6 /* MNNWinogradMatrixProductRight.S */, + 92FF016E23AA0B4E00AC97F6 /* MNNReluWithSlopeChannel.S */, + 92FF016F23AA0B4E00AC97F6 /* MNNAddBias.S */, + 92FF017023AA0B4E00AC97F6 /* MNNCubicSampleC4.S */, + 92FF017123AA0B4E00AC97F6 /* MNNCoefLine.S */, + 92FF017223AA0B4E00AC97F6 /* MNNBlitC3ToFloatRGBA.S */, + 92FF017323AA0B4E00AC97F6 /* MNNConvSlideWindowMiddle.S */, + 92FF017423AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S */, + 92FF017523AA0B4E00AC97F6 /* MNNInt8ScaleToFloat.S */, + 92FF017623AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWise.S */, + 92FF017723AA0B4E00AC97F6 /* MNNConvDwF23MulTransUnit.S */, + 92FF017823AA0B4E00AC97F6 /* MNNConvRunForLineDepthwise.S */, + 92FF017923AA0B4E00AC97F6 /* MNNGemmint8to32_8x4_Unit.S */, + 92FF017A23AA0B4E00AC97F6 /* MNNGemmFloatUnit_4.S */, + 92FF017B23AA0B4E00AC97F6 /* MNNConvSlideWindowBorder.S */, + ); + path = arm32; + sourceTree = ""; + }; + 92FF017C23AA0B4E00AC97F6 /* arm64 */ = { + isa = PBXGroup; + children = ( + 92FF017D23AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S */, + 92FF017E23AA0B4E00AC97F6 /* MNNScaleAddInt8.S */, + 92FF017F23AA0B4E00AC97F6 /* MNNMatrixProd.S */, + 92FF018023AA0B4E00AC97F6 /* MNNFloat2Int8.S */, + 
92FF018123AA0B4E00AC97F6 /* MNNSamplerC4NearestOpt.S */, + 92FF018223AA0B4E00AC97F6 /* MNNAddC4WithStride.S */, + 92FF018323AA0B4E00AC97F6 /* MNNQuanToDestUint8.S */, + 92FF018423AA0B4E00AC97F6 /* MNNLoadU8AndSum.S */, + 92FF018523AA0B4E00AC97F6 /* MNNCubicLineC4.S */, + 92FF018623AA0B4E00AC97F6 /* MNNAddBiasRelu6.S */, + 92FF018723AA0B4E00AC97F6 /* MNNStrassenMergeCFunction.S */, + 92FF018823AA0B4E00AC97F6 /* MNNBlitC1ToFloatRGBA.S */, + 92FF018923AA0B4E00AC97F6 /* MNNCopyC4WithStride.S */, + 92FF018A23AA0B4E00AC97F6 /* MNNNV21ToBGRUnit.S */, + 92FF018B23AA0B4E00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S */, + 92FF018C23AA0B4E00AC97F6 /* MNNUnPackC4.S */, + 92FF018D23AA0B4E00AC97F6 /* MNNSamplerC1NearestOpt.S */, + 92FF018E23AA0B4E00AC97F6 /* MNNGemmFloatCommon_4.S */, + 92FF018F23AA0B4E00AC97F6 /* MNNNV21ToRGBUnit.S */, + 92FF019023AA0B4E00AC97F6 /* MNNPackC4.S */, + 92FF019123AA0B4E00AC97F6 /* MNNMinFloat.S */, + 92FF019223AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Common.S */, + 92FF019323AA0B4E00AC97F6 /* MNNMaxFloat.S */, + 92FF019423AA0B4E00AC97F6 /* MNNNV21ToRGBAUnit.S */, + 92FF019523AA0B4E00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S */, + 92FF019623AA0B4E00AC97F6 /* MNNScaleBias2FloatC4.S */, + 92FF019723AA0B4E00AC97F6 /* MNNMatrixMax.S */, + 92FF019823AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S */, + 92FF019923AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S */, + 92FF019A23AA0B4E00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S */, + 92FF019B23AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S */, + 92FF019C23AA0B4E00AC97F6 /* MNNScaleAndAddBias.S */, + 92FF019D23AA0B4E00AC97F6 /* MNNReluInt8.S */, + 92FF019E23AA0B4E00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S */, + 92FF019F23AA0B4E00AC97F6 /* MNNSamplerC4BilinearOpt.S */, + 92FF01A023AA0B4E00AC97F6 /* MNNBilinearProcC1.S */, + 92FF01A123AA0B4E00AC97F6 /* MNNMatrixSub.S */, + 92FF01A223AA0B4E00AC97F6 /* MNNPowC8.S */, + 92FF01A323AA0B4E00AC97F6 /* MNNMatrixAdd.S */, + 92FF01A423AA0B4E00AC97F6 /* 
MNNExpC8.S */, + 92FF01A523AA0B4E00AC97F6 /* MNNAddBiasRelu.S */, + 92FF01A623AA0B4E00AC97F6 /* MNNConvDwF23SourceTransUnit.S */, + 92FF01A723AA0B4E00AC97F6 /* MNNWinogradMatrixProductLeft.S */, + 92FF01A823AA0B4E00AC97F6 /* MNNDeconvRunForUnitDepthWise.S */, + 92FF01A923AA0B4E00AC97F6 /* MNNSamplerC1BilinearOpt.S */, + 92FF01AA23AA0B4E00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S */, + 92FF01AB23AA0B4E00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S */, + 92FF01AC23AA0B4E00AC97F6 /* MNNGemmFloatOne_4.S */, + 92FF01AD23AA0B4E00AC97F6 /* MNNWinogradMatrixProductRight.S */, + 92FF01AF23AA0B4E00AC97F6 /* MNNReluWithSlopeChannel.S */, + 92FF01B023AA0B4E00AC97F6 /* MNNAddBias.S */, + 92FF01B123AA0B4E00AC97F6 /* MNNCubicSampleC4.S */, + 92FF01B223AA0B4E00AC97F6 /* MNNCoefLine.S */, + 92FF01B323AA0B4E00AC97F6 /* MNNBlitC3ToFloatRGBA.S */, + 92FF01B423AA0B4E00AC97F6 /* MNNConvSlideWindowMiddle.S */, + 92FF01B523AA0B4E00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S */, + 92FF01B623AA0B4E00AC97F6 /* MNNInt8ScaleToFloat.S */, + 92FF01B723AA0B4E00AC97F6 /* MNNConvRunForUnitDepthWise.S */, + 92FF01B823AA0B4E00AC97F6 /* MNNConvDwF23MulTransUnit.S */, + 92FF01B923AA0B4E00AC97F6 /* MNNConvRunForLineDepthwise.S */, + 92FF01BA23AA0B4E00AC97F6 /* MNNGemmint8to32_8x4_Unit.S */, + 92FF01BB23AA0B4E00AC97F6 /* MNNGemmFloatUnit_4.S */, + 92FF01BC23AA0B4E00AC97F6 /* MNNConvSlideWindowBorder.S */, + ); + path = arm64; + sourceTree = ""; + }; + 92FF021B23AA0B5600AC97F6 /* compute */ = { + isa = PBXGroup; + children = ( + 92FF021C23AA0B5600AC97F6 /* Convolution1x1Strassen.hpp */, + 92FF021D23AA0B5600AC97F6 /* CommonOptFunction.cpp */, + 92FF021E23AA0B5600AC97F6 /* Convolution3D3x3.cpp */, + 92FF021F23AA0B5600AC97F6 /* StrassenMatmulComputor.cpp */, + 92FF022023AA0B5600AC97F6 /* Convolution3x3.cpp */, + 92FF022123AA0B5600AC97F6 /* CommonOptFunction.h */, + 92FF022223AA0B5600AC97F6 /* ConvolutionWinograd.cpp */, + 92FF022323AA0B5600AC97F6 /* Int8FunctionsOpt.cpp */, + 92FF022423AA0B5600AC97F6 
/* ConvolutionWinograd3D.cpp */, + 92FF022523AA0B5600AC97F6 /* ConvOpt.cpp */, + 92FF022623AA0B5600AC97F6 /* OptimizedComputer.cpp */, + 92FF022723AA0B5600AC97F6 /* DeconvolutionWithStride.hpp */, + 92FF022823AA0B5600AC97F6 /* ConvolutionTiledExecutor.hpp */, + 92FF022923AA0B5600AC97F6 /* ConvolutionIntFactory.cpp */, + 92FF022A23AA0B5600AC97F6 /* WinogradOptFunction.cpp */, + 92FF022B23AA0B5600AC97F6 /* ConvolutionGroup.hpp */, + 92FF022C23AA0B5600AC97F6 /* ConvolutionFloatFactory.h */, + 92FF022D23AA0B5600AC97F6 /* ConvolutionInt8Executor.cpp */, + 92FF022E23AA0B5600AC97F6 /* ResizeFunction.h */, + 92FF022F23AA0B5600AC97F6 /* ConvolutionDepthwise3x3.cpp */, + 92FF023023AA0B5600AC97F6 /* ConvolutionIntFactory.hpp */, + 92FF023123AA0B5600AC97F6 /* WinogradOptFunction.hpp */, + 92FF023223AA0B5600AC97F6 /* ConvolutionGroup.cpp */, + 92FF023323AA0B5600AC97F6 /* ConvolutionFloatFactory.cpp */, + 92FF023423AA0B5600AC97F6 /* ConvolutionInt8Executor.hpp */, + 92FF023523AA0B5600AC97F6 /* ConvolutionDepthwise3x3.hpp */, + 92FF023623AA0B5600AC97F6 /* Convolution1x1Strassen.cpp */, + 92FF023723AA0B5600AC97F6 /* ResizeFunction.cpp */, + 92FF023823AA0B5600AC97F6 /* StrassenMatmulComputor.hpp */, + 92FF023923AA0B5600AC97F6 /* Convolution3x3.hpp */, + 92FF023A23AA0B5600AC97F6 /* Convolution3D3x3.hpp */, + 92FF023B23AA0B5600AC97F6 /* ConvOpt.h */, + 92FF023C23AA0B5600AC97F6 /* ConvolutionWinograd.hpp */, + 92FF023D23AA0B5600AC97F6 /* ConvolutionWinograd3D.hpp */, + 92FF023E23AA0B5600AC97F6 /* OptimizedComputer.hpp */, + 92FF023F23AA0B5600AC97F6 /* Int8FunctionsOpt.h */, + 92FF024023AA0B5600AC97F6 /* DeconvolutionWithStride.cpp */, + 92FF024123AA0B5600AC97F6 /* ConvolutionTiledExecutor.cpp */, + ); + path = compute; + sourceTree = ""; + }; EBB38EC621E748B9005F76D7 /* shape */ = { isa = PBXGroup; children = ( - EBD9FF11236A939700E188F5 /* ShapeDetectionPostProcess.cpp */, - 48057D842330A8F900F922BE /* ShapeGatherND.cpp */, - C422D73D2326449500FD59D0 /* ShapePool3D.cpp */, - 
C422D737232634DD00FD59D0 /* ShapeConvolution3D.cpp */, - 48C5E79A22FBF87600EAC2A6 /* ShapeDepthToSpace.cpp */, - 48C5E79922FBF87600EAC2A6 /* ShapeSpaceToDepth.cpp */, - EB69637722E072600065993C /* ShapeCosineSimilarity.cpp */, - AE7BE4B822855638002CEEA6 /* ShapeRegister.cpp */, - AE7BE47A22816FC9002CEEA6 /* ShapeMoments.cpp */, - EB4925B2224A146000C512BB /* ShapeBatchMatMul.cpp */, - EB4925B3224A146000C512BB /* ShapeRNNSequenceGRU.cpp */, - EBB38ED621E748B9005F76D7 /* ShapeArgMax.cpp */, - EBB38ECA21E748B9005F76D7 /* ShapeAsString.cpp */, - EBB38EE321E748B9005F76D7 /* ShapeBatchToSpaceND.cpp */, - EBB38EDE21E748B9005F76D7 /* ShapeBinaryOp.cpp */, - EBB38EE221E748B9005F76D7 /* ShapeCast.cpp */, - EBB38EE121E748B9005F76D7 /* ShapeConcat.cpp */, - EBB38EE621E748B9005F76D7 /* ShapeConst.cpp */, - EBB38EF821E748B9005F76D7 /* ShapeConvolution.cpp */, - EBB38ECC21E748B9005F76D7 /* ShapeCrop.cpp */, - EBB38EF121E748B9005F76D7 /* ShapeCropAndResize.cpp */, - EBB38EFF21E748B9005F76D7 /* ShapeDeconvolution.cpp */, - 4841B61221EC6267002E5D66 /* ShapeDequantize.cpp */, - EBB38EE721E748B9005F76D7 /* ShapeDetectionOutput.cpp */, - EBB38EE521E748B9005F76D7 /* ShapeEltwise.cpp */, - EBB38EEB21E748B9005F76D7 /* ShapeExpandDims.cpp */, - EBB38EF621E748B9005F76D7 /* ShapeFill.cpp */, - EBB38EC921E748B9005F76D7 /* ShapeGather.cpp */, - EBB38EE021E748B9005F76D7 /* ShapeGatherV2.cpp */, - EBB38ED321E748B9005F76D7 /* ShapeInnerProduct.cpp */, - EBB38ECB21E748B9005F76D7 /* ShapeInterp.cpp */, - EBB38EFA21E748B9005F76D7 /* ShapeLSTM.cpp */, - EBB38ECD21E748B9005F76D7 /* ShapeMatMul.cpp */, - EBB38EDF21E748B9005F76D7 /* ShapeNonMaxSuppressionV2.cpp */, - EBB38EFE21E748B9005F76D7 /* ShapePack.cpp */, - EBB38EEA21E748B9005F76D7 /* ShapePermute.cpp */, - EBB38ED221E748B9005F76D7 /* ShapePool.cpp */, - EBB38EC821E748B9005F76D7 /* ShapePriorbox.cpp */, - EBB38EF721E748B9005F76D7 /* ShapeProposal.cpp */, - EBB38ED521E748B9005F76D7 /* ShapeQuantizedAvgPool.cpp */, - EBB38EF221E748B9005F76D7 /* 
ShapeQuantizedMaxPool.cpp */, - EBB38ED121E748B9005F76D7 /* ShapeQuantizedReshape.cpp */, - EBB38EF321E748B9005F76D7 /* ShapeRange.cpp */, - EBB38EF921E748B9005F76D7 /* ShapeRank.cpp */, - EBB38EFC21E748B9005F76D7 /* ShapeReduceJoin.cpp */, - EBB38ED421E748B9005F76D7 /* ShapeReduction.cpp */, - EBB38ECF21E748B9005F76D7 /* ShapeReshape.cpp */, - EBB38EDA21E748B9005F76D7 /* ShapeResize.cpp */, - EBB38EEC21E748B9005F76D7 /* ShapeROIPooling.cpp */, - EBB38EC721E748B9005F76D7 /* ShapeShape.cpp */, - EBB38EE821E748B9005F76D7 /* ShapeSize.cpp */, - EBB38EFB21E748B9005F76D7 /* ShapeSlice.cpp */, - EBB38ED821E748B9005F76D7 /* ShapeSliceTf.cpp */, - EBB38EFD21E748B9005F76D7 /* ShapeSpaceToBatchND.cpp */, - EBB38EDD21E748B9005F76D7 /* ShapeSqueeze.cpp */, - EBB38ED021E748B9005F76D7 /* ShapeStridedSlice.cpp */, - EBB38EE921E748B9005F76D7 /* ShapeTensorConvert.cpp */, - EBB38ECE21E748B9005F76D7 /* ShapeTFQuantizedConv2D.cpp */, - EBB38EE421E748B9005F76D7 /* ShapeTile.cpp */, - EBB38EF521E748B9005F76D7 /* ShapeTopKV2.cpp */, - EBB38EDB21E748B9005F76D7 /* ShapeTranspose.cpp */, - EBB38EF421E748B9005F76D7 /* ShapeUnpack.cpp */, - EBB38EDC21E748B9005F76D7 /* ShapeWhere.cpp */, - 48B904A8229550CF003116BB /* ShapeSelect.cpp */, - 4847D41C22C0739A0049F3CA /* ShapePadding.cpp */, + 92FF040B23AA0B7000AC97F6 /* ShapeArgMax.cpp */, + 92FF040D23AA0B7000AC97F6 /* ShapeAsString.cpp */, + 92FF03E423AA0B6D00AC97F6 /* ShapeBatchMatMul.cpp */, + 92FF03F823AA0B6E00AC97F6 /* ShapeBatchToSpaceND.cpp */, + 92FF041423AA0B7000AC97F6 /* ShapeBinaryOp.cpp */, + 92FF041623AA0B7000AC97F6 /* ShapeBroadcastTo.cpp */, + 92FF041723AA0B7100AC97F6 /* ShapeCast.cpp */, + 92FF03DC23AA0B6D00AC97F6 /* ShapeConcat.cpp */, + 92FF040723AA0B6F00AC97F6 /* ShapeConst.cpp */, + 92FF040423AA0B6F00AC97F6 /* ShapeConvolution.cpp */, + 92FF03EF23AA0B6E00AC97F6 /* ShapeConvolution3D.cpp */, + 92FF03E023AA0B6D00AC97F6 /* ShapeCosineSimilarity.cpp */, + 92FF03ED23AA0B6E00AC97F6 /* ShapeCrop.cpp */, + 92FF040A23AA0B7000AC97F6 /* 
ShapeCropAndResize.cpp */, + 92FF03DB23AA0B6D00AC97F6 /* ShapeDeconvolution.cpp */, + 92FF040923AA0B7000AC97F6 /* ShapeDepthToSpace.cpp */, + 92FF03F323AA0B6E00AC97F6 /* ShapeDequantize.cpp */, + 92FF03F123AA0B6E00AC97F6 /* ShapeDetectionOutput.cpp */, + 92FF03F523AA0B6E00AC97F6 /* ShapeDetectionPostProcess.cpp */, + 92FF040823AA0B7000AC97F6 /* ShapeEltwise.cpp */, + 92FF03F623AA0B6E00AC97F6 /* ShapeExpandDims.cpp */, + 92FF03D423AA0B6C00AC97F6 /* ShapeFill.cpp */, + 92FF040223AA0B6F00AC97F6 /* ShapeGather.cpp */, + 92FF040323AA0B6F00AC97F6 /* ShapeGatherND.cpp */, + 92FF03F723AA0B6E00AC97F6 /* ShapeGatherV2.cpp */, + 92FF040023AA0B6F00AC97F6 /* ShapeInnerProduct.cpp */, + 92FF03E223AA0B6D00AC97F6 /* ShapeInterp.cpp */, + 92FF03E323AA0B6D00AC97F6 /* ShapeLinSpace.cpp */, + 92FF03FE23AA0B6F00AC97F6 /* ShapeLSTM.cpp */, + 92FF03E123AA0B6D00AC97F6 /* ShapeMatMul.cpp */, + 92FF03FB23AA0B6F00AC97F6 /* ShapeMoments.cpp */, + 92FF03D623AA0B6C00AC97F6 /* ShapeNonMaxSuppressionV2.cpp */, + 92FF03E523AA0B6D00AC97F6 /* ShapeOneHot.cpp */, + 92FF03DA23AA0B6D00AC97F6 /* ShapePack.cpp */, + 92FF03E723AA0B6D00AC97F6 /* ShapePadding.cpp */, + 92FF03F423AA0B6E00AC97F6 /* ShapePermute.cpp */, + 92FF03D723AA0B6C00AC97F6 /* ShapePool.cpp */, + 92FF040623AA0B6F00AC97F6 /* ShapePool3D.cpp */, + 92FF040F23AA0B7000AC97F6 /* ShapePriorbox.cpp */, + 92FF03E823AA0B6D00AC97F6 /* ShapeProposal.cpp */, + 92FF03EA23AA0B6D00AC97F6 /* ShapeQuantizedAvgPool.cpp */, + 92FF03D323AA0B6C00AC97F6 /* ShapeQuantizedMaxPool.cpp */, + 92FF03FC23AA0B6F00AC97F6 /* ShapeQuantizedReshape.cpp */, + 92FF03D823AA0B6C00AC97F6 /* ShapeRange.cpp */, + 92FF03D923AA0B6C00AC97F6 /* ShapeRank.cpp */, + 92FF03E623AA0B6D00AC97F6 /* ShapeReduceJoin.cpp */, + 92FF041223AA0B7000AC97F6 /* ShapeReduction.cpp */, + 92FF041323AA0B7000AC97F6 /* ShapeRegister.cpp */, + 92FF041023AA0B7000AC97F6 /* ShapeReshape.cpp */, + 92FF040C23AA0B7000AC97F6 /* ShapeResize.cpp */, + 92FF040E23AA0B7000AC97F6 /* ShapeRNNSequenceGRU.cpp */, + 
92FF03DE23AA0B6D00AC97F6 /* ShapeROIPooling.cpp */, + 92FF03DD23AA0B6D00AC97F6 /* ShapeScatterNd.cpp */, + 92FF03F023AA0B6E00AC97F6 /* ShapeSelect.cpp */, + 92FF041523AA0B7000AC97F6 /* ShapeShape.cpp */, + 92FF03DF23AA0B6D00AC97F6 /* ShapeSize.cpp */, + 92FF03FA23AA0B6F00AC97F6 /* ShapeSlice.cpp */, + 92FF03E923AA0B6D00AC97F6 /* ShapeSliceTf.cpp */, + 92FF03EB23AA0B6E00AC97F6 /* ShapeSpaceToBatchND.cpp */, + 92FF03FF23AA0B6F00AC97F6 /* ShapeSpaceToDepth.cpp */, + 92FF040123AA0B6F00AC97F6 /* ShapeSqueeze.cpp */, + 92FF03EE23AA0B6E00AC97F6 /* ShapeStridedSlice.cpp */, + 92FF03F923AA0B6F00AC97F6 /* ShapeTensorConvert.cpp */, + 92FF041823AA0B7100AC97F6 /* ShapeTFQuantizedConv2D.cpp */, + 92FF040523AA0B6F00AC97F6 /* ShapeTile.cpp */, + 92FF03FD23AA0B6F00AC97F6 /* ShapeTopKV2.cpp */, + 92FF041123AA0B7000AC97F6 /* ShapeTranspose.cpp */, + 92FF03D523AA0B6C00AC97F6 /* ShapeUnpack.cpp */, + 92FF03F223AA0B6E00AC97F6 /* ShapeUnravelIndex.cpp */, + 92FF03EC23AA0B6E00AC97F6 /* ShapeWhere.cpp */, ); path = shape; sourceTree = ""; @@ -2643,236 +3110,243 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( - 48070736231E512D00528CE5 /* Optimizer.hpp in Headers */, - 48070732231E512D00528CE5 /* NeuralNetWorkOp.hpp in Headers */, - 48070734231E512D00528CE5 /* MathOp.hpp in Headers */, - 48070733231E512D00528CE5 /* Expr.hpp in Headers */, - 4807073C231E512D00528CE5 /* InsideExpr.hpp in Headers */, - 4887147B215249EA00CCE0D8 /* Rect.h in Headers */, - 920004DA21EDC30E00BCE892 /* MetalDequantize.hpp in Headers */, - 48265469210ABA3000B2CFEA /* AutoTime.hpp in Headers */, - 48887673215B639F0079B12E /* CPUCropAndResize.hpp in Headers */, - 924F132321ABD470006D46A4 /* MetalQuantizedSoftmax.hpp in Headers */, - 48887606215B639F0079B12E /* CPULRN.hpp in Headers */, - CE96FE7721707D58004AB400 /* MetalMatMul.hpp in Headers */, - 48A8A61121D101A700C2B9A7 /* ImageFloatBlitter.hpp in Headers */, - 9260B27921A7C5EA00D48C97 /* MetalQuantizedAvgPool.hpp in Headers */, - 
4888764D215B639F0079B12E /* ConvolutionIntFactory.hpp in Headers */, - EB4925C0224A147E00C512BB /* CPUMoments.hpp in Headers */, - 4841B60E21EC607E002E5D66 /* CPUDequantize.hpp in Headers */, - 48887729215B639F0079B12E /* Matrix.hpp in Headers */, - 4888762C215B639F0079B12E /* CPUWhere.hpp in Headers */, - 480529622105DDA400AA776E /* Interpreter.hpp in Headers */, - 48070735231E512D00528CE5 /* ExprCreator.hpp in Headers */, - 92EEFF28217F0EEF00F89377 /* MetalCrop.hpp in Headers */, - 4887147A215249EA00CCE0D8 /* Matrix.h in Headers */, - 486FDF40223E495B00F487FB /* CPUBinary.hpp in Headers */, - 48871465215225D600CCE0D8 /* ImageProcess.hpp in Headers */, - 9243106D2239FE0B0016DA25 /* MetalSize.hpp in Headers */, - 4888759E215B639F0079B12E /* MetalLRN.hpp in Headers */, - 488875F8215B639F0079B12E /* CPUTanh.hpp in Headers */, - 488875AD215B639F0079B12E /* MetalBackend.hpp in Headers */, - 488875DC215B639F0079B12E /* MetalConcat.hpp in Headers */, - 48887628215B639F0079B12E /* CPUSqueeze.hpp in Headers */, - 4807073A231E512D00528CE5 /* MergeOptimizer.hpp in Headers */, - 48887654215B639F0079B12E /* Convolution3x3.hpp in Headers */, - 48887626215B639F0079B12E /* CPUGatherV2.hpp in Headers */, - 48070719231A7B5100528CE5 /* CPUReverseSequence.hpp in Headers */, - 488875CF215B639F0079B12E /* MetalDeconvolution.hpp in Headers */, - 487970D922C9BF4B00795502 /* CPUInt8ToFloat.hpp in Headers */, - 488875E8215B639F0079B12E /* CPUTFQuantizedConv2D.hpp in Headers */, - 48AE9EA42212B2C2009DB6F4 /* Convolution1x1Strassen.hpp in Headers */, - 4841B5FD21EAE998002E5D66 /* Execution.hpp in Headers */, - 48887662215B639F0079B12E /* CPUPool.hpp in Headers */, - EB4925C4224A147E00C512BB /* CPUInstanceNorm.hpp in Headers */, - 92682C5421819BF100B52B9D /* MetalSeLU.hpp in Headers */, - 488875A6215B639F0079B12E /* MetalTanH.hpp in Headers */, - 488875CB215B639F0079B12E /* MetalSoftmax.hpp in Headers */, - 4887145A215153F900CCE0D8 /* ErrorCode.hpp in Headers */, - 4841B60D21EC607E002E5D66 /* 
CPUQuantizedLogistic.hpp in Headers */, - 92D765AD2228188700178BE5 /* Session.hpp in Headers */, - CE96FE7321707D58004AB400 /* MetalSigmoid.hpp in Headers */, - EBD9FF16236A93AB00E188F5 /* CPUDetectionPostProcess.hpp in Headers */, - 48887612215B639F0079B12E /* CPUSigmoid.hpp in Headers */, - 4888767D215B639F0079B12E /* MNNAsmGlobal.h in Headers */, - 4843AA5B22A7E9AB00889A63 /* CPUSoftmaxGrad.hpp in Headers */, - 92682C4E2181729200B52B9D /* MetalTile.hpp in Headers */, - 921722F421DDF63A004583BF /* TFQuantizeOp_generated.h in Headers */, - 48887648215B639F0079B12E /* ConvolutionFloatFactory.h in Headers */, - 488875AC215B639F0079B12E /* MetalNormalize.hpp in Headers */, - 92351C8821992AB2002CA341 /* MetalQuantizedAdd.hpp in Headers */, - 48887617215B639F0079B12E /* CPULSTM.hpp in Headers */, - 488875BE215B639F0079B12E /* MetalPermute.hpp in Headers */, - 488875EF215B639F0079B12E /* CPUQuantizedAvgPool.hpp in Headers */, - 4826546C210AF76E00B2CFEA /* HalideRuntime.h in Headers */, - 4888772A215B639F0079B12E /* WingoradGenerater.hpp in Headers */, - 48887607215B639F0079B12E /* CPUStridedSlice.hpp in Headers */, - 92256948219D698100F251E2 /* MetalRank.hpp in Headers */, - 92EEFF312180159600F89377 /* MetalReduction.hpp in Headers */, - 11EDD60B22E55A09007F3793 /* CPUDepthToSpace.hpp in Headers */, - 92D7659B2228176500178BE5 /* UserDefine_generated.h in Headers */, - 48887616215B639F0079B12E /* CPUNormalize.hpp in Headers */, - 48887625215B639F0079B12E /* CPUSliceTf.hpp in Headers */, - 92EAC19A21CB3CD60056F4C2 /* MetalCast.hpp in Headers */, - 48070744231E52E300528CE5 /* BasicOptimizer_generated.h in Headers */, - 4888767C215B639F0079B12E /* CPUSoftmax.hpp in Headers */, - 925801452223B8D100555D43 /* MetalConvolutionCommon.hpp in Headers */, - 48A8A61021D101A700C2B9A7 /* ImageBlitter.hpp in Headers */, - 488876DB215B639F0079B12E /* CPUInterp.hpp in Headers */, - CE96FE7221707D58004AB400 /* MetalUnary.hpp in Headers */, - 488875D8215B639F0079B12E /* MetalScale.hpp in 
Headers */, - 488875EB215B639F0079B12E /* CPUTensorConvert.hpp in Headers */, - 4843AA5722A7E9AB00889A63 /* CPUReluGrad.hpp in Headers */, - 488875BD215B639F0079B12E /* MetalPReLU.hpp in Headers */, - 488875F6215B639F0079B12E /* CPUAsString.hpp in Headers */, - 488875F2215B639F0079B12E /* CPUEltwise.hpp in Headers */, - AEC3B31F211BEF710046AD86 /* MNNDefine.h in Headers */, - 48887652215B639F0079B12E /* ConvolutionInt8Executor.hpp in Headers */, - 488875FE215B639F0079B12E /* CPUPriorbox.hpp in Headers */, - 485DD418217F49C500129159 /* CPUQuanConvolutionDepthwise.hpp in Headers */, - 92C674FA22549A1600011D33 /* MetalReLU6.hpp in Headers */, - 48887634215B639F0079B12E /* CPUQuantizedMaxPool.hpp in Headers */, - 483CD487216B2F0400B05BE9 /* WinogradOptFunction.hpp in Headers */, - 92D7659A2228176500178BE5 /* CaffeOp_generated.h in Headers */, - 486FDF43223E495B00F487FB /* CPUUnary.hpp in Headers */, - 488876E1215B639F0079B12E /* CPUDetectionOutput.hpp in Headers */, - 48A8A62621D47B5A00C2B9A7 /* OptimizedComputer.hpp in Headers */, - 48887624215B639F0079B12E /* CPUNonMaxSuppressionV2.hpp in Headers */, - C422D7C32339D0EE00FD59D0 /* CPUElu.hpp in Headers */, - 48887659215B639F0079B12E /* Int8FunctionsOpt.h in Headers */, - 488875E5215B639F0079B12E /* CPURange.hpp in Headers */, - 4888760F215B639F0079B12E /* CPUReduction.hpp in Headers */, - 488875FC215B639F0079B12E /* CPUQuantizedReshape.hpp in Headers */, - 48887603215B639F0079B12E /* CPUFill.hpp in Headers */, - 48887679215B639F0079B12E /* CPUScale.hpp in Headers */, - 487970D622C9BF4B00795502 /* CPUConvInt8.hpp in Headers */, - 486FDF4D2241E95700F487FB /* CPURuntime.hpp in Headers */, - 48887655215B639F0079B12E /* ConvOpt.h in Headers */, - 488876DA215B639F0079B12E /* CPUProposal.hpp in Headers */, - EB69637522E070E10065993C /* CPUCosineSimilarity.hpp in Headers */, - 48057D8C2330E85C00F922BE /* CPUMatrixBandPart.hpp in Headers */, - CE96FE7421707D58004AB400 /* MetalTensorConverter.hpp in Headers */, - 
48070741231E512D00528CE5 /* Solution.hpp in Headers */, - 488875BC215B639F0079B12E /* MetalLSTM.hpp in Headers */, - 921722F221DDF63A004583BF /* Tensor_generated.h in Headers */, - 48887623215B639F0079B12E /* CPUPack.hpp in Headers */, - 923B7F8A21A653AB002AFCE0 /* MetalGather.hpp in Headers */, - 92D765B22228188700178BE5 /* BackendFactory.hpp in Headers */, - 48887647215B639F0079B12E /* ConvolutionGroup.hpp in Headers */, - 488875EC215B639F0079B12E /* CPUQuantizationUtils.hpp in Headers */, - 488875B5215B639F0079B12E /* MetalEltwise.hpp in Headers */, - 48887584215B639F0079B12E /* Concurrency.h in Headers */, - 48A8A61921D101DE00C2B9A7 /* SkNx_neon.h in Headers */, - 48887619215B639F0079B12E /* CPUDeconvolutionDepthwise.hpp in Headers */, - 487970F422C9C07000795502 /* CPUPoolInt8.hpp in Headers */, - 48C054892201996200E91945 /* MetalConvolutionWinograd.hpp in Headers */, - 9223E12021D34C6B0067544A /* CPUBatchToSpaceND.hpp in Headers */, - 921722EE21DDF63A004583BF /* MNN_generated.h in Headers */, - 48887637215B639F0079B12E /* CPUDeconvolution.hpp in Headers */, - 48B904A322953DFF003116BB /* CPUSelect.hpp in Headers */, - 488875C3215B639F0079B12E /* MetalResize.hpp in Headers */, - 48A8A60F21D101A700C2B9A7 /* ImageSampler.hpp in Headers */, - 488875A0215B639F0079B12E /* MetalReshape.hpp in Headers */, - 48B904A722953E0F003116BB /* CPUZeroLike.hpp in Headers */, - 488876E2215B639F0079B12E /* CPUMatMul.hpp in Headers */, - 488875C9215B639F0079B12E /* MetalConvolution.hpp in Headers */, - 9223E11921D34BE40067544A /* MetalSpaceToBatchND.hpp in Headers */, - 4888758A215B639F0079B12E /* Macro.h in Headers */, - 488875F9215B639F0079B12E /* CPUReduceJoin.hpp in Headers */, - 488876DD215B639F0079B12E /* CPUConst.hpp in Headers */, - 488875D5215B639F0079B12E /* MetalInterp.hpp in Headers */, - 488875B4215B639F0079B12E /* MetalReLU.hpp in Headers */, - 48887644215B639F0079B12E /* ConvolutionTiledExecutor.hpp in Headers */, - 4847D42122C07E850049F3CA /* CPUPadding.hpp in 
Headers */, - 488875B7215B639F0079B12E /* MetalSlice.hpp in Headers */, - 92EEFEB2217F0CBB00F89377 /* CPUCrop.hpp in Headers */, - 921722F021DDF63A004583BF /* GpuLibrary_generated.h in Headers */, - 0F22069B211060A000EAE225 /* MNNForwardType.h in Headers */, - 48887602215B639F0079B12E /* CPUPermute.hpp in Headers */, - 488875E6215B639F0079B12E /* CPUConvolutionDepthwise.hpp in Headers */, - 4888761D215B639F0079B12E /* CPUSpatialProduct.hpp in Headers */, - 483CD483216B1C7B00B05BE9 /* DeconvolutionWithStride.hpp in Headers */, - 48887667215B639F0079B12E /* CPUShape.hpp in Headers */, - 48EB45E72254B9D2006C2322 /* ConvolutionDepthwise3x3.hpp in Headers */, - C422D769232F567300FD59D0 /* CPUConvolution3D.hpp in Headers */, - 92D765B62228188700178BE5 /* Pipeline.hpp in Headers */, - 48AE9EA02211950B009DB6F4 /* StrassenMatmulComputor.hpp in Headers */, - 48887582215B639F0079B12E /* TensorUtils.hpp in Headers */, - EB4925C5224A147E00C512BB /* CPUBatchMatMul.hpp in Headers */, - 486FDF49223E4B2800F487FB /* MetalBinary.hpp in Headers */, - 488876DF215B639F0079B12E /* CPUSize.hpp in Headers */, - 48887657215B639F0079B12E /* ConvolutionWinograd.hpp in Headers */, - 92682C602181A2EF00B52B9D /* MetalFill.hpp in Headers */, - 488875F1215B639F0079B12E /* CPUConvolution.hpp in Headers */, - 9257597A219EA07F00918499 /* MetalStridedSlice.hpp in Headers */, - 4888761F215B639F0079B12E /* CPUBackend.hpp in Headers */, - EB4925C1224A147E00C512BB /* CPURNNSequenceGRU.hpp in Headers */, - 4888763F215B639F0079B12E /* CommonOptFunction.h in Headers */, - 489BFA2C230E3D1F00F6B785 /* FileLoader.hpp in Headers */, - 48887605215B639F0079B12E /* CPUSlice.hpp in Headers */, - 485DD415217F495500129159 /* CPUQuantizedSoftmax.hpp in Headers */, - 4843AA5422A7E9AB00889A63 /* CPUConv2DBackPropFilter.hpp in Headers */, - 488875B6215B639F0079B12E /* MetalPooling.hpp in Headers */, - 48887609215B639F0079B12E /* CPUTopKV2.hpp in Headers */, - 4888760E215B639F0079B12E /* CPUReshape.hpp in Headers */, - 
488875D6215B639F0079B12E /* MNNMetalContext.h in Headers */, - 48887604215B639F0079B12E /* CPUTranspose.hpp in Headers */, - 4829D55122AF5C340093E3BE /* CPUSetDiff1D.hpp in Headers */, - 9223E12321D34C6B0067544A /* CPUSpaceToBatchND.hpp in Headers */, - EB288363230EAF6C00837188 /* CPUEltwiseInt8.hpp in Headers */, - 92921A87219C24CD00B063D1 /* MetalPack.hpp in Headers */, - 487970D722C9BF4B00795502 /* CPUDepthwiseConvInt8.hpp in Headers */, - 921722EF21DDF63A004583BF /* Type_generated.h in Headers */, - 485DD411217F495500129159 /* CPUQuantizedAdd.hpp in Headers */, - 48887621215B639F0079B12E /* CPUInnerProduct.hpp in Headers */, - 925A8916222395ED00D22428 /* MetalConvolution1x1.hpp in Headers */, - 48887590215B639F0079B12E /* BufferAllocator.hpp in Headers */, - 4888758C215B639F0079B12E /* MNNMemoryUtils.h in Headers */, - 48057D882330A90900F922BE /* CPUGatherND.hpp in Headers */, - 48887588215B639F0079B12E /* AutoStorage.h in Headers */, - 48A8A61B21D101DE00C2B9A7 /* SkNx.h in Headers */, - 4888761C215B639F0079B12E /* CPURank.hpp in Headers */, - 92D765BF22281D0000178BE5 /* DirectedAcyclicGraph.hpp in Headers */, - 4843AA5622A7E9AB00889A63 /* CPUPoolGrad.hpp in Headers */, - 924F131A21A81C74006D46A4 /* MetalTranspose.hpp in Headers */, - 488875A5215B639F0079B12E /* MetalROIPooling.hpp in Headers */, - 92A8D70121A40695009C2201 /* MetalTFQuantizedConv2D.hpp in Headers */, - 923B7F9321A68091002AFCE0 /* MetalGatherV2.hpp in Headers */, - 48887599215B639F0079B12E /* NonCopyable.hpp in Headers */, - 488875C2215B639F0079B12E /* MetalSpatialProduct.hpp in Headers */, - 487970D422C9BF4B00795502 /* CPUFloatToInt8.hpp in Headers */, - 923B7FA521A6C92F002AFCE0 /* MetalCropAndResize.hpp in Headers */, - C422D7BB23320B2B00FD59D0 /* Convolution3D3x3.hpp in Headers */, - 11EDD60D22E55A09007F3793 /* CPUSpaceToDepth.hpp in Headers */, - 488876D7215B639F0079B12E /* CPUGather.hpp in Headers */, - 92256951219D6E0200F251E2 /* MetalRange.hpp in Headers */, - 488876D6215B639F0079B12E /* 
CPUResize.hpp in Headers */, - 48887671215B639F0079B12E /* CPUArgMax.hpp in Headers */, - 4888766A215B639F0079B12E /* CPUConcat.hpp in Headers */, - 92D765B02228188700178BE5 /* WrapExecution.hpp in Headers */, - 4851BE102122C1BC009BB0AC /* Tensor.hpp in Headers */, - 9223E12A21D3755F0067544A /* MetalBatchToSpaceND.hpp in Headers */, - 923B7F9C21A69E2E002AFCE0 /* MetalQuantizedReshape.hpp in Headers */, - 9260B27321A7C5CD00D48C97 /* MetalQuantizedMaxPool.hpp in Headers */, - 488876D5215B639F0079B12E /* CPUCast.hpp in Headers */, - 4841B5F621EAE98B002E5D66 /* SizeComputer.hpp in Headers */, - 48887611215B639F0079B12E /* CPURelu.hpp in Headers */, - 92256936219D14CD00F251E2 /* MetalSliceTF.hpp in Headers */, - C422D7C7233A0F0F00FD59D0 /* ConvolutionWinograd3D.hpp in Headers */, - 48887672215B639F0079B12E /* CPUSelu.hpp in Headers */, - 92D765992228176500178BE5 /* TensorflowOp_generated.h in Headers */, - 9258013F2223B77C00555D43 /* MetalConvolutionDepthwise.hpp in Headers */, - 485DD423217F4C7600129159 /* CPUFixedPoint.hpp in Headers */, - 4888760B215B639F0079B12E /* CPUUnpack.hpp in Headers */, - 4888760D215B639F0079B12E /* CPUExpandDims.hpp in Headers */, - 48070740231E512D00528CE5 /* Utils.hpp in Headers */, - 9223E11021D327F40067544A /* MetalSqueeze.hpp in Headers */, - 4888767B215B639F0079B12E /* CPUTile.hpp in Headers */, - 4841B60F21EC607E002E5D66 /* CPUQuantizedConcat.hpp in Headers */, - 4841B5F921EAE98B002E5D66 /* Backend.hpp in Headers */, - 92D765AE2228188700178BE5 /* Schedule.hpp in Headers */, - 4888760A215B639F0079B12E /* CPUROIPooling.hpp in Headers */, - 4888764B215B639F0079B12E /* ResizeFunction.h in Headers */, + 1FD9536D23A89CA200888FC3 /* CPUQuantizedAvgPool.hpp in Headers */, + 1FD9566323A89D8A00888FC3 /* MetalSliceTF.hpp in Headers */, + 1FD9534223A89CA100888FC3 /* CPUSetDiff1D.hpp in Headers */, + 1FD9549323A89D1300888FC3 /* StrassenMatmulComputor.hpp in Headers */, + 1F501F812397BA5B004E8721 /* AutoTime.hpp in Headers */, + 
92FF04A523AA0BFB00AC97F6 /* AutoStorage.h in Headers */, + 92FF02AF23AA0B5A00AC97F6 /* CPUConcat.hpp in Headers */, + 92FF04B923AA0BFB00AC97F6 /* SizeComputer.hpp in Headers */, + 92FF03C823AA0B5A00AC97F6 /* CPUNormalize.hpp in Headers */, + 1F501F852397BA5B004E8721 /* ErrorCode.hpp in Headers */, + 92FF02B423AA0B5A00AC97F6 /* CPUMoments.hpp in Headers */, + 92FF034A23AA0B5A00AC97F6 /* CPUTanh.hpp in Headers */, + 1F501F842397BA5B004E8721 /* ImageProcess.hpp in Headers */, + 92FF04AC23AA0BFB00AC97F6 /* BackendFactory.hpp in Headers */, + 92FF025623AA0B5A00AC97F6 /* CPUPadding.hpp in Headers */, + 1F501F822397BA5B004E8721 /* Interpreter.hpp in Headers */, + 1F501F882397BA5B004E8721 /* Tensor.hpp in Headers */, + 92FF028223AA0B5A00AC97F6 /* CPUSoftmaxGrad.hpp in Headers */, + 92FF03B823AA0B5A00AC97F6 /* Convolution3D3x3.hpp in Headers */, + 1F501F872397BA5B004E8721 /* Matrix.h in Headers */, + 92FF025C23AA0B5A00AC97F6 /* CPUGatherV2.hpp in Headers */, + 92FF04A623AA0BFB00AC97F6 /* FileLoader.hpp in Headers */, + 92FF026E23AA0B5A00AC97F6 /* CPUQuantizationUtils.hpp in Headers */, + 92FF03AA23AA0B5A00AC97F6 /* ConvolutionFloatFactory.h in Headers */, + 1F501F862397BA5B004E8721 /* Rect.h in Headers */, + 1FD9566723A89D8A00888FC3 /* MetalGather.hpp in Headers */, + 1FD9553F23A89D4F00888FC3 /* ImageBlitter.hpp in Headers */, + 1FD953D723A89CD100888FC3 /* Macro.h in Headers */, + 1FD9533C23A89CA100888FC3 /* CPUSliceTf.hpp in Headers */, + 1F501F8B2397BA5B004E8721 /* MNNSharedContext.h in Headers */, + 92FF029623AA0B5A00AC97F6 /* CPUCast.hpp in Headers */, + 92FF038923AA0B5A00AC97F6 /* CPUSigmoid.hpp in Headers */, + 92FF027A23AA0B5A00AC97F6 /* CPUPool.hpp in Headers */, + 92FF039423AA0B5A00AC97F6 /* CPUSpaceToBatchND.hpp in Headers */, + 1F501F892397BA5B004E8721 /* MNNForwardType.h in Headers */, + 92FF034823AA0B5A00AC97F6 /* CPUSpaceToDepth.hpp in Headers */, + 92FF038423AA0B5A00AC97F6 /* CPUBatchMatMul.hpp in Headers */, + 92FF027323AA0B5A00AC97F6 /* CPUPoolInt8.hpp in 
Headers */, + 1F501F802397BA5B004E8721 /* MNNDefine.h in Headers */, + 1FD9535B23A89CA200888FC3 /* CPUSelu.hpp in Headers */, + 1FD952CF23A89CA100888FC3 /* CPUDetectionOutput.hpp in Headers */, + 1FD956A623A89D8A00888FC3 /* MetalQuantizedMaxPool.hpp in Headers */, + 1F501F7F2397BA5B004E8721 /* HalideRuntime.h in Headers */, + 92FF025823AA0B5A00AC97F6 /* CPUSqueeze.hpp in Headers */, + 92FF029E23AA0B5A00AC97F6 /* CPUDeconvolutionDepthwise.hpp in Headers */, + 92FF035623AA0B5A00AC97F6 /* CPUConvolution.hpp in Headers */, + 1F501F9D2397BB00004E8721 /* expr in Headers */, + 1F501EF12397BA26004E8721 /* SkNx_neon.h in Headers */, + 1F501EF82397BA31004E8721 /* Vec4.hpp in Headers */, + 92FF027623AA0B5A00AC97F6 /* CPUInt8ToFloat.hpp in Headers */, + 1F501F472397BA4D004E8721 /* MetalSoftmax.hpp in Headers */, + 1F501F6D2397BA4D004E8721 /* MetalDeconvolution.hpp in Headers */, + 1F501F6A2397BA4D004E8721 /* MetalPermute.hpp in Headers */, + 92FF03BB23AA0B5A00AC97F6 /* ConvolutionWinograd3D.hpp in Headers */, + 92FF033C23AA0B5A00AC97F6 /* MNNAsmGlobal.h in Headers */, + 1F501F572397BA4D004E8721 /* MetalQuantizedAdd.hpp in Headers */, + 1F501F5E2397BA4D004E8721 /* MetalSize.hpp in Headers */, + 92FF025A23AA0B5A00AC97F6 /* CPUDilation2D.hpp in Headers */, + 92FF029723AA0B5A00AC97F6 /* CPUEltwiseInt8.hpp in Headers */, + 1F501F502397BA4D004E8721 /* MetalSqueeze.hpp in Headers */, + 92FF027B23AA0B5A00AC97F6 /* CPUQuanConvolutionDepthwise.hpp in Headers */, + 92FF028923AA0B5A00AC97F6 /* CPUConv2DBackPropFilter.hpp in Headers */, + 92FF028E23AA0B5A00AC97F6 /* CPULinSpace.hpp in Headers */, + 1F501F692397BA4D004E8721 /* MetalPack.hpp in Headers */, + 92FF038823AA0B5A00AC97F6 /* CPUQuantizedLogistic.hpp in Headers */, + 1F501F482397BA4D004E8721 /* MetalMatMul.hpp in Headers */, + 92FF037623AA0B5A00AC97F6 /* CPUBinary.hpp in Headers */, + 92FF03AC23AA0B5A00AC97F6 /* ResizeFunction.h in Headers */, + 1F501EF92397BA31004E8721 /* WingoradGenerater.hpp in Headers */, + 
1F501F672397BA4D004E8721 /* MetalConvolution.hpp in Headers */, + 1F501F372397BA4D004E8721 /* MetalInterp.hpp in Headers */, + 92FF037823AA0B5A00AC97F6 /* CPUROIPooling.hpp in Headers */, + 1F501F462397BA4D004E8721 /* MetalDefine.h in Headers */, + 1F501F392397BA4D004E8721 /* MNNMetalContext.h in Headers */, + 1F501F652397BA4D004E8721 /* MetalConvolutionGEMM.hpp in Headers */, + 1F501F6B2397BA4D004E8721 /* MetalLRN.hpp in Headers */, + 92FF038723AA0B5A00AC97F6 /* CPUTensorConvert.hpp in Headers */, + 1F501F522397BA4D004E8721 /* MetalScale.hpp in Headers */, + 92FF036E23AA0B5A00AC97F6 /* CPUQuantizedSoftmax.hpp in Headers */, + 92FF04BF23AA0BFB00AC97F6 /* Concurrency.h in Headers */, + 1F501F3E2397BA4D004E8721 /* MetalBinary.hpp in Headers */, + 1F501EEF2397BA26004E8721 /* ImageSampler.hpp in Headers */, + 92FF03CD23AA0B5A00AC97F6 /* CPUConvInt8.hpp in Headers */, + 1F501F622397BA4D004E8721 /* MetalQuantizedSoftmax.hpp in Headers */, + 92FF026423AA0B5A00AC97F6 /* CPUExpandDims.hpp in Headers */, + 92FF02A823AA0B5A00AC97F6 /* CPUGather.hpp in Headers */, + 92FF03C523AA0B5A00AC97F6 /* CPUReduceJoin.hpp in Headers */, + 1F501F712397BA4D004E8721 /* MetalReshape.hpp in Headers */, + 92FF02A723AA0B5A00AC97F6 /* CPUSelu.hpp in Headers */, + 92FF027123AA0B5A00AC97F6 /* CPUUnpack.hpp in Headers */, + 92FF034B23AA0B5A00AC97F6 /* CPUTile.hpp in Headers */, + 92FF029323AA0B5A00AC97F6 /* CPURange.hpp in Headers */, + 92FF03B323AA0B5A00AC97F6 /* ConvolutionDepthwise3x3.hpp in Headers */, + 92FF038B23AA0B5A00AC97F6 /* CPUUnravelIndex.hpp in Headers */, + 92FF02B223AA0B5A00AC97F6 /* CPUBroadcastTo.hpp in Headers */, + 1F501EF32397BA26004E8721 /* SkNx.h in Headers */, + 92FF03BC23AA0B5A00AC97F6 /* OptimizedComputer.hpp in Headers */, + 92FF03C623AA0B5A00AC97F6 /* CPUNonMaxSuppressionV2.hpp in Headers */, + 1F501F442397BA4D004E8721 /* MetalResize.hpp in Headers */, + 92FF037C23AA0B5A00AC97F6 /* CPULSTM.hpp in Headers */, + 1F501F432397BA4D004E8721 /* MetalSliceTF.hpp in Headers */, + 
92FF028C23AA0B5A00AC97F6 /* CPUReduction.hpp in Headers */, + 1F501F5D2397BA4D004E8721 /* MetalReLU.hpp in Headers */, + 92FF03B923AA0B5A00AC97F6 /* ConvOpt.h in Headers */, + 92FF04AB23AA0BFB00AC97F6 /* Pipeline.hpp in Headers */, + 92FF03CF23AA0B5A00AC97F6 /* CPUCosineSimilarity.hpp in Headers */, + 92FF036123AA0B5A00AC97F6 /* CPUQuantizedConcat.hpp in Headers */, + 92FF034623AA0B5A00AC97F6 /* CPUGatherND.hpp in Headers */, + 92FF038F23AA0B5A00AC97F6 /* CPUPriorbox.hpp in Headers */, + 92FF026823AA0B5A00AC97F6 /* CPUStridedSlice.hpp in Headers */, + 1F501F6F2397BA4D004E8721 /* MetalStridedSlice.hpp in Headers */, + 92FF03AE23AA0B5A00AC97F6 /* ConvolutionIntFactory.hpp in Headers */, + 92FF04C323AA0BFB00AC97F6 /* Session.hpp in Headers */, + 1F501F4A2397BA4D004E8721 /* MetalCropAndResize.hpp in Headers */, + 48FA474423AA127B00172C3B /* MergeOptimizer.hpp in Headers */, + 92FF039F23AA0B5A00AC97F6 /* CommonOptFunction.h in Headers */, + 92FF03BA23AA0B5A00AC97F6 /* ConvolutionWinograd.hpp in Headers */, + 92FF027723AA0B5A00AC97F6 /* CPUUnary.hpp in Headers */, + 1F501F3F2397BA4D004E8721 /* MetalLSTM.hpp in Headers */, + 1F501F5A2397BA4D004E8721 /* MetalConvolutionWinograd.hpp in Headers */, + 1F501F492397BA4D004E8721 /* MetalTensorConverter.hpp in Headers */, + 92FF035B23AA0B5A00AC97F6 /* CPURelu.hpp in Headers */, + 1F501F5B2397BA4D004E8721 /* MetalSpaceToBatchND.hpp in Headers */, + 1F501F4F2397BA4D004E8721 /* MetalFill.hpp in Headers */, + 92FF038D23AA0B5A00AC97F6 /* CPUMatrixBandPart.hpp in Headers */, + 92FF035A23AA0B5A00AC97F6 /* CPUDetectionPostProcess.hpp in Headers */, + 1F501F552397BA4D004E8721 /* MetalConvolution1x1.hpp in Headers */, + 1F501F532397BA4D004E8721 /* MetalTile.hpp in Headers */, + 92FF04B723AA0BFB00AC97F6 /* WrapExecution.hpp in Headers */, + 1F501F6E2397BA4D004E8721 /* MetalQuantizedAvgPool.hpp in Headers */, + 92FF04AD23AA0BFB00AC97F6 /* Execution.hpp in Headers */, + 92FF025D23AA0B5A00AC97F6 /* CPUInterp.hpp in Headers */, + 
92FF039A23AA0B5A00AC97F6 /* Convolution1x1Strassen.hpp in Headers */, + 1F501F632397BA4D004E8721 /* MetalRange.hpp in Headers */, + 1F501F4C2397BA4D004E8721 /* MetalTranspose.hpp in Headers */, + 1F501F512397BA4D004E8721 /* MetalCrop.hpp in Headers */, + 92FF027823AA0B5A00AC97F6 /* CPUFill.hpp in Headers */, + 1F501F412397BA4D004E8721 /* MetalReLU6.hpp in Headers */, + 92FF029B23AA0B5A00AC97F6 /* CPUScale.hpp in Headers */, + 92FF029023AA0B5A00AC97F6 /* CPULRN.hpp in Headers */, + 92FF04B123AA0BFB00AC97F6 /* DirectedAcyclicGraph.hpp in Headers */, + 92FF028A23AA0B5A00AC97F6 /* CPUSoftmax.hpp in Headers */, + 92FF036C23AA0B5A00AC97F6 /* CPUConst.hpp in Headers */, + 92FF03CA23AA0B5A00AC97F6 /* CPUConvolutionDepthwise.hpp in Headers */, + 92FF037F23AA0B5A00AC97F6 /* CPUSlice.hpp in Headers */, + 92FF026C23AA0B5A00AC97F6 /* CPUSliceTf.hpp in Headers */, + 1F501F602397BA4D004E8721 /* MetalSeLU.hpp in Headers */, + 92FF04A923AA0BFB00AC97F6 /* Schedule.hpp in Headers */, + 92FF028623AA0B5A00AC97F6 /* CPUDeconvolution.hpp in Headers */, + 92FF02A023AA0B5A00AC97F6 /* CPUZeroLike.hpp in Headers */, + 92FF04B523AA0BFB00AC97F6 /* TensorUtils.hpp in Headers */, + 92FF026B23AA0B5A00AC97F6 /* CPUReverseSequence.hpp in Headers */, + 92FF034723AA0B5A00AC97F6 /* CPUConvolution3D.hpp in Headers */, + 92FF035223AA0B5A00AC97F6 /* CPUThreshold.hpp in Headers */, + 92FF04BB23AA0BFB00AC97F6 /* BufferAllocator.hpp in Headers */, + 92FF035823AA0B5A00AC97F6 /* CPUTFQuantizedConv2D.hpp in Headers */, + 92FF027223AA0B5A00AC97F6 /* CPUScatterNd.hpp in Headers */, + 92FF025B23AA0B5A00AC97F6 /* CPUPoolGrad.hpp in Headers */, + 1F501F562397BA4D004E8721 /* MetalPooling.hpp in Headers */, + 92FF03C023AA0B5A00AC97F6 /* CPUPack.hpp in Headers */, + 1F501F362397BA4D004E8721 /* MetalQuantizedReshape.hpp in Headers */, + 1F501F4E2397BA4D004E8721 /* MetalQuantizedMaxPool.hpp in Headers */, + 1F501F612397BA4D004E8721 /* MetalNormalize.hpp in Headers */, + 92FF03A923AA0B5A00AC97F6 /* ConvolutionGroup.hpp 
in Headers */, + 1F501F4B2397BA4D004E8721 /* MetalRank.hpp in Headers */, + 92FF03BD23AA0B5A00AC97F6 /* Int8FunctionsOpt.h in Headers */, + 92FF036623AA0B5A00AC97F6 /* CPUDetectionOutput.hpp in Headers */, + 92FF04BC23AA0BFB00AC97F6 /* NonCopyable.hpp in Headers */, + 92FF036823AA0B5A00AC97F6 /* CPUPermute.hpp in Headers */, + 48FA474B23AA127B00172C3B /* Utils.hpp in Headers */, + 92FF04AF23AA0BFB00AC97F6 /* Macro.h in Headers */, + 92FF028D23AA0B5A00AC97F6 /* CPUWhere.hpp in Headers */, + 1F501F3A2397BA4D004E8721 /* MetalSpatialProduct.hpp in Headers */, + 1F501F3C2397BA4D004E8721 /* MetalBackend.hpp in Headers */, + 92FF028323AA0B5A00AC97F6 /* CPUSize.hpp in Headers */, + 92FF03AF23AA0B5A00AC97F6 /* WinogradOptFunction.hpp in Headers */, + 92FF03C923AA0B5A00AC97F6 /* CPUMatMul.hpp in Headers */, + 1F501F5C2397BA4D004E8721 /* MetalEltwise.hpp in Headers */, + 1F501F3D2397BA4D004E8721 /* MetalCast.hpp in Headers */, + 92FF03B223AA0B5A00AC97F6 /* ConvolutionInt8Executor.hpp in Headers */, + 1F501F3B2397BA4D004E8721 /* MetalUnary.hpp in Headers */, + 92FF03A523AA0B5A00AC97F6 /* DeconvolutionWithStride.hpp in Headers */, + 1F501EF02397BA26004E8721 /* ImageFloatBlitter.hpp in Headers */, + 92FF034123AA0B5A00AC97F6 /* CPURank.hpp in Headers */, + 92FF03D123AA0B5A00AC97F6 /* CPUTopKV2.hpp in Headers */, + 1F501F642397BA4D004E8721 /* MetalDequantize.hpp in Headers */, + 1F501F682397BA4D004E8721 /* MetalConcat.hpp in Headers */, + 92FF033F23AA0B5A00AC97F6 /* CPUArgMax.hpp in Headers */, + 92FF038E23AA0B5A00AC97F6 /* CPUQuantizedReshape.hpp in Headers */, + 92FF034C23AA0B5A00AC97F6 /* CPUSetDiff1D.hpp in Headers */, + 1F501F542397BA4D004E8721 /* MetalSlice.hpp in Headers */, + 92FF02A123AA0B5A00AC97F6 /* CPUDepthwiseConvInt8.hpp in Headers */, + 92FF036723AA0B5A00AC97F6 /* CPURuntime.hpp in Headers */, + 92FF026623AA0B5A00AC97F6 /* CPUProposal.hpp in Headers */, + 92FF03C723AA0B5A00AC97F6 /* CPUTranspose.hpp in Headers */, + 92FF026023AA0B5A00AC97F6 /* CPURNNSequenceGRU.hpp 
in Headers */, + 1F501F4D2397BA4D004E8721 /* MetalConvolutionCommon.hpp in Headers */, + 1F501F702397BA4D004E8721 /* MetalReduction.hpp in Headers */, + 1F501F582397BA4D004E8721 /* MetalTanH.hpp in Headers */, + 92FF029F23AA0B5A00AC97F6 /* CPUReluGrad.hpp in Headers */, + 1F501EF22397BA26004E8721 /* ImageBlitter.hpp in Headers */, + 92FF02A923AA0B5A00AC97F6 /* CPUCropAndResize.hpp in Headers */, + 92FF037923AA0B5A00AC97F6 /* CPUInstanceNorm.hpp in Headers */, + 92FF026223AA0B5A00AC97F6 /* CPUSelect.hpp in Headers */, + 92FF039923AA0B5A00AC97F6 /* CPUReshape.hpp in Headers */, + 1F501EF72397BA31004E8721 /* Matrix.hpp in Headers */, + 1F501F402397BA4D004E8721 /* MetalROIPooling.hpp in Headers */, + 1F501F592397BA4D004E8721 /* MetalTFQuantizedConv2D.hpp in Headers */, + 92FF02B723AA0B5A00AC97F6 /* CPUQuantizedAdd.hpp in Headers */, + 92FF03B623AA0B5A00AC97F6 /* StrassenMatmulComputor.hpp in Headers */, + 92FF03A623AA0B5A00AC97F6 /* ConvolutionTiledExecutor.hpp in Headers */, + 92FF035F23AA0B5A00AC97F6 /* CPUShape.hpp in Headers */, + 92FF036523AA0B5A00AC97F6 /* CPUResize.hpp in Headers */, + 92FF04B423AA0BFB00AC97F6 /* MNNMemoryUtils.h in Headers */, + 1F501F422397BA4D004E8721 /* MetalGather.hpp in Headers */, + 1F501F5F2397BA4D004E8721 /* MetalSigmoid.hpp in Headers */, + 92FF04C123AA0BFB00AC97F6 /* Backend.hpp in Headers */, + 92FF02A623AA0B5A00AC97F6 /* CPUQuantizedMaxPool.hpp in Headers */, + 92FF029123AA0B5A00AC97F6 /* CPUSpatialProduct.hpp in Headers */, + 92FF028023AA0B5A00AC97F6 /* CPUFloatToInt8.hpp in Headers */, + 92FF028723AA0B5A00AC97F6 /* CPUFixedPoint.hpp in Headers */, + 92FF03B723AA0B5A00AC97F6 /* Convolution3x3.hpp in Headers */, + 92FF034523AA0B5A00AC97F6 /* CPUQuantizedAvgPool.hpp in Headers */, + 92FF027C23AA0B5A00AC97F6 /* CPUAsString.hpp in Headers */, + 1F501F452397BA4D004E8721 /* MetalConvolutionDepthwise.hpp in Headers */, + 92FF035023AA0B5A00AC97F6 /* CPUOneHot.hpp in Headers */, + 92FF039123AA0B5A00AC97F6 /* CPUBackend.hpp in Headers */, + 
92FF027023AA0B5A00AC97F6 /* CPUElu.hpp in Headers */, + 92FF036923AA0B5A00AC97F6 /* CPUInnerProduct.hpp in Headers */, + 1F501F6C2397BA4D004E8721 /* MetalPReLU.hpp in Headers */, + 1F501F662397BA4D004E8721 /* MetalGatherV2.hpp in Headers */, + 92FF038C23AA0B5A00AC97F6 /* CPUEltwise.hpp in Headers */, + 92FF028823AA0B5A00AC97F6 /* CPUDequantize.hpp in Headers */, + 92FF037123AA0B5A00AC97F6 /* CPUBatchToSpaceND.hpp in Headers */, + 92FF039723AA0B5A00AC97F6 /* CPUCrop.hpp in Headers */, + 92FF03CC23AA0B5A00AC97F6 /* CPUDepthToSpace.hpp in Headers */, + 1F501F382397BA4D004E8721 /* MetalBatchToSpaceND.hpp in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2884,7 +3358,6 @@ buildConfigurationList = 0F1465BF1FA18D1000F9860A /* Build configuration list for PBXNativeTarget "MNN" */; buildPhases = ( AE7BE46E22814B98002CEEA6 /* Run Script - OP register scan */, - 92E7B94C21DE0C84003013ED /* Generate flatbuffers If Needed */, 0F1465B21FA18D1000F9860A /* Sources */, 0F1465B31FA18D1000F9860A /* Frameworks */, 0F1465B41FA18D1000F9860A /* Headers */, @@ -2962,6 +3435,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( + 92FF02BA23AA0B5A00AC97F6 /* CMakeLists.txt in Resources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2977,39 +3451,39 @@ /* End PBXResourcesBuildPhase section */ /* Begin PBXShellScriptBuildPhase section */ - 92E7B94C21DE0C84003013ED /* Generate flatbuffers If Needed */ = { + 98533E6C205A262D00DA46E3 /* Run Script */ = { isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; + buildActionMask = 12; files = ( ); - inputFileListPaths = ( - ); inputPaths = ( ); - name = "Generate flatbuffers If Needed "; - outputFileListPaths = ( - ); + name = "Run Script"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "if [ ! 
-e ../../schema/current/MNN_generated.h ]; then\n cd ../../schema \n ./generate.sh\nfi\n"; + shellScript = "\necho \"==========\"\necho ${TARGET_NAME}\necho ${PROJECT_FILE_PATH}\necho ${TARGET_BUILD_DIR}\n\ntouch ${TARGET_BUILD_DIR}/MNN.framework/mnn.metallib\ncp ${TARGET_BUILD_DIR}/MNN.framework/mnn.metallib ${TARGET_BUILD_DIR}/Playground.app/\n"; }; - 98533E6C205A262D00DA46E3 /* Run Script */ = { + AE7BE46E22814B98002CEEA6 /* Run Script - OP register scan */ = { isa = PBXShellScriptBuildPhase; - buildActionMask = 12; + buildActionMask = 2147483647; files = ( ); + inputFileListPaths = ( + ); inputPaths = ( ); - name = "Run Script"; + name = "Run Script - OP register scan"; + outputFileListPaths = ( + ); outputPaths = ( ); runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "\necho \"==========\"\necho ${TARGET_NAME}\necho ${PROJECT_FILE_PATH}\necho ${TARGET_BUILD_DIR}\n\ntouch ${TARGET_BUILD_DIR}/MNN.framework/mnn.metallib\ncp ${TARGET_BUILD_DIR}/MNN.framework/mnn.metallib ${TARGET_BUILD_DIR}/Playground.app/\n"; + shellScript = "cd ${PROJECT_FILE_PATH}/../MNN\nsh OpRegister.sh\n"; }; - AE7BE46E22814B98002CEEA6 /* Run Script - OP register scan */ = { + AE8CC60B21BFE618004358B3 /* ShellScript */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( @@ -3018,14 +3492,13 @@ ); inputPaths = ( ); - name = "Run Script - OP register scan"; outputFileListPaths = ( ); outputPaths = ( ); runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "# Type a script or drag a script file from your workspace to insert its path.\ncd ${PROJECT_FILE_PATH}/../MNN\nsh OpRegister.sh\n"; + shellScript = "echo ${TARGET_BUILD_DIR}\nrm -f ${TARGET_BUILD_DIR}/MNN.framework/mnn.metallib\n"; }; /* End PBXShellScriptBuildPhase section */ @@ -3034,464 +3507,473 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 487970E722C9BF5E00795502 /* MNNReluInt8.S in Sources */, - 4888769E215B639F0079B12E /* 
MNNGemmInt16to32_4x4_Unit.S in Sources */, - 488876E4215B639F0079B12E /* CPUPermute.cpp in Sources */, - 488875FD215B639F0079B12E /* CPUDetectionOutput.cpp in Sources */, 924F131C21A81C80006D46A4 /* MetalTranspose.metal in Sources */, - 48AE9EB222154C9D009DB6F4 /* MNNGemmFloatOne_4.S in Sources */, - 4888773E215CD3BF0079B12E /* MNNBlitC3ToFloatRGBA.S in Sources */, - 483CD482216B1C7B00B05BE9 /* DeconvolutionWithStride.cpp in Sources */, - 4841B5F721EAE98B002E5D66 /* Backend.cpp in Sources */, - 4888764F215B639F0079B12E /* ConvolutionGroup.cpp in Sources */, - 48887678215B639F0079B12E /* CPUTFQuantizedConv2D.cpp in Sources */, - 48BF218821A4380A00AFF78E /* MNNSamplerC1BilinearOpt.S in Sources */, - 48C054982205B9B400E91945 /* MNNUnPackC4.S in Sources */, - 485DD4302181E94300129159 /* MNNQuanToDestUint8.S in Sources */, + 92FF04BD23AA0BFB00AC97F6 /* Execution.cpp in Sources */, + 92FF030A23AA0B5A00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */, + 92FF03B023AA0B5A00AC97F6 /* ConvolutionGroup.cpp in Sources */, + 48FA474623AA127B00172C3B /* NeuralNetWorkOp.cpp in Sources */, + 92FF02F423AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */, + 92FF038623AA0B5A00AC97F6 /* CPULinSpace.cpp in Sources */, 488875A9215B639F0079B12E /* MNNMetalContext.mm in Sources */, + 92FF037B23AA0B5A00AC97F6 /* CPUQuantizedConcat.cpp in Sources */, 4888759B215B639F0079B12E /* MetalSpatialProduct.mm in Sources */, - 92EEFEFF217F0CBC00F89377 /* CPUCrop.cpp in Sources */, - 48887661215B639F0079B12E /* CPUDeconvolutionDepthwise.cpp in Sources */, - 487970F722C9C19F00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */, 92682C5321819BF100B52B9D /* MetalSeLU.mm in Sources */, - 488876C9215B639F0079B12E /* MNNGemmInt16to32_4x4_Unit.S in Sources */, - 48887620215B639F0079B12E /* CPUConcat.cpp in Sources */, 923B7F9521A680A1002AFCE0 /* MetalGatherV2.metal in Sources */, - 48070737231E512D00528CE5 /* Optimizer.cpp in Sources */, - 48887675215B639F0079B12E /* 
CPUConvolutionDepthwise.cpp in Sources */, - 488875EA215B639F0079B12E /* CPUScale.cpp in Sources */, - 488876DC215B639F0079B12E /* CPUReduceJoin.cpp in Sources */, - 488876C6215B639F0079B12E /* MNNScaleBias2FloatC4.S in Sources */, - EBB38F1B21E748B9005F76D7 /* ShapeCast.cpp in Sources */, - 48AE9EB0221539C3009DB6F4 /* MNNStrassenMergeCFunction.S in Sources */, + 92FF02D423AA0B5A00AC97F6 /* MNNScaleBias2FloatC4.S in Sources */, + 92FF032C23AA0B5A00AC97F6 /* MNNWinogradMatrixProductRight.S in Sources */, + 92FF031A23AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S in Sources */, + 92FF031223AA0B5A00AC97F6 /* MNNMaxFloat.S in Sources */, + 92FF04B023AA0BFB00AC97F6 /* SizeComputer.cpp in Sources */, + 92FF02CB23AA0B5A00AC97F6 /* MNNSamplerC1NearestOpt.S in Sources */, + 92FF027D23AA0B5A00AC97F6 /* CPUReverseSequence.cpp in Sources */, + 92FF035E23AA0B5A00AC97F6 /* CPUConcat.cpp in Sources */, + 92FF02C223AA0B5A00AC97F6 /* MNNLoadU8AndSum.S in Sources */, + 92FF039823AA0B5A00AC97F6 /* CPUThreshold.cpp in Sources */, + 92FF02E323AA0B5A00AC97F6 /* MNNExpC8.S in Sources */, 9260B27821A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm in Sources */, - 48A8A60621CDF87000C2B9A7 /* MNNSamplerC4NearestOpt.S in Sources */, - EBB38F0821E748B9005F76D7 /* ShapeReshape.cpp in Sources */, - EB4925B4224A146000C512BB /* ShapeBatchMatMul.cpp in Sources */, - 48887640215B639F0079B12E /* ConvolutionWinograd.cpp in Sources */, - 4888762D215B639F0079B12E /* CPULRN.cpp in Sources */, - 48C054A322081C9B00E91945 /* MNNAddBiasRelu.S in Sources */, - 488875E2215B639F0079B12E /* CPUCropAndResize.cpp in Sources */, - C425F8742292A31F00B4682D /* MNNPowC8.S in Sources */, - EBB38F2421E748B9005F76D7 /* ShapeExpandDims.cpp in Sources */, - 488875E3215B639F0079B12E /* CPUSelu.cpp in Sources */, - 48887643215B639F0079B12E /* ConvOpt.cpp in Sources */, - 4888766F215B639F0079B12E /* CPUSqueeze.cpp in Sources */, + 92FF044D23AA0B7100AC97F6 /* ShapeConst.cpp in Sources */, + 92FF030223AA0B5A00AC97F6 /* 
MNNQuanToDestUint8.S in Sources */, + 92FF037323AA0B5A00AC97F6 /* CPUEltwiseInt8.cpp in Sources */, + 92FF042F23AA0B7100AC97F6 /* ShapeSliceTf.cpp in Sources */, + 92FF03B523AA0B5A00AC97F6 /* ResizeFunction.cpp in Sources */, + 92FF04B323AA0BFB00AC97F6 /* Schedule.cpp in Sources */, + 92FF036423AA0B5A00AC97F6 /* CPUUnravelIndex.cpp in Sources */, + 92FF02C623AA0B5A00AC97F6 /* MNNBlitC1ToFloatRGBA.S in Sources */, + 92FF02BE23AA0B5A00AC97F6 /* MNNFloat2Int8.S in Sources */, + 92FF034923AA0B5A00AC97F6 /* CPUSpatialProduct.cpp in Sources */, + 92FF037D23AA0B5A00AC97F6 /* CPURelu.cpp in Sources */, 920004D821EDC30E00BCE892 /* MetalDequantize.mm in Sources */, + 92FF034423AA0B5A00AC97F6 /* CPUGatherND.cpp in Sources */, + 92FF028F23AA0B5A00AC97F6 /* CPUDetectionOutput.cpp in Sources */, CE96FE7E21707D58004AB400 /* MetalSigmoid.mm in Sources */, - 488875ED215B639F0079B12E /* CPUSoftmax.cpp in Sources */, - 485DD412217F495500129159 /* CPUQuantizedSoftmax.cpp in Sources */, - 4888765A215B639F0079B12E /* ConvolutionTiledExecutor.cpp in Sources */, - AE7BE4B922855638002CEEA6 /* ShapeRegister.cpp in Sources */, - 4888762A215B639F0079B12E /* CPUFill.cpp in Sources */, - 489BFA2D230E3D1F00F6B785 /* FileLoader.cpp in Sources */, + 92FF039023AA0B5A00AC97F6 /* CPUGather.cpp in Sources */, 488875A7215B639F0079B12E /* MetalLRN.mm in Sources */, - EBB38F1321E748B9005F76D7 /* ShapeResize.cpp in Sources */, - 11EDD60A22E55A09007F3793 /* CPUDepthToSpace.cpp in Sources */, - 48887597215B639F0079B12E /* AutoTime.cpp in Sources */, - 487970DA22C9BF4B00795502 /* CPUConvInt8.cpp in Sources */, - 4888767F215B639F0079B12E /* MNNGemmFloatUnit_4.S in Sources */, - 487970E322C9BF5E00795502 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */, - EBB38F2B21E748B9005F76D7 /* ShapeQuantizedMaxPool.cpp in Sources */, - 4888767E215B639F0079B12E /* MNNFloat2Int8.S in Sources */, - 48C054A122081B5B00E91945 /* MNNReluWithSlope.S in Sources */, - 48C5E79C22FBF87700EAC2A6 /* ShapeDepthToSpace.cpp in Sources 
*/, - 4888765F215B639F0079B12E /* CPULSTM.cpp in Sources */, - 48887631215B639F0079B12E /* CPUUnpack.cpp in Sources */, - EBB38F0221E748B9005F76D7 /* ShapeGather.cpp in Sources */, - EBB38F3821E748B9005F76D7 /* ShapeDeconvolution.cpp in Sources */, + 92FF029923AA0B5A00AC97F6 /* CPUSlice.cpp in Sources */, + 92FF041D23AA0B7100AC97F6 /* ShapePool.cpp in Sources */, + 92FF02C523AA0B5A00AC97F6 /* MNNStrassenMergeCFunction.S in Sources */, + 92FF02B823AA0B5A00AC97F6 /* CPUWhere.cpp in Sources */, + 92FF027423AA0B5A00AC97F6 /* CPUArgMax.cpp in Sources */, + 92FF044523AA0B7100AC97F6 /* ShapeSpaceToDepth.cpp in Sources */, + 92FF04B823AA0BFB00AC97F6 /* MNNMemoryUtils.cpp in Sources */, + 92FF042D23AA0B7100AC97F6 /* ShapePadding.cpp in Sources */, + 92FF04BE23AA0BFB00AC97F6 /* FileLoader.cpp in Sources */, + 92FF027923AA0B5A00AC97F6 /* CPUSpaceToBatchND.cpp in Sources */, 92EAC19C21CB3CE20056F4C2 /* MetalCast.metal in Sources */, + 92FF02F623AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWise.S in Sources */, + 92FF042323AA0B7100AC97F6 /* ShapeScatterNd.cpp in Sources */, 925A89182223961F00D22428 /* MetalConvolution1x1.metal in Sources */, 488875A8215B639F0079B12E /* MetalNormalize.mm in Sources */, + 92FF045A23AA0B7100AC97F6 /* ShapeBinaryOp.cpp in Sources */, + 92FF02E523AA0B5A00AC97F6 /* MNNConvDwF23SourceTransUnit.S in Sources */, + 92FF02DA23AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S in Sources */, 9260B27221A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm in Sources */, - 4888769A215B639F0079B12E /* MNNConvSlideWindowMiddle.S in Sources */, - 48C054B5220A7A9600E91945 /* MNNConvRunForUnitDepthWise.S in Sources */, + 92FF033623AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWise.S in Sources */, + 92FF029C23AA0B5A00AC97F6 /* CPUPack.cpp in Sources */, + 92FF043523AA0B7100AC97F6 /* ShapeConvolution3D.cpp in Sources */, + 92FF039523AA0B5A00AC97F6 /* CPUSqueeze.cpp in Sources */, + 92FF043923AA0B7100AC97F6 /* ShapeDequantize.cpp in Sources */, 486FDF47223E4B2800F487FB /* MetalBinary.mm 
in Sources */, - 92D765AB2228188700178BE5 /* WrapExecution.cpp in Sources */, + 92FF02FD23AA0B5A00AC97F6 /* MNNScaleAddInt8.S in Sources */, + 92FF04A723AA0BFB00AC97F6 /* BackendRegister.cpp in Sources */, + 92FF02DF23AA0B5A00AC97F6 /* MNNBilinearProcC1.S in Sources */, 925E87E0220447900000192E /* MetalConvolutionWinograd.metal in Sources */, - EBB38F2F21E748B9005F76D7 /* ShapeFill.cpp in Sources */, + 92FF035123AA0B5A00AC97F6 /* CPUCrop.cpp in Sources */, + 92FF031523AA0B5A00AC97F6 /* MNNScaleBias2FloatC4.S in Sources */, 488875D9215B639F0079B12E /* MetalSlice.metal in Sources */, - 4841B60C21EC607E002E5D66 /* CPUQuantizedConcat.cpp in Sources */, - EBB38F0321E748B9005F76D7 /* ShapeAsString.cpp in Sources */, - 48A8A60221CDF55E00C2B9A7 /* MNNSamplerC1NearestOpt.S in Sources */, - 487970E222C9BF5E00795502 /* MNNGemmInt8AddBiasScale_8x4_Unit.S in Sources */, + 92FF02BF23AA0B5A00AC97F6 /* MNNSamplerC4NearestOpt.S in Sources */, 488875CE215B639F0079B12E /* MetalDeconvolution.metal in Sources */, - 487970E522C9BF5E00795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */, 9243106F2239FE190016DA25 /* MetalSize.metal in Sources */, - C422D7C6233A0F0F00FD59D0 /* ConvolutionWinograd3D.cpp in Sources */, - 48887595215B639F0079B12E /* Tensor.cpp in Sources */, - 487970E422C9BF5E00795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */, - 48EB45EB2255B70C006C2322 /* MNNConvDwF23SourceTransUnit.S in Sources */, - 11EDD60C22E55A09007F3793 /* CPUSpaceToDepth.cpp in Sources */, - EBB38F3521E748B9005F76D7 /* ShapeReduceJoin.cpp in Sources */, + 92FF029223AA0B5A00AC97F6 /* CPUConv2DBackPropFilter.cpp in Sources */, + 92FF04B623AA0BFB00AC97F6 /* TensorUtils.cpp in Sources */, + 92FF045223AA0B7100AC97F6 /* ShapeResize.cpp in Sources */, 9243106C2239FE0B0016DA25 /* MetalSize.mm in Sources */, 92256947219D698100F251E2 /* MetalRank.mm in Sources */, 92921A86219C24CD00B063D1 /* MetalPack.mm in Sources */, - AE7BE4B7228555A2002CEEA6 /* BackendRegister.cpp in Sources */, - 
487970D522C9BF4B00795502 /* CPUDepthwiseConvInt8.cpp in Sources */, - 486FDF4C2241E95700F487FB /* CPURuntime.cpp in Sources */, + 92FF034023AA0B5A00AC97F6 /* CPUShape.cpp in Sources */, + 92FF02B023AA0B5A00AC97F6 /* CPUDequantize.cpp in Sources */, + 92FF04C223AA0BFB00AC97F6 /* Pipeline.cpp in Sources */, + 92FF04C423AA0BFB00AC97F6 /* Session.cpp in Sources */, 488875C6215B639F0079B12E /* MetalPooling.mm in Sources */, - 487970E622C9BF5E00795502 /* MNNInt8ScaleToFloat.S in Sources */, 48A8A61321D101A700C2B9A7 /* ImageSampler.cpp in Sources */, 9258013E2223B77C00555D43 /* MetalConvolutionDepthwise.mm in Sources */, + 92FF02D123AA0B5A00AC97F6 /* MNNMaxFloat.S in Sources */, + 92FF026923AA0B5A00AC97F6 /* CPUSelu.cpp in Sources */, + 92FF03B123AA0B5A00AC97F6 /* ConvolutionFloatFactory.cpp in Sources */, + 92FF027E23AA0B5A00AC97F6 /* CPUTranspose.cpp in Sources */, 488875C8215B639F0079B12E /* MetalScale.metal in Sources */, + 92FF032123AA0B5A00AC97F6 /* MNNPowC8.S in Sources */, 92A8D70021A40695009C2201 /* MetalTFQuantizedConv2D.mm in Sources */, + 92FF033023AA0B5A00AC97F6 /* MNNCubicSampleC4.S in Sources */, + 92FF03C323AA0B5A00AC97F6 /* CPUEltwise.cpp in Sources */, + 92FF02F223AA0B5A00AC97F6 /* MNNBlitC3ToFloatRGBA.S in Sources */, 92C674F922549A1600011D33 /* MetalReLU6.mm in Sources */, - EB69637622E070E10065993C /* CPUCosineSimilarity.cpp in Sources */, 488875D3215B639F0079B12E /* MetalSpatialProduct.metal in Sources */, - 48887630215B639F0079B12E /* CPUTopKV2.cpp in Sources */, - 4847D42022C07E850049F3CA /* CPUPadding.cpp in Sources */, - 48BF218621A4257500AFF78E /* MNNSamplerC1BilinearOpt.S in Sources */, CE96FE8121707D58004AB400 /* MetalMatMul.metal in Sources */, - 48887689215B639F0079B12E /* MNNCubicLineC4.S in Sources */, - 485DD4352182AE8100129159 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */, - 485DD414217F495500129159 /* CPUQuantizedAdd.cpp in Sources */, - 48887610215B639F0079B12E /* CPUDeconvolution.cpp in Sources */, - 9223E12121D34C6B0067544A /* 
CPUBatchToSpaceND.cpp in Sources */, - 485DD42B21819FB000129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */, - 486FDF42223E495B00F487FB /* CPUUnary.cpp in Sources */, + 92FF030323AA0B5A00AC97F6 /* MNNLoadU8AndSum.S in Sources */, 92EEFF27217F0EEF00F89377 /* MetalCrop.mm in Sources */, - 48887600215B639F0079B12E /* CPUMatMul.cpp in Sources */, - 48887669215B639F0079B12E /* CPUBackend.cpp in Sources */, - 488876A5215B639F0079B12E /* MNNWinogradMatrixProductRight.S in Sources */, - 48887653215B639F0079B12E /* ResizeFunction.cpp in Sources */, - 488875FF215B639F0079B12E /* CPUSize.cpp in Sources */, - EB4925C3224A147E00C512BB /* CPUMoments.cpp in Sources */, + 92FF02D223AA0B5A00AC97F6 /* MNNNV21ToRGBAUnit.S in Sources */, 92256950219D6E0200F251E2 /* MetalRange.mm in Sources */, - 4847D41D22C0739A0049F3CA /* ShapePadding.cpp in Sources */, - 487970ED22C9BF7200795502 /* MNNInt8ScaleToFloat.S in Sources */, + 92FF025723AA0B5A00AC97F6 /* CPUQuanConvolutionDepthwise.cpp in Sources */, + 92FF031423AA0B5A00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S in Sources */, + 48FA474823AA127B00172C3B /* Expr.cpp in Sources */, + 92FF039223AA0B5A00AC97F6 /* CPUDeconvolution.cpp in Sources */, + 92FF042923AA0B7100AC97F6 /* ShapeLinSpace.cpp in Sources */, + 92FF03A723AA0B5A00AC97F6 /* ConvolutionIntFactory.cpp in Sources */, + 92FF027523AA0B5A00AC97F6 /* CPUConvolution.cpp in Sources */, 924F132521ABD47F006D46A4 /* MetalQuantizedSoftmax.metal in Sources */, - EBB38F1521E748B9005F76D7 /* ShapeWhere.cpp in Sources */, - 488876D9215B639F0079B12E /* CPUTanh.cpp in Sources */, - 488876B9215B639F0079B12E /* MNNDeconvRunForUnitDepthWise.S in Sources */, - 48C0549C2205BC8C00E91945 /* MNNConvSlideWindowBorder.S in Sources */, + 92FF043B23AA0B7100AC97F6 /* ShapeDetectionPostProcess.cpp in Sources */, + 92FF037523AA0B5A00AC97F6 /* CPUUnpack.cpp in Sources */, + 92FF03A023AA0B5A00AC97F6 /* ConvolutionWinograd.cpp in Sources */, + 92FF04A823AA0BFB00AC97F6 /* AutoTime.cpp in Sources */, + 
92FF04AE23AA0BFB00AC97F6 /* Backend.cpp in Sources */, + 92FF041E23AA0B7100AC97F6 /* ShapeRange.cpp in Sources */, + 92FF03B423AA0B5A00AC97F6 /* Convolution1x1Strassen.cpp in Sources */, + 92FF031623AA0B5A00AC97F6 /* MNNMatrixMax.S in Sources */, + 92FF043A23AA0B7100AC97F6 /* ShapePermute.cpp in Sources */, + 92FF030823AA0B5A00AC97F6 /* MNNCopyC4WithStride.S in Sources */, 92256938219D150900F251E2 /* MetalSliceTF.metal in Sources */, 488875DB215B639F0079B12E /* MetalPReLU.metal in Sources */, - 488875EE215B639F0079B12E /* CPUTile.cpp in Sources */, - 488876A2215B639F0079B12E /* MNNGemmFloatCommon_4.S in Sources */, - 48DA297D21F1F7CF00E3BEB2 /* MNNExpC8.S in Sources */, - 48887668215B639F0079B12E /* CPUInnerProduct.cpp in Sources */, - EBB38F3421E748B9005F76D7 /* ShapeSlice.cpp in Sources */, - 48B904A9229550CF003116BB /* ShapeSelect.cpp in Sources */, - 48070739231E512D00528CE5 /* Solution.cpp in Sources */, - 48057D8D2330E85C00F922BE /* CPUMatrixBandPart.cpp in Sources */, + 92FF030023AA0B5A00AC97F6 /* MNNSamplerC4NearestOpt.S in Sources */, + 92FF044023AA0B7100AC97F6 /* ShapeSlice.cpp in Sources */, + 92FF044723AA0B7100AC97F6 /* ShapeSqueeze.cpp in Sources */, + 92FF033923AA0B5A00AC97F6 /* MNNGemmint8to32_8x4_Unit.S in Sources */, 92EEFF33218015A300F89377 /* MetalReduction.metal in Sources */, + 92FF039C23AA0B5A00AC97F6 /* Convolution3D3x3.cpp in Sources */, + 92FF028523AA0B5A00AC97F6 /* CPUBroadcastTo.cpp in Sources */, 923B7F9221A68091002AFCE0 /* MetalGatherV2.mm in Sources */, 92C674FC22549A2500011D33 /* MetalReLU6.metal in Sources */, + 92FF043423AA0B7100AC97F6 /* ShapeStridedSlice.cpp in Sources */, + 92FF02EB23AA0B5A00AC97F6 /* MNNGemmFloatOne_4.S in Sources */, 488875BB215B639F0079B12E /* MetalSoftmax.metal in Sources */, + 48FA474A23AA127B00172C3B /* Utils.cpp in Sources */, + 92FF043F23AA0B7100AC97F6 /* ShapeTensorConvert.cpp in Sources */, + 92FF044B23AA0B7100AC97F6 /* ShapeTile.cpp in Sources */, + 92FF030723AA0B5A00AC97F6 /* MNNBlitC1ToFloatRGBA.S in 
Sources */, + 92FF03A423AA0B5A00AC97F6 /* OptimizedComputer.cpp in Sources */, + 92FF034F23AA0B5A00AC97F6 /* CPUSliceTf.cpp in Sources */, + 92FF033E23AA0B5A00AC97F6 /* CPUDilation2D.cpp in Sources */, + 92FF032E23AA0B5A00AC97F6 /* MNNReluWithSlopeChannel.S in Sources */, + 92FF034E23AA0B5A00AC97F6 /* CPUDepthToSpace.cpp in Sources */, + 92FF044823AA0B7100AC97F6 /* ShapeGather.cpp in Sources */, 9257597C219EA08400918499 /* MetalStridedSlice.metal in Sources */, - 48C5E79B22FBF87700EAC2A6 /* ShapeSpaceToDepth.cpp in Sources */, - 4841B61121EC607E002E5D66 /* CPUDequantize.cpp in Sources */, - 487970F022C9BF7200795502 /* MNNReluInt8.S in Sources */, - 488875E4215B639F0079B12E /* CPUArgMax.cpp in Sources */, - 4807073D231E512D00528CE5 /* Expr.cpp in Sources */, - 71E8789F2203E88500268E24 /* MNNNV21ToBGRUnit.S in Sources */, 48C054882201996200E91945 /* MetalConvolutionWinograd.mm in Sources */, - 485DD4312181E94300129159 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */, - 48C0549F22081AC200E91945 /* MNNAddBias.S in Sources */, 488875DA215B639F0079B12E /* MetalResize.metal in Sources */, - EB69637822E072600065993C /* ShapeCosineSimilarity.cpp in Sources */, - 48B904A622953E0F003116BB /* CPUZeroLike.cpp in Sources */, - 48887639215B639F0079B12E /* CPURelu.cpp in Sources */, - 48C054A522081CDA00E91945 /* MNNReluWithSlopeChannel.S in Sources */, 925A8915222395ED00D22428 /* MetalConvolution1x1.mm in Sources */, - 48AE9E9F2211950B009DB6F4 /* StrassenMatmulComputor.cpp in Sources */, - 483CD489216CDDA100B05BE9 /* MNNAddC4WithStride.S in Sources */, - 487970D822C9BF4B00795502 /* CPUInt8ToFloat.cpp in Sources */, + 92FF032323AA0B5A00AC97F6 /* MNNExpC8.S in Sources */, 488875D7215B639F0079B12E /* MetalBackend.mm in Sources */, 92A8D70821A54087009C2201 /* MetalDefine.metal in Sources */, - 48C054AD220A74D800E91945 /* MNNReluWithSlopeChannel.S in Sources */, - EBB38F0121E748B9005F76D7 /* ShapePriorbox.cpp in Sources */, - 48057D852330A8F900F922BE /* ShapeGatherND.cpp in Sources 
*/, - 48EB45EE2255D271006C2322 /* MNNConvDwF23MulTransUnit.S in Sources */, - 48C054A9220A749100E91945 /* MNNAddBias.S in Sources */, + 92FF044C23AA0B7100AC97F6 /* ShapePool3D.cpp in Sources */, + 92FF028B23AA0B5A00AC97F6 /* CPUBatchToSpaceND.cpp in Sources */, + 92FF029823AA0B5A00AC97F6 /* CPUTFQuantizedConv2D.cpp in Sources */, + 92FF031323AA0B5A00AC97F6 /* MNNNV21ToRGBAUnit.S in Sources */, + 92FF02F123AA0B5A00AC97F6 /* MNNCoefLine.S in Sources */, + 92FF038523AA0B5A00AC97F6 /* CPUMoments.cpp in Sources */, + 92FF031E23AA0B5A00AC97F6 /* MNNSamplerC4BilinearOpt.S in Sources */, + 92FF03C223AA0B5A00AC97F6 /* CPUTile.cpp in Sources */, + 92FF02A423AA0B5A00AC97F6 /* CPUBinary.cpp in Sources */, + 92FF034223AA0B5A00AC97F6 /* CPUReduction.cpp in Sources */, + 92FF02CF23AA0B5A00AC97F6 /* MNNMinFloat.S in Sources */, + 92FF030E23AA0B5A00AC97F6 /* MNNNV21ToRGBUnit.S in Sources */, + 48FA474D23AA136300172C3B /* MergeOptimizer.cpp in Sources */, + 92FF03CE23AA0B5A00AC97F6 /* CPUOPRegister.cpp in Sources */, + 92FF02C323AA0B5A00AC97F6 /* MNNCubicLineC4.S in Sources */, 92351C8A21992AC6002CA341 /* MetalQuantizedAdd.metal in Sources */, + 92FF02B323AA0B5A00AC97F6 /* CPUInstanceNorm.cpp in Sources */, 92965EDE2175B3C300B86ABE /* MetalConcat.metal in Sources */, 9223E10F21D327F40067544A /* MetalSqueeze.mm in Sources */, 488875AB215B639F0079B12E /* MetalLSTM.metal in Sources */, + 92FF042223AA0B7100AC97F6 /* ShapeConcat.cpp in Sources */, 92256935219D14CD00F251E2 /* MetalSliceTF.mm in Sources */, + 92FF025E23AA0B5A00AC97F6 /* CPUROIPooling.cpp in Sources */, + 92FF044A23AA0B7100AC97F6 /* ShapeConvolution.cpp in Sources */, + 92FF02FA23AA0B5A00AC97F6 /* MNNGemmFloatUnit_4.S in Sources */, 92256953219D6E1000F251E2 /* MetalRange.metal in Sources */, - 4888762F215B639F0079B12E /* CPUROIPooling.cpp in Sources */, - AE7BE47C22816FC9002CEEA6 /* ShapeMoments.cpp in Sources */, - 488876A8215B639F0079B12E /* MNNGemmFloatUnit_4.S in Sources */, - 48887618215B639F0079B12E /* CPUPool.cpp in 
Sources */, - EBB38F0F21E748B9005F76D7 /* ShapeArgMax.cpp in Sources */, - 488876B2215B639F0079B12E /* MNNCubicLineC4.S in Sources */, + 92FF02E923AA0B5A00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */, + 92FF026A23AA0B5A00AC97F6 /* CPUNonMaxSuppressionV2.cpp in Sources */, + 92FF03D223AA0B5A00AC97F6 /* CPUReshape.cpp in Sources */, + 92FF045123AA0B7100AC97F6 /* ShapeArgMax.cpp in Sources */, + 92FF033823AA0B5A00AC97F6 /* MNNConvRunForLineDepthwise.S in Sources */, CE96FE7521707D58004AB400 /* MetalMatMul.mm in Sources */, 9223E12C21D3756B0067544A /* MetalBatchToSpaceND.metal in Sources */, - EBB38F0A21E748B9005F76D7 /* ShapeQuantizedReshape.cpp in Sources */, + 92FF044F23AA0B7100AC97F6 /* ShapeDepthToSpace.cpp in Sources */, + 92FF043323AA0B7100AC97F6 /* ShapeCrop.cpp in Sources */, 92EEFF302180159600F89377 /* MetalReduction.mm in Sources */, - 48C054962205B9A500E91945 /* MNNPackC4.S in Sources */, - 48887681215B639F0079B12E /* MNNGemmInt16to32_4x4_Common.S in Sources */, + 92FF02C423AA0B5A00AC97F6 /* MNNAddBiasRelu6.S in Sources */, 92A8D70321A406A8009C2201 /* MetalTFQuantizedConv2D.metal in Sources */, - 488876A9215B639F0079B12E /* MNNConvRunForLineDepthWiseInt8.S in Sources */, - 48887646215B639F0079B12E /* ConvolutionIntFactory.cpp in Sources */, - 4888758D215B639F0079B12E /* TensorUtils.cpp in Sources */, + 92FF02B523AA0B5A00AC97F6 /* CPUTopKV2.cpp in Sources */, + 92FF038323AA0B5A00AC97F6 /* CPUSoftmax.cpp in Sources */, + 92FF038123AA0B5A00AC97F6 /* CPUNormalize.cpp in Sources */, + 92FF032923AA0B5A00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */, + 92FF02BD23AA0B5A00AC97F6 /* MNNMatrixProd.S in Sources */, + 92FF032B23AA0B5A00AC97F6 /* MNNGemmFloatOne_4.S in Sources */, 925801412223B79600555D43 /* MetalConvolutionDepthwise.metal in Sources */, - EBB38F0021E748B9005F76D7 /* ShapeShape.cpp in Sources */, - 488876C4215B639F0079B12E /* MNNConvRunForLineDepthwise.S in Sources */, + 92FF02A223AA0B5A00AC97F6 /* CPUSize.cpp in Sources */, + 
92FF02EE23AA0B5A00AC97F6 /* MNNReluWithSlopeChannel.S in Sources */, + 92FF036A23AA0B5A00AC97F6 /* CPURNNSequenceGRU.cpp in Sources */, + 92FF04B223AA0BFB00AC97F6 /* BackendFactory.cpp in Sources */, + 92FF02FC23AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */, + 92FF02E823AA0B5A00AC97F6 /* MNNSamplerC1BilinearOpt.S in Sources */, 488875A3215B639F0079B12E /* MetalReLU.metal in Sources */, + 92FF03A223AA0B5A00AC97F6 /* ConvolutionWinograd3D.cpp in Sources */, + 92FF033B23AA0B5A00AC97F6 /* MNNConvSlideWindowBorder.S in Sources */, 488875CA215B639F0079B12E /* MetalScale.mm in Sources */, 486FDF48223E4B2800F487FB /* MetalBinary.metal in Sources */, - 48887699215B639F0079B12E /* MNNConvRunForLineDepthwise.S in Sources */, + 92FF031C23AA0B5A00AC97F6 /* MNNReluInt8.S in Sources */, 92EEFF2A217F0F0F00F89377 /* MetalCrop.metal in Sources */, - 487970DB22C9BF4B00795502 /* CPUFloatToInt8.cpp in Sources */, - 48887638215B639F0079B12E /* CPUExpandDims.cpp in Sources */, - EBB38F1D21E748B9005F76D7 /* ShapeTile.cpp in Sources */, - EB4925C2224A147E00C512BB /* CPUBatchMatMul.cpp in Sources */, - 48887665215B639F0079B12E /* CPURank.cpp in Sources */, - 488876D2215B639F0079B12E /* CPUConvolution.cpp in Sources */, - 4843AA5822A7E9AB00889A63 /* CPUPoolGrad.cpp in Sources */, - 485DD4342182AE8100129159 /* MNNConvRunForLineDepthWiseUint8.S in Sources */, - 4888762E215B639F0079B12E /* CPUStridedSlice.cpp in Sources */, 488875B3215B639F0079B12E /* MetalConvolution.mm in Sources */, - 4807073B231E512D00528CE5 /* MergeOptimizer.cpp in Sources */, - EB288365230EB05D00837188 /* MNNScaleAddInt8.S in Sources */, + 92FF043D23AA0B7100AC97F6 /* ShapeGatherV2.cpp in Sources */, 488875CC215B639F0079B12E /* MetalReshape.mm in Sources */, - 48AE9EB42215628E009DB6F4 /* MNNGemmFloatOne_4.S in Sources */, - EBB38F3321E748B9005F76D7 /* ShapeLSTM.cpp in Sources */, - 488876C5215B639F0079B12E /* MNNConvSlideWindowMiddle.S in Sources */, - 48887633215B639F0079B12E /* CPUSigmoid.cpp in 
Sources */, - 92D765B52228188700178BE5 /* Schedule.cpp in Sources */, - 483CD486216B2F0400B05BE9 /* WinogradOptFunction.cpp in Sources */, + 92FF02C123AA0B5A00AC97F6 /* MNNQuanToDestUint8.S in Sources */, + 92FF039323AA0B5A00AC97F6 /* CPUQuantizedAdd.cpp in Sources */, + 92FF02F723AA0B5A00AC97F6 /* MNNConvDwF23MulTransUnit.S in Sources */, 488875CD215B639F0079B12E /* MetalTanH.metal in Sources */, - 4888764A215B639F0079B12E /* ConvolutionInt8Executor.cpp in Sources */, - 4888767A215B639F0079B12E /* CPUTensorConvert.cpp in Sources */, - EBB38F0D21E748B9005F76D7 /* ShapeReduction.cpp in Sources */, - 488876DE215B639F0079B12E /* CPUPriorbox.cpp in Sources */, + 92FF036223AA0B5A00AC97F6 /* CPUFill.cpp in Sources */, CE96FE7B21707D58004AB400 /* MetalUnary.metal in Sources */, - EBB38F2321E748B9005F76D7 /* ShapePermute.cpp in Sources */, - 92D765B12228188700178BE5 /* BackendFactory.cpp in Sources */, - 4807071A231A7B5100528CE5 /* CPUReverseSequence.cpp in Sources */, - 485DD4272181898C00129159 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */, - C422D73E2326449500FD59D0 /* ShapePool3D.cpp in Sources */, + 92FF043823AA0B7100AC97F6 /* ShapeUnravelIndex.cpp in Sources */, + 92FF035723AA0B5A00AC97F6 /* CPUOneHot.cpp in Sources */, + 92FF02A523AA0B5A00AC97F6 /* CPUZeroLike.cpp in Sources */, + 92FF03C423AA0B5A00AC97F6 /* CPUInterp.cpp in Sources */, + 92FF02E023AA0B5A00AC97F6 /* MNNMatrixSub.S in Sources */, + 92FF044623AA0B7100AC97F6 /* ShapeInnerProduct.cpp in Sources */, + 92FF037A23AA0B5A00AC97F6 /* CPUSigmoid.cpp in Sources */, + 92FF036F23AA0B5A00AC97F6 /* CPURuntime.cpp in Sources */, + 92FF039D23AA0B5A00AC97F6 /* StrassenMatmulComputor.cpp in Sources */, + 92FF030B23AA0B5A00AC97F6 /* MNNUnPackC4.S in Sources */, 9223E11B21D34C260067544A /* MetalSpaceToBatchND.metal in Sources */, - 4841B61021EC607E002E5D66 /* CPUQuantizedLogistic.cpp in Sources */, - 4841B5F821EAE98B002E5D66 /* SizeComputer.cpp in Sources */, - 486B4BC3222D4845001E73E3 /* MNNMatrixMax.S in Sources 
*/, - 485DD4292181938C00129159 /* MNNQuanToDestUint8.S in Sources */, - C422D7C22339D0EE00FD59D0 /* CPUElu.cpp in Sources */, - 48A8A62721D47B5A00C2B9A7 /* OptimizedComputer.cpp in Sources */, - 48EB45E922559525006C2322 /* MNNConvDwF23MulTransUnit.S in Sources */, - 48887608215B639F0079B12E /* CPUWhere.cpp in Sources */, - 48AE9EAB2212E94F009DB6F4 /* MNNMatrixAdd.S in Sources */, - EBB38F1A21E748B9005F76D7 /* ShapeConcat.cpp in Sources */, - 48BF21C221ABC45100AFF78E /* MNNLoadU8AndSum.S in Sources */, - 48AE9EA82212D403009DB6F4 /* MNNMatrixAdd.S in Sources */, - 48887629215B639F0079B12E /* CPUTranspose.cpp in Sources */, - 488876D1215B639F0079B12E /* CPUQuantizedAvgPool.cpp in Sources */, - C425F8762292A32B00B4682D /* MNNPowC8.S in Sources */, - 48887696215B639F0079B12E /* MNNWinogradMatrixProductLeft.S in Sources */, - EBB38F0521E748B9005F76D7 /* ShapeCrop.cpp in Sources */, + 92FF031F23AA0B5A00AC97F6 /* MNNBilinearProcC1.S in Sources */, + 92FF03BF23AA0B5A00AC97F6 /* ConvolutionTiledExecutor.cpp in Sources */, + 92FF037723AA0B5A00AC97F6 /* CPUConvolutionDepthwise.cpp in Sources */, + 92FF025F23AA0B5A00AC97F6 /* CPUPadding.cpp in Sources */, + 92FF02D023AA0B5A00AC97F6 /* MNNGemmInt16to32_4x4_Common.S in Sources */, + 92FF02DE23AA0B5A00AC97F6 /* MNNSamplerC4BilinearOpt.S in Sources */, + 92FF032423AA0B5A00AC97F6 /* MNNAddBiasRelu.S in Sources */, + 92FF02F923AA0B5A00AC97F6 /* MNNGemmint8to32_8x4_Unit.S in Sources */, + 92FF02E623AA0B5A00AC97F6 /* MNNWinogradMatrixProductLeft.S in Sources */, + 92FF043723AA0B7100AC97F6 /* ShapeDetectionOutput.cpp in Sources */, + 92FF039E23AA0B5A00AC97F6 /* Convolution3x3.cpp in Sources */, + 92FF042623AA0B7100AC97F6 /* ShapeCosineSimilarity.cpp in Sources */, + 92FF02DC23AA0B5A00AC97F6 /* MNNReluInt8.S in Sources */, + 92FF041A23AA0B7100AC97F6 /* ShapeFill.cpp in Sources */, + 92FF035323AA0B5A00AC97F6 /* CPUScatterNd.cpp in Sources */, + 92FF041923AA0B7100AC97F6 /* ShapeQuantizedMaxPool.cpp in Sources */, + 92FF038A23AA0B5A00AC97F6 
/* CPURange.cpp in Sources */, + 92FF03A123AA0B5A00AC97F6 /* Int8FunctionsOpt.cpp in Sources */, + 92FF026523AA0B5A00AC97F6 /* CPUQuantizedAvgPool.cpp in Sources */, + 92FF029423AA0B5A00AC97F6 /* CPUMatMul.cpp in Sources */, + 92FF038023AA0B5A00AC97F6 /* CPUPoolGrad.cpp in Sources */, + 92FF028123AA0B5A00AC97F6 /* CPUCosineSimilarity.cpp in Sources */, + 92FF03A323AA0B5A00AC97F6 /* ConvOpt.cpp in Sources */, 488875C5215B639F0079B12E /* MetalReLU.mm in Sources */, - 488875F7215B639F0079B12E /* CPUProposal.cpp in Sources */, 488875D1215B639F0079B12E /* MetalInterp.mm in Sources */, 488875DE215B639F0079B12E /* MetalPReLU.mm in Sources */, + 92FF02CD23AA0B5A00AC97F6 /* MNNNV21ToRGBUnit.S in Sources */, 488875B0215B639F0079B12E /* MetalEltwise.mm in Sources */, - 48EB45EF2255D271006C2322 /* MNNConvDwF23SourceTransUnit.S in Sources */, - EBB38F1921E748B9005F76D7 /* ShapeGatherV2.cpp in Sources */, + 92FF029A23AA0B5A00AC97F6 /* CPUQuantizedMaxPool.cpp in Sources */, + 92FF02D923AA0B5A00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */, 924F132721ABEA28006D46A4 /* MetalFixedPoint.metal in Sources */, - EBB38F0E21E748B9005F76D7 /* ShapeQuantizedAvgPool.cpp in Sources */, - 48887636215B639F0079B12E /* CPUReshape.cpp in Sources */, - 488875FB215B639F0079B12E /* CPUConst.cpp in Sources */, - 48AE9EAC2212E94F009DB6F4 /* MNNMatrixSub.S in Sources */, - 488876A7215B639F0079B12E /* MNNFloat2Int8.S in Sources */, 488875B2215B639F0079B12E /* MetalBackend.metal in Sources */, - 48C054AB220A74B200E91945 /* MNNReluWithSlope.S in Sources */, + 92FF042423AA0B7100AC97F6 /* ShapeROIPooling.cpp in Sources */, + 92FF033123AA0B5A00AC97F6 /* MNNCoefLine.S in Sources */, + 92FF033723AA0B5A00AC97F6 /* MNNConvDwF23MulTransUnit.S in Sources */, 488875D0215B639F0079B12E /* MetalPooling.metal in Sources */, - 48A8A62921D5FE1E00C2B9A7 /* MNNNV21ToRGBAUnit.S in Sources */, - EBB38F0C21E748B9005F76D7 /* ShapeInnerProduct.cpp in Sources */, - 486B4BB9222901D6001E73E3 /* MNNMatrixProd.S in Sources 
*/, - 488875F4215B639F0079B12E /* CPUCast.cpp in Sources */, - 48887682215B639F0079B12E /* MNNConvRunForUnitDepthWiseInt8.S in Sources */, - 488876E0215B639F0079B12E /* CPUQuantizedReshape.cpp in Sources */, - 48887684215B639F0079B12E /* MNNBilinearProcC1.S in Sources */, - 92D765B32228188700178BE5 /* Session.cpp in Sources */, + 92FF042C23AA0B7100AC97F6 /* ShapeReduceJoin.cpp in Sources */, + 92FF045E23AA0B7100AC97F6 /* ShapeTFQuantizedConv2D.cpp in Sources */, + 92FF043023AA0B7100AC97F6 /* ShapeQuantizedAvgPool.cpp in Sources */, + 92FF030623AA0B5A00AC97F6 /* MNNStrassenMergeCFunction.S in Sources */, + 92FF033223AA0B5A00AC97F6 /* MNNBlitC3ToFloatRGBA.S in Sources */, + 92FF02B923AA0B5A00AC97F6 /* CPUSoftmaxGrad.cpp in Sources */, + 92FF03BE23AA0B5A00AC97F6 /* DeconvolutionWithStride.cpp in Sources */, + 92FF044923AA0B7100AC97F6 /* ShapeGatherND.cpp in Sources */, + 92FF02E123AA0B5A00AC97F6 /* MNNPowC8.S in Sources */, 92682C4D2181729200B52B9D /* MetalTile.mm in Sources */, - 4841B5FC21EAE998002E5D66 /* Execution.cpp in Sources */, - 4888769F215B639F0079B12E /* MNNScaleAndAddBias.S in Sources */, - 48BF21BE21ABBDA300AFF78E /* MNNLoadU8AndSum.S in Sources */, + 92FF02B123AA0B5A00AC97F6 /* CPUBackend.cpp in Sources */, + 92FF02C823AA0B5A00AC97F6 /* MNNNV21ToBGRUnit.S in Sources */, + 92FF03CB23AA0B5A00AC97F6 /* CPUGatherV2.cpp in Sources */, + 92FF045C23AA0B7100AC97F6 /* ShapeBroadcastTo.cpp in Sources */, + 92FF02AE23AA0B5A00AC97F6 /* CPUProposal.cpp in Sources */, 920004D921EDC30E00BCE892 /* MetalDequantize.metal in Sources */, - 4807073E231E512D00528CE5 /* MathOp.cpp in Sources */, 924F132221ABD470006D46A4 /* MetalQuantizedSoftmax.mm in Sources */, - 48AE9EA62212D3F9009DB6F4 /* MNNMatrixSub.S in Sources */, + 92FF042723AA0B7100AC97F6 /* ShapeMatMul.cpp in Sources */, + 92FF042823AA0B7100AC97F6 /* ShapeInterp.cpp in Sources */, 9260B27B21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal in Sources */, - 488876A3215B639F0079B12E /* MNNCoefLine.S in Sources */, + 
92FF02D623AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S in Sources */, CE96FE7821707D58004AB400 /* MetalUnary.mm in Sources */, - 48DA297F21F2051800E3BEB2 /* MNNExpC8.S in Sources */, + 92FF04BA23AA0BFB00AC97F6 /* WrapExecution.cpp in Sources */, 488875E0215B639F0079B12E /* MetalResize.mm in Sources */, 488875AE215B639F0079B12E /* MetalConvolution.metal in Sources */, - EBB38F3121E748B9005F76D7 /* ShapeConvolution.cpp in Sources */, - 48C054942205B94400E91945 /* MNNUnPackC4.S in Sources */, - EBB38F2221E748B9005F76D7 /* ShapeTensorConvert.cpp in Sources */, - EB288367230EB06600837188 /* MNNScaleAddInt8.S in Sources */, - EBB38F1F21E748B9005F76D7 /* ShapeConst.cpp in Sources */, + 92FF02DB23AA0B5A00AC97F6 /* MNNScaleAndAddBias.S in Sources */, + 92FF034D23AA0B5A00AC97F6 /* CPUCast.cpp in Sources */, + 92FF030C23AA0B5A00AC97F6 /* MNNSamplerC1NearestOpt.S in Sources */, 923B7F9B21A69E2E002AFCE0 /* MetalQuantizedReshape.mm in Sources */, - 48A8A62B21D5FE3100C2B9A7 /* MNNNV21ToRGBAUnit.S in Sources */, - 48887690215B639F0079B12E /* MNNDeconvRunForUnitDepthWise.S in Sources */, - 485DD425218161E100129159 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */, - 488876AE215B639F0079B12E /* MNNMaxFloat.S in Sources */, + 92FF033A23AA0B5A00AC97F6 /* MNNGemmFloatUnit_4.S in Sources */, + 92FF042E23AA0B7100AC97F6 /* ShapeProposal.cpp in Sources */, + 92FF025923AA0B5A00AC97F6 /* CPUPoolInt8.cpp in Sources */, + 92FF036023AA0B5A00AC97F6 /* CPUInnerProduct.cpp in Sources */, + 92FF041F23AA0B7100AC97F6 /* ShapeRank.cpp in Sources */, + 92FF045B23AA0B7100AC97F6 /* ShapeShape.cpp in Sources */, + 92FF042A23AA0B7100AC97F6 /* ShapeBatchMatMul.cpp in Sources */, 488875C4215B639F0079B12E /* MetalDeconvolution.mm in Sources */, 488875E1215B639F0079B12E /* MetalEltwise.metal in Sources */, - 71E878A32203E9D200268E24 /* MNNNV21ToBGRUnit.S in Sources */, - 48A8A62321D37FB500C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */, + 92FF03A823AA0B5A00AC97F6 /* WinogradOptFunction.cpp in 
Sources */, + 92FF045323AA0B7100AC97F6 /* ShapeAsString.cpp in Sources */, + 92FF044123AA0B7100AC97F6 /* ShapeMoments.cpp in Sources */, 92351C8721992AB2002CA341 /* MetalQuantizedAdd.mm in Sources */, - 48C5E79E2306C84400EAC2A6 /* MNNGemmint8to32_8x4_Unit.S in Sources */, - 4888763B215B639F0079B12E /* CommonOptFunction.cpp in Sources */, - 4888766C215B639F0079B12E /* CPUSliceTf.cpp in Sources */, - 4843AA5922A7E9AB00889A63 /* CPUConv2DBackPropFilter.cpp in Sources */, - 488876C8215B639F0079B12E /* MNNAddBiasRelu6.S in Sources */, + 92FF03AB23AA0B5A00AC97F6 /* ConvolutionInt8Executor.cpp in Sources */, + 92FF036D23AA0B5A00AC97F6 /* CPULSTM.cpp in Sources */, 488875D4215B639F0079B12E /* MetalROIPooling.mm in Sources */, - 488875FA215B639F0079B12E /* CPUInterp.cpp in Sources */, + 48FA474523AA127B00172C3B /* Executor.cpp in Sources */, + 92FF037223AA0B5A00AC97F6 /* CPUExpandDims.cpp in Sources */, + 92FF02EA23AA0B5A00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */, 48A8A61A21D101DE00C2B9A7 /* Matrix_CV.cpp in Sources */, - EBB38F0721E748B9005F76D7 /* ShapeTFQuantizedConv2D.cpp in Sources */, - 48887676215B639F0079B12E /* CPURange.cpp in Sources */, + 92FF031823AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */, + 92FF039623AA0B5A00AC97F6 /* CPUDepthwiseConvInt8.cpp in Sources */, + 92FF04AA23AA0BFB00AC97F6 /* BufferAllocator.cpp in Sources */, + 92FF031123AA0B5A00AC97F6 /* MNNGemmInt16to32_4x4_Common.S in Sources */, + 92FF030523AA0B5A00AC97F6 /* MNNAddBiasRelu6.S in Sources */, + 92FF030F23AA0B5A00AC97F6 /* MNNPackC4.S in Sources */, 92EAC19921CB3CD60056F4C2 /* MetalCast.mm in Sources */, - EBB38F0621E748B9005F76D7 /* ShapeMatMul.cpp in Sources */, - 483CD48F216CE3BB00B05BE9 /* MNNCopyC4WithStride.S in Sources */, - 486FDF41223E495B00F487FB /* CPUBinary.cpp in Sources */, - 488876AC215B639F0079B12E /* MNNMinFloat.S in Sources */, - EB4925BF224A147E00C512BB /* CPURNNSequenceGRU.cpp in Sources */, - 488876CA215B639F0079B12E /* 
MNNScaleAndAddBias.S in Sources */, - 9223E12221D34C6B0067544A /* CPUSpaceToBatchND.cpp in Sources */, + 92FF031D23AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S in Sources */, 92575979219EA07F00918499 /* MetalStridedSlice.mm in Sources */, - EBB38F0421E748B9005F76D7 /* ShapeInterp.cpp in Sources */, - 486B4BBB222901E5001E73E3 /* MNNMatrixProd.S in Sources */, - 488876D3215B639F0079B12E /* CPUEltwise.cpp in Sources */, - EBB38F1121E748B9005F76D7 /* ShapeSliceTf.cpp in Sources */, - 488875F3215B639F0079B12E /* CPUResize.cpp in Sources */, - 4888766B215B639F0079B12E /* CPUNonMaxSuppressionV2.cpp in Sources */, - 48B904A222953DFF003116BB /* CPUSelect.cpp in Sources */, - 487970EE22C9BF7200795502 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */, - EB4925BE224A147E00C512BB /* CPUInstanceNorm.cpp in Sources */, - 48887685215B639F0079B12E /* MNNMaxFloat.S in Sources */, - 4888766D215B639F0079B12E /* CPUPack.cpp in Sources */, - EBB38F1721E748B9005F76D7 /* ShapeBinaryOp.cpp in Sources */, - 488876C0215B639F0079B12E /* MNNWinogradMatrixProductLeft.S in Sources */, - EBB38F2021E748B9005F76D7 /* ShapeDetectionOutput.cpp in Sources */, - 48C054A7220A745900E91945 /* MNNAddBiasRelu.S in Sources */, - 485DD4372182B07B00129159 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */, + 92FF030123AA0B5A00AC97F6 /* MNNAddC4WithStride.S in Sources */, + 92FF02E223AA0B5A00AC97F6 /* MNNMatrixAdd.S in Sources */, 92921A89219C272B00B063D1 /* MetalPack.metal in Sources */, + 92FF038223AA0B5A00AC97F6 /* CPUSetDiff1D.cpp in Sources */, + 92FF030423AA0B5A00AC97F6 /* MNNCubicLineC4.S in Sources */, + 92FF029523AA0B5A00AC97F6 /* CPUBatchMatMul.cpp in Sources */, + 92FF031B23AA0B5A00AC97F6 /* MNNScaleAndAddBias.S in Sources */, 925A89122223951200D22428 /* MetalConvolutionActivation.metal in Sources */, - EBB38F1821E748B9005F76D7 /* ShapeNonMaxSuppressionV2.cpp in Sources */, - 48BF21F421CA43AE00AFF78E /* MNNSamplerC4NearestOpt.S in Sources */, - EBD9FF15236A93AB00E188F5 /* 
CPUDetectionPostProcess.cpp in Sources */, - 488876AA215B639F0079B12E /* MNNGemmInt16to32_4x4_Common.S in Sources */, + 92FF02AD23AA0B5A00AC97F6 /* CPUConvInt8.cpp in Sources */, + 92FF042123AA0B7100AC97F6 /* ShapeDeconvolution.cpp in Sources */, 92369E64222544FE009D3A05 /* MetalConvolutionGEMM.metal in Sources */, - EBB38F3021E748B9005F76D7 /* ShapeProposal.cpp in Sources */, - 48AE9EAE22151E20009DB6F4 /* MNNStrassenMergeCFunction.S in Sources */, - 487970EF22C9BF7200795502 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */, + 92FF035C23AA0B5A00AC97F6 /* CPUPermute.cpp in Sources */, + 92FF02F023AA0B5A00AC97F6 /* MNNCubicSampleC4.S in Sources */, + 92FF027F23AA0B5A00AC97F6 /* CPUDeconvolutionDepthwise.cpp in Sources */, + 92FF02D323AA0B5A00AC97F6 /* MNNGemmInt16to32_4x4_Unit.S in Sources */, + 92FF044E23AA0B7100AC97F6 /* ShapeEltwise.cpp in Sources */, + 92FF04A423AA0BFB00AC97F6 /* Interpreter.cpp in Sources */, + 92FF045623AA0B7100AC97F6 /* ShapeReshape.cpp in Sources */, + 92FF032523AA0B5A00AC97F6 /* MNNConvDwF23SourceTransUnit.S in Sources */, + 92FF044423AA0B7100AC97F6 /* ShapeLSTM.cpp in Sources */, + 92FF02FB23AA0B5A00AC97F6 /* MNNConvSlideWindowBorder.S in Sources */, + 92FF037423AA0B5A00AC97F6 /* CPUConvolution3D.cpp in Sources */, + 92FF043E23AA0B7100AC97F6 /* ShapeBatchToSpaceND.cpp in Sources */, + 92FF02D823AA0B5A00AC97F6 /* MNNGemmInt8AddBiasScale_8x4_Unit.S in Sources */, 488875A2215B639F0079B12E /* MetalSoftmax.mm in Sources */, - 48A8A61F21D235DF00C2B9A7 /* MNNNV21ToRGBUnit.S in Sources */, - 4841B61421EC6267002E5D66 /* ShapeDequantize.cpp in Sources */, - 48057D892330A90900F922BE /* CPUGatherND.cpp in Sources */, - 48C5E7A02306C84D00EAC2A6 /* MNNGemmint8to32_8x4_Unit.S in Sources */, - C422D768232F567300FD59D0 /* CPUConvolution3D.cpp in Sources */, + 92FF030D23AA0B5A00AC97F6 /* MNNGemmFloatCommon_4.S in Sources */, + 92FF034323AA0B5A00AC97F6 /* CPUStridedSlice.cpp in Sources */, + 92FF043223AA0B7100AC97F6 /* ShapeWhere.cpp in Sources */, + 
92FF045523AA0B7100AC97F6 /* ShapePriorbox.cpp in Sources */, + 92FF02F523AA0B5A00AC97F6 /* MNNInt8ScaleToFloat.S in Sources */, + 92FF029D23AA0B5A00AC97F6 /* CPULRN.cpp in Sources */, + 92FF02D523AA0B5A00AC97F6 /* MNNMatrixMax.S in Sources */, + 92FF042023AA0B7100AC97F6 /* ShapePack.cpp in Sources */, 923B7FA721A6C940002AFCE0 /* MetalCropAndResize.metal in Sources */, + 48FA474723AA127B00172C3B /* Optimizer.cpp in Sources */, + 92FF041B23AA0B7100AC97F6 /* ShapeUnpack.cpp in Sources */, + 92FF033523AA0B5A00AC97F6 /* MNNInt8ScaleToFloat.S in Sources */, 923B7F8921A653AB002AFCE0 /* MetalGather.mm in Sources */, - 485DD419217F49C500129159 /* CPUQuanConvolutionDepthwise.cpp in Sources */, - 48887680215B639F0079B12E /* MNNConvRunForLineDepthWiseInt8.S in Sources */, - 4888769B215B639F0079B12E /* MNNScaleBias2FloatC4.S in Sources */, 488875DD215B639F0079B12E /* MetalPermute.mm in Sources */, - 48887641215B639F0079B12E /* Int8FunctionsOpt.cpp in Sources */, - 48C054B1220A762C00E91945 /* MNNConvRunForUnitDepthWise.S in Sources */, 9260B27521A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal in Sources */, - 488875F5215B639F0079B12E /* CPUGather.cpp in Sources */, + 92FF02CC23AA0B5A00AC97F6 /* MNNGemmFloatCommon_4.S in Sources */, 92369E62222544DE009D3A05 /* MetalConvolutionGEMM.mm in Sources */, - EB4925B5224A146000C512BB /* ShapeRNNSequenceGRU.cpp in Sources */, - 488876AB215B639F0079B12E /* MNNConvRunForUnitDepthWiseInt8.S in Sources */, + 92FF026F23AA0B5A00AC97F6 /* CPUInt8ToFloat.cpp in Sources */, + 92FF037E23AA0B5A00AC97F6 /* CPUDetectionPostProcess.cpp in Sources */, + 92FF045023AA0B7100AC97F6 /* ShapeCropAndResize.cpp in Sources */, + 92FF02AB23AA0B5A00AC97F6 /* CPUConst.cpp in Sources */, + 92FF03D023AA0B5A00AC97F6 /* CPUTensorConvert.cpp in Sources */, + 92FF02C023AA0B5A00AC97F6 /* MNNAddC4WithStride.S in Sources */, + 92FF02F823AA0B5A00AC97F6 /* MNNConvRunForLineDepthwise.S in Sources */, + 92FF02B623AA0B5A00AC97F6 /* CPUUnary.cpp in Sources */, 
488875AF215B639F0079B12E /* MetalSlice.mm in Sources */, - 488876AD215B639F0079B12E /* MNNBilinearProcC1.S in Sources */, - EBB38F2C21E748B9005F76D7 /* ShapeRange.cpp in Sources */, - C422D7BA23320B2B00FD59D0 /* Convolution3D3x3.cpp in Sources */, + 92FF032723AA0B5A00AC97F6 /* MNNDeconvRunForUnitDepthWise.S in Sources */, + 92FF044223AA0B7100AC97F6 /* ShapeQuantizedReshape.cpp in Sources */, + 92FF02CA23AA0B5A00AC97F6 /* MNNUnPackC4.S in Sources */, 488875BA215B639F0079B12E /* MetalNormalize.metal in Sources */, 488875D2215B639F0079B12E /* MetalROIPooling.metal in Sources */, - 48C0549A2205BB8400E91945 /* MNNConvSlideWindowBorder.S in Sources */, - 483CD48D216CE3B500B05BE9 /* MNNCopyC4WithStride.S in Sources */, - 48C054B3220A7A4600E91945 /* MNNCubicSampleC4.S in Sources */, - 48C054922205B91A00E91945 /* MNNPackC4.S in Sources */, + 92FF02E723AA0B5A00AC97F6 /* MNNDeconvRunForUnitDepthWise.S in Sources */, + 92FF02BB23AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Fast.S in Sources */, + 92FF033323AA0B5A00AC97F6 /* MNNConvSlideWindowMiddle.S in Sources */, + 92FF028423AA0B5A00AC97F6 /* CPUPriorbox.cpp in Sources */, + 92FF045923AA0B7100AC97F6 /* ShapeRegister.cpp in Sources */, 48A8A61221D101A700C2B9A7 /* ImageProcess.cpp in Sources */, 4888772B215B639F0079B12E /* Matrix.cpp in Sources */, - 488876CD215B639F0079B12E /* MNNGemmFloatCommon_4.S in Sources */, - 48887744215CFF7B0079B12E /* MNNBlitC1ToFloatRGBA.S in Sources */, - 48887740215CD3D00079B12E /* MNNBlitC1ToFloatRGBA.S in Sources */, - 488876CE215B639F0079B12E /* MNNCoefLine.S in Sources */, + 92FF045823AA0B7100AC97F6 /* ShapeReduction.cpp in Sources */, 9223E11821D34BE40067544A /* MetalSpaceToBatchND.mm in Sources */, - EBB38F3621E748B9005F76D7 /* ShapeSpaceToBatchND.cpp in Sources */, - EBB38F0B21E748B9005F76D7 /* ShapePool.cpp in Sources */, - 4843AA5522A7E9AB00889A63 /* CPUReluGrad.cpp in Sources */, - EBB38F1C21E748B9005F76D7 /* ShapeBatchToSpaceND.cpp in Sources */, + 92FF026D23AA0B5A00AC97F6 /* 
CPUMatrixBandPart.cpp in Sources */, + 92FF02A323AA0B5A00AC97F6 /* CPUQuantizedLogistic.cpp in Sources */, 9225694A219D698900F251E2 /* MetalRank.metal in Sources */, - 48887743215CFF7B0079B12E /* MNNBlitC3ToFloatRGBA.S in Sources */, + 92FF032F23AA0B5A00AC97F6 /* MNNAddBias.S in Sources */, 92682C622181A2F900B52B9D /* MetalFill.metal in Sources */, 48887728215B639F0079B12E /* WingoradGenerater.cpp in Sources */, + 92FF045423AA0B7100AC97F6 /* ShapeRNNSequenceGRU.cpp in Sources */, 92682C5621819BFA00B52B9D /* MetalSeLU.metal in Sources */, - 48BF218221A3E4C300AFF78E /* MNNSamplerC4BilinearOpt.S in Sources */, + 92FF02AA23AA0B5A00AC97F6 /* CPUSpaceToDepth.cpp in Sources */, + 92FF02FF23AA0B5A00AC97F6 /* MNNFloat2Int8.S in Sources */, AE7BE4BD22855665002CEEA6 /* MetalOPRegister.mm in Sources */, - EB288362230EAF6C00837188 /* CPUEltwiseInt8.cpp in Sources */, + 92FF033423AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */, 92682C5F2181A2EF00B52B9D /* MetalFill.mm in Sources */, - 48887683215B639F0079B12E /* MNNMinFloat.S in Sources */, + 92FF036B23AA0B5A00AC97F6 /* CPUResize.cpp in Sources */, + 92FF02C723AA0B5A00AC97F6 /* MNNCopyC4WithStride.S in Sources */, CE96FE7F21707D58004AB400 /* MetalSigmoid.metal in Sources */, - 48AE9EA32212B2C2009DB6F4 /* Convolution1x1Strassen.cpp in Sources */, - 48EB45E62254B9D2006C2322 /* ConvolutionDepthwise3x3.cpp in Sources */, - 4843AA5A22A7E9AB00889A63 /* CPUSoftmaxGrad.cpp in Sources */, - EBB38F0921E748B9005F76D7 /* ShapeStridedSlice.cpp in Sources */, - AE7BE4BB2285564F002CEEA6 /* CPUOPRegister.cpp in Sources */, - 4888760C215B639F0079B12E /* CPUQuantizedMaxPool.cpp in Sources */, - 48BF218421A4073500AFF78E /* MNNSamplerC4BilinearOpt.S in Sources */, - EBB38F2D21E748B9005F76D7 /* ShapeUnpack.cpp in Sources */, - 48887635215B639F0079B12E /* CPUReduction.cpp in Sources */, - 4888762B215B639F0079B12E /* CPUSlice.cpp in Sources */, 488875B8215B639F0079B12E /* MetalConcat.mm in Sources */, + 92FF030923AA0B5A00AC97F6 /* 
MNNNV21ToBGRUnit.S in Sources */, 92682C50218172A300B52B9D /* MetalTile.metal in Sources */, - 487970F522C9C07000795502 /* CPUPoolInt8.cpp in Sources */, - 48070738231E512D00528CE5 /* Utils.cpp in Sources */, - 486B4BC1222D4831001E73E3 /* MNNMatrixMax.S in Sources */, - 4888769D215B639F0079B12E /* MNNAddBiasRelu6.S in Sources */, + 92FF032623AA0B5A00AC97F6 /* MNNWinogradMatrixProductLeft.S in Sources */, + 92FF04C023AA0BFB00AC97F6 /* Tensor.cpp in Sources */, + 92FF045D23AA0B7100AC97F6 /* ShapeCast.cpp in Sources */, 923B7FA421A6C92F002AFCE0 /* MetalCropAndResize.mm in Sources */, 48A8A61421D101A700C2B9A7 /* ImageBlitter.cpp in Sources */, - C422D738232634DD00FD59D0 /* ShapeConvolution3D.cpp in Sources */, - EBB38F2A21E748B9005F76D7 /* ShapeCropAndResize.cpp in Sources */, + 92FF025523AA0B5A00AC97F6 /* CPUTanh.cpp in Sources */, 9223E12921D3755F0067544A /* MetalBatchToSpaceND.mm in Sources */, - 4888763D215B639F0079B12E /* Convolution3x3.cpp in Sources */, 4888759C215B639F0079B12E /* MetalReshape.metal in Sources */, - 92D765AA2228188700178BE5 /* Pipeline.cpp in Sources */, - EBB38F2521E748B9005F76D7 /* ShapeROIPooling.cpp in Sources */, - 488876D8215B639F0079B12E /* CPUAsString.cpp in Sources */, - 48C054AF220A758B00E91945 /* MNNCubicSampleC4.S in Sources */, + 92FF02EF23AA0B5A00AC97F6 /* MNNAddBias.S in Sources */, + 92FF032223AA0B5A00AC97F6 /* MNNMatrixAdd.S in Sources */, CE96FE7121707D58004AB400 /* MetalTensorConverter.mm in Sources */, - 4829D55022AF5C340093E3BE /* CPUSetDiff1D.cpp in Sources */, - 48A8A60521CDF87000C2B9A7 /* MNNSamplerC1NearestOpt.S in Sources */, - 48887664215B639F0079B12E /* CPUSpatialProduct.cpp in Sources */, - EBB38F1E21E748B9005F76D7 /* ShapeEltwise.cpp in Sources */, - EBB38F3721E748B9005F76D7 /* ShapePack.cpp in Sources */, - EBB38F2E21E748B9005F76D7 /* ShapeTopKV2.cpp in Sources */, - 48887596215B639F0079B12E /* MNNMemoryUtils.c in Sources */, + 92FF02D723AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseUint8.S in Sources */, + 
92FF035523AA0B5A00AC97F6 /* CPUElu.cpp in Sources */, + 92FF026123AA0B5A00AC97F6 /* CPUCropAndResize.cpp in Sources */, 4888759F215B639F0079B12E /* MetalPermute.metal in Sources */, - EBB38F2121E748B9005F76D7 /* ShapeSize.cpp in Sources */, - 48A8A61D21D20BE700C2B9A7 /* MNNNV21ToRGBUnit.S in Sources */, - EBB38F3221E748B9005F76D7 /* ShapeRank.cpp in Sources */, - EBB38F1421E748B9005F76D7 /* ShapeTranspose.cpp in Sources */, - 48887651215B639F0079B12E /* ConvolutionFloatFactory.cpp in Sources */, + 92FF03C123AA0B5A00AC97F6 /* CPURank.cpp in Sources */, + 48FA474923AA127B00172C3B /* MathOp.cpp in Sources */, + 92FF035923AA0B5A00AC97F6 /* CPUAsString.cpp in Sources */, + 92FF041C23AA0B7100AC97F6 /* ShapeNonMaxSuppressionV2.cpp in Sources */, + 92FF02CE23AA0B5A00AC97F6 /* MNNPackC4.S in Sources */, + 92FF037023AA0B5A00AC97F6 /* CPUPool.cpp in Sources */, + 92FF03AD23AA0B5A00AC97F6 /* ConvolutionDepthwise3x3.cpp in Sources */, + 92FF031723AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S in Sources */, + 92FF033D23AA0B5A00AC97F6 /* CPUReluGrad.cpp in Sources */, 923B7F8C21A653BB002AFCE0 /* MetalGather.metal in Sources */, - 485DD42D2181A68F00129159 /* MNNConvRunForLineDepthWiseUint8.S in Sources */, + 92FF032023AA0B5A00AC97F6 /* MNNMatrixSub.S in Sources */, + 92FF036323AA0B5A00AC97F6 /* CPUScale.cpp in Sources */, + 92FF02FE23AA0B5A00AC97F6 /* MNNMatrixProd.S in Sources */, 925801442223B8D100555D43 /* MetalConvolutionCommon.mm in Sources */, - EBB38F1621E748B9005F76D7 /* ShapeSqueeze.cpp in Sources */, - 483CD48B216CE20D00B05BE9 /* MNNAddC4WithStride.S in Sources */, + 92FF026723AA0B5A00AC97F6 /* CPUReduceJoin.cpp in Sources */, + 92FF039B23AA0B5A00AC97F6 /* CommonOptFunction.cpp in Sources */, + 92FF02BC23AA0B5A00AC97F6 /* MNNScaleAddInt8.S in Sources */, + 92FF02DD23AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S in Sources */, + 92FF026323AA0B5A00AC97F6 /* CPUFloatToInt8.cpp in Sources */, 4888759D215B639F0079B12E /* MetalLRN.metal in Sources */, - 
4888766E215B639F0079B12E /* CPUGatherV2.cpp in Sources */, - 48A8A62121D3569800C2B9A7 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */, 488875A1215B639F0079B12E /* MetalTanH.mm in Sources */, - 4888761E215B639F0079B12E /* CPUShape.cpp in Sources */, 924F131921A81C74006D46A4 /* MetalTranspose.mm in Sources */, - 488876D0215B639F0079B12E /* MNNWinogradMatrixProductRight.S in Sources */, - 48070742231E512D00528CE5 /* NeuralNetWorkOp.cpp in Sources */, - EBD9FF12236A939700E188F5 /* ShapeDetectionPostProcess.cpp in Sources */, - 4807073F231E512D00528CE5 /* InsideExpr.cpp in Sources */, - 48A8A63721D8A43D00C2B9A7 /* BufferAllocator.cpp in Sources */, - 92D765AF2228188700178BE5 /* Interpreter.cpp in Sources */, - 4888765C215B639F0079B12E /* CPUNormalize.cpp in Sources */, + 92FF035423AA0B5A00AC97F6 /* CPUSelect.cpp in Sources */, + 92FF02C923AA0B5A00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */, + 92FF032823AA0B5A00AC97F6 /* MNNSamplerC1BilinearOpt.S in Sources */, + 92FF031923AA0B5A00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */, + 92FF044323AA0B7100AC97F6 /* ShapeTopKV2.cpp in Sources */, + 92FF02EC23AA0B5A00AC97F6 /* MNNWinogradMatrixProductRight.S in Sources */, + 92FF02AC23AA0B5A00AC97F6 /* CPUQuantizedSoftmax.cpp in Sources */, + 92FF042523AA0B7100AC97F6 /* ShapeSize.cpp in Sources */, + 92FF043123AA0B7100AC97F6 /* ShapeSpaceToBatchND.cpp in Sources */, + 92FF035D23AA0B5A00AC97F6 /* CPUQuantizedReshape.cpp in Sources */, 48A8A61521D101A700C2B9A7 /* ImageFloatBlitter.cpp in Sources */, + 92FF043623AA0B7100AC97F6 /* ShapeSelect.cpp in Sources */, + 92FF042B23AA0B7100AC97F6 /* ShapeOneHot.cpp in Sources */, + 92FF043C23AA0B7100AC97F6 /* ShapeExpandDims.cpp in Sources */, + 92FF045723AA0B7100AC97F6 /* ShapeTranspose.cpp in Sources */, + 92FF02E423AA0B5A00AC97F6 /* MNNAddBiasRelu.S in Sources */, + 92FF031023AA0B5A00AC97F6 /* MNNMinFloat.S in Sources */, + 92FF032A23AA0B5A00AC97F6 /* MNNGemmInt8AddBiasScale_16x4_Unit.S in Sources */, + 
92FF02F323AA0B5A00AC97F6 /* MNNConvSlideWindowMiddle.S in Sources */, 488875DF215B639F0079B12E /* MetalLSTM.mm in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -3711,12 +4193,12 @@ 0F1465C01FA18D1000F9860A /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { - CODE_SIGN_IDENTITY = "iPhone Developer"; + CODE_SIGN_IDENTITY = "Apple Development"; "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = ""; CODE_SIGN_STYLE = Automatic; DEAD_CODE_STRIPPING = YES; DEFINES_MODULE = YES; - DEVELOPMENT_TEAM = ""; + DEVELOPMENT_TEAM = 6G7464HHUS; DYLIB_COMPATIBILITY_VERSION = 1; DYLIB_CURRENT_VERSION = 1; DYLIB_INSTALL_NAME_BASE = "@rpath"; @@ -3732,6 +4214,8 @@ "${inherited}", "${SRCROOT}/../../3rd_party/flatbuffers/include", "${SRCROOT}/../../3rd_party/half", + "${SRCROOT}/../../include/", + "${SRCROOT}/../../source/", ); INFOPLIST_FILE = "$(SRCROOT)/MNN/Info.plist"; INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; @@ -3751,6 +4235,7 @@ SKIP_INSTALL = YES; SUPPORTED_PLATFORMS = "macosx iphonesimulator iphoneos"; TARGETED_DEVICE_FAMILY = "1,2"; + USER_HEADER_SEARCH_PATHS = "${SRCROOT}/../../schema/current"; VALID_ARCHS = "arm64 armv7 armv7s x86_64 i386"; }; name = Debug; @@ -3758,11 +4243,11 @@ 0F1465C11FA18D1000F9860A /* Release */ = { isa = XCBuildConfiguration; buildSettings = { - CODE_SIGN_IDENTITY = ""; + CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; DEAD_CODE_STRIPPING = YES; DEFINES_MODULE = YES; - DEVELOPMENT_TEAM = ""; + DEVELOPMENT_TEAM = 6G7464HHUS; DYLIB_COMPATIBILITY_VERSION = 1; DYLIB_CURRENT_VERSION = 1; DYLIB_INSTALL_NAME_BASE = "@rpath"; @@ -3777,6 +4262,8 @@ "${inherited}", "${SRCROOT}/../../3rd_party/flatbuffers/include", "${SRCROOT}/../../3rd_party/half", + "${SRCROOT}/../../include/", + "${SRCROOT}/../../source/", ); INFOPLIST_FILE = "$(SRCROOT)/MNN/Info.plist"; INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; @@ -3796,6 +4283,7 @@ SKIP_INSTALL = YES; SUPPORTED_PLATFORMS = "macosx iphonesimulator iphoneos"; 
TARGETED_DEVICE_FAMILY = "1,2"; + USER_HEADER_SEARCH_PATHS = "${SRCROOT}/../../schema/current"; VALID_ARCHS = "arm64 armv7 armv7s x86_64 i386"; }; name = Release; @@ -3806,10 +4294,12 @@ ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage; CODE_SIGN_STYLE = Automatic; - DEVELOPMENT_TEAM = ""; + DEVELOPMENT_TEAM = 6G7464HHUS; HEADER_SEARCH_PATHS = ( "${inherited}", "${SRCROOT}/../../3rd_party/flatbuffers/include", + "${SRCROOT}/../../source", + "${SRCROOT}/../../schema/current", ); INFOPLIST_FILE = Playground/Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 9.0; @@ -3827,10 +4317,12 @@ ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage; CODE_SIGN_STYLE = Automatic; - DEVELOPMENT_TEAM = ""; + DEVELOPMENT_TEAM = 6G7464HHUS; HEADER_SEARCH_PATHS = ( "${inherited}", "${SRCROOT}/../../3rd_party/flatbuffers/include", + "${SRCROOT}/../../source", + "${SRCROOT}/../../schema/current", ); INFOPLIST_FILE = Playground/Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 9.0; diff --git a/project/ios/MNN.xcodeproj/xcshareddata/xcschemes/MNN.xcscheme b/project/ios/MNN.xcodeproj/xcshareddata/xcschemes/MNN.xcscheme index 0dd3be848..f39888835 100644 --- a/project/ios/MNN.xcodeproj/xcshareddata/xcschemes/MNN.xcscheme +++ b/project/ios/MNN.xcodeproj/xcshareddata/xcschemes/MNN.xcscheme @@ -41,8 +41,6 @@ selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" shouldUseLaunchSchemeArgsEnv = "YES"> - - - - + + - - - - - - + + - - CFBundleDevelopmentRegion $(DEVELOPMENT_LANGUAGE) - CFBundleExecutable - $(EXECUTABLE_NAME) - CFBundleIdentifier - $(PRODUCT_BUNDLE_IDENTIFIER) CFBundleInfoDictionaryVersion 6.0 CFBundleName $(PRODUCT_NAME) CFBundlePackageType FMWK - CFBundleShortVersionString - 1.0 - CFBundleVersion - $(CURRENT_PROJECT_VERSION) - NSPrincipalClass - diff --git a/project/ios/MNN/OpRegister.sh b/project/ios/MNN/OpRegister.sh 
index 2128c21b3..35e4c1502 100755 --- a/project/ios/MNN/OpRegister.sh +++ b/project/ios/MNN/OpRegister.sh @@ -13,7 +13,7 @@ function read_dir(){ print(c""a""$3"__"$4""b) >> "extern"; print (a""$3"__"$4""b) >> "call" }'` - done + done } start=$(date +%s) @@ -51,21 +51,7 @@ cat call >> $SHAPEFILE echo '}\n#endif\n}' >> $SHAPEFILE rm call -#hanle Metal -METALFILE=$SHELL_FOLDER/source/backend/metal/MetalOPRegister.mm -METAL=$SHELL_FOLDER/source/backend/metal -METAL_KEY="REGISTER_METAL_OP_CREATOR" -echo "// This file is generated by Shell for ops register\n#import \"MetalDefine.h\"\nnamespace MNN {\n#if MNN_METAL_ENABLED" > $METALFILE -echo "Start Register Metal" -read_dir $METAL $METAL_KEY $SEP $FILE_EXTERN_MM -cat extern >> $METALFILE -rm extern -echo '\nvoid registerMetalOps() {' >> $METALFILE -cat call >> $METALFILE -echo '}\n#endif\n}' >> $METALFILE -rm call - echo "Register Op End" dur=$(echo "$(date +%s) - $start" | bc) -printf "Execution time: %.6f seconds" $dur \ No newline at end of file +printf "Execution time: %.6f seconds" $dur diff --git a/project/ios/buildiOS.sh b/project/ios/buildiOS.sh new file mode 100755 index 000000000..58d52a19c --- /dev/null +++ b/project/ios/buildiOS.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +pushd ${SCRIPT_DIR} +rm -rf ios_64 +mkdir ios_64 +cd ios_64 +cmake ../../../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake -DMNN_METAL=ON -DIOS_ARCH="arm64" -DENABLE_BITCODE=0 -G Xcode +echo "Building AArch64" +xcodebuild ONLY_ACTIVE_ARCH=NO CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO -configuration Release -scheme MNN -target MNN -sdk iphoneos -quiet -DMNN_AAPL_FMWK=ON +cd ../ + +rm -rf ios_32 +mkdir ios_32 +cd ios_32 +cmake ../../../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake -DMNN_METAL=ON -DIOS_ARCH="armv7;armv7s" -DENABLE_BITCODE=0 -G Xcode +echo "Building AArch32" +xcodebuild 
ONLY_ACTIVE_ARCH=NO CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO -configuration Release -scheme MNN -target MNN -sdk iphoneos -quiet -DMNN_AAPL_FMWK=ON +cd ../ + +mv ios_32/Release-iphoneos/MNN.framework/MNN ios_32/Release-iphoneos/MNN.framework/MNN_32 + +echo "Creating Fat Binary" +lipo -create ios_32/Release-iphoneos/MNN.framework/MNN_32 ios_64/Release-iphoneos/MNN.framework/MNN -output ios_32/Release-iphoneos/MNN.framework/MNN +rm ios_32/Release-iphoneos/MNN.framework/MNN_32 +echo "Patching Framework Headers" +rm -rf ./MNN.framework +cp -R ios_32/Release-iphoneos/MNN.framework ./MNN.framework +cp -R ../../include/MNN/expr ./MNN.framework/Headers/expr +popd diff --git a/pymnn/pip_package/MNNTools/mnnconvert.py b/pymnn/pip_package/MNNTools/mnnconvert.py index dc865104d..2ef84f7f2 100644 --- a/pymnn/pip_package/MNNTools/mnnconvert.py +++ b/pymnn/pip_package/MNNTools/mnnconvert.py @@ -13,8 +13,7 @@ def usage(): print(" [--modelFile MODELFILE]") print(" [--prototxt PROTOTXT]") print(" [--MNNModel MNNMODEL]") - print(" [--benchmarkModel {True,False}]") - print(" [--bizCode BIZCODE]") + print(" [--fp16 {True,False}]") def main(): """ main funcion """ @@ -29,12 +28,10 @@ def main(): help="only used for caffe, for example: xxx.prototxt") parser.add_argument("--MNNModel", type=str, required=True,\ help="MNN model, ex: xxx.mnn") - parser.add_argument("--benchmarkModel", type=bool, default=False,\ + parser.add_argument("--fp16", type=bool, default=False,\ help="{True,False}\ Boolean to change the mnn usage. If True, the output\ - model can only be used in benchmark mode") - parser.add_argument("--bizCode", type=str, default='MNN',\ - help="MNN Model Flag, for example: MNN") + model save data in half_float type") TF = 0 CAFFE = 1 ONNX = 2 @@ -69,7 +66,7 @@ def main(): ### just cheat with a not exist name ### args.prototxt = "NA.mnn" Tools.mnnconvert(args.MNNModel, args. 
modelFile, framework_type,\ - args.bizCode, args.benchmarkModel, args.prototxt) + args.fp16, args.prototxt) return 0 if __name__ == "__main__": main() diff --git a/pymnn/pip_package/build_deps.py b/pymnn/pip_package/build_deps.py index 95ffd54a1..4d353b444 100644 --- a/pymnn/pip_package/build_deps.py +++ b/pymnn/pip_package/build_deps.py @@ -7,7 +7,7 @@ IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') -BUILD_DIR = 'build' +BUILD_DIR = 'pymnn_build' # avoid overwrite temporary product when build pymnn def build_deps(): """ build depency """ root_dir = os.path.dirname(os.path.dirname(os.getcwd())) diff --git a/pymnn/pip_package/setup.py b/pymnn/pip_package/setup.py index 815643b71..153c55bb1 100644 --- a/pymnn/pip_package/setup.py +++ b/pymnn/pip_package/setup.py @@ -12,7 +12,7 @@ IS_WINDOWS = (platform.system() == 'Windows') IS_DARWIN = (platform.system() == 'Darwin') IS_LINUX = (platform.system() == 'Linux') -BUILD_DIR = 'build' +BUILD_DIR = 'pymnn_build' BUILD_TYPE = 'RELEASE' def check_env_flag(name, default=''): """ check whether a env is set to Yes """ @@ -23,7 +23,7 @@ def report(*args): print(*args) package_name = os.getenv('MNN_PACKAGE_NAME', 'MNN') -version = '0.0.7' +version = '0.0.8' depend_pip_packages = ['flatbuffers', 'pydot_ng', 'graphviz'] README = os.path.join(os.getcwd(), "README.md") with open(README) as f: @@ -121,7 +121,7 @@ def configure_extension_build(): tools_include_dirs = [os.path.join(root_dir, "tools", "converter",\ "source", "IR")] tools_include_dirs += [os.path.join(root_dir, "tools", "converter",\ - "source", "include")] + "include")] tools_include_dirs += [os.path.join(root_dir, "tools", "converter",\ "source", "tflite", "schema")] tools_include_dirs += [os.path.join(root_dir, "tools", "converter", "source")] @@ -134,8 +134,10 @@ def configure_extension_build(): tools_include_dirs += [os.path.join(root_dir, "3rd_party")] tools_include_dirs += 
[os.path.join(root_dir, "3rd_party", "imageHelper")] tools_include_dirs += [os.path.join(root_dir, "source", "core")] - tools_depend = ['-lCOMMON_LIB', '-ltflite', '-lonnx', '-loptimizer',\ - '-lMNN', '-lMNN_Express', '-lmnn_bizcode', '-lcaffe', '-ltensorflow'] + tools_include_dirs += [os.path.join(root_dir, "schema", "current")] + #tools_depend = ['-lCOMMON_LIB', '-ltflite', '-lonnx', '-loptimizer',\ + # '-lMNN', '-lMNN_Express', '-lmnn_bizcode', '-lcaffe', '-ltensorflow'] + tools_depend = ['-lMNN', '-lMNNConvertDeps'] engine_extra_link_args = [] tools_extra_link_args = [] if IS_DARWIN: @@ -168,7 +170,7 @@ def configure_extension_build(): tools_extra_link_args += ['/WHOLEARCHIVE:mnn_bizcode.lib'] tools_extra_link_args += ['/WHOLEARCHIVE:caffe.lib'] tools_extra_link_args += ['/WHOLEARCHIVE:tensorflow.lib'] - tools_extra_link_args += ['C:\\protobuf\\vsprojects\\Release\\libprotobuf.lib'] + tools_extra_link_args += ['C:\\Users\\tianhang.yth\\Desktop\\protobuf\\vsprojects\\Release\\libprotobuf.lib'] if BUILD_TYPE == 'DEBUG': if IS_WINDOWS: diff --git a/pymnn/src/MNN.cc b/pymnn/src/MNN.cc index d1b65916b..850c44474 100644 --- a/pymnn/src/MNN.cc +++ b/pymnn/src/MNN.cc @@ -18,12 +18,7 @@ #include "Interpreter.hpp" #include "ImageProcess.hpp" #endif - -#if PY_MAJOR_VERSION >= 3 -#define PyString_Check PyBytes_Check -#define PyString_AsString PyBytes_AsString -#define PyString_FromString PyBytes_FromString -#endif +#include "util.h" using namespace MNN; using namespace std; @@ -117,6 +112,8 @@ static PyObject* PyMNNInterpreter_runSessionWithCallBack(PyMNNInterpreter *self, static PyObject* PyMNNInterpreter_runSessionWithCallBackInfo(PyMNNInterpreter *self, PyObject *args); static PyObject* PyMNNInterpreter_getSessionInput(PyMNNInterpreter *self, PyObject *args); static PyObject* PyMNNInterpreter_getSessionOutput(PyMNNInterpreter *self, PyObject *args); +static PyObject* PyMNNInterpreter_getSessionInputAll(PyMNNInterpreter *self, PyObject *args); +static PyObject* 
PyMNNInterpreter_getSessionOutputAll(PyMNNInterpreter *self, PyObject *args); static PyObject* PyMNNInterpreter_cache(PyMNNInterpreter *self, PyObject *args); static PyObject* PyMNNInterpreter_removeCache(PyMNNInterpreter *self, PyObject *args); static PyObject* PyMNNInterpreter_updateSessionToModel(PyMNNInterpreter *self, PyObject *args); @@ -132,6 +129,8 @@ static PyMethodDef PyMNNInterpreter_methods[] = { {"runSessionWithCallBackInfo", (PyCFunction)PyMNNInterpreter_runSessionWithCallBackInfo, METH_VARARGS, "run session with callback info"}, {"getSessionOutput", (PyCFunction)PyMNNInterpreter_getSessionOutput, METH_VARARGS, "get session output"}, {"getSessionInput", (PyCFunction)PyMNNInterpreter_getSessionInput, METH_VARARGS, "get session input"}, + {"getSessionOutputAll", (PyCFunction)PyMNNInterpreter_getSessionOutputAll, METH_VARARGS, "get session output all"}, + {"getSessionInputAll", (PyCFunction)PyMNNInterpreter_getSessionInputAll, METH_VARARGS, "get session input all"}, {"resizeTensor", (PyCFunction)PyMNNInterpreter_resizeTensor, METH_VARARGS, "resize tensor"}, {"cache", (PyCFunction)PyMNNInterpreter_cache, METH_VARARGS, "cache current net instance"}, {"removeCache", (PyCFunction)PyMNNInterpreter_removeCache, METH_VARARGS, "remove cache with given path"}, @@ -436,14 +435,14 @@ namespace ec { size_t saveTensorsCount = PyTuple_Size(saveTensors); for (int i=0; i(PyLong_AsLong(PyObject_Call(beginCallback, args, NULL))); @@ -657,7 +656,7 @@ static PyObject* PyMNNInterpreter_runSessionWithCallBack(PyMNNInterpreter *self, tensor->tensor = tensors[i]; PyTuple_SetItem(weTensorData, i, (PyObject *)tensor); } - PyObject *weStringData = PyString_FromString(name.c_str()); + PyObject *weStringData = char2Object(name.c_str()); PyTuple_SetItem(args, 0, weTensorData); PyTuple_SetItem(args, 1, weStringData); bool ret = static_cast(PyLong_AsLong(PyObject_Call(endCallback, args, NULL))); @@ -870,6 +869,62 @@ static PyObject* PyMNNInterpreter_getSessionInput(PyMNNInterpreter 
*self, PyObje return (PyObject *)tensor; } +static PyObject* PyMNNInterpreter_getSessionOutputAll(PyMNNInterpreter *self, PyObject *args) { + PyMNNSession* session = NULL; + if (!PyArg_ParseTuple(args, "O", &session)) { + return NULL; + } + if (!PyObject_TypeCheck(session, &PyMNNSessionType)) { + PyErr_SetString(PyExc_Exception,"PyMNNInterpreter_getSessionOutputAll: First argument is not a MNN.Session instance"); + return NULL; + } + PyObject *f = importName("MNN", "Tensor"); + if (!f || !PyCallable_Check(f)) { + PyErr_SetString(PyExc_Exception,"PyMNNInterpreter_getSessionOutputAll: MNN.Tensor not found"); + return NULL; + } + auto map = self->interpreter->getSessionOutputAll(session->session); + PyObject* output = PyDict_New(); + for (auto it=map.begin(); it!=map.end(); ++it) { + PyObject *tensor = PyObject_Call(f, PyTuple_New(0), NULL); + if (!tensor) { + PyErr_SetString(PyExc_Exception,"PyMNNInterpreter_getSessionOutputAll: MNN.Tensor instance create failed"); + return NULL; + } + ((PyMNNTensor*)tensor)->tensor = it->second; + PyDict_SetItem(output, char2Object(it->first.c_str()), tensor); + } + return output; +} + +static PyObject* PyMNNInterpreter_getSessionInputAll(PyMNNInterpreter *self, PyObject *args) { + PyMNNSession* session = NULL; + if (!PyArg_ParseTuple(args, "O", &session)) { + return NULL; + } + if (!PyObject_TypeCheck(session, &PyMNNSessionType)) { + PyErr_SetString(PyExc_Exception,"PyMNNInterpreter_getSessionInputAll: First argument is not a MNN.Session instance"); + return NULL; + } + PyObject *f = importName("MNN", "Tensor"); + if (!f || !PyCallable_Check(f)) { + PyErr_SetString(PyExc_Exception,"PyMNNInterpreter_getSessionInputAll: MNN.Tensor not found"); + return NULL; + } + auto map = self->interpreter->getSessionInputAll(session->session); + PyObject* output = PyDict_New(); + for (auto it=map.begin(); it!=map.end(); ++it) { + PyObject *tensor = PyObject_Call(f, PyTuple_New(0), NULL); + if (!tensor) { + 
PyErr_SetString(PyExc_Exception,"PyMNNInterpreter_getSessionInputAll: MNN.Tensor instance create failed"); + return NULL; + } + ((PyMNNTensor*)tensor)->tensor = it->second; + PyDict_SetItem(output, char2Object(it->first.c_str()), tensor); + } + return output; +} + PyObject* PyMNNInterpreter_new(struct _typeobject *type, PyObject *args, PyObject *kwds) { PyMNNInterpreter* self = (PyMNNInterpreter *)type->tp_alloc(type, 0); return (PyObject*)self; @@ -1055,10 +1110,12 @@ static int PyMNNTensor_init(PyMNNTensor *self, PyObject *args, PyObject *kwds) { PyObject* args = PyTuple_New(1); PyTuple_SetItem(args, 0, PyLong_FromLong(dataSize)); PyObject* reshaped_array = PyObject_Call(reshape_func, args, NULL); - Py_XDECREF(args); PyObject* reshaped_tuple = PySequence_Tuple(reshaped_array); data = reshaped_tuple; Py_XDECREF(reshaped_array); + Py_XDECREF(args); + Py_XDECREF(reshape_func); + Py_XDECREF(sizeSrc); } halide_type_t htt; if (dataType == PyMNNHalideTypeInt) { @@ -1126,7 +1183,7 @@ static int PyMNNTensor_init(PyMNNTensor *self, PyObject *args, PyObject *kwds) { return -1; } for (int i=0; itensor->host(); for (int i=0; iopInfo) { - name = PyString_FromString(self->opInfo->name().c_str()); + name = char2Object(self->opInfo->name().c_str()); } return name; } static PyObject* PyMNNOpInfo_getType(PyMNNOpInfo *self, PyObject *args) { PyObject *type = NULL; if (self->opInfo) { - type = PyString_FromString(self->opInfo->type().c_str()); + type = char2Object(self->opInfo->type().c_str()); } return type; } diff --git a/pymnn/src/MNNTools.cc b/pymnn/src/MNNTools.cc index 1e98ef3b1..63ccfa13d 100644 --- a/pymnn/src/MNNTools.cc +++ b/pymnn/src/MNNTools.cc @@ -1,5 +1,5 @@ /* - MNN python module + MNN python module */ #include #include "structmember.h" @@ -24,22 +24,22 @@ static PyObject* PyTool_Converter(PyObject *self, PyObject *args) { const char* mnnModel = NULL; const char* modelFile = NULL; PyObject* frameworkType = NULL; - const char* bizCode; - PyObject* benchmarkModel = 
NULL; + PyObject* fp16 = NULL; const char* prototxtFile = NULL; - if (!PyArg_ParseTuple(args, "ssOsO|s", &mnnModel, &modelFile, &frameworkType, &bizCode, &benchmarkModel, &prototxtFile)) { + if (!PyArg_ParseTuple(args, "ssOO|s", &mnnModel, &modelFile, &frameworkType, &fp16, &prototxtFile)) { return NULL; } struct modelConfig modelPath; modelPath.MNNModel = std::string(mnnModel); modelPath.modelFile = std::string(modelFile); modelPath.model = static_cast(PyLong_AsLong(frameworkType)); - modelPath.bizCode = std::string(bizCode); - modelPath.benchmarkModel = static_cast(PyLong_AsLong(benchmarkModel)); + modelPath.bizCode = std::string(""); + modelPath.benchmarkModel = false; + modelPath.saveHalfFloat = static_cast(PyLong_AsLong(fp16)); if(prototxtFile){ - modelPath.prototxtFile = std::string(prototxtFile); + modelPath.prototxtFile = std::string(prototxtFile); } - + std::unique_ptr netT = std::unique_ptr(new MNN::NetT()); if (modelPath.model == modelConfig::CAFFE) { caffe2MNNNet(modelPath.prototxtFile, modelPath.modelFile, modelPath.bizCode, netT); @@ -58,9 +58,9 @@ static PyObject* PyTool_Converter(PyObject *self, PyObject *args) { if (modelPath.model != modelConfig::MNN) { std::cout << "Start to Optimize the MNN Net..." 
<< std::endl; std::unique_ptr newNet = optimizeNet(netT); - writeFb(newNet, modelPath.MNNModel, modelPath.benchmarkModel); + writeFb(newNet, modelPath.MNNModel, modelPath.benchmarkModel,modelPath.saveHalfFloat); } else { - writeFb(netT, modelPath.MNNModel, modelPath.benchmarkModel); + writeFb(netT, modelPath.MNNModel, modelPath.benchmarkModel,modelPath.saveHalfFloat); } Py_RETURN_TRUE; } @@ -162,5 +162,4 @@ MOD_INIT(Tools) } return; #endif -} - +} diff --git a/pymnn/src/util.h b/pymnn/src/util.h new file mode 100644 index 000000000..733d15070 --- /dev/null +++ b/pymnn/src/util.h @@ -0,0 +1,36 @@ +#pragma once +#include +// Returns true if obj is a bytes/str or unicode object +inline bool checkString(PyObject* obj) { + return PyBytes_Check(obj) || PyUnicode_Check(obj); +} +// Convert PyBytes (PyString) or PyUnicode as std::string +// PyBytes are unpacked as-is. PyUnicode is unpacked as UTF-8. +// NOTE: this method requires the GIL +inline std::string object2String(PyObject* obj) { + if (PyBytes_Check(obj)) { + return std::string(PyBytes_AS_STRING(obj)); + } + if (PyUnicode_Check(obj)) { + PyObject *bytes = PyUnicode_AsUTF8String(obj); + std::string s = std::string(PyBytes_AS_STRING(bytes)); + Py_XDECREF(bytes); + return s; + } +} + +inline PyObject* char2Object(const char* str) { +#if PY_MAJOR_VERSION == 2 + return PyString_FromString(str); +#else + return PyUnicode_FromString(str); +#endif +} +inline PyObject* string2Object(const std::string& str) { +#if PY_MAJOR_VERSION == 2 + return PyString_FromString(str.c_str()); +#else + return PyUnicode_FromString(str.c_str()); +#endif +} + diff --git a/schema/current/BasicOptimizer_generated.h b/schema/current/BasicOptimizer_generated.h new file mode 100644 index 000000000..c8647bec9 --- /dev/null +++ b/schema/current/BasicOptimizer_generated.h @@ -0,0 +1,389 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_ +#define 
FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "CaffeOp_generated.h" +#include "GpuLibrary_generated.h" +#include "MNN_generated.h" +#include "TFQuantizeOp_generated.h" +#include "Tensor_generated.h" +#include "TensorflowOp_generated.h" +#include "Type_generated.h" +#include "UserDefine_generated.h" + +namespace MNN { +namespace Optimizer { + +struct BackendConfig; +struct BackendConfigT; + +struct Merge; +struct MergeT; + +inline const flatbuffers::TypeTable *BackendConfigTypeTable(); + +inline const flatbuffers::TypeTable *MergeTypeTable(); + +struct BackendConfigT : public flatbuffers::NativeTable { + typedef BackendConfig TableType; + int32_t memroy; + MNN::ForwardType type; + int32_t precision; + int32_t power; + int32_t numberThread; + BackendConfigT() + : memroy(0), + type(MNN::ForwardType_CPU), + precision(0), + power(0), + numberThread(1) { + } +}; + +struct BackendConfig FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BackendConfigT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return BackendConfigTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MEMROY = 4, + VT_TYPE = 6, + VT_PRECISION = 8, + VT_POWER = 10, + VT_NUMBERTHREAD = 12 + }; + int32_t memroy() const { + return GetField(VT_MEMROY, 0); + } + MNN::ForwardType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + int32_t precision() const { + return GetField(VT_PRECISION, 0); + } + int32_t power() const { + return GetField(VT_POWER, 0); + } + int32_t numberThread() const { + return GetField(VT_NUMBERTHREAD, 1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MEMROY) && + VerifyField(verifier, VT_TYPE) && + VerifyField(verifier, VT_PRECISION) && + VerifyField(verifier, VT_POWER) && + VerifyField(verifier, VT_NUMBERTHREAD) && + verifier.EndTable(); + } + 
BackendConfigT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BackendConfigT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BackendConfigBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_memroy(int32_t memroy) { + fbb_.AddElement(BackendConfig::VT_MEMROY, memroy, 0); + } + void add_type(MNN::ForwardType type) { + fbb_.AddElement(BackendConfig::VT_TYPE, static_cast(type), 0); + } + void add_precision(int32_t precision) { + fbb_.AddElement(BackendConfig::VT_PRECISION, precision, 0); + } + void add_power(int32_t power) { + fbb_.AddElement(BackendConfig::VT_POWER, power, 0); + } + void add_numberThread(int32_t numberThread) { + fbb_.AddElement(BackendConfig::VT_NUMBERTHREAD, numberThread, 1); + } + explicit BackendConfigBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BackendConfigBuilder &operator=(const BackendConfigBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBackendConfig( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t memroy = 0, + MNN::ForwardType type = MNN::ForwardType_CPU, + int32_t precision = 0, + int32_t power = 0, + int32_t numberThread = 1) { + BackendConfigBuilder builder_(_fbb); + builder_.add_numberThread(numberThread); + builder_.add_power(power); + builder_.add_precision(precision); + builder_.add_memroy(memroy); + builder_.add_type(type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBackendConfig(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MergeT : public 
flatbuffers::NativeTable { + typedef Merge TableType; + std::vector outputIndexes; + std::vector inputIndexes; + int32_t tensorNumber; + std::unique_ptr backend; + std::vector> oplists; + MergeT() + : tensorNumber(0) { + } +}; + +struct Merge FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MergeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return MergeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUTINDEXES = 4, + VT_INPUTINDEXES = 6, + VT_TENSORNUMBER = 8, + VT_BACKEND = 10, + VT_OPLISTS = 12 + }; + const flatbuffers::Vector *outputIndexes() const { + return GetPointer *>(VT_OUTPUTINDEXES); + } + const flatbuffers::Vector *inputIndexes() const { + return GetPointer *>(VT_INPUTINDEXES); + } + int32_t tensorNumber() const { + return GetField(VT_TENSORNUMBER, 0); + } + const BackendConfig *backend() const { + return GetPointer(VT_BACKEND); + } + const flatbuffers::Vector> *oplists() const { + return GetPointer> *>(VT_OPLISTS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_OUTPUTINDEXES) && + verifier.VerifyVector(outputIndexes()) && + VerifyOffset(verifier, VT_INPUTINDEXES) && + verifier.VerifyVector(inputIndexes()) && + VerifyField(verifier, VT_TENSORNUMBER) && + VerifyOffset(verifier, VT_BACKEND) && + verifier.VerifyTable(backend()) && + VerifyOffset(verifier, VT_OPLISTS) && + verifier.VerifyVector(oplists()) && + verifier.VerifyVectorOfTables(oplists()) && + verifier.EndTable(); + } + MergeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MergeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MergeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MergeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + 
flatbuffers::uoffset_t start_; + void add_outputIndexes(flatbuffers::Offset> outputIndexes) { + fbb_.AddOffset(Merge::VT_OUTPUTINDEXES, outputIndexes); + } + void add_inputIndexes(flatbuffers::Offset> inputIndexes) { + fbb_.AddOffset(Merge::VT_INPUTINDEXES, inputIndexes); + } + void add_tensorNumber(int32_t tensorNumber) { + fbb_.AddElement(Merge::VT_TENSORNUMBER, tensorNumber, 0); + } + void add_backend(flatbuffers::Offset backend) { + fbb_.AddOffset(Merge::VT_BACKEND, backend); + } + void add_oplists(flatbuffers::Offset>> oplists) { + fbb_.AddOffset(Merge::VT_OPLISTS, oplists); + } + explicit MergeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MergeBuilder &operator=(const MergeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMerge( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> outputIndexes = 0, + flatbuffers::Offset> inputIndexes = 0, + int32_t tensorNumber = 0, + flatbuffers::Offset backend = 0, + flatbuffers::Offset>> oplists = 0) { + MergeBuilder builder_(_fbb); + builder_.add_oplists(oplists); + builder_.add_backend(backend); + builder_.add_tensorNumber(tensorNumber); + builder_.add_inputIndexes(inputIndexes); + builder_.add_outputIndexes(outputIndexes); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateMergeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *outputIndexes = nullptr, + const std::vector *inputIndexes = nullptr, + int32_t tensorNumber = 0, + flatbuffers::Offset backend = 0, + const std::vector> *oplists = nullptr) { + auto outputIndexes__ = outputIndexes ? _fbb.CreateVector(*outputIndexes) : 0; + auto inputIndexes__ = inputIndexes ? _fbb.CreateVector(*inputIndexes) : 0; + auto oplists__ = oplists ? 
_fbb.CreateVector>(*oplists) : 0; + return MNN::Optimizer::CreateMerge( + _fbb, + outputIndexes__, + inputIndexes__, + tensorNumber, + backend, + oplists__); +} + +flatbuffers::Offset CreateMerge(flatbuffers::FlatBufferBuilder &_fbb, const MergeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline BackendConfigT *BackendConfig::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BackendConfigT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void BackendConfig::UnPackTo(BackendConfigT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = memroy(); _o->memroy = _e; }; + { auto _e = type(); _o->type = _e; }; + { auto _e = precision(); _o->precision = _e; }; + { auto _e = power(); _o->power = _e; }; + { auto _e = numberThread(); _o->numberThread = _e; }; +} + +inline flatbuffers::Offset BackendConfig::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBackendConfig(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBackendConfig(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BackendConfigT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _memroy = _o->memroy; + auto _type = _o->type; + auto _precision = _o->precision; + auto _power = _o->power; + auto _numberThread = _o->numberThread; + return MNN::Optimizer::CreateBackendConfig( + _fbb, + _memroy, + _type, + _precision, + _power, + _numberThread); +} + +inline MergeT *Merge::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MergeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Merge::UnPackTo(MergeT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = outputIndexes(); if (_e) { _o->outputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputIndexes[_i] = _e->Get(_i); } } }; + { auto _e = inputIndexes(); if (_e) { _o->inputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputIndexes[_i] = _e->Get(_i); } } }; + { auto _e = tensorNumber(); _o->tensorNumber = _e; }; + { auto _e = backend(); if (_e) _o->backend = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = oplists(); if (_e) { _o->oplists.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->oplists[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; +} + +inline flatbuffers::Offset Merge::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MergeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMerge(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMerge(flatbuffers::FlatBufferBuilder &_fbb, const MergeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MergeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _outputIndexes = _o->outputIndexes.size() ? _fbb.CreateVector(_o->outputIndexes) : 0; + auto _inputIndexes = _o->inputIndexes.size() ? _fbb.CreateVector(_o->inputIndexes) : 0; + auto _tensorNumber = _o->tensorNumber; + auto _backend = _o->backend ? CreateBackendConfig(_fbb, _o->backend.get(), _rehasher) : 0; + auto _oplists = _o->oplists.size() ? 
_fbb.CreateVector> (_o->oplists.size(), [](size_t i, _VectorArgs *__va) { return CreateOp(*__va->__fbb, __va->__o->oplists[i].get(), __va->__rehasher); }, &_va ) : 0; + return MNN::Optimizer::CreateMerge( + _fbb, + _outputIndexes, + _inputIndexes, + _tensorNumber, + _backend, + _oplists); +} + +inline const flatbuffers::TypeTable *BackendConfigTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + MNN::ForwardTypeTypeTable + }; + static const char * const names[] = { + "memroy", + "type", + "precision", + "power", + "numberThread" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *MergeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 1, 1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + BackendConfigTypeTable, + MNN::OpTypeTable + }; + static const char * const names[] = { + "outputIndexes", + "inputIndexes", + "tensorNumber", + "backend", + "oplists" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +} // namespace Optimizer +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_ diff --git a/schema/current/CaffeOp_generated.h b/schema/current/CaffeOp_generated.h new file mode 100644 index 000000000..816564b35 --- /dev/null +++ b/schema/current/CaffeOp_generated.h @@ -0,0 +1,6179 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef 
FLATBUFFERS_GENERATED_CAFFEOP_MNN_H_ +#define FLATBUFFERS_GENERATED_CAFFEOP_MNN_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "Tensor_generated.h" +#include "Type_generated.h" + +namespace MNN { + +struct Convolution2DCommon; +struct Convolution2DCommonT; + +struct Convolution3DCommon; +struct Convolution3DCommonT; + +struct IDSTQuan; +struct IDSTQuanT; + +struct QuantizedFloatParam; +struct QuantizedFloatParamT; + +struct Convolution2D; +struct Convolution2DT; + +struct Convolution3D; +struct Convolution3DT; + +struct InnerProduct; +struct InnerProductT; + +struct Pool; +struct PoolT; + +struct Pool3D; +struct Pool3DT; + +struct Relu; +struct ReluT; + +struct Relu6; +struct Relu6T; + +struct PRelu; +struct PReluT; + +struct ELU; +struct ELUT; + +struct LRN; +struct LRNT; + +struct ArgMax; +struct ArgMaxT; + +struct Axis; +struct AxisT; + +struct Input; +struct InputT; + +struct LSTM; +struct LSTMT; + +struct Slice; +struct SliceT; + +struct BatchNorm; +struct BatchNormT; + +struct Scale; +struct ScaleT; + +struct Eltwise; +struct EltwiseT; + +struct Flatten; +struct FlattenT; + +struct Permute; +struct PermuteT; + +struct Reshape; +struct ReshapeT; + +struct DetectionOutput; +struct DetectionOutputT; + +struct RoiPooling; +struct RoiPoolingT; + +struct Proposal; +struct ProposalT; + +struct Interp; +struct InterpT; + +struct Resize; +struct ResizeT; + +struct PriorBox; +struct PriorBoxT; + +struct Normalize; +struct NormalizeT; + +struct EltwiseInt8; +struct EltwiseInt8T; + +inline const flatbuffers::TypeTable *Convolution2DCommonTypeTable(); + +inline const flatbuffers::TypeTable *Convolution3DCommonTypeTable(); + +inline const flatbuffers::TypeTable *IDSTQuanTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedFloatParamTypeTable(); + +inline const flatbuffers::TypeTable *Convolution2DTypeTable(); + +inline const flatbuffers::TypeTable *Convolution3DTypeTable(); + +inline const flatbuffers::TypeTable *InnerProductTypeTable(); + +inline const 
flatbuffers::TypeTable *PoolTypeTable(); + +inline const flatbuffers::TypeTable *Pool3DTypeTable(); + +inline const flatbuffers::TypeTable *ReluTypeTable(); + +inline const flatbuffers::TypeTable *Relu6TypeTable(); + +inline const flatbuffers::TypeTable *PReluTypeTable(); + +inline const flatbuffers::TypeTable *ELUTypeTable(); + +inline const flatbuffers::TypeTable *LRNTypeTable(); + +inline const flatbuffers::TypeTable *ArgMaxTypeTable(); + +inline const flatbuffers::TypeTable *AxisTypeTable(); + +inline const flatbuffers::TypeTable *InputTypeTable(); + +inline const flatbuffers::TypeTable *LSTMTypeTable(); + +inline const flatbuffers::TypeTable *SliceTypeTable(); + +inline const flatbuffers::TypeTable *BatchNormTypeTable(); + +inline const flatbuffers::TypeTable *ScaleTypeTable(); + +inline const flatbuffers::TypeTable *EltwiseTypeTable(); + +inline const flatbuffers::TypeTable *FlattenTypeTable(); + +inline const flatbuffers::TypeTable *PermuteTypeTable(); + +inline const flatbuffers::TypeTable *ReshapeTypeTable(); + +inline const flatbuffers::TypeTable *DetectionOutputTypeTable(); + +inline const flatbuffers::TypeTable *RoiPoolingTypeTable(); + +inline const flatbuffers::TypeTable *ProposalTypeTable(); + +inline const flatbuffers::TypeTable *InterpTypeTable(); + +inline const flatbuffers::TypeTable *ResizeTypeTable(); + +inline const flatbuffers::TypeTable *PriorBoxTypeTable(); + +inline const flatbuffers::TypeTable *NormalizeTypeTable(); + +inline const flatbuffers::TypeTable *EltwiseInt8TypeTable(); + +enum PadMode { + PadMode_CAFFE = 0, + PadMode_VALID = 1, + PadMode_SAME = 2, + PadMode_MIN = PadMode_CAFFE, + PadMode_MAX = PadMode_SAME +}; + +inline const PadMode (&EnumValuesPadMode())[3] { + static const PadMode values[] = { + PadMode_CAFFE, + PadMode_VALID, + PadMode_SAME + }; + return values; +} + +inline const char * const *EnumNamesPadMode() { + static const char * const names[] = { + "CAFFE", + "VALID", + "SAME", + nullptr + }; + return names; +} + 
+inline const char *EnumNamePadMode(PadMode e) { + if (e < PadMode_CAFFE || e > PadMode_SAME) return ""; + const size_t index = static_cast(e); + return EnumNamesPadMode()[index]; +} + +enum PoolType { + PoolType_MAXPOOL = 0, + PoolType_AVEPOOL = 1, + PoolType_MIN = PoolType_MAXPOOL, + PoolType_MAX = PoolType_AVEPOOL +}; + +inline const PoolType (&EnumValuesPoolType())[2] { + static const PoolType values[] = { + PoolType_MAXPOOL, + PoolType_AVEPOOL + }; + return values; +} + +inline const char * const *EnumNamesPoolType() { + static const char * const names[] = { + "MAXPOOL", + "AVEPOOL", + nullptr + }; + return names; +} + +inline const char *EnumNamePoolType(PoolType e) { + if (e < PoolType_MAXPOOL || e > PoolType_AVEPOOL) return ""; + const size_t index = static_cast(e); + return EnumNamesPoolType()[index]; +} + +enum PoolPadType { + PoolPadType_CAFFE = 0, + PoolPadType_VALID = 1, + PoolPadType_SAME = 2, + PoolPadType_MIN = PoolPadType_CAFFE, + PoolPadType_MAX = PoolPadType_SAME +}; + +inline const PoolPadType (&EnumValuesPoolPadType())[3] { + static const PoolPadType values[] = { + PoolPadType_CAFFE, + PoolPadType_VALID, + PoolPadType_SAME + }; + return values; +} + +inline const char * const *EnumNamesPoolPadType() { + static const char * const names[] = { + "CAFFE", + "VALID", + "SAME", + nullptr + }; + return names; +} + +inline const char *EnumNamePoolPadType(PoolPadType e) { + if (e < PoolPadType_CAFFE || e > PoolPadType_SAME) return ""; + const size_t index = static_cast(e); + return EnumNamesPoolPadType()[index]; +} + +enum EltwiseType { + EltwiseType_PROD = 0, + EltwiseType_SUM = 1, + EltwiseType_MAXIMUM = 2, + EltwiseType_SUB = 3, + EltwiseType_MIN = EltwiseType_PROD, + EltwiseType_MAX = EltwiseType_SUB +}; + +inline const EltwiseType (&EnumValuesEltwiseType())[4] { + static const EltwiseType values[] = { + EltwiseType_PROD, + EltwiseType_SUM, + EltwiseType_MAXIMUM, + EltwiseType_SUB + }; + return values; +} + +inline const char * const 
*EnumNamesEltwiseType() { + static const char * const names[] = { + "PROD", + "SUM", + "MAXIMUM", + "SUB", + nullptr + }; + return names; +} + +inline const char *EnumNameEltwiseType(EltwiseType e) { + if (e < EltwiseType_PROD || e > EltwiseType_SUB) return ""; + const size_t index = static_cast(e); + return EnumNamesEltwiseType()[index]; +} + +struct Convolution2DCommonT : public flatbuffers::NativeTable { + typedef Convolution2DCommon TableType; + int32_t padX; + int32_t padY; + int32_t kernelX; + int32_t kernelY; + int32_t strideX; + int32_t strideY; + int32_t dilateX; + int32_t dilateY; + PadMode padMode; + int32_t group; + int32_t outputCount; + int32_t inputCount; + bool relu; + bool relu6; + Convolution2DCommonT() + : padX(0), + padY(0), + kernelX(1), + kernelY(1), + strideX(1), + strideY(1), + dilateX(1), + dilateY(1), + padMode(PadMode_CAFFE), + group(1), + outputCount(0), + inputCount(0), + relu(false), + relu6(false) { + } +}; + +struct Convolution2DCommon FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Convolution2DCommonT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return Convolution2DCommonTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADX = 4, + VT_PADY = 6, + VT_KERNELX = 8, + VT_KERNELY = 10, + VT_STRIDEX = 12, + VT_STRIDEY = 14, + VT_DILATEX = 16, + VT_DILATEY = 18, + VT_PADMODE = 20, + VT_GROUP = 22, + VT_OUTPUTCOUNT = 24, + VT_INPUTCOUNT = 26, + VT_RELU = 28, + VT_RELU6 = 30 + }; + int32_t padX() const { + return GetField(VT_PADX, 0); + } + int32_t padY() const { + return GetField(VT_PADY, 0); + } + int32_t kernelX() const { + return GetField(VT_KERNELX, 1); + } + int32_t kernelY() const { + return GetField(VT_KERNELY, 1); + } + int32_t strideX() const { + return GetField(VT_STRIDEX, 1); + } + int32_t strideY() const { + return GetField(VT_STRIDEY, 1); + } + int32_t dilateX() const { + return GetField(VT_DILATEX, 1); + } + int32_t dilateY() const 
{ + return GetField(VT_DILATEY, 1); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t group() const { + return GetField(VT_GROUP, 1); + } + int32_t outputCount() const { + return GetField(VT_OUTPUTCOUNT, 0); + } + int32_t inputCount() const { + return GetField(VT_INPUTCOUNT, 0); + } + bool relu() const { + return GetField(VT_RELU, 0) != 0; + } + bool relu6() const { + return GetField(VT_RELU6, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADX) && + VerifyField(verifier, VT_PADY) && + VerifyField(verifier, VT_KERNELX) && + VerifyField(verifier, VT_KERNELY) && + VerifyField(verifier, VT_STRIDEX) && + VerifyField(verifier, VT_STRIDEY) && + VerifyField(verifier, VT_DILATEX) && + VerifyField(verifier, VT_DILATEY) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_GROUP) && + VerifyField(verifier, VT_OUTPUTCOUNT) && + VerifyField(verifier, VT_INPUTCOUNT) && + VerifyField(verifier, VT_RELU) && + VerifyField(verifier, VT_RELU6) && + verifier.EndTable(); + } + Convolution2DCommonT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Convolution2DCommonT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DCommonT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Convolution2DCommonBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padX(int32_t padX) { + fbb_.AddElement(Convolution2DCommon::VT_PADX, padX, 0); + } + void add_padY(int32_t padY) { + fbb_.AddElement(Convolution2DCommon::VT_PADY, padY, 0); + } + void add_kernelX(int32_t kernelX) { + fbb_.AddElement(Convolution2DCommon::VT_KERNELX, kernelX, 1); + } + void add_kernelY(int32_t kernelY) { + fbb_.AddElement(Convolution2DCommon::VT_KERNELY, kernelY, 
1); + } + void add_strideX(int32_t strideX) { + fbb_.AddElement(Convolution2DCommon::VT_STRIDEX, strideX, 1); + } + void add_strideY(int32_t strideY) { + fbb_.AddElement(Convolution2DCommon::VT_STRIDEY, strideY, 1); + } + void add_dilateX(int32_t dilateX) { + fbb_.AddElement(Convolution2DCommon::VT_DILATEX, dilateX, 1); + } + void add_dilateY(int32_t dilateY) { + fbb_.AddElement(Convolution2DCommon::VT_DILATEY, dilateY, 1); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(Convolution2DCommon::VT_PADMODE, static_cast(padMode), 0); + } + void add_group(int32_t group) { + fbb_.AddElement(Convolution2DCommon::VT_GROUP, group, 1); + } + void add_outputCount(int32_t outputCount) { + fbb_.AddElement(Convolution2DCommon::VT_OUTPUTCOUNT, outputCount, 0); + } + void add_inputCount(int32_t inputCount) { + fbb_.AddElement(Convolution2DCommon::VT_INPUTCOUNT, inputCount, 0); + } + void add_relu(bool relu) { + fbb_.AddElement(Convolution2DCommon::VT_RELU, static_cast(relu), 0); + } + void add_relu6(bool relu6) { + fbb_.AddElement(Convolution2DCommon::VT_RELU6, static_cast(relu6), 0); + } + explicit Convolution2DCommonBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Convolution2DCommonBuilder &operator=(const Convolution2DCommonBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConvolution2DCommon( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t padX = 0, + int32_t padY = 0, + int32_t kernelX = 1, + int32_t kernelY = 1, + int32_t strideX = 1, + int32_t strideY = 1, + int32_t dilateX = 1, + int32_t dilateY = 1, + PadMode padMode = PadMode_CAFFE, + int32_t group = 1, + int32_t outputCount = 0, + int32_t inputCount = 0, + bool relu = false, + bool relu6 = false) { + Convolution2DCommonBuilder builder_(_fbb); + builder_.add_inputCount(inputCount); + builder_.add_outputCount(outputCount); + 
builder_.add_group(group); + builder_.add_dilateY(dilateY); + builder_.add_dilateX(dilateX); + builder_.add_strideY(strideY); + builder_.add_strideX(strideX); + builder_.add_kernelY(kernelY); + builder_.add_kernelX(kernelX); + builder_.add_padY(padY); + builder_.add_padX(padX); + builder_.add_relu6(relu6); + builder_.add_relu(relu); + builder_.add_padMode(padMode); + return builder_.Finish(); +} + +flatbuffers::Offset CreateConvolution2DCommon(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DCommonT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Convolution3DCommonT : public flatbuffers::NativeTable { + typedef Convolution3DCommon TableType; + std::vector dilates; + std::vector strides; + std::vector kernels; + std::vector pads; + PadMode padMode; + int32_t inputCount; + int32_t outputCount; + bool relu; + bool relu6; + Convolution3DCommonT() + : padMode(PadMode_CAFFE), + inputCount(0), + outputCount(0), + relu(false), + relu6(false) { + } +}; + +struct Convolution3DCommon FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Convolution3DCommonT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return Convolution3DCommonTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DILATES = 4, + VT_STRIDES = 6, + VT_KERNELS = 8, + VT_PADS = 10, + VT_PADMODE = 12, + VT_INPUTCOUNT = 14, + VT_OUTPUTCOUNT = 16, + VT_RELU = 18, + VT_RELU6 = 20 + }; + const flatbuffers::Vector *dilates() const { + return GetPointer *>(VT_DILATES); + } + const flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + const flatbuffers::Vector *kernels() const { + return GetPointer *>(VT_KERNELS); + } + const flatbuffers::Vector *pads() const { + return GetPointer *>(VT_PADS); + } + PadMode padMode() const { + return static_cast(GetField(VT_PADMODE, 0)); + } + int32_t inputCount() const { + return GetField(VT_INPUTCOUNT, 0); + } + int32_t outputCount() const 
{ + return GetField(VT_OUTPUTCOUNT, 0); + } + bool relu() const { + return GetField(VT_RELU, 0) != 0; + } + bool relu6() const { + return GetField(VT_RELU6, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DILATES) && + verifier.VerifyVector(dilates()) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + VerifyOffset(verifier, VT_KERNELS) && + verifier.VerifyVector(kernels()) && + VerifyOffset(verifier, VT_PADS) && + verifier.VerifyVector(pads()) && + VerifyField(verifier, VT_PADMODE) && + VerifyField(verifier, VT_INPUTCOUNT) && + VerifyField(verifier, VT_OUTPUTCOUNT) && + VerifyField(verifier, VT_RELU) && + VerifyField(verifier, VT_RELU6) && + verifier.EndTable(); + } + Convolution3DCommonT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Convolution3DCommonT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DCommonT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Convolution3DCommonBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dilates(flatbuffers::Offset> dilates) { + fbb_.AddOffset(Convolution3DCommon::VT_DILATES, dilates); + } + void add_strides(flatbuffers::Offset> strides) { + fbb_.AddOffset(Convolution3DCommon::VT_STRIDES, strides); + } + void add_kernels(flatbuffers::Offset> kernels) { + fbb_.AddOffset(Convolution3DCommon::VT_KERNELS, kernels); + } + void add_pads(flatbuffers::Offset> pads) { + fbb_.AddOffset(Convolution3DCommon::VT_PADS, pads); + } + void add_padMode(PadMode padMode) { + fbb_.AddElement(Convolution3DCommon::VT_PADMODE, static_cast(padMode), 0); + } + void add_inputCount(int32_t inputCount) { + fbb_.AddElement(Convolution3DCommon::VT_INPUTCOUNT, inputCount, 0); + } + void 
add_outputCount(int32_t outputCount) { + fbb_.AddElement(Convolution3DCommon::VT_OUTPUTCOUNT, outputCount, 0); + } + void add_relu(bool relu) { + fbb_.AddElement(Convolution3DCommon::VT_RELU, static_cast(relu), 0); + } + void add_relu6(bool relu6) { + fbb_.AddElement(Convolution3DCommon::VT_RELU6, static_cast(relu6), 0); + } + explicit Convolution3DCommonBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Convolution3DCommonBuilder &operator=(const Convolution3DCommonBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConvolution3DCommon( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dilates = 0, + flatbuffers::Offset> strides = 0, + flatbuffers::Offset> kernels = 0, + flatbuffers::Offset> pads = 0, + PadMode padMode = PadMode_CAFFE, + int32_t inputCount = 0, + int32_t outputCount = 0, + bool relu = false, + bool relu6 = false) { + Convolution3DCommonBuilder builder_(_fbb); + builder_.add_outputCount(outputCount); + builder_.add_inputCount(inputCount); + builder_.add_pads(pads); + builder_.add_kernels(kernels); + builder_.add_strides(strides); + builder_.add_dilates(dilates); + builder_.add_relu6(relu6); + builder_.add_relu(relu); + builder_.add_padMode(padMode); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateConvolution3DCommonDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dilates = nullptr, + const std::vector *strides = nullptr, + const std::vector *kernels = nullptr, + const std::vector *pads = nullptr, + PadMode padMode = PadMode_CAFFE, + int32_t inputCount = 0, + int32_t outputCount = 0, + bool relu = false, + bool relu6 = false) { + auto dilates__ = dilates ? _fbb.CreateVector(*dilates) : 0; + auto strides__ = strides ? _fbb.CreateVector(*strides) : 0; + auto kernels__ = kernels ? 
_fbb.CreateVector(*kernels) : 0; + auto pads__ = pads ? _fbb.CreateVector(*pads) : 0; + return MNN::CreateConvolution3DCommon( + _fbb, + dilates__, + strides__, + kernels__, + pads__, + padMode, + inputCount, + outputCount, + relu, + relu6); +} + +flatbuffers::Offset CreateConvolution3DCommon(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DCommonT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct IDSTQuanT : public flatbuffers::NativeTable { + typedef IDSTQuan TableType; + std::vector buffer; + std::vector alpha; + int32_t type; + bool useInt32; + float quantScale; + float scaleIn; + float scaleOut; + int32_t aMax; + int32_t aMin; + int32_t readType; + bool has_scaleInt; + IDSTQuanT() + : type(0), + useInt32(false), + quantScale(0.0f), + scaleIn(0.0f), + scaleOut(0.0f), + aMax(0), + aMin(0), + readType(0), + has_scaleInt(false) { + } +}; + +struct IDSTQuan FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef IDSTQuanT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return IDSTQuanTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BUFFER = 4, + VT_ALPHA = 6, + VT_TYPE = 8, + VT_USEINT32 = 10, + VT_QUANTSCALE = 12, + VT_SCALEIN = 14, + VT_SCALEOUT = 16, + VT_AMAX = 18, + VT_AMIN = 20, + VT_READTYPE = 22, + VT_HAS_SCALEINT = 24 + }; + const flatbuffers::Vector *buffer() const { + return GetPointer *>(VT_BUFFER); + } + const flatbuffers::Vector *alpha() const { + return GetPointer *>(VT_ALPHA); + } + int32_t type() const { + return GetField(VT_TYPE, 0); + } + bool useInt32() const { + return GetField(VT_USEINT32, 0) != 0; + } + float quantScale() const { + return GetField(VT_QUANTSCALE, 0.0f); + } + float scaleIn() const { + return GetField(VT_SCALEIN, 0.0f); + } + float scaleOut() const { + return GetField(VT_SCALEOUT, 0.0f); + } + int32_t aMax() const { + return GetField(VT_AMAX, 0); + } + int32_t aMin() const { + return GetField(VT_AMIN, 0); + } 
+ int32_t readType() const { + return GetField(VT_READTYPE, 0); + } + bool has_scaleInt() const { + return GetField(VT_HAS_SCALEINT, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BUFFER) && + verifier.VerifyVector(buffer()) && + VerifyOffset(verifier, VT_ALPHA) && + verifier.VerifyVector(alpha()) && + VerifyField(verifier, VT_TYPE) && + VerifyField(verifier, VT_USEINT32) && + VerifyField(verifier, VT_QUANTSCALE) && + VerifyField(verifier, VT_SCALEIN) && + VerifyField(verifier, VT_SCALEOUT) && + VerifyField(verifier, VT_AMAX) && + VerifyField(verifier, VT_AMIN) && + VerifyField(verifier, VT_READTYPE) && + VerifyField(verifier, VT_HAS_SCALEINT) && + verifier.EndTable(); + } + IDSTQuanT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(IDSTQuanT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const IDSTQuanT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct IDSTQuanBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_buffer(flatbuffers::Offset> buffer) { + fbb_.AddOffset(IDSTQuan::VT_BUFFER, buffer); + } + void add_alpha(flatbuffers::Offset> alpha) { + fbb_.AddOffset(IDSTQuan::VT_ALPHA, alpha); + } + void add_type(int32_t type) { + fbb_.AddElement(IDSTQuan::VT_TYPE, type, 0); + } + void add_useInt32(bool useInt32) { + fbb_.AddElement(IDSTQuan::VT_USEINT32, static_cast(useInt32), 0); + } + void add_quantScale(float quantScale) { + fbb_.AddElement(IDSTQuan::VT_QUANTSCALE, quantScale, 0.0f); + } + void add_scaleIn(float scaleIn) { + fbb_.AddElement(IDSTQuan::VT_SCALEIN, scaleIn, 0.0f); + } + void add_scaleOut(float scaleOut) { + fbb_.AddElement(IDSTQuan::VT_SCALEOUT, scaleOut, 0.0f); + } + void add_aMax(int32_t aMax) { + fbb_.AddElement(IDSTQuan::VT_AMAX, aMax, 0); + } + void 
add_aMin(int32_t aMin) { + fbb_.AddElement(IDSTQuan::VT_AMIN, aMin, 0); + } + void add_readType(int32_t readType) { + fbb_.AddElement(IDSTQuan::VT_READTYPE, readType, 0); + } + void add_has_scaleInt(bool has_scaleInt) { + fbb_.AddElement(IDSTQuan::VT_HAS_SCALEINT, static_cast(has_scaleInt), 0); + } + explicit IDSTQuanBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + IDSTQuanBuilder &operator=(const IDSTQuanBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateIDSTQuan( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> buffer = 0, + flatbuffers::Offset> alpha = 0, + int32_t type = 0, + bool useInt32 = false, + float quantScale = 0.0f, + float scaleIn = 0.0f, + float scaleOut = 0.0f, + int32_t aMax = 0, + int32_t aMin = 0, + int32_t readType = 0, + bool has_scaleInt = false) { + IDSTQuanBuilder builder_(_fbb); + builder_.add_readType(readType); + builder_.add_aMin(aMin); + builder_.add_aMax(aMax); + builder_.add_scaleOut(scaleOut); + builder_.add_scaleIn(scaleIn); + builder_.add_quantScale(quantScale); + builder_.add_type(type); + builder_.add_alpha(alpha); + builder_.add_buffer(buffer); + builder_.add_has_scaleInt(has_scaleInt); + builder_.add_useInt32(useInt32); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateIDSTQuanDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *buffer = nullptr, + const std::vector *alpha = nullptr, + int32_t type = 0, + bool useInt32 = false, + float quantScale = 0.0f, + float scaleIn = 0.0f, + float scaleOut = 0.0f, + int32_t aMax = 0, + int32_t aMin = 0, + int32_t readType = 0, + bool has_scaleInt = false) { + auto buffer__ = buffer ? _fbb.CreateVector(*buffer) : 0; + auto alpha__ = alpha ? 
_fbb.CreateVector(*alpha) : 0; + return MNN::CreateIDSTQuan( + _fbb, + buffer__, + alpha__, + type, + useInt32, + quantScale, + scaleIn, + scaleOut, + aMax, + aMin, + readType, + has_scaleInt); +} + +flatbuffers::Offset CreateIDSTQuan(flatbuffers::FlatBufferBuilder &_fbb, const IDSTQuanT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedFloatParamT : public flatbuffers::NativeTable { + typedef QuantizedFloatParam TableType; + std::vector weight; + std::vector bias; + std::vector scale; + std::vector tensorScale; + QuantizedFloatParamT() { + } +}; + +struct QuantizedFloatParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedFloatParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedFloatParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_WEIGHT = 4, + VT_BIAS = 6, + VT_SCALE = 8, + VT_TENSORSCALE = 10 + }; + const flatbuffers::Vector *weight() const { + return GetPointer *>(VT_WEIGHT); + } + const flatbuffers::Vector *bias() const { + return GetPointer *>(VT_BIAS); + } + const flatbuffers::Vector *scale() const { + return GetPointer *>(VT_SCALE); + } + const flatbuffers::Vector *tensorScale() const { + return GetPointer *>(VT_TENSORSCALE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_WEIGHT) && + verifier.VerifyVector(weight()) && + VerifyOffset(verifier, VT_BIAS) && + verifier.VerifyVector(bias()) && + VerifyOffset(verifier, VT_SCALE) && + verifier.VerifyVector(scale()) && + VerifyOffset(verifier, VT_TENSORSCALE) && + verifier.VerifyVector(tensorScale()) && + verifier.EndTable(); + } + QuantizedFloatParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedFloatParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedFloatParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedFloatParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_weight(flatbuffers::Offset> weight) { + fbb_.AddOffset(QuantizedFloatParam::VT_WEIGHT, weight); + } + void add_bias(flatbuffers::Offset> bias) { + fbb_.AddOffset(QuantizedFloatParam::VT_BIAS, bias); + } + void add_scale(flatbuffers::Offset> scale) { + fbb_.AddOffset(QuantizedFloatParam::VT_SCALE, scale); + } + void add_tensorScale(flatbuffers::Offset> tensorScale) { + fbb_.AddOffset(QuantizedFloatParam::VT_TENSORSCALE, tensorScale); + } + explicit QuantizedFloatParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedFloatParamBuilder &operator=(const QuantizedFloatParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedFloatParam( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> weight = 0, + flatbuffers::Offset> bias = 0, + flatbuffers::Offset> scale = 0, + flatbuffers::Offset> tensorScale = 0) { + QuantizedFloatParamBuilder builder_(_fbb); + builder_.add_tensorScale(tensorScale); + builder_.add_scale(scale); + builder_.add_bias(bias); + builder_.add_weight(weight); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateQuantizedFloatParamDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *weight = nullptr, + const std::vector *bias = nullptr, + const std::vector *scale = nullptr, + const std::vector *tensorScale = nullptr) { + auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; + auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; + auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; + auto tensorScale__ = tensorScale ? 
_fbb.CreateVector(*tensorScale) : 0; + return MNN::CreateQuantizedFloatParam( + _fbb, + weight__, + bias__, + scale__, + tensorScale__); +} + +flatbuffers::Offset CreateQuantizedFloatParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedFloatParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Convolution2DT : public flatbuffers::NativeTable { + typedef Convolution2D TableType; + std::unique_ptr common; + std::vector weight; + std::vector bias; + std::unique_ptr quanParameter; + std::unique_ptr symmetricQuan; + Convolution2DT() { + } +}; + +struct Convolution2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Convolution2DT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return Convolution2DTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_COMMON = 4, + VT_WEIGHT = 6, + VT_BIAS = 8, + VT_QUANPARAMETER = 10, + VT_SYMMETRICQUAN = 12 + }; + const Convolution2DCommon *common() const { + return GetPointer(VT_COMMON); + } + const flatbuffers::Vector *weight() const { + return GetPointer *>(VT_WEIGHT); + } + const flatbuffers::Vector *bias() const { + return GetPointer *>(VT_BIAS); + } + const IDSTQuan *quanParameter() const { + return GetPointer(VT_QUANPARAMETER); + } + const QuantizedFloatParam *symmetricQuan() const { + return GetPointer(VT_SYMMETRICQUAN); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_COMMON) && + verifier.VerifyTable(common()) && + VerifyOffset(verifier, VT_WEIGHT) && + verifier.VerifyVector(weight()) && + VerifyOffset(verifier, VT_BIAS) && + verifier.VerifyVector(bias()) && + VerifyOffset(verifier, VT_QUANPARAMETER) && + verifier.VerifyTable(quanParameter()) && + VerifyOffset(verifier, VT_SYMMETRICQUAN) && + verifier.VerifyTable(symmetricQuan()) && + verifier.EndTable(); + } + Convolution2DT *UnPack(const flatbuffers::resolver_function_t 
*_resolver = nullptr) const; + void UnPackTo(Convolution2DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Convolution2DBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_common(flatbuffers::Offset common) { + fbb_.AddOffset(Convolution2D::VT_COMMON, common); + } + void add_weight(flatbuffers::Offset> weight) { + fbb_.AddOffset(Convolution2D::VT_WEIGHT, weight); + } + void add_bias(flatbuffers::Offset> bias) { + fbb_.AddOffset(Convolution2D::VT_BIAS, bias); + } + void add_quanParameter(flatbuffers::Offset quanParameter) { + fbb_.AddOffset(Convolution2D::VT_QUANPARAMETER, quanParameter); + } + void add_symmetricQuan(flatbuffers::Offset symmetricQuan) { + fbb_.AddOffset(Convolution2D::VT_SYMMETRICQUAN, symmetricQuan); + } + explicit Convolution2DBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Convolution2DBuilder &operator=(const Convolution2DBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConvolution2D( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset common = 0, + flatbuffers::Offset> weight = 0, + flatbuffers::Offset> bias = 0, + flatbuffers::Offset quanParameter = 0, + flatbuffers::Offset symmetricQuan = 0) { + Convolution2DBuilder builder_(_fbb); + builder_.add_symmetricQuan(symmetricQuan); + builder_.add_quanParameter(quanParameter); + builder_.add_bias(bias); + builder_.add_weight(weight); + builder_.add_common(common); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateConvolution2DDirect( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset common = 0, + const std::vector *weight = nullptr, + const 
std::vector *bias = nullptr, + flatbuffers::Offset quanParameter = 0, + flatbuffers::Offset symmetricQuan = 0) { + auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; + auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; + return MNN::CreateConvolution2D( + _fbb, + common, + weight__, + bias__, + quanParameter, + symmetricQuan); +} + +flatbuffers::Offset CreateConvolution2D(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Convolution3DT : public flatbuffers::NativeTable { + typedef Convolution3D TableType; + std::unique_ptr common; + std::vector weight; + std::vector bias; + Convolution3DT() { + } +}; + +struct Convolution3D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Convolution3DT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return Convolution3DTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_COMMON = 4, + VT_WEIGHT = 6, + VT_BIAS = 8 + }; + const Convolution3DCommon *common() const { + return GetPointer(VT_COMMON); + } + const flatbuffers::Vector *weight() const { + return GetPointer *>(VT_WEIGHT); + } + const flatbuffers::Vector *bias() const { + return GetPointer *>(VT_BIAS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_COMMON) && + verifier.VerifyTable(common()) && + VerifyOffset(verifier, VT_WEIGHT) && + verifier.VerifyVector(weight()) && + VerifyOffset(verifier, VT_BIAS) && + verifier.VerifyVector(bias()) && + verifier.EndTable(); + } + Convolution3DT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Convolution3DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + 
+struct Convolution3DBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_common(flatbuffers::Offset common) { + fbb_.AddOffset(Convolution3D::VT_COMMON, common); + } + void add_weight(flatbuffers::Offset> weight) { + fbb_.AddOffset(Convolution3D::VT_WEIGHT, weight); + } + void add_bias(flatbuffers::Offset> bias) { + fbb_.AddOffset(Convolution3D::VT_BIAS, bias); + } + explicit Convolution3DBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Convolution3DBuilder &operator=(const Convolution3DBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConvolution3D( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset common = 0, + flatbuffers::Offset> weight = 0, + flatbuffers::Offset> bias = 0) { + Convolution3DBuilder builder_(_fbb); + builder_.add_bias(bias); + builder_.add_weight(weight); + builder_.add_common(common); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateConvolution3DDirect( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset common = 0, + const std::vector *weight = nullptr, + const std::vector *bias = nullptr) { + auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; + auto bias__ = bias ? 
_fbb.CreateVector(*bias) : 0; + return MNN::CreateConvolution3D( + _fbb, + common, + weight__, + bias__); +} + +flatbuffers::Offset CreateConvolution3D(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct InnerProductT : public flatbuffers::NativeTable { + typedef InnerProduct TableType; + int32_t outputCount; + int32_t biasTerm; + int32_t weightSize; + std::vector weight; + std::vector bias; + int32_t axis; + bool transpose; + std::unique_ptr quanParameter; + InnerProductT() + : outputCount(0), + biasTerm(0), + weightSize(0), + axis(0), + transpose(false) { + } +}; + +struct InnerProduct FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef InnerProductT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return InnerProductTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUTCOUNT = 4, + VT_BIASTERM = 6, + VT_WEIGHTSIZE = 8, + VT_WEIGHT = 10, + VT_BIAS = 12, + VT_AXIS = 14, + VT_TRANSPOSE = 16, + VT_QUANPARAMETER = 18 + }; + int32_t outputCount() const { + return GetField(VT_OUTPUTCOUNT, 0); + } + int32_t biasTerm() const { + return GetField(VT_BIASTERM, 0); + } + int32_t weightSize() const { + return GetField(VT_WEIGHTSIZE, 0); + } + const flatbuffers::Vector *weight() const { + return GetPointer *>(VT_WEIGHT); + } + const flatbuffers::Vector *bias() const { + return GetPointer *>(VT_BIAS); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool transpose() const { + return GetField(VT_TRANSPOSE, 0) != 0; + } + const IDSTQuan *quanParameter() const { + return GetPointer(VT_QUANPARAMETER); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUTCOUNT) && + VerifyField(verifier, VT_BIASTERM) && + VerifyField(verifier, VT_WEIGHTSIZE) && + VerifyOffset(verifier, VT_WEIGHT) && + verifier.VerifyVector(weight()) && 
+ VerifyOffset(verifier, VT_BIAS) && + verifier.VerifyVector(bias()) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_TRANSPOSE) && + VerifyOffset(verifier, VT_QUANPARAMETER) && + verifier.VerifyTable(quanParameter()) && + verifier.EndTable(); + } + InnerProductT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(InnerProductT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const InnerProductT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct InnerProductBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_outputCount(int32_t outputCount) { + fbb_.AddElement(InnerProduct::VT_OUTPUTCOUNT, outputCount, 0); + } + void add_biasTerm(int32_t biasTerm) { + fbb_.AddElement(InnerProduct::VT_BIASTERM, biasTerm, 0); + } + void add_weightSize(int32_t weightSize) { + fbb_.AddElement(InnerProduct::VT_WEIGHTSIZE, weightSize, 0); + } + void add_weight(flatbuffers::Offset> weight) { + fbb_.AddOffset(InnerProduct::VT_WEIGHT, weight); + } + void add_bias(flatbuffers::Offset> bias) { + fbb_.AddOffset(InnerProduct::VT_BIAS, bias); + } + void add_axis(int32_t axis) { + fbb_.AddElement(InnerProduct::VT_AXIS, axis, 0); + } + void add_transpose(bool transpose) { + fbb_.AddElement(InnerProduct::VT_TRANSPOSE, static_cast(transpose), 0); + } + void add_quanParameter(flatbuffers::Offset quanParameter) { + fbb_.AddOffset(InnerProduct::VT_QUANPARAMETER, quanParameter); + } + explicit InnerProductBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + InnerProductBuilder &operator=(const InnerProductBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateInnerProduct( + flatbuffers::FlatBufferBuilder &_fbb, + 
int32_t outputCount = 0, + int32_t biasTerm = 0, + int32_t weightSize = 0, + flatbuffers::Offset> weight = 0, + flatbuffers::Offset> bias = 0, + int32_t axis = 0, + bool transpose = false, + flatbuffers::Offset quanParameter = 0) { + InnerProductBuilder builder_(_fbb); + builder_.add_quanParameter(quanParameter); + builder_.add_axis(axis); + builder_.add_bias(bias); + builder_.add_weight(weight); + builder_.add_weightSize(weightSize); + builder_.add_biasTerm(biasTerm); + builder_.add_outputCount(outputCount); + builder_.add_transpose(transpose); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateInnerProductDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t outputCount = 0, + int32_t biasTerm = 0, + int32_t weightSize = 0, + const std::vector *weight = nullptr, + const std::vector *bias = nullptr, + int32_t axis = 0, + bool transpose = false, + flatbuffers::Offset quanParameter = 0) { + auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; + auto bias__ = bias ? 
_fbb.CreateVector(*bias) : 0; + return MNN::CreateInnerProduct( + _fbb, + outputCount, + biasTerm, + weightSize, + weight__, + bias__, + axis, + transpose, + quanParameter); +} + +flatbuffers::Offset CreateInnerProduct(flatbuffers::FlatBufferBuilder &_fbb, const InnerProductT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PoolT : public flatbuffers::NativeTable { + typedef Pool TableType; + int32_t padX; + int32_t padY; + bool isGlobal; + int32_t kernelX; + int32_t kernelY; + int32_t strideX; + int32_t strideY; + PoolType type; + PoolPadType padType; + DataType dataType; + bool ceilModel; + PoolT() + : padX(0), + padY(0), + isGlobal(false), + kernelX(0), + kernelY(0), + strideX(0), + strideY(0), + type(PoolType_MAXPOOL), + padType(PoolPadType_CAFFE), + dataType(DataType_DT_FLOAT), + ceilModel(true) { + } +}; + +struct Pool FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PoolT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return PoolTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADX = 4, + VT_PADY = 6, + VT_ISGLOBAL = 8, + VT_KERNELX = 10, + VT_KERNELY = 12, + VT_STRIDEX = 14, + VT_STRIDEY = 16, + VT_TYPE = 18, + VT_PADTYPE = 20, + VT_DATATYPE = 22, + VT_CEILMODEL = 24 + }; + int32_t padX() const { + return GetField(VT_PADX, 0); + } + int32_t padY() const { + return GetField(VT_PADY, 0); + } + bool isGlobal() const { + return GetField(VT_ISGLOBAL, 0) != 0; + } + int32_t kernelX() const { + return GetField(VT_KERNELX, 0); + } + int32_t kernelY() const { + return GetField(VT_KERNELY, 0); + } + int32_t strideX() const { + return GetField(VT_STRIDEX, 0); + } + int32_t strideY() const { + return GetField(VT_STRIDEY, 0); + } + PoolType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + PoolPadType padType() const { + return static_cast(GetField(VT_PADTYPE, 0)); + } + DataType dataType() const { + return 
static_cast(GetField(VT_DATATYPE, 1)); + } + bool ceilModel() const { + return GetField(VT_CEILMODEL, 1) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADX) && + VerifyField(verifier, VT_PADY) && + VerifyField(verifier, VT_ISGLOBAL) && + VerifyField(verifier, VT_KERNELX) && + VerifyField(verifier, VT_KERNELY) && + VerifyField(verifier, VT_STRIDEX) && + VerifyField(verifier, VT_STRIDEY) && + VerifyField(verifier, VT_TYPE) && + VerifyField(verifier, VT_PADTYPE) && + VerifyField(verifier, VT_DATATYPE) && + VerifyField(verifier, VT_CEILMODEL) && + verifier.EndTable(); + } + PoolT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PoolT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PoolT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PoolBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padX(int32_t padX) { + fbb_.AddElement(Pool::VT_PADX, padX, 0); + } + void add_padY(int32_t padY) { + fbb_.AddElement(Pool::VT_PADY, padY, 0); + } + void add_isGlobal(bool isGlobal) { + fbb_.AddElement(Pool::VT_ISGLOBAL, static_cast(isGlobal), 0); + } + void add_kernelX(int32_t kernelX) { + fbb_.AddElement(Pool::VT_KERNELX, kernelX, 0); + } + void add_kernelY(int32_t kernelY) { + fbb_.AddElement(Pool::VT_KERNELY, kernelY, 0); + } + void add_strideX(int32_t strideX) { + fbb_.AddElement(Pool::VT_STRIDEX, strideX, 0); + } + void add_strideY(int32_t strideY) { + fbb_.AddElement(Pool::VT_STRIDEY, strideY, 0); + } + void add_type(PoolType type) { + fbb_.AddElement(Pool::VT_TYPE, static_cast(type), 0); + } + void add_padType(PoolPadType padType) { + fbb_.AddElement(Pool::VT_PADTYPE, static_cast(padType), 0); + } + void add_dataType(DataType dataType) { + fbb_.AddElement(Pool::VT_DATATYPE, 
static_cast(dataType), 1); + } + void add_ceilModel(bool ceilModel) { + fbb_.AddElement(Pool::VT_CEILMODEL, static_cast(ceilModel), 1); + } + explicit PoolBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PoolBuilder &operator=(const PoolBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePool( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t padX = 0, + int32_t padY = 0, + bool isGlobal = false, + int32_t kernelX = 0, + int32_t kernelY = 0, + int32_t strideX = 0, + int32_t strideY = 0, + PoolType type = PoolType_MAXPOOL, + PoolPadType padType = PoolPadType_CAFFE, + DataType dataType = DataType_DT_FLOAT, + bool ceilModel = true) { + PoolBuilder builder_(_fbb); + builder_.add_dataType(dataType); + builder_.add_strideY(strideY); + builder_.add_strideX(strideX); + builder_.add_kernelY(kernelY); + builder_.add_kernelX(kernelX); + builder_.add_padY(padY); + builder_.add_padX(padX); + builder_.add_ceilModel(ceilModel); + builder_.add_padType(padType); + builder_.add_type(type); + builder_.add_isGlobal(isGlobal); + return builder_.Finish(); +} + +flatbuffers::Offset CreatePool(flatbuffers::FlatBufferBuilder &_fbb, const PoolT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Pool3DT : public flatbuffers::NativeTable { + typedef Pool3D TableType; + std::vector strides; + std::vector kernels; + std::vector pads; + PoolType type; + PoolPadType padType; + Pool3DT() + : type(PoolType_MAXPOOL), + padType(PoolPadType_CAFFE) { + } +}; + +struct Pool3D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Pool3DT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return Pool3DTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_STRIDES = 4, + VT_KERNELS = 6, + VT_PADS = 8, + VT_TYPE = 10, + VT_PADTYPE 
= 12 + }; + const flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + const flatbuffers::Vector *kernels() const { + return GetPointer *>(VT_KERNELS); + } + const flatbuffers::Vector *pads() const { + return GetPointer *>(VT_PADS); + } + PoolType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + PoolPadType padType() const { + return static_cast(GetField(VT_PADTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + VerifyOffset(verifier, VT_KERNELS) && + verifier.VerifyVector(kernels()) && + VerifyOffset(verifier, VT_PADS) && + verifier.VerifyVector(pads()) && + VerifyField(verifier, VT_TYPE) && + VerifyField(verifier, VT_PADTYPE) && + verifier.EndTable(); + } + Pool3DT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Pool3DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool3DT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Pool3DBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_strides(flatbuffers::Offset> strides) { + fbb_.AddOffset(Pool3D::VT_STRIDES, strides); + } + void add_kernels(flatbuffers::Offset> kernels) { + fbb_.AddOffset(Pool3D::VT_KERNELS, kernels); + } + void add_pads(flatbuffers::Offset> pads) { + fbb_.AddOffset(Pool3D::VT_PADS, pads); + } + void add_type(PoolType type) { + fbb_.AddElement(Pool3D::VT_TYPE, static_cast(type), 0); + } + void add_padType(PoolPadType padType) { + fbb_.AddElement(Pool3D::VT_PADTYPE, static_cast(padType), 0); + } + explicit Pool3DBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Pool3DBuilder &operator=(const Pool3DBuilder &); + flatbuffers::Offset Finish() { + const auto end 
= fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePool3D( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> strides = 0, + flatbuffers::Offset> kernels = 0, + flatbuffers::Offset> pads = 0, + PoolType type = PoolType_MAXPOOL, + PoolPadType padType = PoolPadType_CAFFE) { + Pool3DBuilder builder_(_fbb); + builder_.add_pads(pads); + builder_.add_kernels(kernels); + builder_.add_strides(strides); + builder_.add_padType(padType); + builder_.add_type(type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePool3DDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *strides = nullptr, + const std::vector *kernels = nullptr, + const std::vector *pads = nullptr, + PoolType type = PoolType_MAXPOOL, + PoolPadType padType = PoolPadType_CAFFE) { + auto strides__ = strides ? _fbb.CreateVector(*strides) : 0; + auto kernels__ = kernels ? _fbb.CreateVector(*kernels) : 0; + auto pads__ = pads ? 
_fbb.CreateVector(*pads) : 0; + return MNN::CreatePool3D( + _fbb, + strides__, + kernels__, + pads__, + type, + padType); +} + +flatbuffers::Offset CreatePool3D(flatbuffers::FlatBufferBuilder &_fbb, const Pool3DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReluT : public flatbuffers::NativeTable { + typedef Relu TableType; + float slope; + ReluT() + : slope(0.0f) { + } +}; + +struct Relu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReluT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ReluTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SLOPE = 4 + }; + float slope() const { + return GetField(VT_SLOPE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SLOPE) && + verifier.EndTable(); + } + ReluT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReluT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReluT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReluBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_slope(float slope) { + fbb_.AddElement(Relu::VT_SLOPE, slope, 0.0f); + } + explicit ReluBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ReluBuilder &operator=(const ReluBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRelu( + flatbuffers::FlatBufferBuilder &_fbb, + float slope = 0.0f) { + ReluBuilder builder_(_fbb); + builder_.add_slope(slope); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRelu(flatbuffers::FlatBufferBuilder &_fbb, const 
ReluT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Relu6T : public flatbuffers::NativeTable { + typedef Relu6 TableType; + float slope; + Relu6T() + : slope(0.0f) { + } +}; + +struct Relu6 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Relu6T NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return Relu6TypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SLOPE = 4 + }; + float slope() const { + return GetField(VT_SLOPE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SLOPE) && + verifier.EndTable(); + } + Relu6T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Relu6T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Relu6T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Relu6Builder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_slope(float slope) { + fbb_.AddElement(Relu6::VT_SLOPE, slope, 0.0f); + } + explicit Relu6Builder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Relu6Builder &operator=(const Relu6Builder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRelu6( + flatbuffers::FlatBufferBuilder &_fbb, + float slope = 0.0f) { + Relu6Builder builder_(_fbb); + builder_.add_slope(slope); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRelu6(flatbuffers::FlatBufferBuilder &_fbb, const Relu6T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PReluT : public flatbuffers::NativeTable { + typedef PRelu TableType; + int32_t slopeCount; + std::vector 
slope; + PReluT() + : slopeCount(0) { + } +}; + +struct PRelu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PReluT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return PReluTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SLOPECOUNT = 4, + VT_SLOPE = 6 + }; + int32_t slopeCount() const { + return GetField(VT_SLOPECOUNT, 0); + } + const flatbuffers::Vector *slope() const { + return GetPointer *>(VT_SLOPE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SLOPECOUNT) && + VerifyOffset(verifier, VT_SLOPE) && + verifier.VerifyVector(slope()) && + verifier.EndTable(); + } + PReluT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PReluT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PReluT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PReluBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_slopeCount(int32_t slopeCount) { + fbb_.AddElement(PRelu::VT_SLOPECOUNT, slopeCount, 0); + } + void add_slope(flatbuffers::Offset> slope) { + fbb_.AddOffset(PRelu::VT_SLOPE, slope); + } + explicit PReluBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PReluBuilder &operator=(const PReluBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePRelu( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t slopeCount = 0, + flatbuffers::Offset> slope = 0) { + PReluBuilder builder_(_fbb); + builder_.add_slope(slope); + builder_.add_slopeCount(slopeCount); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePReluDirect( + 
flatbuffers::FlatBufferBuilder &_fbb, + int32_t slopeCount = 0, + const std::vector *slope = nullptr) { + auto slope__ = slope ? _fbb.CreateVector(*slope) : 0; + return MNN::CreatePRelu( + _fbb, + slopeCount, + slope__); +} + +flatbuffers::Offset CreatePRelu(flatbuffers::FlatBufferBuilder &_fbb, const PReluT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ELUT : public flatbuffers::NativeTable { + typedef ELU TableType; + float alpha; + ELUT() + : alpha(0.0f) { + } +}; + +struct ELU FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ELUT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ELUTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALPHA = 4 + }; + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALPHA) && + verifier.EndTable(); + } + ELUT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ELUT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ELUT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ELUBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_alpha(float alpha) { + fbb_.AddElement(ELU::VT_ALPHA, alpha, 0.0f); + } + explicit ELUBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ELUBuilder &operator=(const ELUBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateELU( + flatbuffers::FlatBufferBuilder &_fbb, + float alpha = 0.0f) { + ELUBuilder builder_(_fbb); + builder_.add_alpha(alpha); + return builder_.Finish(); +} + 
+flatbuffers::Offset CreateELU(flatbuffers::FlatBufferBuilder &_fbb, const ELUT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LRNT : public flatbuffers::NativeTable { + typedef LRN TableType; + int32_t regionType; + int32_t localSize; + float alpha; + float beta; + LRNT() + : regionType(0), + localSize(0), + alpha(0.0f), + beta(0.0f) { + } +}; + +struct LRN FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LRNT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return LRNTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_REGIONTYPE = 4, + VT_LOCALSIZE = 6, + VT_ALPHA = 8, + VT_BETA = 10 + }; + int32_t regionType() const { + return GetField(VT_REGIONTYPE, 0); + } + int32_t localSize() const { + return GetField(VT_LOCALSIZE, 0); + } + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + float beta() const { + return GetField(VT_BETA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_REGIONTYPE) && + VerifyField(verifier, VT_LOCALSIZE) && + VerifyField(verifier, VT_ALPHA) && + VerifyField(verifier, VT_BETA) && + verifier.EndTable(); + } + LRNT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LRNT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LRNT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LRNBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_regionType(int32_t regionType) { + fbb_.AddElement(LRN::VT_REGIONTYPE, regionType, 0); + } + void add_localSize(int32_t localSize) { + fbb_.AddElement(LRN::VT_LOCALSIZE, localSize, 0); + } + void add_alpha(float alpha) { + fbb_.AddElement(LRN::VT_ALPHA, alpha, 0.0f); + } + void add_beta(float beta) { 
+ fbb_.AddElement(LRN::VT_BETA, beta, 0.0f); + } + explicit LRNBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + LRNBuilder &operator=(const LRNBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLRN( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t regionType = 0, + int32_t localSize = 0, + float alpha = 0.0f, + float beta = 0.0f) { + LRNBuilder builder_(_fbb); + builder_.add_beta(beta); + builder_.add_alpha(alpha); + builder_.add_localSize(localSize); + builder_.add_regionType(regionType); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLRN(flatbuffers::FlatBufferBuilder &_fbb, const LRNT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ArgMaxT : public flatbuffers::NativeTable { + typedef ArgMax TableType; + int32_t outMaxVal; + int32_t topK; + int32_t axis; + int32_t softmaxThreshold; + ArgMaxT() + : outMaxVal(0), + topK(0), + axis(0), + softmaxThreshold(0) { + } +}; + +struct ArgMax FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ArgMaxT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ArgMaxTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTMAXVAL = 4, + VT_TOPK = 6, + VT_AXIS = 8, + VT_SOFTMAXTHRESHOLD = 10 + }; + int32_t outMaxVal() const { + return GetField(VT_OUTMAXVAL, 0); + } + int32_t topK() const { + return GetField(VT_TOPK, 0); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + int32_t softmaxThreshold() const { + return GetField(VT_SOFTMAXTHRESHOLD, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTMAXVAL) && + VerifyField(verifier, VT_TOPK) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_SOFTMAXTHRESHOLD) && + 
verifier.EndTable(); + } + ArgMaxT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ArgMaxT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ArgMaxBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_outMaxVal(int32_t outMaxVal) { + fbb_.AddElement(ArgMax::VT_OUTMAXVAL, outMaxVal, 0); + } + void add_topK(int32_t topK) { + fbb_.AddElement(ArgMax::VT_TOPK, topK, 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(ArgMax::VT_AXIS, axis, 0); + } + void add_softmaxThreshold(int32_t softmaxThreshold) { + fbb_.AddElement(ArgMax::VT_SOFTMAXTHRESHOLD, softmaxThreshold, 0); + } + explicit ArgMaxBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ArgMaxBuilder &operator=(const ArgMaxBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateArgMax( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t outMaxVal = 0, + int32_t topK = 0, + int32_t axis = 0, + int32_t softmaxThreshold = 0) { + ArgMaxBuilder builder_(_fbb); + builder_.add_softmaxThreshold(softmaxThreshold); + builder_.add_axis(axis); + builder_.add_topK(topK); + builder_.add_outMaxVal(outMaxVal); + return builder_.Finish(); +} + +flatbuffers::Offset CreateArgMax(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AxisT : public flatbuffers::NativeTable { + typedef Axis TableType; + int32_t axis; + AxisT() + : axis(0) { + } +}; + +struct Axis FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AxisT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return 
AxisTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } + AxisT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AxisT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AxisT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AxisBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(Axis::VT_AXIS, axis, 0); + } + explicit AxisBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AxisBuilder &operator=(const AxisBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAxis( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0) { + AxisBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +flatbuffers::Offset CreateAxis(flatbuffers::FlatBufferBuilder &_fbb, const AxisT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct InputT : public flatbuffers::NativeTable { + typedef Input TableType; + std::vector dims; + DataType dtype; + MNN_DATA_FORMAT dformat; + InputT() + : dtype(DataType_DT_FLOAT), + dformat(MNN_DATA_FORMAT_NC4HW4) { + } +}; + +struct Input FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef InputT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return InputTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMS = 4, + VT_DTYPE = 6, + 
VT_DFORMAT = 8 + }; + const flatbuffers::Vector *dims() const { + return GetPointer *>(VT_DIMS); + } + DataType dtype() const { + return static_cast(GetField(VT_DTYPE, 1)); + } + MNN_DATA_FORMAT dformat() const { + return static_cast(GetField(VT_DFORMAT, 2)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIMS) && + verifier.VerifyVector(dims()) && + VerifyField(verifier, VT_DTYPE) && + VerifyField(verifier, VT_DFORMAT) && + verifier.EndTable(); + } + InputT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(InputT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const InputT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct InputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dims(flatbuffers::Offset> dims) { + fbb_.AddOffset(Input::VT_DIMS, dims); + } + void add_dtype(DataType dtype) { + fbb_.AddElement(Input::VT_DTYPE, static_cast(dtype), 1); + } + void add_dformat(MNN_DATA_FORMAT dformat) { + fbb_.AddElement(Input::VT_DFORMAT, static_cast(dformat), 2); + } + explicit InputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + InputBuilder &operator=(const InputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateInput( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dims = 0, + DataType dtype = DataType_DT_FLOAT, + MNN_DATA_FORMAT dformat = MNN_DATA_FORMAT_NC4HW4) { + InputBuilder builder_(_fbb); + builder_.add_dtype(dtype); + builder_.add_dims(dims); + builder_.add_dformat(dformat); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateInputDirect( + 
flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dims = nullptr, + DataType dtype = DataType_DT_FLOAT, + MNN_DATA_FORMAT dformat = MNN_DATA_FORMAT_NC4HW4) { + auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; + return MNN::CreateInput( + _fbb, + dims__, + dtype, + dformat); +} + +flatbuffers::Offset CreateInput(flatbuffers::FlatBufferBuilder &_fbb, const InputT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LSTMT : public flatbuffers::NativeTable { + typedef LSTM TableType; + int32_t outputCount; + int32_t weightSize; + float clippingThreshold; + std::unique_ptr weightI; + std::unique_ptr weightH; + std::unique_ptr bias; + std::unique_ptr weightIQ; + std::unique_ptr weightIA; + float quantScale; + LSTMT() + : outputCount(0), + weightSize(0), + clippingThreshold(0.0f), + quantScale(0.0f) { + } +}; + +struct LSTM FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LSTMT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return LSTMTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUTCOUNT = 4, + VT_WEIGHTSIZE = 6, + VT_CLIPPINGTHRESHOLD = 8, + VT_WEIGHTI = 10, + VT_WEIGHTH = 12, + VT_BIAS = 14, + VT_WEIGHTIQ = 16, + VT_WEIGHTIA = 18, + VT_QUANTSCALE = 20 + }; + int32_t outputCount() const { + return GetField(VT_OUTPUTCOUNT, 0); + } + int32_t weightSize() const { + return GetField(VT_WEIGHTSIZE, 0); + } + float clippingThreshold() const { + return GetField(VT_CLIPPINGTHRESHOLD, 0.0f); + } + const Blob *weightI() const { + return GetPointer(VT_WEIGHTI); + } + const Blob *weightH() const { + return GetPointer(VT_WEIGHTH); + } + const Blob *bias() const { + return GetPointer(VT_BIAS); + } + const Blob *weightIQ() const { + return GetPointer(VT_WEIGHTIQ); + } + const Blob *weightIA() const { + return GetPointer(VT_WEIGHTIA); + } + float quantScale() const { + return GetField(VT_QUANTSCALE, 0.0f); + } + bool Verify(flatbuffers::Verifier 
&verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUTCOUNT) && + VerifyField(verifier, VT_WEIGHTSIZE) && + VerifyField(verifier, VT_CLIPPINGTHRESHOLD) && + VerifyOffset(verifier, VT_WEIGHTI) && + verifier.VerifyTable(weightI()) && + VerifyOffset(verifier, VT_WEIGHTH) && + verifier.VerifyTable(weightH()) && + VerifyOffset(verifier, VT_BIAS) && + verifier.VerifyTable(bias()) && + VerifyOffset(verifier, VT_WEIGHTIQ) && + verifier.VerifyTable(weightIQ()) && + VerifyOffset(verifier, VT_WEIGHTIA) && + verifier.VerifyTable(weightIA()) && + VerifyField(verifier, VT_QUANTSCALE) && + verifier.EndTable(); + } + LSTMT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LSTMT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LSTMBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_outputCount(int32_t outputCount) { + fbb_.AddElement(LSTM::VT_OUTPUTCOUNT, outputCount, 0); + } + void add_weightSize(int32_t weightSize) { + fbb_.AddElement(LSTM::VT_WEIGHTSIZE, weightSize, 0); + } + void add_clippingThreshold(float clippingThreshold) { + fbb_.AddElement(LSTM::VT_CLIPPINGTHRESHOLD, clippingThreshold, 0.0f); + } + void add_weightI(flatbuffers::Offset weightI) { + fbb_.AddOffset(LSTM::VT_WEIGHTI, weightI); + } + void add_weightH(flatbuffers::Offset weightH) { + fbb_.AddOffset(LSTM::VT_WEIGHTH, weightH); + } + void add_bias(flatbuffers::Offset bias) { + fbb_.AddOffset(LSTM::VT_BIAS, bias); + } + void add_weightIQ(flatbuffers::Offset weightIQ) { + fbb_.AddOffset(LSTM::VT_WEIGHTIQ, weightIQ); + } + void add_weightIA(flatbuffers::Offset weightIA) { + fbb_.AddOffset(LSTM::VT_WEIGHTIA, weightIA); + } + void add_quantScale(float quantScale) { + fbb_.AddElement(LSTM::VT_QUANTSCALE, 
quantScale, 0.0f); + } + explicit LSTMBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + LSTMBuilder &operator=(const LSTMBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLSTM( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t outputCount = 0, + int32_t weightSize = 0, + float clippingThreshold = 0.0f, + flatbuffers::Offset weightI = 0, + flatbuffers::Offset weightH = 0, + flatbuffers::Offset bias = 0, + flatbuffers::Offset weightIQ = 0, + flatbuffers::Offset weightIA = 0, + float quantScale = 0.0f) { + LSTMBuilder builder_(_fbb); + builder_.add_quantScale(quantScale); + builder_.add_weightIA(weightIA); + builder_.add_weightIQ(weightIQ); + builder_.add_bias(bias); + builder_.add_weightH(weightH); + builder_.add_weightI(weightI); + builder_.add_clippingThreshold(clippingThreshold); + builder_.add_weightSize(weightSize); + builder_.add_outputCount(outputCount); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLSTM(flatbuffers::FlatBufferBuilder &_fbb, const LSTMT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SliceT : public flatbuffers::NativeTable { + typedef Slice TableType; + int32_t axis; + std::vector slicePoints; + NetSource sourceType; + SliceT() + : axis(0), + sourceType(NetSource_CAFFE) { + } +}; + +struct Slice FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SliceT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return SliceTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_SLICEPOINTS = 6, + VT_SOURCETYPE = 8 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + const flatbuffers::Vector *slicePoints() const { + return GetPointer *>(VT_SLICEPOINTS); + } + NetSource sourceType() const { + return 
static_cast(GetField(VT_SOURCETYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyOffset(verifier, VT_SLICEPOINTS) && + verifier.VerifyVector(slicePoints()) && + VerifyField(verifier, VT_SOURCETYPE) && + verifier.EndTable(); + } + SliceT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SliceT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SliceBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(Slice::VT_AXIS, axis, 0); + } + void add_slicePoints(flatbuffers::Offset> slicePoints) { + fbb_.AddOffset(Slice::VT_SLICEPOINTS, slicePoints); + } + void add_sourceType(NetSource sourceType) { + fbb_.AddElement(Slice::VT_SOURCETYPE, static_cast(sourceType), 0); + } + explicit SliceBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SliceBuilder &operator=(const SliceBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSlice( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + flatbuffers::Offset> slicePoints = 0, + NetSource sourceType = NetSource_CAFFE) { + SliceBuilder builder_(_fbb); + builder_.add_slicePoints(slicePoints); + builder_.add_axis(axis); + builder_.add_sourceType(sourceType); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSliceDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + const std::vector *slicePoints = nullptr, + NetSource sourceType = NetSource_CAFFE) { + auto slicePoints__ = slicePoints ? 
_fbb.CreateVector(*slicePoints) : 0; + return MNN::CreateSlice( + _fbb, + axis, + slicePoints__, + sourceType); +} + +flatbuffers::Offset CreateSlice(flatbuffers::FlatBufferBuilder &_fbb, const SliceT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BatchNormT : public flatbuffers::NativeTable { + typedef BatchNorm TableType; + int32_t channels; + std::vector slopeData; + std::vector meanData; + std::vector varData; + std::vector biasData; + std::vector Adata; + std::vector Bdata; + float epsilon; + BatchNormT() + : channels(0), + epsilon(0.001f) { + } +}; + +struct BatchNorm FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BatchNormT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return BatchNormTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CHANNELS = 4, + VT_SLOPEDATA = 6, + VT_MEANDATA = 8, + VT_VARDATA = 10, + VT_BIASDATA = 12, + VT_ADATA = 14, + VT_BDATA = 16, + VT_EPSILON = 18 + }; + int32_t channels() const { + return GetField(VT_CHANNELS, 0); + } + const flatbuffers::Vector *slopeData() const { + return GetPointer *>(VT_SLOPEDATA); + } + const flatbuffers::Vector *meanData() const { + return GetPointer *>(VT_MEANDATA); + } + const flatbuffers::Vector *varData() const { + return GetPointer *>(VT_VARDATA); + } + const flatbuffers::Vector *biasData() const { + return GetPointer *>(VT_BIASDATA); + } + const flatbuffers::Vector *Adata() const { + return GetPointer *>(VT_ADATA); + } + const flatbuffers::Vector *Bdata() const { + return GetPointer *>(VT_BDATA); + } + float epsilon() const { + return GetField(VT_EPSILON, 0.001f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_CHANNELS) && + VerifyOffset(verifier, VT_SLOPEDATA) && + verifier.VerifyVector(slopeData()) && + VerifyOffset(verifier, VT_MEANDATA) && + verifier.VerifyVector(meanData()) && + 
VerifyOffset(verifier, VT_VARDATA) && + verifier.VerifyVector(varData()) && + VerifyOffset(verifier, VT_BIASDATA) && + verifier.VerifyVector(biasData()) && + VerifyOffset(verifier, VT_ADATA) && + verifier.VerifyVector(Adata()) && + VerifyOffset(verifier, VT_BDATA) && + verifier.VerifyVector(Bdata()) && + VerifyField(verifier, VT_EPSILON) && + verifier.EndTable(); + } + BatchNormT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BatchNormT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchNormT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BatchNormBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_channels(int32_t channels) { + fbb_.AddElement(BatchNorm::VT_CHANNELS, channels, 0); + } + void add_slopeData(flatbuffers::Offset> slopeData) { + fbb_.AddOffset(BatchNorm::VT_SLOPEDATA, slopeData); + } + void add_meanData(flatbuffers::Offset> meanData) { + fbb_.AddOffset(BatchNorm::VT_MEANDATA, meanData); + } + void add_varData(flatbuffers::Offset> varData) { + fbb_.AddOffset(BatchNorm::VT_VARDATA, varData); + } + void add_biasData(flatbuffers::Offset> biasData) { + fbb_.AddOffset(BatchNorm::VT_BIASDATA, biasData); + } + void add_Adata(flatbuffers::Offset> Adata) { + fbb_.AddOffset(BatchNorm::VT_ADATA, Adata); + } + void add_Bdata(flatbuffers::Offset> Bdata) { + fbb_.AddOffset(BatchNorm::VT_BDATA, Bdata); + } + void add_epsilon(float epsilon) { + fbb_.AddElement(BatchNorm::VT_EPSILON, epsilon, 0.001f); + } + explicit BatchNormBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BatchNormBuilder &operator=(const BatchNormBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset 
CreateBatchNorm( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t channels = 0, + flatbuffers::Offset> slopeData = 0, + flatbuffers::Offset> meanData = 0, + flatbuffers::Offset> varData = 0, + flatbuffers::Offset> biasData = 0, + flatbuffers::Offset> Adata = 0, + flatbuffers::Offset> Bdata = 0, + float epsilon = 0.001f) { + BatchNormBuilder builder_(_fbb); + builder_.add_epsilon(epsilon); + builder_.add_Bdata(Bdata); + builder_.add_Adata(Adata); + builder_.add_biasData(biasData); + builder_.add_varData(varData); + builder_.add_meanData(meanData); + builder_.add_slopeData(slopeData); + builder_.add_channels(channels); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBatchNormDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t channels = 0, + const std::vector *slopeData = nullptr, + const std::vector *meanData = nullptr, + const std::vector *varData = nullptr, + const std::vector *biasData = nullptr, + const std::vector *Adata = nullptr, + const std::vector *Bdata = nullptr, + float epsilon = 0.001f) { + auto slopeData__ = slopeData ? _fbb.CreateVector(*slopeData) : 0; + auto meanData__ = meanData ? _fbb.CreateVector(*meanData) : 0; + auto varData__ = varData ? _fbb.CreateVector(*varData) : 0; + auto biasData__ = biasData ? _fbb.CreateVector(*biasData) : 0; + auto Adata__ = Adata ? _fbb.CreateVector(*Adata) : 0; + auto Bdata__ = Bdata ? 
_fbb.CreateVector(*Bdata) : 0; + return MNN::CreateBatchNorm( + _fbb, + channels, + slopeData__, + meanData__, + varData__, + biasData__, + Adata__, + Bdata__, + epsilon); +} + +flatbuffers::Offset CreateBatchNorm(flatbuffers::FlatBufferBuilder &_fbb, const BatchNormT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ScaleT : public flatbuffers::NativeTable { + typedef Scale TableType; + int32_t channels; + std::vector scaleData; + std::vector biasData; + ScaleT() + : channels(0) { + } +}; + +struct Scale FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ScaleT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ScaleTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CHANNELS = 4, + VT_SCALEDATA = 6, + VT_BIASDATA = 8 + }; + int32_t channels() const { + return GetField(VT_CHANNELS, 0); + } + const flatbuffers::Vector *scaleData() const { + return GetPointer *>(VT_SCALEDATA); + } + const flatbuffers::Vector *biasData() const { + return GetPointer *>(VT_BIASDATA); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_CHANNELS) && + VerifyOffset(verifier, VT_SCALEDATA) && + verifier.VerifyVector(scaleData()) && + VerifyOffset(verifier, VT_BIASDATA) && + verifier.VerifyVector(biasData()) && + verifier.EndTable(); + } + ScaleT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ScaleT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScaleT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ScaleBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_channels(int32_t channels) { + fbb_.AddElement(Scale::VT_CHANNELS, channels, 0); + } + void add_scaleData(flatbuffers::Offset> 
scaleData) { + fbb_.AddOffset(Scale::VT_SCALEDATA, scaleData); + } + void add_biasData(flatbuffers::Offset> biasData) { + fbb_.AddOffset(Scale::VT_BIASDATA, biasData); + } + explicit ScaleBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ScaleBuilder &operator=(const ScaleBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateScale( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t channels = 0, + flatbuffers::Offset> scaleData = 0, + flatbuffers::Offset> biasData = 0) { + ScaleBuilder builder_(_fbb); + builder_.add_biasData(biasData); + builder_.add_scaleData(scaleData); + builder_.add_channels(channels); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateScaleDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t channels = 0, + const std::vector *scaleData = nullptr, + const std::vector *biasData = nullptr) { + auto scaleData__ = scaleData ? _fbb.CreateVector(*scaleData) : 0; + auto biasData__ = biasData ? 
_fbb.CreateVector(*biasData) : 0; + return MNN::CreateScale( + _fbb, + channels, + scaleData__, + biasData__); +} + +flatbuffers::Offset CreateScale(flatbuffers::FlatBufferBuilder &_fbb, const ScaleT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct EltwiseT : public flatbuffers::NativeTable { + typedef Eltwise TableType; + EltwiseType type; + std::vector coeff; + EltwiseT() + : type(EltwiseType_PROD) { + } +}; + +struct Eltwise FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef EltwiseT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return EltwiseTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4, + VT_COEFF = 6 + }; + EltwiseType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + const flatbuffers::Vector *coeff() const { + return GetPointer *>(VT_COEFF); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE) && + VerifyOffset(verifier, VT_COEFF) && + verifier.VerifyVector(coeff()) && + verifier.EndTable(); + } + EltwiseT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(EltwiseT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct EltwiseBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(EltwiseType type) { + fbb_.AddElement(Eltwise::VT_TYPE, static_cast(type), 0); + } + void add_coeff(flatbuffers::Offset> coeff) { + fbb_.AddOffset(Eltwise::VT_COEFF, coeff); + } + explicit EltwiseBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EltwiseBuilder &operator=(const EltwiseBuilder &); + flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEltwise( + flatbuffers::FlatBufferBuilder &_fbb, + EltwiseType type = EltwiseType_PROD, + flatbuffers::Offset> coeff = 0) { + EltwiseBuilder builder_(_fbb); + builder_.add_coeff(coeff); + builder_.add_type(type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateEltwiseDirect( + flatbuffers::FlatBufferBuilder &_fbb, + EltwiseType type = EltwiseType_PROD, + const std::vector *coeff = nullptr) { + auto coeff__ = coeff ? _fbb.CreateVector(*coeff) : 0; + return MNN::CreateEltwise( + _fbb, + type, + coeff__); +} + +flatbuffers::Offset CreateEltwise(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FlattenT : public flatbuffers::NativeTable { + typedef Flatten TableType; + int32_t axis; + int32_t endAxis; + FlattenT() + : axis(0), + endAxis(0) { + } +}; + +struct Flatten FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FlattenT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return FlattenTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_ENDAXIS = 6 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + int32_t endAxis() const { + return GetField(VT_ENDAXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_ENDAXIS) && + verifier.EndTable(); + } + FlattenT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FlattenT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FlattenT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct FlattenBuilder { + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(Flatten::VT_AXIS, axis, 0); + } + void add_endAxis(int32_t endAxis) { + fbb_.AddElement(Flatten::VT_ENDAXIS, endAxis, 0); + } + explicit FlattenBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FlattenBuilder &operator=(const FlattenBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFlatten( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + int32_t endAxis = 0) { + FlattenBuilder builder_(_fbb); + builder_.add_endAxis(endAxis); + builder_.add_axis(axis); + return builder_.Finish(); +} + +flatbuffers::Offset CreateFlatten(flatbuffers::FlatBufferBuilder &_fbb, const FlattenT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PermuteT : public flatbuffers::NativeTable { + typedef Permute TableType; + std::vector dims; + PermuteT() { + } +}; + +struct Permute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PermuteT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return PermuteTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMS = 4 + }; + const flatbuffers::Vector *dims() const { + return GetPointer *>(VT_DIMS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIMS) && + verifier.VerifyVector(dims()) && + verifier.EndTable(); + } + PermuteT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PermuteT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PermuteT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + 
+struct PermuteBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dims(flatbuffers::Offset> dims) { + fbb_.AddOffset(Permute::VT_DIMS, dims); + } + explicit PermuteBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PermuteBuilder &operator=(const PermuteBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePermute( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dims = 0) { + PermuteBuilder builder_(_fbb); + builder_.add_dims(dims); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePermuteDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dims = nullptr) { + auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; + return MNN::CreatePermute( + _fbb, + dims__); +} + +flatbuffers::Offset CreatePermute(flatbuffers::FlatBufferBuilder &_fbb, const PermuteT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReshapeT : public flatbuffers::NativeTable { + typedef Reshape TableType; + std::vector dims; + MNN_DATA_FORMAT dimType; + ReshapeT() + : dimType(MNN_DATA_FORMAT_NCHW) { + } +}; + +struct Reshape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReshapeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ReshapeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMS = 4, + VT_DIMTYPE = 6 + }; + const flatbuffers::Vector *dims() const { + return GetPointer *>(VT_DIMS); + } + MNN_DATA_FORMAT dimType() const { + return static_cast(GetField(VT_DIMTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIMS) && + verifier.VerifyVector(dims()) && + VerifyField(verifier, VT_DIMTYPE) && + verifier.EndTable(); + } + 
ReshapeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReshapeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReshapeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dims(flatbuffers::Offset> dims) { + fbb_.AddOffset(Reshape::VT_DIMS, dims); + } + void add_dimType(MNN_DATA_FORMAT dimType) { + fbb_.AddElement(Reshape::VT_DIMTYPE, static_cast(dimType), 0); + } + explicit ReshapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ReshapeBuilder &operator=(const ReshapeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReshape( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dims = 0, + MNN_DATA_FORMAT dimType = MNN_DATA_FORMAT_NCHW) { + ReshapeBuilder builder_(_fbb); + builder_.add_dims(dims); + builder_.add_dimType(dimType); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateReshapeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dims = nullptr, + MNN_DATA_FORMAT dimType = MNN_DATA_FORMAT_NCHW) { + auto dims__ = dims ? 
_fbb.CreateVector(*dims) : 0; + return MNN::CreateReshape( + _fbb, + dims__, + dimType); +} + +flatbuffers::Offset CreateReshape(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DetectionOutputT : public flatbuffers::NativeTable { + typedef DetectionOutput TableType; + int32_t classCount; + float nmsThresholdold; + int32_t nmsTopK; + int32_t keepTopK; + float confidenceThreshold; + int32_t shareLocation; + int32_t backgroundLable; + int32_t varianceEncodedTarget; + int32_t codeType; + float objectnessScore; + DetectionOutputT() + : classCount(0), + nmsThresholdold(0.0f), + nmsTopK(0), + keepTopK(0), + confidenceThreshold(0.0f), + shareLocation(0), + backgroundLable(0), + varianceEncodedTarget(0), + codeType(0), + objectnessScore(0.01f) { + } +}; + +struct DetectionOutput FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DetectionOutputT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return DetectionOutputTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CLASSCOUNT = 4, + VT_NMSTHRESHOLDOLD = 6, + VT_NMSTOPK = 8, + VT_KEEPTOPK = 10, + VT_CONFIDENCETHRESHOLD = 12, + VT_SHARELOCATION = 14, + VT_BACKGROUNDLABLE = 16, + VT_VARIANCEENCODEDTARGET = 18, + VT_CODETYPE = 20, + VT_OBJECTNESSSCORE = 22 + }; + int32_t classCount() const { + return GetField(VT_CLASSCOUNT, 0); + } + float nmsThresholdold() const { + return GetField(VT_NMSTHRESHOLDOLD, 0.0f); + } + int32_t nmsTopK() const { + return GetField(VT_NMSTOPK, 0); + } + int32_t keepTopK() const { + return GetField(VT_KEEPTOPK, 0); + } + float confidenceThreshold() const { + return GetField(VT_CONFIDENCETHRESHOLD, 0.0f); + } + int32_t shareLocation() const { + return GetField(VT_SHARELOCATION, 0); + } + int32_t backgroundLable() const { + return GetField(VT_BACKGROUNDLABLE, 0); + } + int32_t varianceEncodedTarget() const { + return 
GetField(VT_VARIANCEENCODEDTARGET, 0); + } + int32_t codeType() const { + return GetField(VT_CODETYPE, 0); + } + float objectnessScore() const { + return GetField(VT_OBJECTNESSSCORE, 0.01f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_CLASSCOUNT) && + VerifyField(verifier, VT_NMSTHRESHOLDOLD) && + VerifyField(verifier, VT_NMSTOPK) && + VerifyField(verifier, VT_KEEPTOPK) && + VerifyField(verifier, VT_CONFIDENCETHRESHOLD) && + VerifyField(verifier, VT_SHARELOCATION) && + VerifyField(verifier, VT_BACKGROUNDLABLE) && + VerifyField(verifier, VT_VARIANCEENCODEDTARGET) && + VerifyField(verifier, VT_CODETYPE) && + VerifyField(verifier, VT_OBJECTNESSSCORE) && + verifier.EndTable(); + } + DetectionOutputT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DetectionOutputT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DetectionOutputT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DetectionOutputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_classCount(int32_t classCount) { + fbb_.AddElement(DetectionOutput::VT_CLASSCOUNT, classCount, 0); + } + void add_nmsThresholdold(float nmsThresholdold) { + fbb_.AddElement(DetectionOutput::VT_NMSTHRESHOLDOLD, nmsThresholdold, 0.0f); + } + void add_nmsTopK(int32_t nmsTopK) { + fbb_.AddElement(DetectionOutput::VT_NMSTOPK, nmsTopK, 0); + } + void add_keepTopK(int32_t keepTopK) { + fbb_.AddElement(DetectionOutput::VT_KEEPTOPK, keepTopK, 0); + } + void add_confidenceThreshold(float confidenceThreshold) { + fbb_.AddElement(DetectionOutput::VT_CONFIDENCETHRESHOLD, confidenceThreshold, 0.0f); + } + void add_shareLocation(int32_t shareLocation) { + fbb_.AddElement(DetectionOutput::VT_SHARELOCATION, shareLocation, 0); + } + void 
add_backgroundLable(int32_t backgroundLable) { + fbb_.AddElement(DetectionOutput::VT_BACKGROUNDLABLE, backgroundLable, 0); + } + void add_varianceEncodedTarget(int32_t varianceEncodedTarget) { + fbb_.AddElement(DetectionOutput::VT_VARIANCEENCODEDTARGET, varianceEncodedTarget, 0); + } + void add_codeType(int32_t codeType) { + fbb_.AddElement(DetectionOutput::VT_CODETYPE, codeType, 0); + } + void add_objectnessScore(float objectnessScore) { + fbb_.AddElement(DetectionOutput::VT_OBJECTNESSSCORE, objectnessScore, 0.01f); + } + explicit DetectionOutputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DetectionOutputBuilder &operator=(const DetectionOutputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDetectionOutput( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t classCount = 0, + float nmsThresholdold = 0.0f, + int32_t nmsTopK = 0, + int32_t keepTopK = 0, + float confidenceThreshold = 0.0f, + int32_t shareLocation = 0, + int32_t backgroundLable = 0, + int32_t varianceEncodedTarget = 0, + int32_t codeType = 0, + float objectnessScore = 0.01f) { + DetectionOutputBuilder builder_(_fbb); + builder_.add_objectnessScore(objectnessScore); + builder_.add_codeType(codeType); + builder_.add_varianceEncodedTarget(varianceEncodedTarget); + builder_.add_backgroundLable(backgroundLable); + builder_.add_shareLocation(shareLocation); + builder_.add_confidenceThreshold(confidenceThreshold); + builder_.add_keepTopK(keepTopK); + builder_.add_nmsTopK(nmsTopK); + builder_.add_nmsThresholdold(nmsThresholdold); + builder_.add_classCount(classCount); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDetectionOutput(flatbuffers::FlatBufferBuilder &_fbb, const DetectionOutputT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RoiPoolingT : public flatbuffers::NativeTable { + 
typedef RoiPooling TableType; + int32_t pooledWidth; + int32_t pooledHeight; + float spatialScale; + RoiPoolingT() + : pooledWidth(0), + pooledHeight(0), + spatialScale(0.0f) { + } +}; + +struct RoiPooling FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RoiPoolingT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return RoiPoolingTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_POOLEDWIDTH = 4, + VT_POOLEDHEIGHT = 6, + VT_SPATIALSCALE = 8 + }; + int32_t pooledWidth() const { + return GetField(VT_POOLEDWIDTH, 0); + } + int32_t pooledHeight() const { + return GetField(VT_POOLEDHEIGHT, 0); + } + float spatialScale() const { + return GetField(VT_SPATIALSCALE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_POOLEDWIDTH) && + VerifyField(verifier, VT_POOLEDHEIGHT) && + VerifyField(verifier, VT_SPATIALSCALE) && + verifier.EndTable(); + } + RoiPoolingT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RoiPoolingT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RoiPoolingT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RoiPoolingBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_pooledWidth(int32_t pooledWidth) { + fbb_.AddElement(RoiPooling::VT_POOLEDWIDTH, pooledWidth, 0); + } + void add_pooledHeight(int32_t pooledHeight) { + fbb_.AddElement(RoiPooling::VT_POOLEDHEIGHT, pooledHeight, 0); + } + void add_spatialScale(float spatialScale) { + fbb_.AddElement(RoiPooling::VT_SPATIALSCALE, spatialScale, 0.0f); + } + explicit RoiPoolingBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RoiPoolingBuilder &operator=(const RoiPoolingBuilder &); 
+ flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRoiPooling( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t pooledWidth = 0, + int32_t pooledHeight = 0, + float spatialScale = 0.0f) { + RoiPoolingBuilder builder_(_fbb); + builder_.add_spatialScale(spatialScale); + builder_.add_pooledHeight(pooledHeight); + builder_.add_pooledWidth(pooledWidth); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRoiPooling(flatbuffers::FlatBufferBuilder &_fbb, const RoiPoolingT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ProposalT : public flatbuffers::NativeTable { + typedef Proposal TableType; + int32_t featStride; + int32_t baseSize; + int32_t preNmsTopN; + int32_t afterNmsTopN; + float nmsThreshold; + int32_t minSize; + std::unique_ptr ratios; + std::unique_ptr scales; + std::unique_ptr anchors; + ProposalT() + : featStride(0), + baseSize(0), + preNmsTopN(0), + afterNmsTopN(0), + nmsThreshold(0.0f), + minSize(0) { + } +}; + +struct Proposal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ProposalT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ProposalTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FEATSTRIDE = 4, + VT_BASESIZE = 6, + VT_PRENMSTOPN = 8, + VT_AFTERNMSTOPN = 10, + VT_NMSTHRESHOLD = 12, + VT_MINSIZE = 14, + VT_RATIOS = 16, + VT_SCALES = 18, + VT_ANCHORS = 20 + }; + int32_t featStride() const { + return GetField(VT_FEATSTRIDE, 0); + } + int32_t baseSize() const { + return GetField(VT_BASESIZE, 0); + } + int32_t preNmsTopN() const { + return GetField(VT_PRENMSTOPN, 0); + } + int32_t afterNmsTopN() const { + return GetField(VT_AFTERNMSTOPN, 0); + } + float nmsThreshold() const { + return GetField(VT_NMSTHRESHOLD, 0.0f); + } + int32_t minSize() const { + return GetField(VT_MINSIZE, 0); + } + const Blob 
*ratios() const { + return GetPointer(VT_RATIOS); + } + const Blob *scales() const { + return GetPointer(VT_SCALES); + } + const Blob *anchors() const { + return GetPointer(VT_ANCHORS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FEATSTRIDE) && + VerifyField(verifier, VT_BASESIZE) && + VerifyField(verifier, VT_PRENMSTOPN) && + VerifyField(verifier, VT_AFTERNMSTOPN) && + VerifyField(verifier, VT_NMSTHRESHOLD) && + VerifyField(verifier, VT_MINSIZE) && + VerifyOffset(verifier, VT_RATIOS) && + verifier.VerifyTable(ratios()) && + VerifyOffset(verifier, VT_SCALES) && + verifier.VerifyTable(scales()) && + VerifyOffset(verifier, VT_ANCHORS) && + verifier.VerifyTable(anchors()) && + verifier.EndTable(); + } + ProposalT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ProposalT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ProposalT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ProposalBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_featStride(int32_t featStride) { + fbb_.AddElement(Proposal::VT_FEATSTRIDE, featStride, 0); + } + void add_baseSize(int32_t baseSize) { + fbb_.AddElement(Proposal::VT_BASESIZE, baseSize, 0); + } + void add_preNmsTopN(int32_t preNmsTopN) { + fbb_.AddElement(Proposal::VT_PRENMSTOPN, preNmsTopN, 0); + } + void add_afterNmsTopN(int32_t afterNmsTopN) { + fbb_.AddElement(Proposal::VT_AFTERNMSTOPN, afterNmsTopN, 0); + } + void add_nmsThreshold(float nmsThreshold) { + fbb_.AddElement(Proposal::VT_NMSTHRESHOLD, nmsThreshold, 0.0f); + } + void add_minSize(int32_t minSize) { + fbb_.AddElement(Proposal::VT_MINSIZE, minSize, 0); + } + void add_ratios(flatbuffers::Offset ratios) { + fbb_.AddOffset(Proposal::VT_RATIOS, ratios); + } + void 
add_scales(flatbuffers::Offset scales) { + fbb_.AddOffset(Proposal::VT_SCALES, scales); + } + void add_anchors(flatbuffers::Offset anchors) { + fbb_.AddOffset(Proposal::VT_ANCHORS, anchors); + } + explicit ProposalBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ProposalBuilder &operator=(const ProposalBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateProposal( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t featStride = 0, + int32_t baseSize = 0, + int32_t preNmsTopN = 0, + int32_t afterNmsTopN = 0, + float nmsThreshold = 0.0f, + int32_t minSize = 0, + flatbuffers::Offset ratios = 0, + flatbuffers::Offset scales = 0, + flatbuffers::Offset anchors = 0) { + ProposalBuilder builder_(_fbb); + builder_.add_anchors(anchors); + builder_.add_scales(scales); + builder_.add_ratios(ratios); + builder_.add_minSize(minSize); + builder_.add_nmsThreshold(nmsThreshold); + builder_.add_afterNmsTopN(afterNmsTopN); + builder_.add_preNmsTopN(preNmsTopN); + builder_.add_baseSize(baseSize); + builder_.add_featStride(featStride); + return builder_.Finish(); +} + +flatbuffers::Offset CreateProposal(flatbuffers::FlatBufferBuilder &_fbb, const ProposalT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct InterpT : public flatbuffers::NativeTable { + typedef Interp TableType; + float widthScale; + float heightScale; + int32_t outputWidth; + int32_t outputHeight; + int32_t resizeType; + bool alignCorners; + InterpT() + : widthScale(0.0f), + heightScale(0.0f), + outputWidth(0), + outputHeight(0), + resizeType(0), + alignCorners(false) { + } +}; + +struct Interp FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef InterpT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return InterpTypeTable(); + } + enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_WIDTHSCALE = 4, + VT_HEIGHTSCALE = 6, + VT_OUTPUTWIDTH = 8, + VT_OUTPUTHEIGHT = 10, + VT_RESIZETYPE = 12, + VT_ALIGNCORNERS = 14 + }; + float widthScale() const { + return GetField(VT_WIDTHSCALE, 0.0f); + } + float heightScale() const { + return GetField(VT_HEIGHTSCALE, 0.0f); + } + int32_t outputWidth() const { + return GetField(VT_OUTPUTWIDTH, 0); + } + int32_t outputHeight() const { + return GetField(VT_OUTPUTHEIGHT, 0); + } + int32_t resizeType() const { + return GetField(VT_RESIZETYPE, 0); + } + bool alignCorners() const { + return GetField(VT_ALIGNCORNERS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_WIDTHSCALE) && + VerifyField(verifier, VT_HEIGHTSCALE) && + VerifyField(verifier, VT_OUTPUTWIDTH) && + VerifyField(verifier, VT_OUTPUTHEIGHT) && + VerifyField(verifier, VT_RESIZETYPE) && + VerifyField(verifier, VT_ALIGNCORNERS) && + verifier.EndTable(); + } + InterpT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(InterpT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const InterpT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct InterpBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_widthScale(float widthScale) { + fbb_.AddElement(Interp::VT_WIDTHSCALE, widthScale, 0.0f); + } + void add_heightScale(float heightScale) { + fbb_.AddElement(Interp::VT_HEIGHTSCALE, heightScale, 0.0f); + } + void add_outputWidth(int32_t outputWidth) { + fbb_.AddElement(Interp::VT_OUTPUTWIDTH, outputWidth, 0); + } + void add_outputHeight(int32_t outputHeight) { + fbb_.AddElement(Interp::VT_OUTPUTHEIGHT, outputHeight, 0); + } + void add_resizeType(int32_t resizeType) { + fbb_.AddElement(Interp::VT_RESIZETYPE, resizeType, 0); + } + void 
add_alignCorners(bool alignCorners) { + fbb_.AddElement(Interp::VT_ALIGNCORNERS, static_cast(alignCorners), 0); + } + explicit InterpBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + InterpBuilder &operator=(const InterpBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateInterp( + flatbuffers::FlatBufferBuilder &_fbb, + float widthScale = 0.0f, + float heightScale = 0.0f, + int32_t outputWidth = 0, + int32_t outputHeight = 0, + int32_t resizeType = 0, + bool alignCorners = false) { + InterpBuilder builder_(_fbb); + builder_.add_resizeType(resizeType); + builder_.add_outputHeight(outputHeight); + builder_.add_outputWidth(outputWidth); + builder_.add_heightScale(heightScale); + builder_.add_widthScale(widthScale); + builder_.add_alignCorners(alignCorners); + return builder_.Finish(); +} + +flatbuffers::Offset CreateInterp(flatbuffers::FlatBufferBuilder &_fbb, const InterpT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ResizeT : public flatbuffers::NativeTable { + typedef Resize TableType; + float xScale; + float yScale; + ResizeT() + : xScale(0.0f), + yScale(0.0f) { + } +}; + +struct Resize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ResizeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ResizeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_XSCALE = 4, + VT_YSCALE = 6 + }; + float xScale() const { + return GetField(VT_XSCALE, 0.0f); + } + float yScale() const { + return GetField(VT_YSCALE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_XSCALE) && + VerifyField(verifier, VT_YSCALE) && + verifier.EndTable(); + } + ResizeT *UnPack(const flatbuffers::resolver_function_t 
*_resolver = nullptr) const; + void UnPackTo(ResizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ResizeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_xScale(float xScale) { + fbb_.AddElement(Resize::VT_XSCALE, xScale, 0.0f); + } + void add_yScale(float yScale) { + fbb_.AddElement(Resize::VT_YSCALE, yScale, 0.0f); + } + explicit ResizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ResizeBuilder &operator=(const ResizeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateResize( + flatbuffers::FlatBufferBuilder &_fbb, + float xScale = 0.0f, + float yScale = 0.0f) { + ResizeBuilder builder_(_fbb); + builder_.add_yScale(yScale); + builder_.add_xScale(xScale); + return builder_.Finish(); +} + +flatbuffers::Offset CreateResize(flatbuffers::FlatBufferBuilder &_fbb, const ResizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PriorBoxT : public flatbuffers::NativeTable { + typedef PriorBox TableType; + std::vector minSizes; + std::vector maxSizes; + std::vector aspectRatios; + std::vector variances; + bool flip; + bool clip; + int32_t imageWidth; + int32_t imageHeight; + int32_t stepWidth; + int32_t stepHeight; + float offset; + PriorBoxT() + : flip(false), + clip(false), + imageWidth(0), + imageHeight(0), + stepWidth(0), + stepHeight(0), + offset(0.0f) { + } +}; + +struct PriorBox FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PriorBoxT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return PriorBoxTypeTable(); + } + enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MINSIZES = 4, + VT_MAXSIZES = 6, + VT_ASPECTRATIOS = 8, + VT_VARIANCES = 10, + VT_FLIP = 12, + VT_CLIP = 14, + VT_IMAGEWIDTH = 16, + VT_IMAGEHEIGHT = 18, + VT_STEPWIDTH = 20, + VT_STEPHEIGHT = 22, + VT_OFFSET = 24 + }; + const flatbuffers::Vector *minSizes() const { + return GetPointer *>(VT_MINSIZES); + } + const flatbuffers::Vector *maxSizes() const { + return GetPointer *>(VT_MAXSIZES); + } + const flatbuffers::Vector *aspectRatios() const { + return GetPointer *>(VT_ASPECTRATIOS); + } + const flatbuffers::Vector *variances() const { + return GetPointer *>(VT_VARIANCES); + } + bool flip() const { + return GetField(VT_FLIP, 0) != 0; + } + bool clip() const { + return GetField(VT_CLIP, 0) != 0; + } + int32_t imageWidth() const { + return GetField(VT_IMAGEWIDTH, 0); + } + int32_t imageHeight() const { + return GetField(VT_IMAGEHEIGHT, 0); + } + int32_t stepWidth() const { + return GetField(VT_STEPWIDTH, 0); + } + int32_t stepHeight() const { + return GetField(VT_STEPHEIGHT, 0); + } + float offset() const { + return GetField(VT_OFFSET, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_MINSIZES) && + verifier.VerifyVector(minSizes()) && + VerifyOffset(verifier, VT_MAXSIZES) && + verifier.VerifyVector(maxSizes()) && + VerifyOffset(verifier, VT_ASPECTRATIOS) && + verifier.VerifyVector(aspectRatios()) && + VerifyOffset(verifier, VT_VARIANCES) && + verifier.VerifyVector(variances()) && + VerifyField(verifier, VT_FLIP) && + VerifyField(verifier, VT_CLIP) && + VerifyField(verifier, VT_IMAGEWIDTH) && + VerifyField(verifier, VT_IMAGEHEIGHT) && + VerifyField(verifier, VT_STEPWIDTH) && + VerifyField(verifier, VT_STEPHEIGHT) && + VerifyField(verifier, VT_OFFSET) && + verifier.EndTable(); + } + PriorBoxT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PriorBoxT *_o, const flatbuffers::resolver_function_t 
*_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PriorBoxT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PriorBoxBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_minSizes(flatbuffers::Offset> minSizes) { + fbb_.AddOffset(PriorBox::VT_MINSIZES, minSizes); + } + void add_maxSizes(flatbuffers::Offset> maxSizes) { + fbb_.AddOffset(PriorBox::VT_MAXSIZES, maxSizes); + } + void add_aspectRatios(flatbuffers::Offset> aspectRatios) { + fbb_.AddOffset(PriorBox::VT_ASPECTRATIOS, aspectRatios); + } + void add_variances(flatbuffers::Offset> variances) { + fbb_.AddOffset(PriorBox::VT_VARIANCES, variances); + } + void add_flip(bool flip) { + fbb_.AddElement(PriorBox::VT_FLIP, static_cast(flip), 0); + } + void add_clip(bool clip) { + fbb_.AddElement(PriorBox::VT_CLIP, static_cast(clip), 0); + } + void add_imageWidth(int32_t imageWidth) { + fbb_.AddElement(PriorBox::VT_IMAGEWIDTH, imageWidth, 0); + } + void add_imageHeight(int32_t imageHeight) { + fbb_.AddElement(PriorBox::VT_IMAGEHEIGHT, imageHeight, 0); + } + void add_stepWidth(int32_t stepWidth) { + fbb_.AddElement(PriorBox::VT_STEPWIDTH, stepWidth, 0); + } + void add_stepHeight(int32_t stepHeight) { + fbb_.AddElement(PriorBox::VT_STEPHEIGHT, stepHeight, 0); + } + void add_offset(float offset) { + fbb_.AddElement(PriorBox::VT_OFFSET, offset, 0.0f); + } + explicit PriorBoxBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PriorBoxBuilder &operator=(const PriorBoxBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePriorBox( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> minSizes = 0, + flatbuffers::Offset> maxSizes = 0, + flatbuffers::Offset> aspectRatios = 0, + flatbuffers::Offset> variances = 0, + bool 
flip = false, + bool clip = false, + int32_t imageWidth = 0, + int32_t imageHeight = 0, + int32_t stepWidth = 0, + int32_t stepHeight = 0, + float offset = 0.0f) { + PriorBoxBuilder builder_(_fbb); + builder_.add_offset(offset); + builder_.add_stepHeight(stepHeight); + builder_.add_stepWidth(stepWidth); + builder_.add_imageHeight(imageHeight); + builder_.add_imageWidth(imageWidth); + builder_.add_variances(variances); + builder_.add_aspectRatios(aspectRatios); + builder_.add_maxSizes(maxSizes); + builder_.add_minSizes(minSizes); + builder_.add_clip(clip); + builder_.add_flip(flip); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePriorBoxDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *minSizes = nullptr, + const std::vector *maxSizes = nullptr, + const std::vector *aspectRatios = nullptr, + const std::vector *variances = nullptr, + bool flip = false, + bool clip = false, + int32_t imageWidth = 0, + int32_t imageHeight = 0, + int32_t stepWidth = 0, + int32_t stepHeight = 0, + float offset = 0.0f) { + auto minSizes__ = minSizes ? _fbb.CreateVector(*minSizes) : 0; + auto maxSizes__ = maxSizes ? _fbb.CreateVector(*maxSizes) : 0; + auto aspectRatios__ = aspectRatios ? _fbb.CreateVector(*aspectRatios) : 0; + auto variances__ = variances ? 
_fbb.CreateVector(*variances) : 0; + return MNN::CreatePriorBox( + _fbb, + minSizes__, + maxSizes__, + aspectRatios__, + variances__, + flip, + clip, + imageWidth, + imageHeight, + stepWidth, + stepHeight, + offset); +} + +flatbuffers::Offset CreatePriorBox(flatbuffers::FlatBufferBuilder &_fbb, const PriorBoxT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NormalizeT : public flatbuffers::NativeTable { + typedef Normalize TableType; + int32_t acrossSpatial; + int32_t channelShared; + float eps; + std::vector scale; + NormalizeT() + : acrossSpatial(0), + channelShared(0), + eps(0.0f) { + } +}; + +struct Normalize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NormalizeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return NormalizeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACROSSSPATIAL = 4, + VT_CHANNELSHARED = 6, + VT_EPS = 8, + VT_SCALE = 10 + }; + int32_t acrossSpatial() const { + return GetField(VT_ACROSSSPATIAL, 0); + } + int32_t channelShared() const { + return GetField(VT_CHANNELSHARED, 0); + } + float eps() const { + return GetField(VT_EPS, 0.0f); + } + const flatbuffers::Vector *scale() const { + return GetPointer *>(VT_SCALE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACROSSSPATIAL) && + VerifyField(verifier, VT_CHANNELSHARED) && + VerifyField(verifier, VT_EPS) && + VerifyOffset(verifier, VT_SCALE) && + verifier.VerifyVector(scale()) && + verifier.EndTable(); + } + NormalizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NormalizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NormalizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NormalizeBuilder { + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_acrossSpatial(int32_t acrossSpatial) { + fbb_.AddElement(Normalize::VT_ACROSSSPATIAL, acrossSpatial, 0); + } + void add_channelShared(int32_t channelShared) { + fbb_.AddElement(Normalize::VT_CHANNELSHARED, channelShared, 0); + } + void add_eps(float eps) { + fbb_.AddElement(Normalize::VT_EPS, eps, 0.0f); + } + void add_scale(flatbuffers::Offset> scale) { + fbb_.AddOffset(Normalize::VT_SCALE, scale); + } + explicit NormalizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + NormalizeBuilder &operator=(const NormalizeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNormalize( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t acrossSpatial = 0, + int32_t channelShared = 0, + float eps = 0.0f, + flatbuffers::Offset> scale = 0) { + NormalizeBuilder builder_(_fbb); + builder_.add_scale(scale); + builder_.add_eps(eps); + builder_.add_channelShared(channelShared); + builder_.add_acrossSpatial(acrossSpatial); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateNormalizeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t acrossSpatial = 0, + int32_t channelShared = 0, + float eps = 0.0f, + const std::vector *scale = nullptr) { + auto scale__ = scale ? 
_fbb.CreateVector(*scale) : 0; + return MNN::CreateNormalize( + _fbb, + acrossSpatial, + channelShared, + eps, + scale__); +} + +flatbuffers::Offset CreateNormalize(flatbuffers::FlatBufferBuilder &_fbb, const NormalizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct EltwiseInt8T : public flatbuffers::NativeTable { + typedef EltwiseInt8 TableType; + EltwiseType type; + std::unique_ptr inputQuan0; + std::unique_ptr inputQuan1; + std::unique_ptr outputQuan; + EltwiseInt8T() + : type(EltwiseType_PROD) { + } +}; + +struct EltwiseInt8 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef EltwiseInt8T NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return EltwiseInt8TypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4, + VT_INPUTQUAN0 = 6, + VT_INPUTQUAN1 = 8, + VT_OUTPUTQUAN = 10 + }; + EltwiseType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + const QuantizedFloatParam *inputQuan0() const { + return GetPointer(VT_INPUTQUAN0); + } + const QuantizedFloatParam *inputQuan1() const { + return GetPointer(VT_INPUTQUAN1); + } + const QuantizedFloatParam *outputQuan() const { + return GetPointer(VT_OUTPUTQUAN); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE) && + VerifyOffset(verifier, VT_INPUTQUAN0) && + verifier.VerifyTable(inputQuan0()) && + VerifyOffset(verifier, VT_INPUTQUAN1) && + verifier.VerifyTable(inputQuan1()) && + VerifyOffset(verifier, VT_OUTPUTQUAN) && + verifier.VerifyTable(outputQuan()) && + verifier.EndTable(); + } + EltwiseInt8T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(EltwiseInt8T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseInt8T* _o, const flatbuffers::rehasher_function_t 
*_rehasher = nullptr); +}; + +struct EltwiseInt8Builder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(EltwiseType type) { + fbb_.AddElement(EltwiseInt8::VT_TYPE, static_cast(type), 0); + } + void add_inputQuan0(flatbuffers::Offset inputQuan0) { + fbb_.AddOffset(EltwiseInt8::VT_INPUTQUAN0, inputQuan0); + } + void add_inputQuan1(flatbuffers::Offset inputQuan1) { + fbb_.AddOffset(EltwiseInt8::VT_INPUTQUAN1, inputQuan1); + } + void add_outputQuan(flatbuffers::Offset outputQuan) { + fbb_.AddOffset(EltwiseInt8::VT_OUTPUTQUAN, outputQuan); + } + explicit EltwiseInt8Builder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EltwiseInt8Builder &operator=(const EltwiseInt8Builder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEltwiseInt8( + flatbuffers::FlatBufferBuilder &_fbb, + EltwiseType type = EltwiseType_PROD, + flatbuffers::Offset inputQuan0 = 0, + flatbuffers::Offset inputQuan1 = 0, + flatbuffers::Offset outputQuan = 0) { + EltwiseInt8Builder builder_(_fbb); + builder_.add_outputQuan(outputQuan); + builder_.add_inputQuan1(inputQuan1); + builder_.add_inputQuan0(inputQuan0); + builder_.add_type(type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateEltwiseInt8(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseInt8T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline Convolution2DCommonT *Convolution2DCommon::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Convolution2DCommonT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Convolution2DCommon::UnPackTo(Convolution2DCommonT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padX(); _o->padX = _e; }; + { auto _e = padY(); _o->padY = _e; }; + { auto _e = kernelX(); 
_o->kernelX = _e; }; + { auto _e = kernelY(); _o->kernelY = _e; }; + { auto _e = strideX(); _o->strideX = _e; }; + { auto _e = strideY(); _o->strideY = _e; }; + { auto _e = dilateX(); _o->dilateX = _e; }; + { auto _e = dilateY(); _o->dilateY = _e; }; + { auto _e = padMode(); _o->padMode = _e; }; + { auto _e = group(); _o->group = _e; }; + { auto _e = outputCount(); _o->outputCount = _e; }; + { auto _e = inputCount(); _o->inputCount = _e; }; + { auto _e = relu(); _o->relu = _e; }; + { auto _e = relu6(); _o->relu6 = _e; }; +} + +inline flatbuffers::Offset Convolution2DCommon::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DCommonT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConvolution2DCommon(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConvolution2DCommon(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DCommonT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Convolution2DCommonT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padX = _o->padX; + auto _padY = _o->padY; + auto _kernelX = _o->kernelX; + auto _kernelY = _o->kernelY; + auto _strideX = _o->strideX; + auto _strideY = _o->strideY; + auto _dilateX = _o->dilateX; + auto _dilateY = _o->dilateY; + auto _padMode = _o->padMode; + auto _group = _o->group; + auto _outputCount = _o->outputCount; + auto _inputCount = _o->inputCount; + auto _relu = _o->relu; + auto _relu6 = _o->relu6; + return MNN::CreateConvolution2DCommon( + _fbb, + _padX, + _padY, + _kernelX, + _kernelY, + _strideX, + _strideY, + _dilateX, + _dilateY, + _padMode, + _group, + _outputCount, + _inputCount, + _relu, + _relu6); +} + +inline Convolution3DCommonT *Convolution3DCommon::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Convolution3DCommonT(); + UnPackTo(_o, _resolver); + 
return _o; +} + +inline void Convolution3DCommon::UnPackTo(Convolution3DCommonT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dilates(); if (_e) { _o->dilates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dilates[_i] = _e->Get(_i); } } }; + { auto _e = strides(); if (_e) { _o->strides.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->strides[_i] = _e->Get(_i); } } }; + { auto _e = kernels(); if (_e) { _o->kernels.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->kernels[_i] = _e->Get(_i); } } }; + { auto _e = pads(); if (_e) { _o->pads.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->pads[_i] = _e->Get(_i); } } }; + { auto _e = padMode(); _o->padMode = _e; }; + { auto _e = inputCount(); _o->inputCount = _e; }; + { auto _e = outputCount(); _o->outputCount = _e; }; + { auto _e = relu(); _o->relu = _e; }; + { auto _e = relu6(); _o->relu6 = _e; }; +} + +inline flatbuffers::Offset Convolution3DCommon::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DCommonT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConvolution3DCommon(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConvolution3DCommon(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DCommonT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Convolution3DCommonT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dilates = _o->dilates.size() ? _fbb.CreateVector(_o->dilates) : 0; + auto _strides = _o->strides.size() ? _fbb.CreateVector(_o->strides) : 0; + auto _kernels = _o->kernels.size() ? _fbb.CreateVector(_o->kernels) : 0; + auto _pads = _o->pads.size() ? 
_fbb.CreateVector(_o->pads) : 0; + auto _padMode = _o->padMode; + auto _inputCount = _o->inputCount; + auto _outputCount = _o->outputCount; + auto _relu = _o->relu; + auto _relu6 = _o->relu6; + return MNN::CreateConvolution3DCommon( + _fbb, + _dilates, + _strides, + _kernels, + _pads, + _padMode, + _inputCount, + _outputCount, + _relu, + _relu6); +} + +inline IDSTQuanT *IDSTQuan::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new IDSTQuanT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void IDSTQuan::UnPackTo(IDSTQuanT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = buffer(); if (_e) { _o->buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffer[_i] = _e->Get(_i); } } }; + { auto _e = alpha(); if (_e) { _o->alpha.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->alpha[_i] = _e->Get(_i); } } }; + { auto _e = type(); _o->type = _e; }; + { auto _e = useInt32(); _o->useInt32 = _e; }; + { auto _e = quantScale(); _o->quantScale = _e; }; + { auto _e = scaleIn(); _o->scaleIn = _e; }; + { auto _e = scaleOut(); _o->scaleOut = _e; }; + { auto _e = aMax(); _o->aMax = _e; }; + { auto _e = aMin(); _o->aMin = _e; }; + { auto _e = readType(); _o->readType = _e; }; + { auto _e = has_scaleInt(); _o->has_scaleInt = _e; }; +} + +inline flatbuffers::Offset IDSTQuan::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IDSTQuanT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateIDSTQuan(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateIDSTQuan(flatbuffers::FlatBufferBuilder &_fbb, const IDSTQuanT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IDSTQuanT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _buffer = 
_o->buffer.size() ? _fbb.CreateVector(_o->buffer) : 0; + auto _alpha = _o->alpha.size() ? _fbb.CreateVector(_o->alpha) : 0; + auto _type = _o->type; + auto _useInt32 = _o->useInt32; + auto _quantScale = _o->quantScale; + auto _scaleIn = _o->scaleIn; + auto _scaleOut = _o->scaleOut; + auto _aMax = _o->aMax; + auto _aMin = _o->aMin; + auto _readType = _o->readType; + auto _has_scaleInt = _o->has_scaleInt; + return MNN::CreateIDSTQuan( + _fbb, + _buffer, + _alpha, + _type, + _useInt32, + _quantScale, + _scaleIn, + _scaleOut, + _aMax, + _aMin, + _readType, + _has_scaleInt); +} + +inline QuantizedFloatParamT *QuantizedFloatParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedFloatParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedFloatParam::UnPackTo(QuantizedFloatParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; + { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; + { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } }; + { auto _e = tensorScale(); if (_e) { _o->tensorScale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensorScale[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset QuantizedFloatParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedFloatParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedFloatParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedFloatParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedFloatParamT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedFloatParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; + auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; + auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0; + auto _tensorScale = _o->tensorScale.size() ? _fbb.CreateVector(_o->tensorScale) : 0; + return MNN::CreateQuantizedFloatParam( + _fbb, + _weight, + _bias, + _scale, + _tensorScale); +} + +inline Convolution2DT *Convolution2D::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Convolution2DT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Convolution2D::UnPackTo(Convolution2DT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = common(); if (_e) _o->common = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; + { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; + { auto _e = quanParameter(); if (_e) _o->quanParameter = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = symmetricQuan(); if (_e) _o->symmetricQuan = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset Convolution2D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConvolution2D(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConvolution2D(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + 
(void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Convolution2DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _common = _o->common ? CreateConvolution2DCommon(_fbb, _o->common.get(), _rehasher) : 0; + auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; + auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; + auto _quanParameter = _o->quanParameter ? CreateIDSTQuan(_fbb, _o->quanParameter.get(), _rehasher) : 0; + auto _symmetricQuan = _o->symmetricQuan ? CreateQuantizedFloatParam(_fbb, _o->symmetricQuan.get(), _rehasher) : 0; + return MNN::CreateConvolution2D( + _fbb, + _common, + _weight, + _bias, + _quanParameter, + _symmetricQuan); +} + +inline Convolution3DT *Convolution3D::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Convolution3DT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Convolution3D::UnPackTo(Convolution3DT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = common(); if (_e) _o->common = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; + { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset Convolution3D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConvolution3D(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConvolution3D(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const Convolution3DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _common = _o->common ? CreateConvolution3DCommon(_fbb, _o->common.get(), _rehasher) : 0; + auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; + auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; + return MNN::CreateConvolution3D( + _fbb, + _common, + _weight, + _bias); +} + +inline InnerProductT *InnerProduct::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new InnerProductT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void InnerProduct::UnPackTo(InnerProductT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = outputCount(); _o->outputCount = _e; }; + { auto _e = biasTerm(); _o->biasTerm = _e; }; + { auto _e = weightSize(); _o->weightSize = _e; }; + { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; + { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; + { auto _e = axis(); _o->axis = _e; }; + { auto _e = transpose(); _o->transpose = _e; }; + { auto _e = quanParameter(); if (_e) _o->quanParameter = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset InnerProduct::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InnerProductT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateInnerProduct(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateInnerProduct(flatbuffers::FlatBufferBuilder &_fbb, const InnerProductT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const InnerProductT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _outputCount = _o->outputCount; + auto _biasTerm = _o->biasTerm; + auto _weightSize = _o->weightSize; + auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; + auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; + auto _axis = _o->axis; + auto _transpose = _o->transpose; + auto _quanParameter = _o->quanParameter ? CreateIDSTQuan(_fbb, _o->quanParameter.get(), _rehasher) : 0; + return MNN::CreateInnerProduct( + _fbb, + _outputCount, + _biasTerm, + _weightSize, + _weight, + _bias, + _axis, + _transpose, + _quanParameter); +} + +inline PoolT *Pool::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PoolT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Pool::UnPackTo(PoolT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padX(); _o->padX = _e; }; + { auto _e = padY(); _o->padY = _e; }; + { auto _e = isGlobal(); _o->isGlobal = _e; }; + { auto _e = kernelX(); _o->kernelX = _e; }; + { auto _e = kernelY(); _o->kernelY = _e; }; + { auto _e = strideX(); _o->strideX = _e; }; + { auto _e = strideY(); _o->strideY = _e; }; + { auto _e = type(); _o->type = _e; }; + { auto _e = padType(); _o->padType = _e; }; + { auto _e = dataType(); _o->dataType = _e; }; + { auto _e = ceilModel(); _o->ceilModel = _e; }; +} + +inline flatbuffers::Offset Pool::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PoolT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePool(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePool(flatbuffers::FlatBufferBuilder &_fbb, const PoolT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PoolT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto 
_padX = _o->padX; + auto _padY = _o->padY; + auto _isGlobal = _o->isGlobal; + auto _kernelX = _o->kernelX; + auto _kernelY = _o->kernelY; + auto _strideX = _o->strideX; + auto _strideY = _o->strideY; + auto _type = _o->type; + auto _padType = _o->padType; + auto _dataType = _o->dataType; + auto _ceilModel = _o->ceilModel; + return MNN::CreatePool( + _fbb, + _padX, + _padY, + _isGlobal, + _kernelX, + _kernelY, + _strideX, + _strideY, + _type, + _padType, + _dataType, + _ceilModel); +} + +inline Pool3DT *Pool3D::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Pool3DT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Pool3D::UnPackTo(Pool3DT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = strides(); if (_e) { _o->strides.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->strides[_i] = _e->Get(_i); } } }; + { auto _e = kernels(); if (_e) { _o->kernels.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->kernels[_i] = _e->Get(_i); } } }; + { auto _e = pads(); if (_e) { _o->pads.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->pads[_i] = _e->Get(_i); } } }; + { auto _e = type(); _o->type = _e; }; + { auto _e = padType(); _o->padType = _e; }; +} + +inline flatbuffers::Offset Pool3D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool3DT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePool3D(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePool3D(flatbuffers::FlatBufferBuilder &_fbb, const Pool3DT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool3DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _strides = _o->strides.size() ? 
_fbb.CreateVector(_o->strides) : 0; + auto _kernels = _o->kernels.size() ? _fbb.CreateVector(_o->kernels) : 0; + auto _pads = _o->pads.size() ? _fbb.CreateVector(_o->pads) : 0; + auto _type = _o->type; + auto _padType = _o->padType; + return MNN::CreatePool3D( + _fbb, + _strides, + _kernels, + _pads, + _type, + _padType); +} + +inline ReluT *Relu::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReluT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Relu::UnPackTo(ReluT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = slope(); _o->slope = _e; }; +} + +inline flatbuffers::Offset Relu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReluT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRelu(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRelu(flatbuffers::FlatBufferBuilder &_fbb, const ReluT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _slope = _o->slope; + return MNN::CreateRelu( + _fbb, + _slope); +} + +inline Relu6T *Relu6::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Relu6T(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Relu6::UnPackTo(Relu6T *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = slope(); _o->slope = _e; }; +} + +inline flatbuffers::Offset Relu6::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Relu6T* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRelu6(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRelu6(flatbuffers::FlatBufferBuilder &_fbb, const Relu6T *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + 
(void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Relu6T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _slope = _o->slope; + return MNN::CreateRelu6( + _fbb, + _slope); +} + +inline PReluT *PRelu::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PReluT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void PRelu::UnPackTo(PReluT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = slopeCount(); _o->slopeCount = _e; }; + { auto _e = slope(); if (_e) { _o->slope.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slope[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset PRelu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PReluT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePRelu(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePRelu(flatbuffers::FlatBufferBuilder &_fbb, const PReluT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PReluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _slopeCount = _o->slopeCount; + auto _slope = _o->slope.size() ? 
_fbb.CreateVector(_o->slope) : 0; + return MNN::CreatePRelu( + _fbb, + _slopeCount, + _slope); +} + +inline ELUT *ELU::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ELUT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ELU::UnPackTo(ELUT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = alpha(); _o->alpha = _e; }; +} + +inline flatbuffers::Offset ELU::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ELUT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateELU(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateELU(flatbuffers::FlatBufferBuilder &_fbb, const ELUT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ELUT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _alpha = _o->alpha; + return MNN::CreateELU( + _fbb, + _alpha); +} + +inline LRNT *LRN::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LRNT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LRN::UnPackTo(LRNT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = regionType(); _o->regionType = _e; }; + { auto _e = localSize(); _o->localSize = _e; }; + { auto _e = alpha(); _o->alpha = _e; }; + { auto _e = beta(); _o->beta = _e; }; +} + +inline flatbuffers::Offset LRN::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LRNT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLRN(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLRN(flatbuffers::FlatBufferBuilder &_fbb, const LRNT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LRNT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _regionType = _o->regionType; + auto _localSize = _o->localSize; + auto _alpha = _o->alpha; + auto _beta = _o->beta; + return MNN::CreateLRN( + _fbb, + _regionType, + _localSize, + _alpha, + _beta); +} + +inline ArgMaxT *ArgMax::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ArgMaxT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ArgMax::UnPackTo(ArgMaxT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = outMaxVal(); _o->outMaxVal = _e; }; + { auto _e = topK(); _o->topK = _e; }; + { auto _e = axis(); _o->axis = _e; }; + { auto _e = softmaxThreshold(); _o->softmaxThreshold = _e; }; +} + +inline flatbuffers::Offset ArgMax::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateArgMax(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateArgMax(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _outMaxVal = _o->outMaxVal; + auto _topK = _o->topK; + auto _axis = _o->axis; + auto _softmaxThreshold = _o->softmaxThreshold; + return MNN::CreateArgMax( + _fbb, + _outMaxVal, + _topK, + _axis, + _softmaxThreshold); +} + +inline AxisT *Axis::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new AxisT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Axis::UnPackTo(AxisT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; }; +} + +inline flatbuffers::Offset Axis::Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
AxisT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAxis(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAxis(flatbuffers::FlatBufferBuilder &_fbb, const AxisT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AxisT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + return MNN::CreateAxis( + _fbb, + _axis); +} + +inline InputT *Input::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new InputT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Input::UnPackTo(InputT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; + { auto _e = dtype(); _o->dtype = _e; }; + { auto _e = dformat(); _o->dformat = _e; }; +} + +inline flatbuffers::Offset Input::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InputT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateInput(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateInput(flatbuffers::FlatBufferBuilder &_fbb, const InputT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const InputT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dims = _o->dims.size() ? 
_fbb.CreateVector(_o->dims) : 0; + auto _dtype = _o->dtype; + auto _dformat = _o->dformat; + return MNN::CreateInput( + _fbb, + _dims, + _dtype, + _dformat); +} + +inline LSTMT *LSTM::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LSTMT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LSTM::UnPackTo(LSTMT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = outputCount(); _o->outputCount = _e; }; + { auto _e = weightSize(); _o->weightSize = _e; }; + { auto _e = clippingThreshold(); _o->clippingThreshold = _e; }; + { auto _e = weightI(); if (_e) _o->weightI = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = weightH(); if (_e) _o->weightH = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = bias(); if (_e) _o->bias = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = weightIQ(); if (_e) _o->weightIQ = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = weightIA(); if (_e) _o->weightIA = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = quantScale(); _o->quantScale = _e; }; +} + +inline flatbuffers::Offset LSTM::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLSTM(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLSTM(flatbuffers::FlatBufferBuilder &_fbb, const LSTMT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _outputCount = _o->outputCount; + auto _weightSize = _o->weightSize; + auto _clippingThreshold = _o->clippingThreshold; + auto _weightI = _o->weightI ? CreateBlob(_fbb, _o->weightI.get(), _rehasher) : 0; + auto _weightH = _o->weightH ? CreateBlob(_fbb, _o->weightH.get(), _rehasher) : 0; + auto _bias = _o->bias ? 
CreateBlob(_fbb, _o->bias.get(), _rehasher) : 0; + auto _weightIQ = _o->weightIQ ? CreateBlob(_fbb, _o->weightIQ.get(), _rehasher) : 0; + auto _weightIA = _o->weightIA ? CreateBlob(_fbb, _o->weightIA.get(), _rehasher) : 0; + auto _quantScale = _o->quantScale; + return MNN::CreateLSTM( + _fbb, + _outputCount, + _weightSize, + _clippingThreshold, + _weightI, + _weightH, + _bias, + _weightIQ, + _weightIA, + _quantScale); +} + +inline SliceT *Slice::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SliceT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Slice::UnPackTo(SliceT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; }; + { auto _e = slicePoints(); if (_e) { _o->slicePoints.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slicePoints[_i] = _e->Get(_i); } } }; + { auto _e = sourceType(); _o->sourceType = _e; }; +} + +inline flatbuffers::Offset Slice::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSlice(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSlice(flatbuffers::FlatBufferBuilder &_fbb, const SliceT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + auto _slicePoints = _o->slicePoints.size() ? 
_fbb.CreateVector(_o->slicePoints) : 0; + auto _sourceType = _o->sourceType; + return MNN::CreateSlice( + _fbb, + _axis, + _slicePoints, + _sourceType); +} + +inline BatchNormT *BatchNorm::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BatchNormT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void BatchNorm::UnPackTo(BatchNormT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = channels(); _o->channels = _e; }; + { auto _e = slopeData(); if (_e) { _o->slopeData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slopeData[_i] = _e->Get(_i); } } }; + { auto _e = meanData(); if (_e) { _o->meanData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->meanData[_i] = _e->Get(_i); } } }; + { auto _e = varData(); if (_e) { _o->varData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->varData[_i] = _e->Get(_i); } } }; + { auto _e = biasData(); if (_e) { _o->biasData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->biasData[_i] = _e->Get(_i); } } }; + { auto _e = Adata(); if (_e) { _o->Adata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Adata[_i] = _e->Get(_i); } } }; + { auto _e = Bdata(); if (_e) { _o->Bdata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Bdata[_i] = _e->Get(_i); } } }; + { auto _e = epsilon(); _o->epsilon = _e; }; +} + +inline flatbuffers::Offset BatchNorm::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchNormT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBatchNorm(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBatchNorm(flatbuffers::FlatBufferBuilder &_fbb, const BatchNormT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const BatchNormT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _channels = _o->channels; + auto _slopeData = _o->slopeData.size() ? _fbb.CreateVector(_o->slopeData) : 0; + auto _meanData = _o->meanData.size() ? _fbb.CreateVector(_o->meanData) : 0; + auto _varData = _o->varData.size() ? _fbb.CreateVector(_o->varData) : 0; + auto _biasData = _o->biasData.size() ? _fbb.CreateVector(_o->biasData) : 0; + auto _Adata = _o->Adata.size() ? _fbb.CreateVector(_o->Adata) : 0; + auto _Bdata = _o->Bdata.size() ? _fbb.CreateVector(_o->Bdata) : 0; + auto _epsilon = _o->epsilon; + return MNN::CreateBatchNorm( + _fbb, + _channels, + _slopeData, + _meanData, + _varData, + _biasData, + _Adata, + _Bdata, + _epsilon); +} + +inline ScaleT *Scale::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ScaleT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Scale::UnPackTo(ScaleT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = channels(); _o->channels = _e; }; + { auto _e = scaleData(); if (_e) { _o->scaleData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scaleData[_i] = _e->Get(_i); } } }; + { auto _e = biasData(); if (_e) { _o->biasData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->biasData[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset Scale::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScaleT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateScale(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateScale(flatbuffers::FlatBufferBuilder &_fbb, const ScaleT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScaleT* __o; const flatbuffers::rehasher_function_t 
*__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _channels = _o->channels; + auto _scaleData = _o->scaleData.size() ? _fbb.CreateVector(_o->scaleData) : 0; + auto _biasData = _o->biasData.size() ? _fbb.CreateVector(_o->biasData) : 0; + return MNN::CreateScale( + _fbb, + _channels, + _scaleData, + _biasData); +} + +inline EltwiseT *Eltwise::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new EltwiseT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Eltwise::UnPackTo(EltwiseT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); _o->type = _e; }; + { auto _e = coeff(); if (_e) { _o->coeff.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->coeff[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset Eltwise::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateEltwise(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateEltwise(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EltwiseT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type; + auto _coeff = _o->coeff.size() ? 
_fbb.CreateVector(_o->coeff) : 0; + return MNN::CreateEltwise( + _fbb, + _type, + _coeff); +} + +inline FlattenT *Flatten::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new FlattenT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Flatten::UnPackTo(FlattenT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; }; + { auto _e = endAxis(); _o->endAxis = _e; }; +} + +inline flatbuffers::Offset Flatten::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FlattenT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFlatten(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFlatten(flatbuffers::FlatBufferBuilder &_fbb, const FlattenT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FlattenT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + auto _endAxis = _o->endAxis; + return MNN::CreateFlatten( + _fbb, + _axis, + _endAxis); +} + +inline PermuteT *Permute::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PermuteT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Permute::UnPackTo(PermuteT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset Permute::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PermuteT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePermute(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePermute(flatbuffers::FlatBufferBuilder &_fbb, const PermuteT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + 
(void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PermuteT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dims = _o->dims.size() ? _fbb.CreateVector(_o->dims) : 0; + return MNN::CreatePermute( + _fbb, + _dims); +} + +inline ReshapeT *Reshape::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReshapeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Reshape::UnPackTo(ReshapeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; + { auto _e = dimType(); _o->dimType = _e; }; +} + +inline flatbuffers::Offset Reshape::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReshape(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReshape(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dims = _o->dims.size() ? 
_fbb.CreateVector(_o->dims) : 0; + auto _dimType = _o->dimType; + return MNN::CreateReshape( + _fbb, + _dims, + _dimType); +} + +inline DetectionOutputT *DetectionOutput::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new DetectionOutputT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void DetectionOutput::UnPackTo(DetectionOutputT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = classCount(); _o->classCount = _e; }; + { auto _e = nmsThresholdold(); _o->nmsThresholdold = _e; }; + { auto _e = nmsTopK(); _o->nmsTopK = _e; }; + { auto _e = keepTopK(); _o->keepTopK = _e; }; + { auto _e = confidenceThreshold(); _o->confidenceThreshold = _e; }; + { auto _e = shareLocation(); _o->shareLocation = _e; }; + { auto _e = backgroundLable(); _o->backgroundLable = _e; }; + { auto _e = varianceEncodedTarget(); _o->varianceEncodedTarget = _e; }; + { auto _e = codeType(); _o->codeType = _e; }; + { auto _e = objectnessScore(); _o->objectnessScore = _e; }; +} + +inline flatbuffers::Offset DetectionOutput::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DetectionOutputT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDetectionOutput(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDetectionOutput(flatbuffers::FlatBufferBuilder &_fbb, const DetectionOutputT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DetectionOutputT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _classCount = _o->classCount; + auto _nmsThresholdold = _o->nmsThresholdold; + auto _nmsTopK = _o->nmsTopK; + auto _keepTopK = _o->keepTopK; + auto _confidenceThreshold = _o->confidenceThreshold; + auto _shareLocation = _o->shareLocation; + auto _backgroundLable = _o->backgroundLable; + auto _varianceEncodedTarget = 
_o->varianceEncodedTarget; + auto _codeType = _o->codeType; + auto _objectnessScore = _o->objectnessScore; + return MNN::CreateDetectionOutput( + _fbb, + _classCount, + _nmsThresholdold, + _nmsTopK, + _keepTopK, + _confidenceThreshold, + _shareLocation, + _backgroundLable, + _varianceEncodedTarget, + _codeType, + _objectnessScore); +} + +inline RoiPoolingT *RoiPooling::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RoiPoolingT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void RoiPooling::UnPackTo(RoiPoolingT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = pooledWidth(); _o->pooledWidth = _e; }; + { auto _e = pooledHeight(); _o->pooledHeight = _e; }; + { auto _e = spatialScale(); _o->spatialScale = _e; }; +} + +inline flatbuffers::Offset RoiPooling::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RoiPoolingT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRoiPooling(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRoiPooling(flatbuffers::FlatBufferBuilder &_fbb, const RoiPoolingT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RoiPoolingT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _pooledWidth = _o->pooledWidth; + auto _pooledHeight = _o->pooledHeight; + auto _spatialScale = _o->spatialScale; + return MNN::CreateRoiPooling( + _fbb, + _pooledWidth, + _pooledHeight, + _spatialScale); +} + +inline ProposalT *Proposal::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ProposalT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Proposal::UnPackTo(ProposalT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = featStride(); _o->featStride = _e; }; + { auto _e = 
baseSize(); _o->baseSize = _e; }; + { auto _e = preNmsTopN(); _o->preNmsTopN = _e; }; + { auto _e = afterNmsTopN(); _o->afterNmsTopN = _e; }; + { auto _e = nmsThreshold(); _o->nmsThreshold = _e; }; + { auto _e = minSize(); _o->minSize = _e; }; + { auto _e = ratios(); if (_e) _o->ratios = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = scales(); if (_e) _o->scales = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = anchors(); if (_e) _o->anchors = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset Proposal::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ProposalT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateProposal(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateProposal(flatbuffers::FlatBufferBuilder &_fbb, const ProposalT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ProposalT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _featStride = _o->featStride; + auto _baseSize = _o->baseSize; + auto _preNmsTopN = _o->preNmsTopN; + auto _afterNmsTopN = _o->afterNmsTopN; + auto _nmsThreshold = _o->nmsThreshold; + auto _minSize = _o->minSize; + auto _ratios = _o->ratios ? CreateBlob(_fbb, _o->ratios.get(), _rehasher) : 0; + auto _scales = _o->scales ? CreateBlob(_fbb, _o->scales.get(), _rehasher) : 0; + auto _anchors = _o->anchors ? 
CreateBlob(_fbb, _o->anchors.get(), _rehasher) : 0; + return MNN::CreateProposal( + _fbb, + _featStride, + _baseSize, + _preNmsTopN, + _afterNmsTopN, + _nmsThreshold, + _minSize, + _ratios, + _scales, + _anchors); +} + +inline InterpT *Interp::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new InterpT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Interp::UnPackTo(InterpT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = widthScale(); _o->widthScale = _e; }; + { auto _e = heightScale(); _o->heightScale = _e; }; + { auto _e = outputWidth(); _o->outputWidth = _e; }; + { auto _e = outputHeight(); _o->outputHeight = _e; }; + { auto _e = resizeType(); _o->resizeType = _e; }; + { auto _e = alignCorners(); _o->alignCorners = _e; }; +} + +inline flatbuffers::Offset Interp::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InterpT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateInterp(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateInterp(flatbuffers::FlatBufferBuilder &_fbb, const InterpT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const InterpT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _widthScale = _o->widthScale; + auto _heightScale = _o->heightScale; + auto _outputWidth = _o->outputWidth; + auto _outputHeight = _o->outputHeight; + auto _resizeType = _o->resizeType; + auto _alignCorners = _o->alignCorners; + return MNN::CreateInterp( + _fbb, + _widthScale, + _heightScale, + _outputWidth, + _outputHeight, + _resizeType, + _alignCorners); +} + +inline ResizeT *Resize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ResizeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Resize::UnPackTo(ResizeT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = xScale(); _o->xScale = _e; }; + { auto _e = yScale(); _o->yScale = _e; }; +} + +inline flatbuffers::Offset Resize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateResize(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateResize(flatbuffers::FlatBufferBuilder &_fbb, const ResizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _xScale = _o->xScale; + auto _yScale = _o->yScale; + return MNN::CreateResize( + _fbb, + _xScale, + _yScale); +} + +inline PriorBoxT *PriorBox::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PriorBoxT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void PriorBox::UnPackTo(PriorBoxT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = minSizes(); if (_e) { _o->minSizes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->minSizes[_i] = _e->Get(_i); } } }; + { auto _e = maxSizes(); if (_e) { _o->maxSizes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->maxSizes[_i] = _e->Get(_i); } } }; + { auto _e = aspectRatios(); if (_e) { _o->aspectRatios.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->aspectRatios[_i] = _e->Get(_i); } } }; + { auto _e = variances(); if (_e) { _o->variances.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->variances[_i] = _e->Get(_i); } } }; + { auto _e = flip(); _o->flip = _e; }; + { auto _e = clip(); _o->clip = _e; }; + { auto _e = imageWidth(); _o->imageWidth = _e; }; + { auto _e 
= imageHeight(); _o->imageHeight = _e; }; + { auto _e = stepWidth(); _o->stepWidth = _e; }; + { auto _e = stepHeight(); _o->stepHeight = _e; }; + { auto _e = offset(); _o->offset = _e; }; +} + +inline flatbuffers::Offset PriorBox::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PriorBoxT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePriorBox(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePriorBox(flatbuffers::FlatBufferBuilder &_fbb, const PriorBoxT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PriorBoxT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _minSizes = _o->minSizes.size() ? _fbb.CreateVector(_o->minSizes) : 0; + auto _maxSizes = _o->maxSizes.size() ? _fbb.CreateVector(_o->maxSizes) : 0; + auto _aspectRatios = _o->aspectRatios.size() ? _fbb.CreateVector(_o->aspectRatios) : 0; + auto _variances = _o->variances.size() ? 
_fbb.CreateVector(_o->variances) : 0; + auto _flip = _o->flip; + auto _clip = _o->clip; + auto _imageWidth = _o->imageWidth; + auto _imageHeight = _o->imageHeight; + auto _stepWidth = _o->stepWidth; + auto _stepHeight = _o->stepHeight; + auto _offset = _o->offset; + return MNN::CreatePriorBox( + _fbb, + _minSizes, + _maxSizes, + _aspectRatios, + _variances, + _flip, + _clip, + _imageWidth, + _imageHeight, + _stepWidth, + _stepHeight, + _offset); +} + +inline NormalizeT *Normalize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new NormalizeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Normalize::UnPackTo(NormalizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = acrossSpatial(); _o->acrossSpatial = _e; }; + { auto _e = channelShared(); _o->channelShared = _e; }; + { auto _e = eps(); _o->eps = _e; }; + { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset Normalize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NormalizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNormalize(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNormalize(flatbuffers::FlatBufferBuilder &_fbb, const NormalizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NormalizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _acrossSpatial = _o->acrossSpatial; + auto _channelShared = _o->channelShared; + auto _eps = _o->eps; + auto _scale = _o->scale.size() ? 
_fbb.CreateVector(_o->scale) : 0; + return MNN::CreateNormalize( + _fbb, + _acrossSpatial, + _channelShared, + _eps, + _scale); +} + +inline EltwiseInt8T *EltwiseInt8::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new EltwiseInt8T(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void EltwiseInt8::UnPackTo(EltwiseInt8T *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); _o->type = _e; }; + { auto _e = inputQuan0(); if (_e) _o->inputQuan0 = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = inputQuan1(); if (_e) _o->inputQuan1 = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = outputQuan(); if (_e) _o->outputQuan = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset EltwiseInt8::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseInt8T* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateEltwiseInt8(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateEltwiseInt8(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseInt8T *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EltwiseInt8T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type; + auto _inputQuan0 = _o->inputQuan0 ? CreateQuantizedFloatParam(_fbb, _o->inputQuan0.get(), _rehasher) : 0; + auto _inputQuan1 = _o->inputQuan1 ? CreateQuantizedFloatParam(_fbb, _o->inputQuan1.get(), _rehasher) : 0; + auto _outputQuan = _o->outputQuan ? 
CreateQuantizedFloatParam(_fbb, _o->outputQuan.get(), _rehasher) : 0; + return MNN::CreateEltwiseInt8( + _fbb, + _type, + _inputQuan0, + _inputQuan1, + _outputQuan); +} + +inline const flatbuffers::TypeTable *PadModeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + PadModeTypeTable + }; + static const char * const names[] = { + "CAFFE", + "VALID", + "SAME" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PoolTypeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + PoolTypeTypeTable + }; + static const char * const names[] = { + "MAXPOOL", + "AVEPOOL" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PoolPadTypeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + PoolPadTypeTypeTable + }; + static const char * const names[] = { + "CAFFE", + "VALID", + "SAME" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *EltwiseTypeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + 
EltwiseTypeTypeTable + }; + static const char * const names[] = { + "PROD", + "SUM", + "MAXIMUM", + "SUB" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 4, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *Convolution2DCommonTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + PadModeTypeTable + }; + static const char * const names[] = { + "padX", + "padY", + "kernelX", + "kernelY", + "strideX", + "strideY", + "dilateX", + "dilateY", + "padMode", + "group", + "outputCount", + "inputCount", + "relu", + "relu6" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 14, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *Convolution3DCommonTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + PadModeTypeTable + }; + static const char * const names[] = { + "dilates", + "strides", + "kernels", + "pads", + "padMode", + "inputCount", + "outputCount", + "relu", + "relu6" + }; + static const 
flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *IDSTQuanTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const char * const names[] = { + "buffer", + "alpha", + "type", + "useInt32", + "quantScale", + "scaleIn", + "scaleOut", + "aMax", + "aMin", + "readType", + "has_scaleInt" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 11, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedFloatParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 } + }; + static const char * const names[] = { + "weight", + "bias", + "scale", + "tensorScale" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *Convolution2DTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 1 }, + { flatbuffers::ET_SEQUENCE, 0, 2 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + Convolution2DCommonTypeTable, + IDSTQuanTypeTable, + QuantizedFloatParamTypeTable + }; + static const char * const names[] = { + "common", + "weight", + "bias", + "quanParameter", + 
"symmetricQuan" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *Convolution3DTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + Convolution3DCommonTypeTable + }; + static const char * const names[] = { + "common", + "weight", + "bias" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *InnerProductTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + IDSTQuanTypeTable + }; + static const char * const names[] = { + "outputCount", + "biasTerm", + "weightSize", + "weight", + "bias", + "axis", + "transpose", + "quanParameter" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 8, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PoolTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 1 }, + { flatbuffers::ET_INT, 0, 2 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const 
flatbuffers::TypeFunction type_refs[] = { + PoolTypeTypeTable, + PoolPadTypeTypeTable, + DataTypeTypeTable + }; + static const char * const names[] = { + "padX", + "padY", + "isGlobal", + "kernelX", + "kernelY", + "strideX", + "strideY", + "type", + "padType", + "dataType", + "ceilModel" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *Pool3DTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + PoolTypeTypeTable, + PoolPadTypeTypeTable + }; + static const char * const names[] = { + "strides", + "kernels", + "pads", + "type", + "padType" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ReluTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "slope" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *Relu6TypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "slope" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PReluTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 } + }; + static const char * const names[] = { + "slopeCount", + "slope" + }; + 
static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ELUTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "alpha" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *LRNTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "regionType", + "localSize", + "alpha", + "beta" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ArgMaxTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const char * const names[] = { + "outMaxVal", + "topK", + "axis", + "softmaxThreshold" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *AxisTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 } + }; + static const char * const names[] = { + "axis" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *InputTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 1 } + }; + static 
const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable, + MNN_DATA_FORMATTypeTable + }; + static const char * const names[] = { + "dims", + "dtype", + "dformat" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *LSTMTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + BlobTypeTable + }; + static const char * const names[] = { + "outputCount", + "weightSize", + "clippingThreshold", + "weightI", + "weightH", + "bias", + "weightIQ", + "weightIA", + "quantScale" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *SliceTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + NetSourceTypeTable + }; + static const char * const names[] = { + "axis", + "slicePoints", + "sourceType" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *BatchNormTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 
}, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "channels", + "slopeData", + "meanData", + "varData", + "biasData", + "Adata", + "Bdata", + "epsilon" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 8, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ScaleTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 } + }; + static const char * const names[] = { + "channels", + "scaleData", + "biasData" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *EltwiseTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_FLOAT, 1, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + EltwiseTypeTypeTable + }; + static const char * const names[] = { + "type", + "coeff" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *FlattenTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const char * const names[] = { + "axis", + "endAxis" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PermuteTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 } + }; + static const char * const names[] = { + "dims" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names + }; + return &tt; +} + 
+inline const flatbuffers::TypeTable *ReshapeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + MNN_DATA_FORMATTypeTable + }; + static const char * const names[] = { + "dims", + "dimType" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *DetectionOutputTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "classCount", + "nmsThresholdold", + "nmsTopK", + "keepTopK", + "confidenceThreshold", + "shareLocation", + "backgroundLable", + "varianceEncodedTarget", + "codeType", + "objectnessScore" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 10, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *RoiPoolingTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "pooledWidth", + "pooledHeight", + "spatialScale" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ProposalTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { 
flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + BlobTypeTable + }; + static const char * const names[] = { + "featStride", + "baseSize", + "preNmsTopN", + "afterNmsTopN", + "nmsThreshold", + "minSize", + "ratios", + "scales", + "anchors" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *InterpTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const char * const names[] = { + "widthScale", + "heightScale", + "outputWidth", + "outputHeight", + "resizeType", + "alignCorners" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 6, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ResizeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "xScale", + "yScale" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PriorBoxTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { 
flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "minSizes", + "maxSizes", + "aspectRatios", + "variances", + "flip", + "clip", + "imageWidth", + "imageHeight", + "stepWidth", + "stepHeight", + "offset" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 11, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *NormalizeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 } + }; + static const char * const names[] = { + "acrossSpatial", + "channelShared", + "eps", + "scale" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *EltwiseInt8TypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 1 }, + { flatbuffers::ET_SEQUENCE, 0, 1 }, + { flatbuffers::ET_SEQUENCE, 0, 1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + EltwiseTypeTypeTable, + QuantizedFloatParamTypeTable + }; + static const char * const names[] = { + "type", + "inputQuan0", + "inputQuan1", + "outputQuan" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_CAFFEOP_MNN_H_ diff --git a/schema/current/GpuLibrary_generated.h b/schema/current/GpuLibrary_generated.h new file mode 100644 index 000000000..eb29567b6 --- /dev/null +++ b/schema/current/GpuLibrary_generated.h @@ -0,0 +1,1035 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_ +#define 
FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "Tensor_generated.h" +#include "Type_generated.h" + +namespace MNN { + +struct GpuBuffer; +struct GpuBufferT; + +struct GpuPipeline; +struct GpuPipelineT; + +struct GpuStage; +struct GpuStageT; + +struct GpuFunction; +struct GpuFunctionT; + +struct GpuLibrary; +struct GpuLibraryT; + +inline const flatbuffers::TypeTable *GpuBufferTypeTable(); + +inline const flatbuffers::TypeTable *GpuPipelineTypeTable(); + +inline const flatbuffers::TypeTable *GpuStageTypeTable(); + +inline const flatbuffers::TypeTable *GpuFunctionTypeTable(); + +inline const flatbuffers::TypeTable *GpuLibraryTypeTable(); + +enum STORAGE_TYPE { + STORAGE_TYPE_BUFFER = 0, + STORAGE_TYPE_UNIFORM = 1, + STORAGE_TYPE_IMAGE = 2, + STORAGE_TYPE_MIN = STORAGE_TYPE_BUFFER, + STORAGE_TYPE_MAX = STORAGE_TYPE_IMAGE +}; + +inline const STORAGE_TYPE (&EnumValuesSTORAGE_TYPE())[3] { + static const STORAGE_TYPE values[] = { + STORAGE_TYPE_BUFFER, + STORAGE_TYPE_UNIFORM, + STORAGE_TYPE_IMAGE + }; + return values; +} + +inline const char * const *EnumNamesSTORAGE_TYPE() { + static const char * const names[] = { + "BUFFER", + "UNIFORM", + "IMAGE", + nullptr + }; + return names; +} + +inline const char *EnumNameSTORAGE_TYPE(STORAGE_TYPE e) { + if (e < STORAGE_TYPE_BUFFER || e > STORAGE_TYPE_IMAGE) return ""; + const size_t index = static_cast(e); + return EnumNamesSTORAGE_TYPE()[index]; +} + +enum ACCESS_TYPE { + ACCESS_TYPE_READ_ONLY = 0, + ACCESS_TYPE_WRITE_ONLY = 1, + ACCESS_TYPE_READ_WRITE = 2, + ACCESS_TYPE_MIN = ACCESS_TYPE_READ_ONLY, + ACCESS_TYPE_MAX = ACCESS_TYPE_READ_WRITE +}; + +inline const ACCESS_TYPE (&EnumValuesACCESS_TYPE())[3] { + static const ACCESS_TYPE values[] = { + ACCESS_TYPE_READ_ONLY, + ACCESS_TYPE_WRITE_ONLY, + ACCESS_TYPE_READ_WRITE + }; + return values; +} + +inline const char * const *EnumNamesACCESS_TYPE() { + static const char * const names[] = { + "READ_ONLY", + "WRITE_ONLY", + "READ_WRITE", + 
nullptr + }; + return names; +} + +inline const char *EnumNameACCESS_TYPE(ACCESS_TYPE e) { + if (e < ACCESS_TYPE_READ_ONLY || e > ACCESS_TYPE_READ_WRITE) return ""; + const size_t index = static_cast(e); + return EnumNamesACCESS_TYPE()[index]; +} + +struct GpuBufferT : public flatbuffers::NativeTable { + typedef GpuBuffer TableType; + ACCESS_TYPE access; + STORAGE_TYPE storage; + std::unique_ptr content; + GpuBufferT() + : access(ACCESS_TYPE_READ_ONLY), + storage(STORAGE_TYPE_BUFFER) { + } +}; + +struct GpuBuffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GpuBufferT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return GpuBufferTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACCESS = 4, + VT_STORAGE = 6, + VT_CONTENT = 8 + }; + ACCESS_TYPE access() const { + return static_cast(GetField(VT_ACCESS, 0)); + } + STORAGE_TYPE storage() const { + return static_cast(GetField(VT_STORAGE, 0)); + } + const Blob *content() const { + return GetPointer(VT_CONTENT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACCESS) && + VerifyField(verifier, VT_STORAGE) && + VerifyOffset(verifier, VT_CONTENT) && + verifier.VerifyTable(content()) && + verifier.EndTable(); + } + GpuBufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GpuBufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GpuBufferBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_access(ACCESS_TYPE access) { + fbb_.AddElement(GpuBuffer::VT_ACCESS, static_cast(access), 0); + } + void add_storage(STORAGE_TYPE storage) { + fbb_.AddElement(GpuBuffer::VT_STORAGE, 
static_cast(storage), 0); + } + void add_content(flatbuffers::Offset content) { + fbb_.AddOffset(GpuBuffer::VT_CONTENT, content); + } + explicit GpuBufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GpuBufferBuilder &operator=(const GpuBufferBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGpuBuffer( + flatbuffers::FlatBufferBuilder &_fbb, + ACCESS_TYPE access = ACCESS_TYPE_READ_ONLY, + STORAGE_TYPE storage = STORAGE_TYPE_BUFFER, + flatbuffers::Offset content = 0) { + GpuBufferBuilder builder_(_fbb); + builder_.add_content(content); + builder_.add_storage(storage); + builder_.add_access(access); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGpuBuffer(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GpuPipelineT : public flatbuffers::NativeTable { + typedef GpuPipeline TableType; + std::vector localSize; + std::string key; + std::vector metal; + std::vector vulkan; + std::string openglComputeShader; + std::string openclKernel; + GpuPipelineT() { + } +}; + +struct GpuPipeline FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GpuPipelineT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return GpuPipelineTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_LOCALSIZE = 4, + VT_KEY = 6, + VT_METAL = 8, + VT_VULKAN = 10, + VT_OPENGLCOMPUTESHADER = 12, + VT_OPENCLKERNEL = 14 + }; + const flatbuffers::Vector *localSize() const { + return GetPointer *>(VT_LOCALSIZE); + } + const flatbuffers::String *key() const { + return GetPointer(VT_KEY); + } + const flatbuffers::Vector *metal() const { + return GetPointer *>(VT_METAL); + } + const flatbuffers::Vector *vulkan() const { + return GetPointer *>(VT_VULKAN); + } + 
const flatbuffers::String *openglComputeShader() const { + return GetPointer(VT_OPENGLCOMPUTESHADER); + } + const flatbuffers::String *openclKernel() const { + return GetPointer(VT_OPENCLKERNEL); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_LOCALSIZE) && + verifier.VerifyVector(localSize()) && + VerifyOffset(verifier, VT_KEY) && + verifier.VerifyString(key()) && + VerifyOffset(verifier, VT_METAL) && + verifier.VerifyVector(metal()) && + VerifyOffset(verifier, VT_VULKAN) && + verifier.VerifyVector(vulkan()) && + VerifyOffset(verifier, VT_OPENGLCOMPUTESHADER) && + verifier.VerifyString(openglComputeShader()) && + VerifyOffset(verifier, VT_OPENCLKERNEL) && + verifier.VerifyString(openclKernel()) && + verifier.EndTable(); + } + GpuPipelineT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GpuPipelineT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GpuPipelineBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_localSize(flatbuffers::Offset> localSize) { + fbb_.AddOffset(GpuPipeline::VT_LOCALSIZE, localSize); + } + void add_key(flatbuffers::Offset key) { + fbb_.AddOffset(GpuPipeline::VT_KEY, key); + } + void add_metal(flatbuffers::Offset> metal) { + fbb_.AddOffset(GpuPipeline::VT_METAL, metal); + } + void add_vulkan(flatbuffers::Offset> vulkan) { + fbb_.AddOffset(GpuPipeline::VT_VULKAN, vulkan); + } + void add_openglComputeShader(flatbuffers::Offset openglComputeShader) { + fbb_.AddOffset(GpuPipeline::VT_OPENGLCOMPUTESHADER, openglComputeShader); + } + void add_openclKernel(flatbuffers::Offset openclKernel) { + fbb_.AddOffset(GpuPipeline::VT_OPENCLKERNEL, openclKernel); + } + explicit 
GpuPipelineBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GpuPipelineBuilder &operator=(const GpuPipelineBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGpuPipeline( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> localSize = 0, + flatbuffers::Offset key = 0, + flatbuffers::Offset> metal = 0, + flatbuffers::Offset> vulkan = 0, + flatbuffers::Offset openglComputeShader = 0, + flatbuffers::Offset openclKernel = 0) { + GpuPipelineBuilder builder_(_fbb); + builder_.add_openclKernel(openclKernel); + builder_.add_openglComputeShader(openglComputeShader); + builder_.add_vulkan(vulkan); + builder_.add_metal(metal); + builder_.add_key(key); + builder_.add_localSize(localSize); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateGpuPipelineDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *localSize = nullptr, + const char *key = nullptr, + const std::vector *metal = nullptr, + const std::vector *vulkan = nullptr, + const char *openglComputeShader = nullptr, + const char *openclKernel = nullptr) { + auto localSize__ = localSize ? _fbb.CreateVector(*localSize) : 0; + auto key__ = key ? _fbb.CreateString(key) : 0; + auto metal__ = metal ? _fbb.CreateVector(*metal) : 0; + auto vulkan__ = vulkan ? _fbb.CreateVector(*vulkan) : 0; + auto openglComputeShader__ = openglComputeShader ? _fbb.CreateString(openglComputeShader) : 0; + auto openclKernel__ = openclKernel ? 
_fbb.CreateString(openclKernel) : 0; + return MNN::CreateGpuPipeline( + _fbb, + localSize__, + key__, + metal__, + vulkan__, + openglComputeShader__, + openclKernel__); +} + +flatbuffers::Offset CreateGpuPipeline(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GpuStageT : public flatbuffers::NativeTable { + typedef GpuStage TableType; + std::string pipeline; + std::vector groupSize; + std::vector inputIndexes; + std::vector outputIndexes; + std::vector> middleBuffer; + std::vector> constBuffer; + int32_t globalSizeIndex; + std::vector globalSizeDivide; + bool requireSize; + GpuStageT() + : globalSizeIndex(0), + requireSize(false) { + } +}; + +struct GpuStage FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GpuStageT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return GpuStageTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PIPELINE = 4, + VT_GROUPSIZE = 6, + VT_INPUTINDEXES = 8, + VT_OUTPUTINDEXES = 10, + VT_MIDDLEBUFFER = 12, + VT_CONSTBUFFER = 14, + VT_GLOBALSIZEINDEX = 16, + VT_GLOBALSIZEDIVIDE = 18, + VT_REQUIRESIZE = 20 + }; + const flatbuffers::String *pipeline() const { + return GetPointer(VT_PIPELINE); + } + const flatbuffers::Vector *groupSize() const { + return GetPointer *>(VT_GROUPSIZE); + } + const flatbuffers::Vector *inputIndexes() const { + return GetPointer *>(VT_INPUTINDEXES); + } + const flatbuffers::Vector *outputIndexes() const { + return GetPointer *>(VT_OUTPUTINDEXES); + } + const flatbuffers::Vector> *middleBuffer() const { + return GetPointer> *>(VT_MIDDLEBUFFER); + } + const flatbuffers::Vector> *constBuffer() const { + return GetPointer> *>(VT_CONSTBUFFER); + } + int32_t globalSizeIndex() const { + return GetField(VT_GLOBALSIZEINDEX, 0); + } + const flatbuffers::Vector *globalSizeDivide() const { + return GetPointer *>(VT_GLOBALSIZEDIVIDE); + } + bool 
requireSize() const { + return GetField(VT_REQUIRESIZE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_PIPELINE) && + verifier.VerifyString(pipeline()) && + VerifyOffset(verifier, VT_GROUPSIZE) && + verifier.VerifyVector(groupSize()) && + VerifyOffset(verifier, VT_INPUTINDEXES) && + verifier.VerifyVector(inputIndexes()) && + VerifyOffset(verifier, VT_OUTPUTINDEXES) && + verifier.VerifyVector(outputIndexes()) && + VerifyOffset(verifier, VT_MIDDLEBUFFER) && + verifier.VerifyVector(middleBuffer()) && + verifier.VerifyVectorOfTables(middleBuffer()) && + VerifyOffset(verifier, VT_CONSTBUFFER) && + verifier.VerifyVector(constBuffer()) && + verifier.VerifyVectorOfTables(constBuffer()) && + VerifyField(verifier, VT_GLOBALSIZEINDEX) && + VerifyOffset(verifier, VT_GLOBALSIZEDIVIDE) && + verifier.VerifyVector(globalSizeDivide()) && + VerifyField(verifier, VT_REQUIRESIZE) && + verifier.EndTable(); + } + GpuStageT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GpuStageT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GpuStageBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_pipeline(flatbuffers::Offset pipeline) { + fbb_.AddOffset(GpuStage::VT_PIPELINE, pipeline); + } + void add_groupSize(flatbuffers::Offset> groupSize) { + fbb_.AddOffset(GpuStage::VT_GROUPSIZE, groupSize); + } + void add_inputIndexes(flatbuffers::Offset> inputIndexes) { + fbb_.AddOffset(GpuStage::VT_INPUTINDEXES, inputIndexes); + } + void add_outputIndexes(flatbuffers::Offset> outputIndexes) { + fbb_.AddOffset(GpuStage::VT_OUTPUTINDEXES, outputIndexes); + } + void add_middleBuffer(flatbuffers::Offset>> middleBuffer) { + 
fbb_.AddOffset(GpuStage::VT_MIDDLEBUFFER, middleBuffer); + } + void add_constBuffer(flatbuffers::Offset>> constBuffer) { + fbb_.AddOffset(GpuStage::VT_CONSTBUFFER, constBuffer); + } + void add_globalSizeIndex(int32_t globalSizeIndex) { + fbb_.AddElement(GpuStage::VT_GLOBALSIZEINDEX, globalSizeIndex, 0); + } + void add_globalSizeDivide(flatbuffers::Offset> globalSizeDivide) { + fbb_.AddOffset(GpuStage::VT_GLOBALSIZEDIVIDE, globalSizeDivide); + } + void add_requireSize(bool requireSize) { + fbb_.AddElement(GpuStage::VT_REQUIRESIZE, static_cast(requireSize), 0); + } + explicit GpuStageBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GpuStageBuilder &operator=(const GpuStageBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGpuStage( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset pipeline = 0, + flatbuffers::Offset> groupSize = 0, + flatbuffers::Offset> inputIndexes = 0, + flatbuffers::Offset> outputIndexes = 0, + flatbuffers::Offset>> middleBuffer = 0, + flatbuffers::Offset>> constBuffer = 0, + int32_t globalSizeIndex = 0, + flatbuffers::Offset> globalSizeDivide = 0, + bool requireSize = false) { + GpuStageBuilder builder_(_fbb); + builder_.add_globalSizeDivide(globalSizeDivide); + builder_.add_globalSizeIndex(globalSizeIndex); + builder_.add_constBuffer(constBuffer); + builder_.add_middleBuffer(middleBuffer); + builder_.add_outputIndexes(outputIndexes); + builder_.add_inputIndexes(inputIndexes); + builder_.add_groupSize(groupSize); + builder_.add_pipeline(pipeline); + builder_.add_requireSize(requireSize); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateGpuStageDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *pipeline = nullptr, + const std::vector *groupSize = nullptr, + const std::vector *inputIndexes = nullptr, + const std::vector 
*outputIndexes = nullptr, + const std::vector> *middleBuffer = nullptr, + const std::vector> *constBuffer = nullptr, + int32_t globalSizeIndex = 0, + const std::vector *globalSizeDivide = nullptr, + bool requireSize = false) { + auto pipeline__ = pipeline ? _fbb.CreateString(pipeline) : 0; + auto groupSize__ = groupSize ? _fbb.CreateVector(*groupSize) : 0; + auto inputIndexes__ = inputIndexes ? _fbb.CreateVector(*inputIndexes) : 0; + auto outputIndexes__ = outputIndexes ? _fbb.CreateVector(*outputIndexes) : 0; + auto middleBuffer__ = middleBuffer ? _fbb.CreateVector>(*middleBuffer) : 0; + auto constBuffer__ = constBuffer ? _fbb.CreateVector>(*constBuffer) : 0; + auto globalSizeDivide__ = globalSizeDivide ? _fbb.CreateVector(*globalSizeDivide) : 0; + return MNN::CreateGpuStage( + _fbb, + pipeline__, + groupSize__, + inputIndexes__, + outputIndexes__, + middleBuffer__, + constBuffer__, + globalSizeIndex, + globalSizeDivide__, + requireSize); +} + +flatbuffers::Offset CreateGpuStage(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GpuFunctionT : public flatbuffers::NativeTable { + typedef GpuFunction TableType; + std::vector> stags; + std::string name; + GpuFunctionT() { + } +}; + +struct GpuFunction FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GpuFunctionT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return GpuFunctionTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_STAGS = 4, + VT_NAME = 6 + }; + const flatbuffers::Vector> *stags() const { + return GetPointer> *>(VT_STAGS); + } + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_STAGS) && + verifier.VerifyVector(stags()) && + verifier.VerifyVectorOfTables(stags()) && + VerifyOffset(verifier, 
VT_NAME) && + verifier.VerifyString(name()) && + verifier.EndTable(); + } + GpuFunctionT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GpuFunctionT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GpuFunctionBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_stags(flatbuffers::Offset>> stags) { + fbb_.AddOffset(GpuFunction::VT_STAGS, stags); + } + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(GpuFunction::VT_NAME, name); + } + explicit GpuFunctionBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GpuFunctionBuilder &operator=(const GpuFunctionBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGpuFunction( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset>> stags = 0, + flatbuffers::Offset name = 0) { + GpuFunctionBuilder builder_(_fbb); + builder_.add_name(name); + builder_.add_stags(stags); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateGpuFunctionDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector> *stags = nullptr, + const char *name = nullptr) { + auto stags__ = stags ? _fbb.CreateVector>(*stags) : 0; + auto name__ = name ? 
_fbb.CreateString(name) : 0; + return MNN::CreateGpuFunction( + _fbb, + stags__, + name__); +} + +flatbuffers::Offset CreateGpuFunction(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GpuLibraryT : public flatbuffers::NativeTable { + typedef GpuLibrary TableType; + std::vector> functions; + std::vector> pipeline; + std::string name; + GpuLibraryT() { + } +}; + +struct GpuLibrary FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GpuLibraryT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return GpuLibraryTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUNCTIONS = 4, + VT_PIPELINE = 6, + VT_NAME = 8 + }; + const flatbuffers::Vector> *functions() const { + return GetPointer> *>(VT_FUNCTIONS); + } + const flatbuffers::Vector> *pipeline() const { + return GetPointer> *>(VT_PIPELINE); + } + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_FUNCTIONS) && + verifier.VerifyVector(functions()) && + verifier.VerifyVectorOfTables(functions()) && + VerifyOffset(verifier, VT_PIPELINE) && + verifier.VerifyVector(pipeline()) && + verifier.VerifyVectorOfTables(pipeline()) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + verifier.EndTable(); + } + GpuLibraryT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GpuLibraryT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GpuLibraryBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_functions(flatbuffers::Offset>> 
functions) { + fbb_.AddOffset(GpuLibrary::VT_FUNCTIONS, functions); + } + void add_pipeline(flatbuffers::Offset>> pipeline) { + fbb_.AddOffset(GpuLibrary::VT_PIPELINE, pipeline); + } + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(GpuLibrary::VT_NAME, name); + } + explicit GpuLibraryBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GpuLibraryBuilder &operator=(const GpuLibraryBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGpuLibrary( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset>> functions = 0, + flatbuffers::Offset>> pipeline = 0, + flatbuffers::Offset name = 0) { + GpuLibraryBuilder builder_(_fbb); + builder_.add_name(name); + builder_.add_pipeline(pipeline); + builder_.add_functions(functions); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateGpuLibraryDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector> *functions = nullptr, + const std::vector> *pipeline = nullptr, + const char *name = nullptr) { + auto functions__ = functions ? _fbb.CreateVector>(*functions) : 0; + auto pipeline__ = pipeline ? _fbb.CreateVector>(*pipeline) : 0; + auto name__ = name ? 
_fbb.CreateString(name) : 0; + return MNN::CreateGpuLibrary( + _fbb, + functions__, + pipeline__, + name__); +} + +flatbuffers::Offset CreateGpuLibrary(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline GpuBufferT *GpuBuffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GpuBufferT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void GpuBuffer::UnPackTo(GpuBufferT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = access(); _o->access = _e; }; + { auto _e = storage(); _o->storage = _e; }; + { auto _e = content(); if (_e) _o->content = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset GpuBuffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGpuBuffer(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGpuBuffer(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuBufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _access = _o->access; + auto _storage = _o->storage; + auto _content = _o->content ? 
CreateBlob(_fbb, _o->content.get(), _rehasher) : 0; + return MNN::CreateGpuBuffer( + _fbb, + _access, + _storage, + _content); +} + +inline GpuPipelineT *GpuPipeline::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GpuPipelineT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void GpuPipeline::UnPackTo(GpuPipelineT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = localSize(); if (_e) { _o->localSize.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->localSize[_i] = _e->Get(_i); } } }; + { auto _e = key(); if (_e) _o->key = _e->str(); }; + { auto _e = metal(); if (_e) { _o->metal.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metal[_i] = _e->Get(_i); } } }; + { auto _e = vulkan(); if (_e) { _o->vulkan.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vulkan[_i] = _e->Get(_i); } } }; + { auto _e = openglComputeShader(); if (_e) _o->openglComputeShader = _e->str(); }; + { auto _e = openclKernel(); if (_e) _o->openclKernel = _e->str(); }; +} + +inline flatbuffers::Offset GpuPipeline::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGpuPipeline(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGpuPipeline(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuPipelineT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _localSize = _o->localSize.size() ? _fbb.CreateVector(_o->localSize) : 0; + auto _key = _o->key.empty() ? 0 : _fbb.CreateString(_o->key); + auto _metal = _o->metal.size() ? 
_fbb.CreateVector(_o->metal) : 0; + auto _vulkan = _o->vulkan.size() ? _fbb.CreateVector(_o->vulkan) : 0; + auto _openglComputeShader = _o->openglComputeShader.empty() ? 0 : _fbb.CreateString(_o->openglComputeShader); + auto _openclKernel = _o->openclKernel.empty() ? 0 : _fbb.CreateString(_o->openclKernel); + return MNN::CreateGpuPipeline( + _fbb, + _localSize, + _key, + _metal, + _vulkan, + _openglComputeShader, + _openclKernel); +} + +inline GpuStageT *GpuStage::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GpuStageT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void GpuStage::UnPackTo(GpuStageT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = pipeline(); if (_e) _o->pipeline = _e->str(); }; + { auto _e = groupSize(); if (_e) { _o->groupSize.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->groupSize[_i] = _e->Get(_i); } } }; + { auto _e = inputIndexes(); if (_e) { _o->inputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputIndexes[_i] = _e->Get(_i); } } }; + { auto _e = outputIndexes(); if (_e) { _o->outputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputIndexes[_i] = _e->Get(_i); } } }; + { auto _e = middleBuffer(); if (_e) { _o->middleBuffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->middleBuffer[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = constBuffer(); if (_e) { _o->constBuffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->constBuffer[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = globalSizeIndex(); _o->globalSizeIndex = _e; }; + { auto _e = globalSizeDivide(); if (_e) { _o->globalSizeDivide.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { 
_o->globalSizeDivide[_i] = _e->Get(_i); } } }; + { auto _e = requireSize(); _o->requireSize = _e; }; +} + +inline flatbuffers::Offset GpuStage::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGpuStage(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGpuStage(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuStageT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _pipeline = _o->pipeline.empty() ? 0 : _fbb.CreateString(_o->pipeline); + auto _groupSize = _o->groupSize.size() ? _fbb.CreateVector(_o->groupSize) : 0; + auto _inputIndexes = _o->inputIndexes.size() ? _fbb.CreateVector(_o->inputIndexes) : 0; + auto _outputIndexes = _o->outputIndexes.size() ? _fbb.CreateVector(_o->outputIndexes) : 0; + auto _middleBuffer = _o->middleBuffer.size() ? _fbb.CreateVector> (_o->middleBuffer.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuBuffer(*__va->__fbb, __va->__o->middleBuffer[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _constBuffer = _o->constBuffer.size() ? _fbb.CreateVector> (_o->constBuffer.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuBuffer(*__va->__fbb, __va->__o->constBuffer[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _globalSizeIndex = _o->globalSizeIndex; + auto _globalSizeDivide = _o->globalSizeDivide.size() ? 
_fbb.CreateVector(_o->globalSizeDivide) : 0; + auto _requireSize = _o->requireSize; + return MNN::CreateGpuStage( + _fbb, + _pipeline, + _groupSize, + _inputIndexes, + _outputIndexes, + _middleBuffer, + _constBuffer, + _globalSizeIndex, + _globalSizeDivide, + _requireSize); +} + +inline GpuFunctionT *GpuFunction::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GpuFunctionT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void GpuFunction::UnPackTo(GpuFunctionT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = stags(); if (_e) { _o->stags.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->stags[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = name(); if (_e) _o->name = _e->str(); }; +} + +inline flatbuffers::Offset GpuFunction::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGpuFunction(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGpuFunction(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuFunctionT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _stags = _o->stags.size() ? _fbb.CreateVector> (_o->stags.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuStage(*__va->__fbb, __va->__o->stags[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _name = _o->name.empty() ? 
0 : _fbb.CreateString(_o->name); + return MNN::CreateGpuFunction( + _fbb, + _stags, + _name); +} + +inline GpuLibraryT *GpuLibrary::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GpuLibraryT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void GpuLibrary::UnPackTo(GpuLibraryT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = functions(); if (_e) { _o->functions.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->functions[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = pipeline(); if (_e) { _o->pipeline.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->pipeline[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = name(); if (_e) _o->name = _e->str(); }; +} + +inline flatbuffers::Offset GpuLibrary::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGpuLibrary(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGpuLibrary(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuLibraryT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _functions = _o->functions.size() ? _fbb.CreateVector> (_o->functions.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuFunction(*__va->__fbb, __va->__o->functions[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _pipeline = _o->pipeline.size() ? _fbb.CreateVector> (_o->pipeline.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuPipeline(*__va->__fbb, __va->__o->pipeline[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _name = _o->name.empty() ? 
0 : _fbb.CreateString(_o->name); + return MNN::CreateGpuLibrary( + _fbb, + _functions, + _pipeline, + _name); +} + +inline const flatbuffers::TypeTable *STORAGE_TYPETypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + STORAGE_TYPETypeTable + }; + static const char * const names[] = { + "BUFFER", + "UNIFORM", + "IMAGE" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ACCESS_TYPETypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ACCESS_TYPETypeTable + }; + static const char * const names[] = { + "READ_ONLY", + "WRITE_ONLY", + "READ_WRITE" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *GpuBufferTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 1 }, + { flatbuffers::ET_SEQUENCE, 0, 2 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ACCESS_TYPETypeTable, + STORAGE_TYPETypeTable, + BlobTypeTable + }; + static const char * const names[] = { + "access", + "storage", + "content" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *GpuPipelineTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_CHAR, 1, -1 }, + { flatbuffers::ET_CHAR, 1, -1 }, 
+ { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_STRING, 0, -1 } + }; + static const char * const names[] = { + "localSize", + "key", + "metal", + "vulkan", + "openglComputeShader", + "openclKernel" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 6, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *GpuStageTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_SEQUENCE, 1, 0 }, + { flatbuffers::ET_SEQUENCE, 1, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + GpuBufferTypeTable + }; + static const char * const names[] = { + "pipeline", + "groupSize", + "inputIndexes", + "outputIndexes", + "middleBuffer", + "constBuffer", + "globalSizeIndex", + "globalSizeDivide", + "requireSize" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *GpuFunctionTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 1, 0 }, + { flatbuffers::ET_STRING, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + GpuStageTypeTable + }; + static const char * const names[] = { + "stags", + "name" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *GpuLibraryTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 1, 0 }, + { flatbuffers::ET_SEQUENCE, 1, 1 }, + { flatbuffers::ET_STRING, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + GpuFunctionTypeTable, + 
GpuPipelineTypeTable + }; + static const char * const names[] = { + "functions", + "pipeline", + "name" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_ diff --git a/schema/current/MNN_generated.h b/schema/current/MNN_generated.h new file mode 100644 index 000000000..658e1a26f --- /dev/null +++ b/schema/current/MNN_generated.h @@ -0,0 +1,6287 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_MNN_MNN_H_ +#define FLATBUFFERS_GENERATED_MNN_MNN_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "CaffeOp_generated.h" +#include "GpuLibrary_generated.h" +#include "TFQuantizeOp_generated.h" +#include "Tensor_generated.h" +#include "TensorflowOp_generated.h" +#include "Type_generated.h" +#include "UserDefine_generated.h" + +namespace MNN { + +struct Plugin; +struct PluginT; + +struct Extra; +struct ExtraT; + +struct Op; +struct OpT; + +struct TensorDescribe; +struct TensorDescribeT; + +struct Net; +struct NetT; + +inline const flatbuffers::TypeTable *PluginTypeTable(); + +inline const flatbuffers::TypeTable *ExtraTypeTable(); + +inline const flatbuffers::TypeTable *OpTypeTable(); + +inline const flatbuffers::TypeTable *TensorDescribeTypeTable(); + +inline const flatbuffers::TypeTable *NetTypeTable(); + +enum OpType { + OpType_AbsVal = 0, + OpType_QuantizedAdd = 1, + OpType_ArgMax = 2, + OpType_AsString = 3, + OpType_InstanceNorm = 4, + OpType_BatchToSpaceND = 5, + OpType_Bias = 6, + OpType_BinaryOp = 7, + OpType_Bnll = 8, + OpType_Cast = 9, + OpType_Concat = 10, + OpType_Const = 11, + OpType_Convolution = 12, + OpType_ConvolutionDepthwise = 13, + OpType_Crop = 14, + OpType_CropAndResize = 15, + OpType_Cubic = 16, + OpType_Deconvolution = 17, + OpType_DeconvolutionDepthwise = 18, + OpType_Dequantize = 19, + OpType_DetectionOutput = 20, + OpType_Dropout 
= 21, + OpType_Eltwise = 22, + OpType_ELU = 23, + OpType_Embed = 24, + OpType_Exp = 25, + OpType_ExpandDims = 26, + OpType_Fill = 27, + OpType_Flatten = 28, + OpType_FloorMod = 29, + OpType_Gather = 30, + OpType_GatherV2 = 31, + OpType_Im2Seq = 32, + OpType_InnerProduct = 33, + OpType_Input = 34, + OpType_Interp = 35, + OpType_Log = 36, + OpType_LRN = 37, + OpType_LSTM = 38, + OpType_MatMul = 39, + OpType_MVN = 40, + OpType_NonMaxSuppression = 41, + OpType_NonMaxSuppressionV2 = 42, + OpType_Normalize = 43, + OpType_Pack = 44, + OpType_Padding = 45, + OpType_Permute = 46, + OpType_Pooling = 47, + OpType_Power = 48, + OpType_PReLU = 49, + OpType_PriorBox = 50, + OpType_Proposal = 51, + OpType_QuantizedAvgPool = 52, + OpType_QuantizedBiasAdd = 53, + OpType_QuantizedConcat = 54, + OpType_QuantizedDepthwiseConv2D = 55, + OpType_QuantizedLogistic = 56, + OpType_QuantizedMatMul = 57, + OpType_QuantizedMaxPool = 58, + OpType_QuantizedRelu = 59, + OpType_QuantizedRelu6 = 60, + OpType_QuantizedReshape = 61, + OpType_QuantizedSoftmax = 62, + OpType_QuantizeMaxMin = 63, + OpType_QuantizeV2 = 64, + OpType_Range = 65, + OpType_Rank = 66, + OpType_ReduceJoin = 67, + OpType_Reduction = 68, + OpType_ReLU = 69, + OpType_ReLU6 = 70, + OpType_RequantizationRange = 71, + OpType_Requantize = 72, + OpType_Reshape = 73, + OpType_Resize = 74, + OpType_RNN = 75, + OpType_ROIPooling = 76, + OpType_Scale = 77, + OpType_Selu = 78, + OpType_Seq2Out = 79, + OpType_Shape = 80, + OpType_Sigmoid = 81, + OpType_Size = 82, + OpType_Slice = 83, + OpType_SliceTf = 84, + OpType_Softmax = 85, + OpType_SpaceToBatchND = 86, + OpType_SpatialProduct = 87, + OpType_Split = 88, + OpType_SPP = 89, + OpType_Squeeze = 90, + OpType_StridedSlice = 91, + OpType_StringJoin = 92, + OpType_StringSplit = 93, + OpType_StringToNumber = 94, + OpType_TanH = 95, + OpType_TfQuantizedConv2D = 96, + OpType_Threshold = 97, + OpType_Tile = 98, + OpType_TopKV2 = 99, + OpType_Transpose = 100, + OpType_UnaryOp = 101, + OpType_Unpack 
= 102, + OpType_Where = 103, + OpType_Moments = 104, + OpType_RNNSequenceGRU = 105, + OpType_BatchMatMul = 106, + OpType_Unsqueeze = 107, + OpType_CosineSimilarity = 108, + OpType_DepthToSpace = 109, + OpType_SpaceToDepth = 110, + OpType_ReverseSequence = 111, + OpType_Pooling3D = 112, + OpType_Convolution3D = 113, + OpType_MatrixBandPart = 114, + OpType_GatherND = 115, + OpType_DetectionPostProcess = 116, + OpType_UnravelIndex = 117, + OpType_ScatterNd = 118, + OpType_OneHot = 119, + OpType_BroadcastTo = 120, + OpType_Dilation2D = 121, + OpType_MaxLayerCount = 128, + OpType_ConvertTensor = 129, + OpType_ArgMin = 130, + OpType_LinSpace = 131, + OpType_PLUGIN = 256, + OpType_Select = 257, + OpType_ZerosLike = 258, + OpType_Broastcast = 259, + OpType_SetDiff1D = 260, + OpType_ReluGrad = 261, + OpType_Relu6Grad = 262, + OpType_PoolGrad = 263, + OpType_SoftmaxGrad = 264, + OpType_Conv2DBackPropFilter = 265, + OpType_TrainableParam = 266, + OpType_BatchNorm = 267, + OpType_Extra = 512, + OpType_ConvInt8 = 513, + OpType_Int8ToFloat = 514, + OpType_DepthwiseConvInt8 = 515, + OpType_PoolInt8 = 516, + OpType_FloatToInt8 = 517, + OpType_EltwiseInt8 = 518, + OpType_MIN = OpType_AbsVal, + OpType_MAX = OpType_EltwiseInt8 +}; + +inline const OpType (&EnumValuesOpType())[145] { + static const OpType values[] = { + OpType_AbsVal, + OpType_QuantizedAdd, + OpType_ArgMax, + OpType_AsString, + OpType_InstanceNorm, + OpType_BatchToSpaceND, + OpType_Bias, + OpType_BinaryOp, + OpType_Bnll, + OpType_Cast, + OpType_Concat, + OpType_Const, + OpType_Convolution, + OpType_ConvolutionDepthwise, + OpType_Crop, + OpType_CropAndResize, + OpType_Cubic, + OpType_Deconvolution, + OpType_DeconvolutionDepthwise, + OpType_Dequantize, + OpType_DetectionOutput, + OpType_Dropout, + OpType_Eltwise, + OpType_ELU, + OpType_Embed, + OpType_Exp, + OpType_ExpandDims, + OpType_Fill, + OpType_Flatten, + OpType_FloorMod, + OpType_Gather, + OpType_GatherV2, + OpType_Im2Seq, + OpType_InnerProduct, + OpType_Input, + 
OpType_Interp, + OpType_Log, + OpType_LRN, + OpType_LSTM, + OpType_MatMul, + OpType_MVN, + OpType_NonMaxSuppression, + OpType_NonMaxSuppressionV2, + OpType_Normalize, + OpType_Pack, + OpType_Padding, + OpType_Permute, + OpType_Pooling, + OpType_Power, + OpType_PReLU, + OpType_PriorBox, + OpType_Proposal, + OpType_QuantizedAvgPool, + OpType_QuantizedBiasAdd, + OpType_QuantizedConcat, + OpType_QuantizedDepthwiseConv2D, + OpType_QuantizedLogistic, + OpType_QuantizedMatMul, + OpType_QuantizedMaxPool, + OpType_QuantizedRelu, + OpType_QuantizedRelu6, + OpType_QuantizedReshape, + OpType_QuantizedSoftmax, + OpType_QuantizeMaxMin, + OpType_QuantizeV2, + OpType_Range, + OpType_Rank, + OpType_ReduceJoin, + OpType_Reduction, + OpType_ReLU, + OpType_ReLU6, + OpType_RequantizationRange, + OpType_Requantize, + OpType_Reshape, + OpType_Resize, + OpType_RNN, + OpType_ROIPooling, + OpType_Scale, + OpType_Selu, + OpType_Seq2Out, + OpType_Shape, + OpType_Sigmoid, + OpType_Size, + OpType_Slice, + OpType_SliceTf, + OpType_Softmax, + OpType_SpaceToBatchND, + OpType_SpatialProduct, + OpType_Split, + OpType_SPP, + OpType_Squeeze, + OpType_StridedSlice, + OpType_StringJoin, + OpType_StringSplit, + OpType_StringToNumber, + OpType_TanH, + OpType_TfQuantizedConv2D, + OpType_Threshold, + OpType_Tile, + OpType_TopKV2, + OpType_Transpose, + OpType_UnaryOp, + OpType_Unpack, + OpType_Where, + OpType_Moments, + OpType_RNNSequenceGRU, + OpType_BatchMatMul, + OpType_Unsqueeze, + OpType_CosineSimilarity, + OpType_DepthToSpace, + OpType_SpaceToDepth, + OpType_ReverseSequence, + OpType_Pooling3D, + OpType_Convolution3D, + OpType_MatrixBandPart, + OpType_GatherND, + OpType_DetectionPostProcess, + OpType_UnravelIndex, + OpType_ScatterNd, + OpType_OneHot, + OpType_BroadcastTo, + OpType_Dilation2D, + OpType_MaxLayerCount, + OpType_ConvertTensor, + OpType_ArgMin, + OpType_LinSpace, + OpType_PLUGIN, + OpType_Select, + OpType_ZerosLike, + OpType_Broastcast, + OpType_SetDiff1D, + OpType_ReluGrad, + 
OpType_Relu6Grad, + OpType_PoolGrad, + OpType_SoftmaxGrad, + OpType_Conv2DBackPropFilter, + OpType_TrainableParam, + OpType_BatchNorm, + OpType_Extra, + OpType_ConvInt8, + OpType_Int8ToFloat, + OpType_DepthwiseConvInt8, + OpType_PoolInt8, + OpType_FloatToInt8, + OpType_EltwiseInt8 + }; + return values; +} + +inline const char * const *EnumNamesOpType() { + static const char * const names[] = { + "AbsVal", + "QuantizedAdd", + "ArgMax", + "AsString", + "InstanceNorm", + "BatchToSpaceND", + "Bias", + "BinaryOp", + "Bnll", + "Cast", + "Concat", + "Const", + "Convolution", + "ConvolutionDepthwise", + "Crop", + "CropAndResize", + "Cubic", + "Deconvolution", + "DeconvolutionDepthwise", + "Dequantize", + "DetectionOutput", + "Dropout", + "Eltwise", + "ELU", + "Embed", + "Exp", + "ExpandDims", + "Fill", + "Flatten", + "FloorMod", + "Gather", + "GatherV2", + "Im2Seq", + "InnerProduct", + "Input", + "Interp", + "Log", + "LRN", + "LSTM", + "MatMul", + "MVN", + "NonMaxSuppression", + "NonMaxSuppressionV2", + "Normalize", + "Pack", + "Padding", + "Permute", + "Pooling", + "Power", + "PReLU", + "PriorBox", + "Proposal", + "QuantizedAvgPool", + "QuantizedBiasAdd", + "QuantizedConcat", + "QuantizedDepthwiseConv2D", + "QuantizedLogistic", + "QuantizedMatMul", + "QuantizedMaxPool", + "QuantizedRelu", + "QuantizedRelu6", + "QuantizedReshape", + "QuantizedSoftmax", + "QuantizeMaxMin", + "QuantizeV2", + "Range", + "Rank", + "ReduceJoin", + "Reduction", + "ReLU", + "ReLU6", + "RequantizationRange", + "Requantize", + "Reshape", + "Resize", + "RNN", + "ROIPooling", + "Scale", + "Selu", + "Seq2Out", + "Shape", + "Sigmoid", + "Size", + "Slice", + "SliceTf", + "Softmax", + "SpaceToBatchND", + "SpatialProduct", + "Split", + "SPP", + "Squeeze", + "StridedSlice", + "StringJoin", + "StringSplit", + "StringToNumber", + "TanH", + "TfQuantizedConv2D", + "Threshold", + "Tile", + "TopKV2", + "Transpose", + "UnaryOp", + "Unpack", + "Where", + "Moments", + "RNNSequenceGRU", + "BatchMatMul", + 
"Unsqueeze", + "CosineSimilarity", + "DepthToSpace", + "SpaceToDepth", + "ReverseSequence", + "Pooling3D", + "Convolution3D", + "MatrixBandPart", + "GatherND", + "DetectionPostProcess", + "UnravelIndex", + "ScatterNd", + "OneHot", + "BroadcastTo", + "Dilation2D", + "", + "", + "", + "", + "", + "", + "MaxLayerCount", + "ConvertTensor", + "ArgMin", + "LinSpace", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "PLUGIN", + "Select", + "ZerosLike", + "Broastcast", + "SetDiff1D", + "ReluGrad", + "Relu6Grad", + "PoolGrad", + "SoftmaxGrad", + "Conv2DBackPropFilter", + "TrainableParam", + "BatchNorm", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + 
"", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "Extra", + "ConvInt8", + "Int8ToFloat", + "DepthwiseConvInt8", + "PoolInt8", + "FloatToInt8", + "EltwiseInt8", + nullptr + }; + return names; +} + +inline const char *EnumNameOpType(OpType e) { + if (e < OpType_AbsVal || e > OpType_EltwiseInt8) return ""; + const size_t index = static_cast(e); + return EnumNamesOpType()[index]; +} + +enum OpParameter { + OpParameter_NONE = 0, + OpParameter_QuantizedAdd = 1, + OpParameter_ArgMax = 2, + OpParameter_AsString = 3, + OpParameter_Axis = 4, + OpParameter_BatchNorm = 5, + OpParameter_BinaryOp = 6, + OpParameter_Blob = 7, + OpParameter_CastParam = 8, + OpParameter_Convolution2D = 9, + OpParameter_Crop = 10, + OpParameter_CropAndResize = 11, + OpParameter_Dequantize = 12, + OpParameter_DetectionOutput = 13, + OpParameter_Eltwise = 14, + OpParameter_ExpandDims = 15, + OpParameter_Fill = 16, + OpParameter_Flatten = 17, + OpParameter_Gather = 18, + OpParameter_GatherV2 = 19, + OpParameter_InnerProduct = 20, + OpParameter_Input = 21, + OpParameter_Interp = 22, + OpParameter_LRN = 23, + OpParameter_LSTM = 24, + OpParameter_MatMul = 25, + OpParameter_NonMaxSuppressionV2 = 26, + OpParameter_Normalize = 27, + OpParameter_PackParam = 28, + OpParameter_Permute = 29, + 
OpParameter_Plugin = 30, + OpParameter_Pool = 31, + OpParameter_PRelu = 32, + OpParameter_PriorBox = 33, + OpParameter_Proposal = 34, + OpParameter_QuantizedAvgPool = 35, + OpParameter_QuantizedBiasAdd = 36, + OpParameter_QuantizedConcat = 37, + OpParameter_QuantizedLogistic = 38, + OpParameter_QuantizedMatMul = 39, + OpParameter_QuantizedMaxPool = 40, + OpParameter_QuantizedRelu = 41, + OpParameter_QuantizedRelu6 = 42, + OpParameter_QuantizedReshape = 43, + OpParameter_QuantizedSoftmax = 44, + OpParameter_QuantizeMaxMin = 45, + OpParameter_QuantizeV2 = 46, + OpParameter_Range = 47, + OpParameter_Rank = 48, + OpParameter_ReduceJoin = 49, + OpParameter_ReductionParam = 50, + OpParameter_Relu = 51, + OpParameter_Relu6 = 52, + OpParameter_RequantizationRange = 53, + OpParameter_Requantize = 54, + OpParameter_Reshape = 55, + OpParameter_Resize = 56, + OpParameter_RoiPooling = 57, + OpParameter_Scale = 58, + OpParameter_Selu = 59, + OpParameter_Size = 60, + OpParameter_Slice = 61, + OpParameter_SliceTf = 62, + OpParameter_SpaceBatch = 63, + OpParameter_SqueezeParam = 64, + OpParameter_StridedSliceParam = 65, + OpParameter_TensorConvertInfo = 66, + OpParameter_TfQuantizedConv2D = 67, + OpParameter_TopKV2 = 68, + OpParameter_Transpose = 69, + OpParameter_UnaryOp = 70, + OpParameter_MomentsParam = 71, + OpParameter_RNNParam = 72, + OpParameter_BatchMatMulParam = 73, + OpParameter_QuantizedFloatParam = 74, + OpParameter_DepthSpaceParam = 75, + OpParameter_EltwiseInt8 = 76, + OpParameter_ReverseSequenceParam = 77, + OpParameter_Extra = 78, + OpParameter_Pool3D = 79, + OpParameter_Convolution3D = 80, + OpParameter_ELU = 81, + OpParameter_DetectionPostProcessParam = 82, + OpParameter_OneHotParam = 83, + OpParameter_PadParam = 84, + OpParameter_MIN = OpParameter_NONE, + OpParameter_MAX = OpParameter_PadParam +}; + +inline const OpParameter (&EnumValuesOpParameter())[85] { + static const OpParameter values[] = { + OpParameter_NONE, + OpParameter_QuantizedAdd, + 
OpParameter_ArgMax, + OpParameter_AsString, + OpParameter_Axis, + OpParameter_BatchNorm, + OpParameter_BinaryOp, + OpParameter_Blob, + OpParameter_CastParam, + OpParameter_Convolution2D, + OpParameter_Crop, + OpParameter_CropAndResize, + OpParameter_Dequantize, + OpParameter_DetectionOutput, + OpParameter_Eltwise, + OpParameter_ExpandDims, + OpParameter_Fill, + OpParameter_Flatten, + OpParameter_Gather, + OpParameter_GatherV2, + OpParameter_InnerProduct, + OpParameter_Input, + OpParameter_Interp, + OpParameter_LRN, + OpParameter_LSTM, + OpParameter_MatMul, + OpParameter_NonMaxSuppressionV2, + OpParameter_Normalize, + OpParameter_PackParam, + OpParameter_Permute, + OpParameter_Plugin, + OpParameter_Pool, + OpParameter_PRelu, + OpParameter_PriorBox, + OpParameter_Proposal, + OpParameter_QuantizedAvgPool, + OpParameter_QuantizedBiasAdd, + OpParameter_QuantizedConcat, + OpParameter_QuantizedLogistic, + OpParameter_QuantizedMatMul, + OpParameter_QuantizedMaxPool, + OpParameter_QuantizedRelu, + OpParameter_QuantizedRelu6, + OpParameter_QuantizedReshape, + OpParameter_QuantizedSoftmax, + OpParameter_QuantizeMaxMin, + OpParameter_QuantizeV2, + OpParameter_Range, + OpParameter_Rank, + OpParameter_ReduceJoin, + OpParameter_ReductionParam, + OpParameter_Relu, + OpParameter_Relu6, + OpParameter_RequantizationRange, + OpParameter_Requantize, + OpParameter_Reshape, + OpParameter_Resize, + OpParameter_RoiPooling, + OpParameter_Scale, + OpParameter_Selu, + OpParameter_Size, + OpParameter_Slice, + OpParameter_SliceTf, + OpParameter_SpaceBatch, + OpParameter_SqueezeParam, + OpParameter_StridedSliceParam, + OpParameter_TensorConvertInfo, + OpParameter_TfQuantizedConv2D, + OpParameter_TopKV2, + OpParameter_Transpose, + OpParameter_UnaryOp, + OpParameter_MomentsParam, + OpParameter_RNNParam, + OpParameter_BatchMatMulParam, + OpParameter_QuantizedFloatParam, + OpParameter_DepthSpaceParam, + OpParameter_EltwiseInt8, + OpParameter_ReverseSequenceParam, + OpParameter_Extra, + 
OpParameter_Pool3D, + OpParameter_Convolution3D, + OpParameter_ELU, + OpParameter_DetectionPostProcessParam, + OpParameter_OneHotParam, + OpParameter_PadParam + }; + return values; +} + +inline const char * const *EnumNamesOpParameter() { + static const char * const names[] = { + "NONE", + "QuantizedAdd", + "ArgMax", + "AsString", + "Axis", + "BatchNorm", + "BinaryOp", + "Blob", + "CastParam", + "Convolution2D", + "Crop", + "CropAndResize", + "Dequantize", + "DetectionOutput", + "Eltwise", + "ExpandDims", + "Fill", + "Flatten", + "Gather", + "GatherV2", + "InnerProduct", + "Input", + "Interp", + "LRN", + "LSTM", + "MatMul", + "NonMaxSuppressionV2", + "Normalize", + "PackParam", + "Permute", + "Plugin", + "Pool", + "PRelu", + "PriorBox", + "Proposal", + "QuantizedAvgPool", + "QuantizedBiasAdd", + "QuantizedConcat", + "QuantizedLogistic", + "QuantizedMatMul", + "QuantizedMaxPool", + "QuantizedRelu", + "QuantizedRelu6", + "QuantizedReshape", + "QuantizedSoftmax", + "QuantizeMaxMin", + "QuantizeV2", + "Range", + "Rank", + "ReduceJoin", + "ReductionParam", + "Relu", + "Relu6", + "RequantizationRange", + "Requantize", + "Reshape", + "Resize", + "RoiPooling", + "Scale", + "Selu", + "Size", + "Slice", + "SliceTf", + "SpaceBatch", + "SqueezeParam", + "StridedSliceParam", + "TensorConvertInfo", + "TfQuantizedConv2D", + "TopKV2", + "Transpose", + "UnaryOp", + "MomentsParam", + "RNNParam", + "BatchMatMulParam", + "QuantizedFloatParam", + "DepthSpaceParam", + "EltwiseInt8", + "ReverseSequenceParam", + "Extra", + "Pool3D", + "Convolution3D", + "ELU", + "DetectionPostProcessParam", + "OneHotParam", + "PadParam", + nullptr + }; + return names; +} + +inline const char *EnumNameOpParameter(OpParameter e) { + if (e < OpParameter_NONE || e > OpParameter_PadParam) return ""; + const size_t index = static_cast(e); + return EnumNamesOpParameter()[index]; +} + +template struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_NONE; +}; + +template<> struct 
OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedAdd; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_ArgMax; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_AsString; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Axis; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_BatchNorm; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_BinaryOp; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Blob; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_CastParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Convolution2D; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Crop; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_CropAndResize; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Dequantize; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_DetectionOutput; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Eltwise; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_ExpandDims; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Fill; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Flatten; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Gather; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = 
OpParameter_GatherV2; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_InnerProduct; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Input; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Interp; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_LRN; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_LSTM; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_MatMul; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_NonMaxSuppressionV2; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Normalize; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_PackParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Permute; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Plugin; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Pool; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_PRelu; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_PriorBox; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Proposal; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedAvgPool; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedBiasAdd; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedConcat; +}; + +template<> struct 
OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedLogistic; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedMatMul; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedMaxPool; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedRelu; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedRelu6; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedReshape; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizedSoftmax; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizeMaxMin; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_QuantizeV2; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Range; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Rank; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_ReduceJoin; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_ReductionParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Relu; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Relu6; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_RequantizationRange; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Requantize; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Reshape; +}; + +template<> struct 
OpParameterTraits { + static const OpParameter enum_value = OpParameter_Resize; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_RoiPooling; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Scale; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Selu; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Size; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Slice; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_SliceTf; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_SpaceBatch; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_SqueezeParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_StridedSliceParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_TensorConvertInfo; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_TfQuantizedConv2D; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_TopKV2; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Transpose; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_UnaryOp; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_MomentsParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_RNNParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_BatchMatMulParam; +}; + +template<> struct OpParameterTraits { + static const 
OpParameter enum_value = OpParameter_QuantizedFloatParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_DepthSpaceParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_EltwiseInt8; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_ReverseSequenceParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Extra; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Pool3D; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_Convolution3D; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_ELU; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_DetectionPostProcessParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_OneHotParam; +}; + +template<> struct OpParameterTraits { + static const OpParameter enum_value = OpParameter_PadParam; +}; + +struct OpParameterUnion { + OpParameter type; + void *value; + + OpParameterUnion() : type(OpParameter_NONE), value(nullptr) {} + OpParameterUnion(OpParameterUnion&& u) FLATBUFFERS_NOEXCEPT : + type(OpParameter_NONE), value(nullptr) + { std::swap(type, u.type); std::swap(value, u.value); } + OpParameterUnion(const OpParameterUnion &) FLATBUFFERS_NOEXCEPT; + OpParameterUnion &operator=(const OpParameterUnion &u) FLATBUFFERS_NOEXCEPT + { OpParameterUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } + OpParameterUnion &operator=(OpParameterUnion &&u) FLATBUFFERS_NOEXCEPT + { std::swap(type, u.type); std::swap(value, u.value); return *this; } + ~OpParameterUnion() { Reset(); } + + void Reset(); + +#ifndef FLATBUFFERS_CPP98_STL + template + void Set(T&& val) { + using RT = typename 
std::remove_reference::type; + Reset(); + type = OpParameterTraits::enum_value; + if (type != OpParameter_NONE) { + value = new RT(std::forward(val)); + } + } +#endif // FLATBUFFERS_CPP98_STL + + static void *UnPack(const void *obj, OpParameter type, const flatbuffers::resolver_function_t *resolver); + flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + + QuantizedAddT *AsQuantizedAdd() { + return type == OpParameter_QuantizedAdd ? + reinterpret_cast(value) : nullptr; + } + const QuantizedAddT *AsQuantizedAdd() const { + return type == OpParameter_QuantizedAdd ? + reinterpret_cast(value) : nullptr; + } + ArgMaxT *AsArgMax() { + return type == OpParameter_ArgMax ? + reinterpret_cast(value) : nullptr; + } + const ArgMaxT *AsArgMax() const { + return type == OpParameter_ArgMax ? + reinterpret_cast(value) : nullptr; + } + AsStringT *AsAsString() { + return type == OpParameter_AsString ? + reinterpret_cast(value) : nullptr; + } + const AsStringT *AsAsString() const { + return type == OpParameter_AsString ? + reinterpret_cast(value) : nullptr; + } + AxisT *AsAxis() { + return type == OpParameter_Axis ? + reinterpret_cast(value) : nullptr; + } + const AxisT *AsAxis() const { + return type == OpParameter_Axis ? + reinterpret_cast(value) : nullptr; + } + BatchNormT *AsBatchNorm() { + return type == OpParameter_BatchNorm ? + reinterpret_cast(value) : nullptr; + } + const BatchNormT *AsBatchNorm() const { + return type == OpParameter_BatchNorm ? + reinterpret_cast(value) : nullptr; + } + BinaryOpT *AsBinaryOp() { + return type == OpParameter_BinaryOp ? + reinterpret_cast(value) : nullptr; + } + const BinaryOpT *AsBinaryOp() const { + return type == OpParameter_BinaryOp ? + reinterpret_cast(value) : nullptr; + } + BlobT *AsBlob() { + return type == OpParameter_Blob ? + reinterpret_cast(value) : nullptr; + } + const BlobT *AsBlob() const { + return type == OpParameter_Blob ? 
+ reinterpret_cast(value) : nullptr; + } + CastParamT *AsCastParam() { + return type == OpParameter_CastParam ? + reinterpret_cast(value) : nullptr; + } + const CastParamT *AsCastParam() const { + return type == OpParameter_CastParam ? + reinterpret_cast(value) : nullptr; + } + Convolution2DT *AsConvolution2D() { + return type == OpParameter_Convolution2D ? + reinterpret_cast(value) : nullptr; + } + const Convolution2DT *AsConvolution2D() const { + return type == OpParameter_Convolution2D ? + reinterpret_cast(value) : nullptr; + } + CropT *AsCrop() { + return type == OpParameter_Crop ? + reinterpret_cast(value) : nullptr; + } + const CropT *AsCrop() const { + return type == OpParameter_Crop ? + reinterpret_cast(value) : nullptr; + } + CropAndResizeT *AsCropAndResize() { + return type == OpParameter_CropAndResize ? + reinterpret_cast(value) : nullptr; + } + const CropAndResizeT *AsCropAndResize() const { + return type == OpParameter_CropAndResize ? + reinterpret_cast(value) : nullptr; + } + DequantizeT *AsDequantize() { + return type == OpParameter_Dequantize ? + reinterpret_cast(value) : nullptr; + } + const DequantizeT *AsDequantize() const { + return type == OpParameter_Dequantize ? + reinterpret_cast(value) : nullptr; + } + DetectionOutputT *AsDetectionOutput() { + return type == OpParameter_DetectionOutput ? + reinterpret_cast(value) : nullptr; + } + const DetectionOutputT *AsDetectionOutput() const { + return type == OpParameter_DetectionOutput ? + reinterpret_cast(value) : nullptr; + } + EltwiseT *AsEltwise() { + return type == OpParameter_Eltwise ? + reinterpret_cast(value) : nullptr; + } + const EltwiseT *AsEltwise() const { + return type == OpParameter_Eltwise ? + reinterpret_cast(value) : nullptr; + } + ExpandDimsT *AsExpandDims() { + return type == OpParameter_ExpandDims ? + reinterpret_cast(value) : nullptr; + } + const ExpandDimsT *AsExpandDims() const { + return type == OpParameter_ExpandDims ? 
+ reinterpret_cast(value) : nullptr; + } + FillT *AsFill() { + return type == OpParameter_Fill ? + reinterpret_cast(value) : nullptr; + } + const FillT *AsFill() const { + return type == OpParameter_Fill ? + reinterpret_cast(value) : nullptr; + } + FlattenT *AsFlatten() { + return type == OpParameter_Flatten ? + reinterpret_cast(value) : nullptr; + } + const FlattenT *AsFlatten() const { + return type == OpParameter_Flatten ? + reinterpret_cast(value) : nullptr; + } + GatherT *AsGather() { + return type == OpParameter_Gather ? + reinterpret_cast(value) : nullptr; + } + const GatherT *AsGather() const { + return type == OpParameter_Gather ? + reinterpret_cast(value) : nullptr; + } + GatherV2T *AsGatherV2() { + return type == OpParameter_GatherV2 ? + reinterpret_cast(value) : nullptr; + } + const GatherV2T *AsGatherV2() const { + return type == OpParameter_GatherV2 ? + reinterpret_cast(value) : nullptr; + } + InnerProductT *AsInnerProduct() { + return type == OpParameter_InnerProduct ? + reinterpret_cast(value) : nullptr; + } + const InnerProductT *AsInnerProduct() const { + return type == OpParameter_InnerProduct ? + reinterpret_cast(value) : nullptr; + } + InputT *AsInput() { + return type == OpParameter_Input ? + reinterpret_cast(value) : nullptr; + } + const InputT *AsInput() const { + return type == OpParameter_Input ? + reinterpret_cast(value) : nullptr; + } + InterpT *AsInterp() { + return type == OpParameter_Interp ? + reinterpret_cast(value) : nullptr; + } + const InterpT *AsInterp() const { + return type == OpParameter_Interp ? + reinterpret_cast(value) : nullptr; + } + LRNT *AsLRN() { + return type == OpParameter_LRN ? + reinterpret_cast(value) : nullptr; + } + const LRNT *AsLRN() const { + return type == OpParameter_LRN ? + reinterpret_cast(value) : nullptr; + } + LSTMT *AsLSTM() { + return type == OpParameter_LSTM ? + reinterpret_cast(value) : nullptr; + } + const LSTMT *AsLSTM() const { + return type == OpParameter_LSTM ? 
+ reinterpret_cast(value) : nullptr; + } + MatMulT *AsMatMul() { + return type == OpParameter_MatMul ? + reinterpret_cast(value) : nullptr; + } + const MatMulT *AsMatMul() const { + return type == OpParameter_MatMul ? + reinterpret_cast(value) : nullptr; + } + NonMaxSuppressionV2T *AsNonMaxSuppressionV2() { + return type == OpParameter_NonMaxSuppressionV2 ? + reinterpret_cast(value) : nullptr; + } + const NonMaxSuppressionV2T *AsNonMaxSuppressionV2() const { + return type == OpParameter_NonMaxSuppressionV2 ? + reinterpret_cast(value) : nullptr; + } + NormalizeT *AsNormalize() { + return type == OpParameter_Normalize ? + reinterpret_cast(value) : nullptr; + } + const NormalizeT *AsNormalize() const { + return type == OpParameter_Normalize ? + reinterpret_cast(value) : nullptr; + } + PackParamT *AsPackParam() { + return type == OpParameter_PackParam ? + reinterpret_cast(value) : nullptr; + } + const PackParamT *AsPackParam() const { + return type == OpParameter_PackParam ? + reinterpret_cast(value) : nullptr; + } + PermuteT *AsPermute() { + return type == OpParameter_Permute ? + reinterpret_cast(value) : nullptr; + } + const PermuteT *AsPermute() const { + return type == OpParameter_Permute ? + reinterpret_cast(value) : nullptr; + } + PluginT *AsPlugin() { + return type == OpParameter_Plugin ? + reinterpret_cast(value) : nullptr; + } + const PluginT *AsPlugin() const { + return type == OpParameter_Plugin ? + reinterpret_cast(value) : nullptr; + } + PoolT *AsPool() { + return type == OpParameter_Pool ? + reinterpret_cast(value) : nullptr; + } + const PoolT *AsPool() const { + return type == OpParameter_Pool ? + reinterpret_cast(value) : nullptr; + } + PReluT *AsPRelu() { + return type == OpParameter_PRelu ? + reinterpret_cast(value) : nullptr; + } + const PReluT *AsPRelu() const { + return type == OpParameter_PRelu ? + reinterpret_cast(value) : nullptr; + } + PriorBoxT *AsPriorBox() { + return type == OpParameter_PriorBox ? 
+ reinterpret_cast(value) : nullptr; + } + const PriorBoxT *AsPriorBox() const { + return type == OpParameter_PriorBox ? + reinterpret_cast(value) : nullptr; + } + ProposalT *AsProposal() { + return type == OpParameter_Proposal ? + reinterpret_cast(value) : nullptr; + } + const ProposalT *AsProposal() const { + return type == OpParameter_Proposal ? + reinterpret_cast(value) : nullptr; + } + QuantizedAvgPoolT *AsQuantizedAvgPool() { + return type == OpParameter_QuantizedAvgPool ? + reinterpret_cast(value) : nullptr; + } + const QuantizedAvgPoolT *AsQuantizedAvgPool() const { + return type == OpParameter_QuantizedAvgPool ? + reinterpret_cast(value) : nullptr; + } + QuantizedBiasAddT *AsQuantizedBiasAdd() { + return type == OpParameter_QuantizedBiasAdd ? + reinterpret_cast(value) : nullptr; + } + const QuantizedBiasAddT *AsQuantizedBiasAdd() const { + return type == OpParameter_QuantizedBiasAdd ? + reinterpret_cast(value) : nullptr; + } + QuantizedConcatT *AsQuantizedConcat() { + return type == OpParameter_QuantizedConcat ? + reinterpret_cast(value) : nullptr; + } + const QuantizedConcatT *AsQuantizedConcat() const { + return type == OpParameter_QuantizedConcat ? + reinterpret_cast(value) : nullptr; + } + QuantizedLogisticT *AsQuantizedLogistic() { + return type == OpParameter_QuantizedLogistic ? + reinterpret_cast(value) : nullptr; + } + const QuantizedLogisticT *AsQuantizedLogistic() const { + return type == OpParameter_QuantizedLogistic ? + reinterpret_cast(value) : nullptr; + } + QuantizedMatMulT *AsQuantizedMatMul() { + return type == OpParameter_QuantizedMatMul ? + reinterpret_cast(value) : nullptr; + } + const QuantizedMatMulT *AsQuantizedMatMul() const { + return type == OpParameter_QuantizedMatMul ? + reinterpret_cast(value) : nullptr; + } + QuantizedMaxPoolT *AsQuantizedMaxPool() { + return type == OpParameter_QuantizedMaxPool ? 
+ reinterpret_cast(value) : nullptr; + } + const QuantizedMaxPoolT *AsQuantizedMaxPool() const { + return type == OpParameter_QuantizedMaxPool ? + reinterpret_cast(value) : nullptr; + } + QuantizedReluT *AsQuantizedRelu() { + return type == OpParameter_QuantizedRelu ? + reinterpret_cast(value) : nullptr; + } + const QuantizedReluT *AsQuantizedRelu() const { + return type == OpParameter_QuantizedRelu ? + reinterpret_cast(value) : nullptr; + } + QuantizedRelu6T *AsQuantizedRelu6() { + return type == OpParameter_QuantizedRelu6 ? + reinterpret_cast(value) : nullptr; + } + const QuantizedRelu6T *AsQuantizedRelu6() const { + return type == OpParameter_QuantizedRelu6 ? + reinterpret_cast(value) : nullptr; + } + QuantizedReshapeT *AsQuantizedReshape() { + return type == OpParameter_QuantizedReshape ? + reinterpret_cast(value) : nullptr; + } + const QuantizedReshapeT *AsQuantizedReshape() const { + return type == OpParameter_QuantizedReshape ? + reinterpret_cast(value) : nullptr; + } + QuantizedSoftmaxT *AsQuantizedSoftmax() { + return type == OpParameter_QuantizedSoftmax ? + reinterpret_cast(value) : nullptr; + } + const QuantizedSoftmaxT *AsQuantizedSoftmax() const { + return type == OpParameter_QuantizedSoftmax ? + reinterpret_cast(value) : nullptr; + } + QuantizeMaxMinT *AsQuantizeMaxMin() { + return type == OpParameter_QuantizeMaxMin ? + reinterpret_cast(value) : nullptr; + } + const QuantizeMaxMinT *AsQuantizeMaxMin() const { + return type == OpParameter_QuantizeMaxMin ? + reinterpret_cast(value) : nullptr; + } + QuantizeV2T *AsQuantizeV2() { + return type == OpParameter_QuantizeV2 ? + reinterpret_cast(value) : nullptr; + } + const QuantizeV2T *AsQuantizeV2() const { + return type == OpParameter_QuantizeV2 ? + reinterpret_cast(value) : nullptr; + } + RangeT *AsRange() { + return type == OpParameter_Range ? + reinterpret_cast(value) : nullptr; + } + const RangeT *AsRange() const { + return type == OpParameter_Range ? 
+ reinterpret_cast(value) : nullptr; + } + RankT *AsRank() { + return type == OpParameter_Rank ? + reinterpret_cast(value) : nullptr; + } + const RankT *AsRank() const { + return type == OpParameter_Rank ? + reinterpret_cast(value) : nullptr; + } + ReduceJoinT *AsReduceJoin() { + return type == OpParameter_ReduceJoin ? + reinterpret_cast(value) : nullptr; + } + const ReduceJoinT *AsReduceJoin() const { + return type == OpParameter_ReduceJoin ? + reinterpret_cast(value) : nullptr; + } + ReductionParamT *AsReductionParam() { + return type == OpParameter_ReductionParam ? + reinterpret_cast(value) : nullptr; + } + const ReductionParamT *AsReductionParam() const { + return type == OpParameter_ReductionParam ? + reinterpret_cast(value) : nullptr; + } + ReluT *AsRelu() { + return type == OpParameter_Relu ? + reinterpret_cast(value) : nullptr; + } + const ReluT *AsRelu() const { + return type == OpParameter_Relu ? + reinterpret_cast(value) : nullptr; + } + Relu6T *AsRelu6() { + return type == OpParameter_Relu6 ? + reinterpret_cast(value) : nullptr; + } + const Relu6T *AsRelu6() const { + return type == OpParameter_Relu6 ? + reinterpret_cast(value) : nullptr; + } + RequantizationRangeT *AsRequantizationRange() { + return type == OpParameter_RequantizationRange ? + reinterpret_cast(value) : nullptr; + } + const RequantizationRangeT *AsRequantizationRange() const { + return type == OpParameter_RequantizationRange ? + reinterpret_cast(value) : nullptr; + } + RequantizeT *AsRequantize() { + return type == OpParameter_Requantize ? + reinterpret_cast(value) : nullptr; + } + const RequantizeT *AsRequantize() const { + return type == OpParameter_Requantize ? + reinterpret_cast(value) : nullptr; + } + ReshapeT *AsReshape() { + return type == OpParameter_Reshape ? + reinterpret_cast(value) : nullptr; + } + const ReshapeT *AsReshape() const { + return type == OpParameter_Reshape ? + reinterpret_cast(value) : nullptr; + } + ResizeT *AsResize() { + return type == OpParameter_Resize ? 
+ reinterpret_cast(value) : nullptr; + } + const ResizeT *AsResize() const { + return type == OpParameter_Resize ? + reinterpret_cast(value) : nullptr; + } + RoiPoolingT *AsRoiPooling() { + return type == OpParameter_RoiPooling ? + reinterpret_cast(value) : nullptr; + } + const RoiPoolingT *AsRoiPooling() const { + return type == OpParameter_RoiPooling ? + reinterpret_cast(value) : nullptr; + } + ScaleT *AsScale() { + return type == OpParameter_Scale ? + reinterpret_cast(value) : nullptr; + } + const ScaleT *AsScale() const { + return type == OpParameter_Scale ? + reinterpret_cast(value) : nullptr; + } + SeluT *AsSelu() { + return type == OpParameter_Selu ? + reinterpret_cast(value) : nullptr; + } + const SeluT *AsSelu() const { + return type == OpParameter_Selu ? + reinterpret_cast(value) : nullptr; + } + SizeT *AsSize() { + return type == OpParameter_Size ? + reinterpret_cast(value) : nullptr; + } + const SizeT *AsSize() const { + return type == OpParameter_Size ? + reinterpret_cast(value) : nullptr; + } + SliceT *AsSlice() { + return type == OpParameter_Slice ? + reinterpret_cast(value) : nullptr; + } + const SliceT *AsSlice() const { + return type == OpParameter_Slice ? + reinterpret_cast(value) : nullptr; + } + SliceTfT *AsSliceTf() { + return type == OpParameter_SliceTf ? + reinterpret_cast(value) : nullptr; + } + const SliceTfT *AsSliceTf() const { + return type == OpParameter_SliceTf ? + reinterpret_cast(value) : nullptr; + } + SpaceBatchT *AsSpaceBatch() { + return type == OpParameter_SpaceBatch ? + reinterpret_cast(value) : nullptr; + } + const SpaceBatchT *AsSpaceBatch() const { + return type == OpParameter_SpaceBatch ? + reinterpret_cast(value) : nullptr; + } + SqueezeParamT *AsSqueezeParam() { + return type == OpParameter_SqueezeParam ? + reinterpret_cast(value) : nullptr; + } + const SqueezeParamT *AsSqueezeParam() const { + return type == OpParameter_SqueezeParam ? 
+ reinterpret_cast(value) : nullptr; + } + StridedSliceParamT *AsStridedSliceParam() { + return type == OpParameter_StridedSliceParam ? + reinterpret_cast(value) : nullptr; + } + const StridedSliceParamT *AsStridedSliceParam() const { + return type == OpParameter_StridedSliceParam ? + reinterpret_cast(value) : nullptr; + } + TensorConvertInfoT *AsTensorConvertInfo() { + return type == OpParameter_TensorConvertInfo ? + reinterpret_cast(value) : nullptr; + } + const TensorConvertInfoT *AsTensorConvertInfo() const { + return type == OpParameter_TensorConvertInfo ? + reinterpret_cast(value) : nullptr; + } + TfQuantizedConv2DT *AsTfQuantizedConv2D() { + return type == OpParameter_TfQuantizedConv2D ? + reinterpret_cast(value) : nullptr; + } + const TfQuantizedConv2DT *AsTfQuantizedConv2D() const { + return type == OpParameter_TfQuantizedConv2D ? + reinterpret_cast(value) : nullptr; + } + TopKV2T *AsTopKV2() { + return type == OpParameter_TopKV2 ? + reinterpret_cast(value) : nullptr; + } + const TopKV2T *AsTopKV2() const { + return type == OpParameter_TopKV2 ? + reinterpret_cast(value) : nullptr; + } + TransposeT *AsTranspose() { + return type == OpParameter_Transpose ? + reinterpret_cast(value) : nullptr; + } + const TransposeT *AsTranspose() const { + return type == OpParameter_Transpose ? + reinterpret_cast(value) : nullptr; + } + UnaryOpT *AsUnaryOp() { + return type == OpParameter_UnaryOp ? + reinterpret_cast(value) : nullptr; + } + const UnaryOpT *AsUnaryOp() const { + return type == OpParameter_UnaryOp ? + reinterpret_cast(value) : nullptr; + } + MomentsParamT *AsMomentsParam() { + return type == OpParameter_MomentsParam ? + reinterpret_cast(value) : nullptr; + } + const MomentsParamT *AsMomentsParam() const { + return type == OpParameter_MomentsParam ? + reinterpret_cast(value) : nullptr; + } + RNNParamT *AsRNNParam() { + return type == OpParameter_RNNParam ? 
+ reinterpret_cast(value) : nullptr; + } + const RNNParamT *AsRNNParam() const { + return type == OpParameter_RNNParam ? + reinterpret_cast(value) : nullptr; + } + BatchMatMulParamT *AsBatchMatMulParam() { + return type == OpParameter_BatchMatMulParam ? + reinterpret_cast(value) : nullptr; + } + const BatchMatMulParamT *AsBatchMatMulParam() const { + return type == OpParameter_BatchMatMulParam ? + reinterpret_cast(value) : nullptr; + } + QuantizedFloatParamT *AsQuantizedFloatParam() { + return type == OpParameter_QuantizedFloatParam ? + reinterpret_cast(value) : nullptr; + } + const QuantizedFloatParamT *AsQuantizedFloatParam() const { + return type == OpParameter_QuantizedFloatParam ? + reinterpret_cast(value) : nullptr; + } + DepthSpaceParamT *AsDepthSpaceParam() { + return type == OpParameter_DepthSpaceParam ? + reinterpret_cast(value) : nullptr; + } + const DepthSpaceParamT *AsDepthSpaceParam() const { + return type == OpParameter_DepthSpaceParam ? + reinterpret_cast(value) : nullptr; + } + EltwiseInt8T *AsEltwiseInt8() { + return type == OpParameter_EltwiseInt8 ? + reinterpret_cast(value) : nullptr; + } + const EltwiseInt8T *AsEltwiseInt8() const { + return type == OpParameter_EltwiseInt8 ? + reinterpret_cast(value) : nullptr; + } + ReverseSequenceParamT *AsReverseSequenceParam() { + return type == OpParameter_ReverseSequenceParam ? + reinterpret_cast(value) : nullptr; + } + const ReverseSequenceParamT *AsReverseSequenceParam() const { + return type == OpParameter_ReverseSequenceParam ? + reinterpret_cast(value) : nullptr; + } + ExtraT *AsExtra() { + return type == OpParameter_Extra ? + reinterpret_cast(value) : nullptr; + } + const ExtraT *AsExtra() const { + return type == OpParameter_Extra ? + reinterpret_cast(value) : nullptr; + } + Pool3DT *AsPool3D() { + return type == OpParameter_Pool3D ? + reinterpret_cast(value) : nullptr; + } + const Pool3DT *AsPool3D() const { + return type == OpParameter_Pool3D ? 
+ reinterpret_cast(value) : nullptr; + } + Convolution3DT *AsConvolution3D() { + return type == OpParameter_Convolution3D ? + reinterpret_cast(value) : nullptr; + } + const Convolution3DT *AsConvolution3D() const { + return type == OpParameter_Convolution3D ? + reinterpret_cast(value) : nullptr; + } + ELUT *AsELU() { + return type == OpParameter_ELU ? + reinterpret_cast(value) : nullptr; + } + const ELUT *AsELU() const { + return type == OpParameter_ELU ? + reinterpret_cast(value) : nullptr; + } + DetectionPostProcessParamT *AsDetectionPostProcessParam() { + return type == OpParameter_DetectionPostProcessParam ? + reinterpret_cast(value) : nullptr; + } + const DetectionPostProcessParamT *AsDetectionPostProcessParam() const { + return type == OpParameter_DetectionPostProcessParam ? + reinterpret_cast(value) : nullptr; + } + OneHotParamT *AsOneHotParam() { + return type == OpParameter_OneHotParam ? + reinterpret_cast(value) : nullptr; + } + const OneHotParamT *AsOneHotParam() const { + return type == OpParameter_OneHotParam ? + reinterpret_cast(value) : nullptr; + } + PadParamT *AsPadParam() { + return type == OpParameter_PadParam ? + reinterpret_cast(value) : nullptr; + } + const PadParamT *AsPadParam() const { + return type == OpParameter_PadParam ? 
+ reinterpret_cast(value) : nullptr; + } +}; + +bool VerifyOpParameter(flatbuffers::Verifier &verifier, const void *obj, OpParameter type); +bool VerifyOpParameterVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); + +enum ForwardType { + ForwardType_CPU = 0, + ForwardType_METAL = 1, + ForwardType_OPENCL = 2, + ForwardType_OPENGLES = 3, + ForwardType_VULKAN = 4, + ForwardType_MIN = ForwardType_CPU, + ForwardType_MAX = ForwardType_VULKAN +}; + +inline const ForwardType (&EnumValuesForwardType())[5] { + static const ForwardType values[] = { + ForwardType_CPU, + ForwardType_METAL, + ForwardType_OPENCL, + ForwardType_OPENGLES, + ForwardType_VULKAN + }; + return values; +} + +inline const char * const *EnumNamesForwardType() { + static const char * const names[] = { + "CPU", + "METAL", + "OPENCL", + "OPENGLES", + "VULKAN", + nullptr + }; + return names; +} + +inline const char *EnumNameForwardType(ForwardType e) { + if (e < ForwardType_CPU || e > ForwardType_VULKAN) return ""; + const size_t index = static_cast(e); + return EnumNamesForwardType()[index]; +} + +enum Usage { + Usage_INFERENCE = 0, + Usage_TRAIN = 1, + Usage_MIN = Usage_INFERENCE, + Usage_MAX = Usage_TRAIN +}; + +inline const Usage (&EnumValuesUsage())[2] { + static const Usage values[] = { + Usage_INFERENCE, + Usage_TRAIN + }; + return values; +} + +inline const char * const *EnumNamesUsage() { + static const char * const names[] = { + "INFERENCE", + "TRAIN", + nullptr + }; + return names; +} + +inline const char *EnumNameUsage(Usage e) { + if (e < Usage_INFERENCE || e > Usage_TRAIN) return ""; + const size_t index = static_cast(e); + return EnumNamesUsage()[index]; +} + +struct PluginT : public flatbuffers::NativeTable { + typedef Plugin TableType; + std::string type; + std::vector> buffer; + PluginT() { + } +}; + +struct Plugin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PluginT NativeTableType; + static const 
flatbuffers::TypeTable *MiniReflectTypeTable() { + return PluginTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4, + VT_BUFFER = 6 + }; + const flatbuffers::String *type() const { + return GetPointer(VT_TYPE); + } + const flatbuffers::Vector> *buffer() const { + return GetPointer> *>(VT_BUFFER); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TYPE) && + verifier.VerifyString(type()) && + VerifyOffset(verifier, VT_BUFFER) && + verifier.VerifyVector(buffer()) && + verifier.VerifyVectorOfTables(buffer()) && + verifier.EndTable(); + } + PluginT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PluginT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PluginT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PluginBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(flatbuffers::Offset type) { + fbb_.AddOffset(Plugin::VT_TYPE, type); + } + void add_buffer(flatbuffers::Offset>> buffer) { + fbb_.AddOffset(Plugin::VT_BUFFER, buffer); + } + explicit PluginBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PluginBuilder &operator=(const PluginBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePlugin( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset type = 0, + flatbuffers::Offset>> buffer = 0) { + PluginBuilder builder_(_fbb); + builder_.add_buffer(buffer); + builder_.add_type(type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreatePluginDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *type = nullptr, + const 
std::vector> *buffer = nullptr) { + auto type__ = type ? _fbb.CreateString(type) : 0; + auto buffer__ = buffer ? _fbb.CreateVector>(*buffer) : 0; + return MNN::CreatePlugin( + _fbb, + type__, + buffer__); +} + +flatbuffers::Offset CreatePlugin(flatbuffers::FlatBufferBuilder &_fbb, const PluginT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ExtraT : public flatbuffers::NativeTable { + typedef Extra TableType; + std::string type; + std::string engine; + std::vector info; + std::vector> attr; + ExtraT() { + } +}; + +struct Extra FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ExtraT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ExtraTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4, + VT_ENGINE = 6, + VT_INFO = 8, + VT_ATTR = 10 + }; + const flatbuffers::String *type() const { + return GetPointer(VT_TYPE); + } + const flatbuffers::String *engine() const { + return GetPointer(VT_ENGINE); + } + const flatbuffers::Vector *info() const { + return GetPointer *>(VT_INFO); + } + const flatbuffers::Vector> *attr() const { + return GetPointer> *>(VT_ATTR); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TYPE) && + verifier.VerifyString(type()) && + VerifyOffset(verifier, VT_ENGINE) && + verifier.VerifyString(engine()) && + VerifyOffset(verifier, VT_INFO) && + verifier.VerifyVector(info()) && + VerifyOffset(verifier, VT_ATTR) && + verifier.VerifyVector(attr()) && + verifier.VerifyVectorOfTables(attr()) && + verifier.EndTable(); + } + ExtraT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ExtraT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExtraT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; 
+ +struct ExtraBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(flatbuffers::Offset type) { + fbb_.AddOffset(Extra::VT_TYPE, type); + } + void add_engine(flatbuffers::Offset engine) { + fbb_.AddOffset(Extra::VT_ENGINE, engine); + } + void add_info(flatbuffers::Offset> info) { + fbb_.AddOffset(Extra::VT_INFO, info); + } + void add_attr(flatbuffers::Offset>> attr) { + fbb_.AddOffset(Extra::VT_ATTR, attr); + } + explicit ExtraBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ExtraBuilder &operator=(const ExtraBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateExtra( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset type = 0, + flatbuffers::Offset engine = 0, + flatbuffers::Offset> info = 0, + flatbuffers::Offset>> attr = 0) { + ExtraBuilder builder_(_fbb); + builder_.add_attr(attr); + builder_.add_info(info); + builder_.add_engine(engine); + builder_.add_type(type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateExtraDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *type = nullptr, + const char *engine = nullptr, + const std::vector *info = nullptr, + const std::vector> *attr = nullptr) { + auto type__ = type ? _fbb.CreateString(type) : 0; + auto engine__ = engine ? _fbb.CreateString(engine) : 0; + auto info__ = info ? _fbb.CreateVector(*info) : 0; + auto attr__ = attr ? 
_fbb.CreateVector>(*attr) : 0; + return MNN::CreateExtra( + _fbb, + type__, + engine__, + info__, + attr__); +} + +flatbuffers::Offset CreateExtra(flatbuffers::FlatBufferBuilder &_fbb, const ExtraT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct OpT : public flatbuffers::NativeTable { + typedef Op TableType; + std::vector inputIndexes; + OpParameterUnion main; + std::string name; + std::vector outputIndexes; + OpType type; + MNN_DATA_FORMAT defaultDimentionFormat; + OpT() + : type(OpType_AbsVal), + defaultDimentionFormat(MNN_DATA_FORMAT_NHWC) { + } +}; + +struct Op FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OpT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return OpTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUTINDEXES = 4, + VT_MAIN_TYPE = 6, + VT_MAIN = 8, + VT_NAME = 10, + VT_OUTPUTINDEXES = 12, + VT_TYPE = 14, + VT_DEFAULTDIMENTIONFORMAT = 16 + }; + const flatbuffers::Vector *inputIndexes() const { + return GetPointer *>(VT_INPUTINDEXES); + } + OpParameter main_type() const { + return static_cast(GetField(VT_MAIN_TYPE, 0)); + } + const void *main() const { + return GetPointer(VT_MAIN); + } + template const T *main_as() const; + const QuantizedAdd *main_as_QuantizedAdd() const { + return main_type() == OpParameter_QuantizedAdd ? static_cast(main()) : nullptr; + } + const ArgMax *main_as_ArgMax() const { + return main_type() == OpParameter_ArgMax ? static_cast(main()) : nullptr; + } + const AsString *main_as_AsString() const { + return main_type() == OpParameter_AsString ? static_cast(main()) : nullptr; + } + const Axis *main_as_Axis() const { + return main_type() == OpParameter_Axis ? static_cast(main()) : nullptr; + } + const BatchNorm *main_as_BatchNorm() const { + return main_type() == OpParameter_BatchNorm ? 
static_cast(main()) : nullptr; + } + const BinaryOp *main_as_BinaryOp() const { + return main_type() == OpParameter_BinaryOp ? static_cast(main()) : nullptr; + } + const Blob *main_as_Blob() const { + return main_type() == OpParameter_Blob ? static_cast(main()) : nullptr; + } + const CastParam *main_as_CastParam() const { + return main_type() == OpParameter_CastParam ? static_cast(main()) : nullptr; + } + const Convolution2D *main_as_Convolution2D() const { + return main_type() == OpParameter_Convolution2D ? static_cast(main()) : nullptr; + } + const Crop *main_as_Crop() const { + return main_type() == OpParameter_Crop ? static_cast(main()) : nullptr; + } + const CropAndResize *main_as_CropAndResize() const { + return main_type() == OpParameter_CropAndResize ? static_cast(main()) : nullptr; + } + const Dequantize *main_as_Dequantize() const { + return main_type() == OpParameter_Dequantize ? static_cast(main()) : nullptr; + } + const DetectionOutput *main_as_DetectionOutput() const { + return main_type() == OpParameter_DetectionOutput ? static_cast(main()) : nullptr; + } + const Eltwise *main_as_Eltwise() const { + return main_type() == OpParameter_Eltwise ? static_cast(main()) : nullptr; + } + const ExpandDims *main_as_ExpandDims() const { + return main_type() == OpParameter_ExpandDims ? static_cast(main()) : nullptr; + } + const Fill *main_as_Fill() const { + return main_type() == OpParameter_Fill ? static_cast(main()) : nullptr; + } + const Flatten *main_as_Flatten() const { + return main_type() == OpParameter_Flatten ? static_cast(main()) : nullptr; + } + const Gather *main_as_Gather() const { + return main_type() == OpParameter_Gather ? static_cast(main()) : nullptr; + } + const GatherV2 *main_as_GatherV2() const { + return main_type() == OpParameter_GatherV2 ? static_cast(main()) : nullptr; + } + const InnerProduct *main_as_InnerProduct() const { + return main_type() == OpParameter_InnerProduct ? 
static_cast(main()) : nullptr; + } + const Input *main_as_Input() const { + return main_type() == OpParameter_Input ? static_cast(main()) : nullptr; + } + const Interp *main_as_Interp() const { + return main_type() == OpParameter_Interp ? static_cast(main()) : nullptr; + } + const LRN *main_as_LRN() const { + return main_type() == OpParameter_LRN ? static_cast(main()) : nullptr; + } + const LSTM *main_as_LSTM() const { + return main_type() == OpParameter_LSTM ? static_cast(main()) : nullptr; + } + const MatMul *main_as_MatMul() const { + return main_type() == OpParameter_MatMul ? static_cast(main()) : nullptr; + } + const NonMaxSuppressionV2 *main_as_NonMaxSuppressionV2() const { + return main_type() == OpParameter_NonMaxSuppressionV2 ? static_cast(main()) : nullptr; + } + const Normalize *main_as_Normalize() const { + return main_type() == OpParameter_Normalize ? static_cast(main()) : nullptr; + } + const PackParam *main_as_PackParam() const { + return main_type() == OpParameter_PackParam ? static_cast(main()) : nullptr; + } + const Permute *main_as_Permute() const { + return main_type() == OpParameter_Permute ? static_cast(main()) : nullptr; + } + const Plugin *main_as_Plugin() const { + return main_type() == OpParameter_Plugin ? static_cast(main()) : nullptr; + } + const Pool *main_as_Pool() const { + return main_type() == OpParameter_Pool ? static_cast(main()) : nullptr; + } + const PRelu *main_as_PRelu() const { + return main_type() == OpParameter_PRelu ? static_cast(main()) : nullptr; + } + const PriorBox *main_as_PriorBox() const { + return main_type() == OpParameter_PriorBox ? static_cast(main()) : nullptr; + } + const Proposal *main_as_Proposal() const { + return main_type() == OpParameter_Proposal ? static_cast(main()) : nullptr; + } + const QuantizedAvgPool *main_as_QuantizedAvgPool() const { + return main_type() == OpParameter_QuantizedAvgPool ? 
static_cast(main()) : nullptr; + } + const QuantizedBiasAdd *main_as_QuantizedBiasAdd() const { + return main_type() == OpParameter_QuantizedBiasAdd ? static_cast(main()) : nullptr; + } + const QuantizedConcat *main_as_QuantizedConcat() const { + return main_type() == OpParameter_QuantizedConcat ? static_cast(main()) : nullptr; + } + const QuantizedLogistic *main_as_QuantizedLogistic() const { + return main_type() == OpParameter_QuantizedLogistic ? static_cast(main()) : nullptr; + } + const QuantizedMatMul *main_as_QuantizedMatMul() const { + return main_type() == OpParameter_QuantizedMatMul ? static_cast(main()) : nullptr; + } + const QuantizedMaxPool *main_as_QuantizedMaxPool() const { + return main_type() == OpParameter_QuantizedMaxPool ? static_cast(main()) : nullptr; + } + const QuantizedRelu *main_as_QuantizedRelu() const { + return main_type() == OpParameter_QuantizedRelu ? static_cast(main()) : nullptr; + } + const QuantizedRelu6 *main_as_QuantizedRelu6() const { + return main_type() == OpParameter_QuantizedRelu6 ? static_cast(main()) : nullptr; + } + const QuantizedReshape *main_as_QuantizedReshape() const { + return main_type() == OpParameter_QuantizedReshape ? static_cast(main()) : nullptr; + } + const QuantizedSoftmax *main_as_QuantizedSoftmax() const { + return main_type() == OpParameter_QuantizedSoftmax ? static_cast(main()) : nullptr; + } + const QuantizeMaxMin *main_as_QuantizeMaxMin() const { + return main_type() == OpParameter_QuantizeMaxMin ? static_cast(main()) : nullptr; + } + const QuantizeV2 *main_as_QuantizeV2() const { + return main_type() == OpParameter_QuantizeV2 ? static_cast(main()) : nullptr; + } + const Range *main_as_Range() const { + return main_type() == OpParameter_Range ? static_cast(main()) : nullptr; + } + const Rank *main_as_Rank() const { + return main_type() == OpParameter_Rank ? static_cast(main()) : nullptr; + } + const ReduceJoin *main_as_ReduceJoin() const { + return main_type() == OpParameter_ReduceJoin ? 
static_cast(main()) : nullptr; + } + const ReductionParam *main_as_ReductionParam() const { + return main_type() == OpParameter_ReductionParam ? static_cast(main()) : nullptr; + } + const Relu *main_as_Relu() const { + return main_type() == OpParameter_Relu ? static_cast(main()) : nullptr; + } + const Relu6 *main_as_Relu6() const { + return main_type() == OpParameter_Relu6 ? static_cast(main()) : nullptr; + } + const RequantizationRange *main_as_RequantizationRange() const { + return main_type() == OpParameter_RequantizationRange ? static_cast(main()) : nullptr; + } + const Requantize *main_as_Requantize() const { + return main_type() == OpParameter_Requantize ? static_cast(main()) : nullptr; + } + const Reshape *main_as_Reshape() const { + return main_type() == OpParameter_Reshape ? static_cast(main()) : nullptr; + } + const Resize *main_as_Resize() const { + return main_type() == OpParameter_Resize ? static_cast(main()) : nullptr; + } + const RoiPooling *main_as_RoiPooling() const { + return main_type() == OpParameter_RoiPooling ? static_cast(main()) : nullptr; + } + const Scale *main_as_Scale() const { + return main_type() == OpParameter_Scale ? static_cast(main()) : nullptr; + } + const Selu *main_as_Selu() const { + return main_type() == OpParameter_Selu ? static_cast(main()) : nullptr; + } + const Size *main_as_Size() const { + return main_type() == OpParameter_Size ? static_cast(main()) : nullptr; + } + const Slice *main_as_Slice() const { + return main_type() == OpParameter_Slice ? static_cast(main()) : nullptr; + } + const SliceTf *main_as_SliceTf() const { + return main_type() == OpParameter_SliceTf ? static_cast(main()) : nullptr; + } + const SpaceBatch *main_as_SpaceBatch() const { + return main_type() == OpParameter_SpaceBatch ? static_cast(main()) : nullptr; + } + const SqueezeParam *main_as_SqueezeParam() const { + return main_type() == OpParameter_SqueezeParam ? 
static_cast(main()) : nullptr; + } + const StridedSliceParam *main_as_StridedSliceParam() const { + return main_type() == OpParameter_StridedSliceParam ? static_cast(main()) : nullptr; + } + const TensorConvertInfo *main_as_TensorConvertInfo() const { + return main_type() == OpParameter_TensorConvertInfo ? static_cast(main()) : nullptr; + } + const TfQuantizedConv2D *main_as_TfQuantizedConv2D() const { + return main_type() == OpParameter_TfQuantizedConv2D ? static_cast(main()) : nullptr; + } + const TopKV2 *main_as_TopKV2() const { + return main_type() == OpParameter_TopKV2 ? static_cast(main()) : nullptr; + } + const Transpose *main_as_Transpose() const { + return main_type() == OpParameter_Transpose ? static_cast(main()) : nullptr; + } + const UnaryOp *main_as_UnaryOp() const { + return main_type() == OpParameter_UnaryOp ? static_cast(main()) : nullptr; + } + const MomentsParam *main_as_MomentsParam() const { + return main_type() == OpParameter_MomentsParam ? static_cast(main()) : nullptr; + } + const RNNParam *main_as_RNNParam() const { + return main_type() == OpParameter_RNNParam ? static_cast(main()) : nullptr; + } + const BatchMatMulParam *main_as_BatchMatMulParam() const { + return main_type() == OpParameter_BatchMatMulParam ? static_cast(main()) : nullptr; + } + const QuantizedFloatParam *main_as_QuantizedFloatParam() const { + return main_type() == OpParameter_QuantizedFloatParam ? static_cast(main()) : nullptr; + } + const DepthSpaceParam *main_as_DepthSpaceParam() const { + return main_type() == OpParameter_DepthSpaceParam ? static_cast(main()) : nullptr; + } + const EltwiseInt8 *main_as_EltwiseInt8() const { + return main_type() == OpParameter_EltwiseInt8 ? static_cast(main()) : nullptr; + } + const ReverseSequenceParam *main_as_ReverseSequenceParam() const { + return main_type() == OpParameter_ReverseSequenceParam ? static_cast(main()) : nullptr; + } + const Extra *main_as_Extra() const { + return main_type() == OpParameter_Extra ? 
static_cast(main()) : nullptr; + } + const Pool3D *main_as_Pool3D() const { + return main_type() == OpParameter_Pool3D ? static_cast(main()) : nullptr; + } + const Convolution3D *main_as_Convolution3D() const { + return main_type() == OpParameter_Convolution3D ? static_cast(main()) : nullptr; + } + const ELU *main_as_ELU() const { + return main_type() == OpParameter_ELU ? static_cast(main()) : nullptr; + } + const DetectionPostProcessParam *main_as_DetectionPostProcessParam() const { + return main_type() == OpParameter_DetectionPostProcessParam ? static_cast(main()) : nullptr; + } + const OneHotParam *main_as_OneHotParam() const { + return main_type() == OpParameter_OneHotParam ? static_cast(main()) : nullptr; + } + const PadParam *main_as_PadParam() const { + return main_type() == OpParameter_PadParam ? static_cast(main()) : nullptr; + } + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + const flatbuffers::Vector *outputIndexes() const { + return GetPointer *>(VT_OUTPUTINDEXES); + } + OpType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + MNN_DATA_FORMAT defaultDimentionFormat() const { + return static_cast(GetField(VT_DEFAULTDIMENTIONFORMAT, 1)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUTINDEXES) && + verifier.VerifyVector(inputIndexes()) && + VerifyField(verifier, VT_MAIN_TYPE) && + VerifyOffset(verifier, VT_MAIN) && + VerifyOpParameter(verifier, main(), main_type()) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_OUTPUTINDEXES) && + verifier.VerifyVector(outputIndexes()) && + VerifyField(verifier, VT_TYPE) && + VerifyField(verifier, VT_DEFAULTDIMENTIONFORMAT) && + verifier.EndTable(); + } + OpT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OpT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static 
flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OpT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +template<> inline const QuantizedAdd *Op::main_as() const { + return main_as_QuantizedAdd(); +} + +template<> inline const ArgMax *Op::main_as() const { + return main_as_ArgMax(); +} + +template<> inline const AsString *Op::main_as() const { + return main_as_AsString(); +} + +template<> inline const Axis *Op::main_as() const { + return main_as_Axis(); +} + +template<> inline const BatchNorm *Op::main_as() const { + return main_as_BatchNorm(); +} + +template<> inline const BinaryOp *Op::main_as() const { + return main_as_BinaryOp(); +} + +template<> inline const Blob *Op::main_as() const { + return main_as_Blob(); +} + +template<> inline const CastParam *Op::main_as() const { + return main_as_CastParam(); +} + +template<> inline const Convolution2D *Op::main_as() const { + return main_as_Convolution2D(); +} + +template<> inline const Crop *Op::main_as() const { + return main_as_Crop(); +} + +template<> inline const CropAndResize *Op::main_as() const { + return main_as_CropAndResize(); +} + +template<> inline const Dequantize *Op::main_as() const { + return main_as_Dequantize(); +} + +template<> inline const DetectionOutput *Op::main_as() const { + return main_as_DetectionOutput(); +} + +template<> inline const Eltwise *Op::main_as() const { + return main_as_Eltwise(); +} + +template<> inline const ExpandDims *Op::main_as() const { + return main_as_ExpandDims(); +} + +template<> inline const Fill *Op::main_as() const { + return main_as_Fill(); +} + +template<> inline const Flatten *Op::main_as() const { + return main_as_Flatten(); +} + +template<> inline const Gather *Op::main_as() const { + return main_as_Gather(); +} + +template<> inline const GatherV2 *Op::main_as() const { + return main_as_GatherV2(); +} + +template<> inline const InnerProduct *Op::main_as() const { + return main_as_InnerProduct(); +} + +template<> 
inline const Input *Op::main_as() const { + return main_as_Input(); +} + +template<> inline const Interp *Op::main_as() const { + return main_as_Interp(); +} + +template<> inline const LRN *Op::main_as() const { + return main_as_LRN(); +} + +template<> inline const LSTM *Op::main_as() const { + return main_as_LSTM(); +} + +template<> inline const MatMul *Op::main_as() const { + return main_as_MatMul(); +} + +template<> inline const NonMaxSuppressionV2 *Op::main_as() const { + return main_as_NonMaxSuppressionV2(); +} + +template<> inline const Normalize *Op::main_as() const { + return main_as_Normalize(); +} + +template<> inline const PackParam *Op::main_as() const { + return main_as_PackParam(); +} + +template<> inline const Permute *Op::main_as() const { + return main_as_Permute(); +} + +template<> inline const Plugin *Op::main_as() const { + return main_as_Plugin(); +} + +template<> inline const Pool *Op::main_as() const { + return main_as_Pool(); +} + +template<> inline const PRelu *Op::main_as() const { + return main_as_PRelu(); +} + +template<> inline const PriorBox *Op::main_as() const { + return main_as_PriorBox(); +} + +template<> inline const Proposal *Op::main_as() const { + return main_as_Proposal(); +} + +template<> inline const QuantizedAvgPool *Op::main_as() const { + return main_as_QuantizedAvgPool(); +} + +template<> inline const QuantizedBiasAdd *Op::main_as() const { + return main_as_QuantizedBiasAdd(); +} + +template<> inline const QuantizedConcat *Op::main_as() const { + return main_as_QuantizedConcat(); +} + +template<> inline const QuantizedLogistic *Op::main_as() const { + return main_as_QuantizedLogistic(); +} + +template<> inline const QuantizedMatMul *Op::main_as() const { + return main_as_QuantizedMatMul(); +} + +template<> inline const QuantizedMaxPool *Op::main_as() const { + return main_as_QuantizedMaxPool(); +} + +template<> inline const QuantizedRelu *Op::main_as() const { + return main_as_QuantizedRelu(); +} + +template<> inline 
const QuantizedRelu6 *Op::main_as() const { + return main_as_QuantizedRelu6(); +} + +template<> inline const QuantizedReshape *Op::main_as() const { + return main_as_QuantizedReshape(); +} + +template<> inline const QuantizedSoftmax *Op::main_as() const { + return main_as_QuantizedSoftmax(); +} + +template<> inline const QuantizeMaxMin *Op::main_as() const { + return main_as_QuantizeMaxMin(); +} + +template<> inline const QuantizeV2 *Op::main_as() const { + return main_as_QuantizeV2(); +} + +template<> inline const Range *Op::main_as() const { + return main_as_Range(); +} + +template<> inline const Rank *Op::main_as() const { + return main_as_Rank(); +} + +template<> inline const ReduceJoin *Op::main_as() const { + return main_as_ReduceJoin(); +} + +template<> inline const ReductionParam *Op::main_as() const { + return main_as_ReductionParam(); +} + +template<> inline const Relu *Op::main_as() const { + return main_as_Relu(); +} + +template<> inline const Relu6 *Op::main_as() const { + return main_as_Relu6(); +} + +template<> inline const RequantizationRange *Op::main_as() const { + return main_as_RequantizationRange(); +} + +template<> inline const Requantize *Op::main_as() const { + return main_as_Requantize(); +} + +template<> inline const Reshape *Op::main_as() const { + return main_as_Reshape(); +} + +template<> inline const Resize *Op::main_as() const { + return main_as_Resize(); +} + +template<> inline const RoiPooling *Op::main_as() const { + return main_as_RoiPooling(); +} + +template<> inline const Scale *Op::main_as() const { + return main_as_Scale(); +} + +template<> inline const Selu *Op::main_as() const { + return main_as_Selu(); +} + +template<> inline const Size *Op::main_as() const { + return main_as_Size(); +} + +template<> inline const Slice *Op::main_as() const { + return main_as_Slice(); +} + +template<> inline const SliceTf *Op::main_as() const { + return main_as_SliceTf(); +} + +template<> inline const SpaceBatch *Op::main_as() const { + 
return main_as_SpaceBatch(); +} + +template<> inline const SqueezeParam *Op::main_as() const { + return main_as_SqueezeParam(); +} + +template<> inline const StridedSliceParam *Op::main_as() const { + return main_as_StridedSliceParam(); +} + +template<> inline const TensorConvertInfo *Op::main_as() const { + return main_as_TensorConvertInfo(); +} + +template<> inline const TfQuantizedConv2D *Op::main_as() const { + return main_as_TfQuantizedConv2D(); +} + +template<> inline const TopKV2 *Op::main_as() const { + return main_as_TopKV2(); +} + +template<> inline const Transpose *Op::main_as() const { + return main_as_Transpose(); +} + +template<> inline const UnaryOp *Op::main_as() const { + return main_as_UnaryOp(); +} + +template<> inline const MomentsParam *Op::main_as() const { + return main_as_MomentsParam(); +} + +template<> inline const RNNParam *Op::main_as() const { + return main_as_RNNParam(); +} + +template<> inline const BatchMatMulParam *Op::main_as() const { + return main_as_BatchMatMulParam(); +} + +template<> inline const QuantizedFloatParam *Op::main_as() const { + return main_as_QuantizedFloatParam(); +} + +template<> inline const DepthSpaceParam *Op::main_as() const { + return main_as_DepthSpaceParam(); +} + +template<> inline const EltwiseInt8 *Op::main_as() const { + return main_as_EltwiseInt8(); +} + +template<> inline const ReverseSequenceParam *Op::main_as() const { + return main_as_ReverseSequenceParam(); +} + +template<> inline const Extra *Op::main_as() const { + return main_as_Extra(); +} + +template<> inline const Pool3D *Op::main_as() const { + return main_as_Pool3D(); +} + +template<> inline const Convolution3D *Op::main_as() const { + return main_as_Convolution3D(); +} + +template<> inline const ELU *Op::main_as() const { + return main_as_ELU(); +} + +template<> inline const DetectionPostProcessParam *Op::main_as() const { + return main_as_DetectionPostProcessParam(); +} + +template<> inline const OneHotParam *Op::main_as() const { + 
return main_as_OneHotParam(); +} + +template<> inline const PadParam *Op::main_as() const { + return main_as_PadParam(); +} + +struct OpBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_inputIndexes(flatbuffers::Offset> inputIndexes) { + fbb_.AddOffset(Op::VT_INPUTINDEXES, inputIndexes); + } + void add_main_type(OpParameter main_type) { + fbb_.AddElement(Op::VT_MAIN_TYPE, static_cast(main_type), 0); + } + void add_main(flatbuffers::Offset main) { + fbb_.AddOffset(Op::VT_MAIN, main); + } + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(Op::VT_NAME, name); + } + void add_outputIndexes(flatbuffers::Offset> outputIndexes) { + fbb_.AddOffset(Op::VT_OUTPUTINDEXES, outputIndexes); + } + void add_type(OpType type) { + fbb_.AddElement(Op::VT_TYPE, static_cast(type), 0); + } + void add_defaultDimentionFormat(MNN_DATA_FORMAT defaultDimentionFormat) { + fbb_.AddElement(Op::VT_DEFAULTDIMENTIONFORMAT, static_cast(defaultDimentionFormat), 1); + } + explicit OpBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + OpBuilder &operator=(const OpBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateOp( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> inputIndexes = 0, + OpParameter main_type = OpParameter_NONE, + flatbuffers::Offset main = 0, + flatbuffers::Offset name = 0, + flatbuffers::Offset> outputIndexes = 0, + OpType type = OpType_AbsVal, + MNN_DATA_FORMAT defaultDimentionFormat = MNN_DATA_FORMAT_NHWC) { + OpBuilder builder_(_fbb); + builder_.add_type(type); + builder_.add_outputIndexes(outputIndexes); + builder_.add_name(name); + builder_.add_main(main); + builder_.add_inputIndexes(inputIndexes); + builder_.add_defaultDimentionFormat(defaultDimentionFormat); + builder_.add_main_type(main_type); + return builder_.Finish(); +} + +inline 
flatbuffers::Offset CreateOpDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *inputIndexes = nullptr, + OpParameter main_type = OpParameter_NONE, + flatbuffers::Offset main = 0, + const char *name = nullptr, + const std::vector *outputIndexes = nullptr, + OpType type = OpType_AbsVal, + MNN_DATA_FORMAT defaultDimentionFormat = MNN_DATA_FORMAT_NHWC) { + auto inputIndexes__ = inputIndexes ? _fbb.CreateVector(*inputIndexes) : 0; + auto name__ = name ? _fbb.CreateString(name) : 0; + auto outputIndexes__ = outputIndexes ? _fbb.CreateVector(*outputIndexes) : 0; + return MNN::CreateOp( + _fbb, + inputIndexes__, + main_type, + main, + name__, + outputIndexes__, + type, + defaultDimentionFormat); +} + +flatbuffers::Offset CreateOp(flatbuffers::FlatBufferBuilder &_fbb, const OpT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TensorDescribeT : public flatbuffers::NativeTable { + typedef TensorDescribe TableType; + std::unique_ptr blob; + int32_t index; + std::string name; + TensorDescribeT() + : index(0) { + } +}; + +struct TensorDescribe FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TensorDescribeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return TensorDescribeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOB = 4, + VT_INDEX = 6, + VT_NAME = 8 + }; + const Blob *blob() const { + return GetPointer(VT_BLOB); + } + int32_t index() const { + return GetField(VT_INDEX, 0); + } + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BLOB) && + verifier.VerifyTable(blob()) && + VerifyField(verifier, VT_INDEX) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + verifier.EndTable(); + } + TensorDescribeT *UnPack(const flatbuffers::resolver_function_t *_resolver = 
nullptr) const; + void UnPackTo(TensorDescribeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorDescribeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TensorDescribeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_blob(flatbuffers::Offset blob) { + fbb_.AddOffset(TensorDescribe::VT_BLOB, blob); + } + void add_index(int32_t index) { + fbb_.AddElement(TensorDescribe::VT_INDEX, index, 0); + } + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(TensorDescribe::VT_NAME, name); + } + explicit TensorDescribeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TensorDescribeBuilder &operator=(const TensorDescribeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensorDescribe( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset blob = 0, + int32_t index = 0, + flatbuffers::Offset name = 0) { + TensorDescribeBuilder builder_(_fbb); + builder_.add_name(name); + builder_.add_index(index); + builder_.add_blob(blob); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateTensorDescribeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset blob = 0, + int32_t index = 0, + const char *name = nullptr) { + auto name__ = name ? 
_fbb.CreateString(name) : 0; + return MNN::CreateTensorDescribe( + _fbb, + blob, + index, + name__); +} + +flatbuffers::Offset CreateTensorDescribe(flatbuffers::FlatBufferBuilder &_fbb, const TensorDescribeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NetT : public flatbuffers::NativeTable { + typedef Net TableType; + std::string bizCode; + std::vector> extraTensorDescribe; + std::unique_ptr gpulibrary; + std::vector> oplists; + std::vector outputName; + ForwardType preferForwardType; + NetSource sourceType; + std::vector tensorName; + int32_t tensorNumber; + Usage usage; + NetT() + : preferForwardType(ForwardType_CPU), + sourceType(NetSource_CAFFE), + tensorNumber(0), + usage(Usage_INFERENCE) { + } +}; + +struct Net FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NetT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return NetTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BIZCODE = 4, + VT_EXTRATENSORDESCRIBE = 6, + VT_GPULIBRARY = 8, + VT_OPLISTS = 10, + VT_OUTPUTNAME = 12, + VT_PREFERFORWARDTYPE = 14, + VT_SOURCETYPE = 16, + VT_TENSORNAME = 18, + VT_TENSORNUMBER = 20, + VT_USAGE = 22 + }; + const flatbuffers::String *bizCode() const { + return GetPointer(VT_BIZCODE); + } + const flatbuffers::Vector> *extraTensorDescribe() const { + return GetPointer> *>(VT_EXTRATENSORDESCRIBE); + } + const GpuLibrary *gpulibrary() const { + return GetPointer(VT_GPULIBRARY); + } + const flatbuffers::Vector> *oplists() const { + return GetPointer> *>(VT_OPLISTS); + } + const flatbuffers::Vector> *outputName() const { + return GetPointer> *>(VT_OUTPUTNAME); + } + ForwardType preferForwardType() const { + return static_cast(GetField(VT_PREFERFORWARDTYPE, 0)); + } + NetSource sourceType() const { + return static_cast(GetField(VT_SOURCETYPE, 0)); + } + const flatbuffers::Vector> *tensorName() const { + return GetPointer> *>(VT_TENSORNAME); + } + int32_t 
tensorNumber() const { + return GetField(VT_TENSORNUMBER, 0); + } + Usage usage() const { + return static_cast(GetField(VT_USAGE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BIZCODE) && + verifier.VerifyString(bizCode()) && + VerifyOffset(verifier, VT_EXTRATENSORDESCRIBE) && + verifier.VerifyVector(extraTensorDescribe()) && + verifier.VerifyVectorOfTables(extraTensorDescribe()) && + VerifyOffset(verifier, VT_GPULIBRARY) && + verifier.VerifyTable(gpulibrary()) && + VerifyOffset(verifier, VT_OPLISTS) && + verifier.VerifyVector(oplists()) && + verifier.VerifyVectorOfTables(oplists()) && + VerifyOffset(verifier, VT_OUTPUTNAME) && + verifier.VerifyVector(outputName()) && + verifier.VerifyVectorOfStrings(outputName()) && + VerifyField(verifier, VT_PREFERFORWARDTYPE) && + VerifyField(verifier, VT_SOURCETYPE) && + VerifyOffset(verifier, VT_TENSORNAME) && + verifier.VerifyVector(tensorName()) && + verifier.VerifyVectorOfStrings(tensorName()) && + VerifyField(verifier, VT_TENSORNUMBER) && + VerifyField(verifier, VT_USAGE) && + verifier.EndTable(); + } + NetT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NetT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NetT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NetBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_bizCode(flatbuffers::Offset bizCode) { + fbb_.AddOffset(Net::VT_BIZCODE, bizCode); + } + void add_extraTensorDescribe(flatbuffers::Offset>> extraTensorDescribe) { + fbb_.AddOffset(Net::VT_EXTRATENSORDESCRIBE, extraTensorDescribe); + } + void add_gpulibrary(flatbuffers::Offset gpulibrary) { + fbb_.AddOffset(Net::VT_GPULIBRARY, gpulibrary); + } + void add_oplists(flatbuffers::Offset>> oplists) { + 
fbb_.AddOffset(Net::VT_OPLISTS, oplists); + } + void add_outputName(flatbuffers::Offset>> outputName) { + fbb_.AddOffset(Net::VT_OUTPUTNAME, outputName); + } + void add_preferForwardType(ForwardType preferForwardType) { + fbb_.AddElement(Net::VT_PREFERFORWARDTYPE, static_cast(preferForwardType), 0); + } + void add_sourceType(NetSource sourceType) { + fbb_.AddElement(Net::VT_SOURCETYPE, static_cast(sourceType), 0); + } + void add_tensorName(flatbuffers::Offset>> tensorName) { + fbb_.AddOffset(Net::VT_TENSORNAME, tensorName); + } + void add_tensorNumber(int32_t tensorNumber) { + fbb_.AddElement(Net::VT_TENSORNUMBER, tensorNumber, 0); + } + void add_usage(Usage usage) { + fbb_.AddElement(Net::VT_USAGE, static_cast(usage), 0); + } + explicit NetBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + NetBuilder &operator=(const NetBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNet( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset bizCode = 0, + flatbuffers::Offset>> extraTensorDescribe = 0, + flatbuffers::Offset gpulibrary = 0, + flatbuffers::Offset>> oplists = 0, + flatbuffers::Offset>> outputName = 0, + ForwardType preferForwardType = ForwardType_CPU, + NetSource sourceType = NetSource_CAFFE, + flatbuffers::Offset>> tensorName = 0, + int32_t tensorNumber = 0, + Usage usage = Usage_INFERENCE) { + NetBuilder builder_(_fbb); + builder_.add_tensorNumber(tensorNumber); + builder_.add_tensorName(tensorName); + builder_.add_outputName(outputName); + builder_.add_oplists(oplists); + builder_.add_gpulibrary(gpulibrary); + builder_.add_extraTensorDescribe(extraTensorDescribe); + builder_.add_bizCode(bizCode); + builder_.add_usage(usage); + builder_.add_sourceType(sourceType); + builder_.add_preferForwardType(preferForwardType); + return builder_.Finish(); +} + +inline flatbuffers::Offset 
CreateNetDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *bizCode = nullptr, + const std::vector> *extraTensorDescribe = nullptr, + flatbuffers::Offset gpulibrary = 0, + const std::vector> *oplists = nullptr, + const std::vector> *outputName = nullptr, + ForwardType preferForwardType = ForwardType_CPU, + NetSource sourceType = NetSource_CAFFE, + const std::vector> *tensorName = nullptr, + int32_t tensorNumber = 0, + Usage usage = Usage_INFERENCE) { + auto bizCode__ = bizCode ? _fbb.CreateString(bizCode) : 0; + auto extraTensorDescribe__ = extraTensorDescribe ? _fbb.CreateVector>(*extraTensorDescribe) : 0; + auto oplists__ = oplists ? _fbb.CreateVector>(*oplists) : 0; + auto outputName__ = outputName ? _fbb.CreateVector>(*outputName) : 0; + auto tensorName__ = tensorName ? _fbb.CreateVector>(*tensorName) : 0; + return MNN::CreateNet( + _fbb, + bizCode__, + extraTensorDescribe__, + gpulibrary, + oplists__, + outputName__, + preferForwardType, + sourceType, + tensorName__, + tensorNumber, + usage); +} + +flatbuffers::Offset CreateNet(flatbuffers::FlatBufferBuilder &_fbb, const NetT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline PluginT *Plugin::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PluginT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Plugin::UnPackTo(PluginT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); if (_e) _o->type = _e->str(); }; + { auto _e = buffer(); if (_e) { _o->buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffer[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; +} + +inline flatbuffers::Offset Plugin::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PluginT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePlugin(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset 
CreatePlugin(flatbuffers::FlatBufferBuilder &_fbb, const PluginT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PluginT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type.empty() ? 0 : _fbb.CreateString(_o->type); + auto _buffer = _o->buffer.size() ? _fbb.CreateVector> (_o->buffer.size(), [](size_t i, _VectorArgs *__va) { return CreateBlob(*__va->__fbb, __va->__o->buffer[i].get(), __va->__rehasher); }, &_va ) : 0; + return MNN::CreatePlugin( + _fbb, + _type, + _buffer); +} + +inline ExtraT *Extra::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ExtraT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Extra::UnPackTo(ExtraT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); if (_e) _o->type = _e->str(); }; + { auto _e = engine(); if (_e) _o->engine = _e->str(); }; + { auto _e = info(); if (_e) { _o->info.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->info[_i] = _e->Get(_i); } } }; + { auto _e = attr(); if (_e) { _o->attr.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->attr[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; +} + +inline flatbuffers::Offset Extra::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExtraT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateExtra(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateExtra(flatbuffers::FlatBufferBuilder &_fbb, const ExtraT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExtraT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = 
_o->type.empty() ? 0 : _fbb.CreateString(_o->type); + auto _engine = _o->engine.empty() ? 0 : _fbb.CreateString(_o->engine); + auto _info = _o->info.size() ? _fbb.CreateVector(_o->info) : 0; + auto _attr = _o->attr.size() ? _fbb.CreateVector> (_o->attr.size(), [](size_t i, _VectorArgs *__va) { return CreateAttribute(*__va->__fbb, __va->__o->attr[i].get(), __va->__rehasher); }, &_va ) : 0; + return MNN::CreateExtra( + _fbb, + _type, + _engine, + _info, + _attr); +} + +inline OpT *Op::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new OpT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Op::UnPackTo(OpT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = inputIndexes(); if (_e) { _o->inputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputIndexes[_i] = _e->Get(_i); } } }; + { auto _e = main_type(); _o->main.type = _e; }; + { auto _e = main(); if (_e) _o->main.value = OpParameterUnion::UnPack(_e, main_type(), _resolver); }; + { auto _e = name(); if (_e) _o->name = _e->str(); }; + { auto _e = outputIndexes(); if (_e) { _o->outputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputIndexes[_i] = _e->Get(_i); } } }; + { auto _e = type(); _o->type = _e; }; + { auto _e = defaultDimentionFormat(); _o->defaultDimentionFormat = _e; }; +} + +inline flatbuffers::Offset Op::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OpT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateOp(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateOp(flatbuffers::FlatBufferBuilder &_fbb, const OpT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OpT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _inputIndexes 
= _o->inputIndexes.size() ? _fbb.CreateVector(_o->inputIndexes) : 0; + auto _main_type = _o->main.type; + auto _main = _o->main.Pack(_fbb); + auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + auto _outputIndexes = _o->outputIndexes.size() ? _fbb.CreateVector(_o->outputIndexes) : 0; + auto _type = _o->type; + auto _defaultDimentionFormat = _o->defaultDimentionFormat; + return MNN::CreateOp( + _fbb, + _inputIndexes, + _main_type, + _main, + _name, + _outputIndexes, + _type, + _defaultDimentionFormat); +} + +inline TensorDescribeT *TensorDescribe::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TensorDescribeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void TensorDescribe::UnPackTo(TensorDescribeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = blob(); if (_e) _o->blob = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = index(); _o->index = _e; }; + { auto _e = name(); if (_e) _o->name = _e->str(); }; +} + +inline flatbuffers::Offset TensorDescribe::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorDescribeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTensorDescribe(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTensorDescribe(flatbuffers::FlatBufferBuilder &_fbb, const TensorDescribeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorDescribeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _blob = _o->blob ? CreateBlob(_fbb, _o->blob.get(), _rehasher) : 0; + auto _index = _o->index; + auto _name = _o->name.empty() ? 
0 : _fbb.CreateString(_o->name); + return MNN::CreateTensorDescribe( + _fbb, + _blob, + _index, + _name); +} + +inline NetT *Net::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new NetT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Net::UnPackTo(NetT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = bizCode(); if (_e) _o->bizCode = _e->str(); }; + { auto _e = extraTensorDescribe(); if (_e) { _o->extraTensorDescribe.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->extraTensorDescribe[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = gpulibrary(); if (_e) _o->gpulibrary = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = oplists(); if (_e) { _o->oplists.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->oplists[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = outputName(); if (_e) { _o->outputName.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputName[_i] = _e->Get(_i)->str(); } } }; + { auto _e = preferForwardType(); _o->preferForwardType = _e; }; + { auto _e = sourceType(); _o->sourceType = _e; }; + { auto _e = tensorName(); if (_e) { _o->tensorName.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensorName[_i] = _e->Get(_i)->str(); } } }; + { auto _e = tensorNumber(); _o->tensorNumber = _e; }; + { auto _e = usage(); _o->usage = _e; }; +} + +inline flatbuffers::Offset Net::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NetT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNet(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNet(flatbuffers::FlatBufferBuilder &_fbb, const NetT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder 
*__fbb; const NetT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _bizCode = _o->bizCode.empty() ? 0 : _fbb.CreateString(_o->bizCode); + auto _extraTensorDescribe = _o->extraTensorDescribe.size() ? _fbb.CreateVector> (_o->extraTensorDescribe.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorDescribe(*__va->__fbb, __va->__o->extraTensorDescribe[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _gpulibrary = _o->gpulibrary ? CreateGpuLibrary(_fbb, _o->gpulibrary.get(), _rehasher) : 0; + auto _oplists = _o->oplists.size() ? _fbb.CreateVector> (_o->oplists.size(), [](size_t i, _VectorArgs *__va) { return CreateOp(*__va->__fbb, __va->__o->oplists[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _outputName = _o->outputName.size() ? _fbb.CreateVectorOfStrings(_o->outputName) : 0; + auto _preferForwardType = _o->preferForwardType; + auto _sourceType = _o->sourceType; + auto _tensorName = _o->tensorName.size() ? _fbb.CreateVectorOfStrings(_o->tensorName) : 0; + auto _tensorNumber = _o->tensorNumber; + auto _usage = _o->usage; + return MNN::CreateNet( + _fbb, + _bizCode, + _extraTensorDescribe, + _gpulibrary, + _oplists, + _outputName, + _preferForwardType, + _sourceType, + _tensorName, + _tensorNumber, + _usage); +} + +inline bool VerifyOpParameter(flatbuffers::Verifier &verifier, const void *obj, OpParameter type) { + switch (type) { + case OpParameter_NONE: { + return true; + } + case OpParameter_QuantizedAdd: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_ArgMax: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_AsString: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Axis: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_BatchNorm: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case OpParameter_BinaryOp: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Blob: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_CastParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Convolution2D: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Crop: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_CropAndResize: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Dequantize: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_DetectionOutput: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Eltwise: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_ExpandDims: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Fill: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Flatten: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Gather: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_GatherV2: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_InnerProduct: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Input: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Interp: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_LRN: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_LSTM: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_MatMul: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_NonMaxSuppressionV2: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Normalize: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_PackParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Permute: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Plugin: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Pool: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_PRelu: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_PriorBox: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Proposal: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedAvgPool: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedBiasAdd: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedConcat: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedLogistic: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedMatMul: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedMaxPool: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedRelu: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedRelu6: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedReshape: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedSoftmax: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizeMaxMin: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizeV2: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Range: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Rank: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_ReduceJoin: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_ReductionParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Relu: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Relu6: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_RequantizationRange: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Requantize: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Reshape: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Resize: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_RoiPooling: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Scale: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Selu: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Size: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case OpParameter_Slice: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_SliceTf: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_SpaceBatch: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_SqueezeParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_StridedSliceParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_TensorConvertInfo: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_TfQuantizedConv2D: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_TopKV2: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Transpose: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_UnaryOp: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_MomentsParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_RNNParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_BatchMatMulParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_QuantizedFloatParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_DepthSpaceParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_EltwiseInt8: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_ReverseSequenceParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Extra: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case OpParameter_Pool3D: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_Convolution3D: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_ELU: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_DetectionPostProcessParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_OneHotParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case OpParameter_PadParam: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return false; + } +} + +inline bool VerifyOpParameterVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyOpParameter( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline void *OpParameterUnion::UnPack(const void *obj, OpParameter type, const flatbuffers::resolver_function_t *resolver) { + switch (type) { + case OpParameter_QuantizedAdd: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_ArgMax: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_AsString: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Axis: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_BatchNorm: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_BinaryOp: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Blob: { + auto ptr = reinterpret_cast(obj); + return 
ptr->UnPack(resolver); + } + case OpParameter_CastParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Convolution2D: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Crop: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_CropAndResize: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Dequantize: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_DetectionOutput: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Eltwise: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_ExpandDims: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Fill: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Flatten: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Gather: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_GatherV2: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_InnerProduct: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Input: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Interp: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_LRN: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_LSTM: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_MatMul: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_NonMaxSuppressionV2: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case 
OpParameter_Normalize: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_PackParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Permute: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Plugin: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Pool: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_PRelu: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_PriorBox: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Proposal: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedAvgPool: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedBiasAdd: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedConcat: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedLogistic: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedMatMul: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedMaxPool: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedRelu: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedRelu6: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedReshape: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedSoftmax: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizeMaxMin: { + auto ptr = reinterpret_cast(obj); + return 
ptr->UnPack(resolver); + } + case OpParameter_QuantizeV2: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Range: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Rank: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_ReduceJoin: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_ReductionParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Relu: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Relu6: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_RequantizationRange: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Requantize: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Reshape: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Resize: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_RoiPooling: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Scale: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Selu: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Size: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Slice: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_SliceTf: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_SpaceBatch: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_SqueezeParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case 
OpParameter_StridedSliceParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_TensorConvertInfo: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_TfQuantizedConv2D: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_TopKV2: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Transpose: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_UnaryOp: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_MomentsParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_RNNParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_BatchMatMulParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_QuantizedFloatParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_DepthSpaceParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_EltwiseInt8: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_ReverseSequenceParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Extra: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Pool3D: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_Convolution3D: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_ELU: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_DetectionPostProcessParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case OpParameter_OneHotParam: { + auto ptr = reinterpret_cast(obj); 
+ return ptr->UnPack(resolver); + } + case OpParameter_PadParam: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; + } +} + +inline flatbuffers::Offset OpParameterUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { + switch (type) { + case OpParameter_QuantizedAdd: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedAdd(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_ArgMax: { + auto ptr = reinterpret_cast(value); + return CreateArgMax(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_AsString: { + auto ptr = reinterpret_cast(value); + return CreateAsString(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Axis: { + auto ptr = reinterpret_cast(value); + return CreateAxis(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_BatchNorm: { + auto ptr = reinterpret_cast(value); + return CreateBatchNorm(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_BinaryOp: { + auto ptr = reinterpret_cast(value); + return CreateBinaryOp(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Blob: { + auto ptr = reinterpret_cast(value); + return CreateBlob(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_CastParam: { + auto ptr = reinterpret_cast(value); + return CreateCastParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Convolution2D: { + auto ptr = reinterpret_cast(value); + return CreateConvolution2D(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Crop: { + auto ptr = reinterpret_cast(value); + return CreateCrop(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_CropAndResize: { + auto ptr = reinterpret_cast(value); + return CreateCropAndResize(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Dequantize: { + auto ptr = reinterpret_cast(value); + return CreateDequantize(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_DetectionOutput: { + auto ptr = reinterpret_cast(value); + return CreateDetectionOutput(_fbb, 
ptr, _rehasher).Union(); + } + case OpParameter_Eltwise: { + auto ptr = reinterpret_cast(value); + return CreateEltwise(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_ExpandDims: { + auto ptr = reinterpret_cast(value); + return CreateExpandDims(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Fill: { + auto ptr = reinterpret_cast(value); + return CreateFill(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Flatten: { + auto ptr = reinterpret_cast(value); + return CreateFlatten(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Gather: { + auto ptr = reinterpret_cast(value); + return CreateGather(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_GatherV2: { + auto ptr = reinterpret_cast(value); + return CreateGatherV2(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_InnerProduct: { + auto ptr = reinterpret_cast(value); + return CreateInnerProduct(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Input: { + auto ptr = reinterpret_cast(value); + return CreateInput(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Interp: { + auto ptr = reinterpret_cast(value); + return CreateInterp(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_LRN: { + auto ptr = reinterpret_cast(value); + return CreateLRN(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_LSTM: { + auto ptr = reinterpret_cast(value); + return CreateLSTM(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_MatMul: { + auto ptr = reinterpret_cast(value); + return CreateMatMul(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_NonMaxSuppressionV2: { + auto ptr = reinterpret_cast(value); + return CreateNonMaxSuppressionV2(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Normalize: { + auto ptr = reinterpret_cast(value); + return CreateNormalize(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_PackParam: { + auto ptr = reinterpret_cast(value); + return CreatePackParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Permute: { + auto ptr = 
reinterpret_cast(value); + return CreatePermute(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Plugin: { + auto ptr = reinterpret_cast(value); + return CreatePlugin(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Pool: { + auto ptr = reinterpret_cast(value); + return CreatePool(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_PRelu: { + auto ptr = reinterpret_cast(value); + return CreatePRelu(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_PriorBox: { + auto ptr = reinterpret_cast(value); + return CreatePriorBox(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Proposal: { + auto ptr = reinterpret_cast(value); + return CreateProposal(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedAvgPool: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedAvgPool(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedBiasAdd: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedBiasAdd(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedConcat: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedConcat(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedLogistic: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedLogistic(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedMatMul: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedMatMul(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedMaxPool: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedMaxPool(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedRelu: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedRelu(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedRelu6: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedRelu6(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedReshape: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedReshape(_fbb, ptr, _rehasher).Union(); + } + case 
OpParameter_QuantizedSoftmax: { + auto ptr = reinterpret_cast(value); + return CreateQuantizedSoftmax(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizeMaxMin: { + auto ptr = reinterpret_cast(value); + return CreateQuantizeMaxMin(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizeV2: { + auto ptr = reinterpret_cast(value); + return CreateQuantizeV2(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Range: { + auto ptr = reinterpret_cast(value); + return CreateRange(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Rank: { + auto ptr = reinterpret_cast(value); + return CreateRank(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_ReduceJoin: { + auto ptr = reinterpret_cast(value); + return CreateReduceJoin(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_ReductionParam: { + auto ptr = reinterpret_cast(value); + return CreateReductionParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Relu: { + auto ptr = reinterpret_cast(value); + return CreateRelu(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Relu6: { + auto ptr = reinterpret_cast(value); + return CreateRelu6(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_RequantizationRange: { + auto ptr = reinterpret_cast(value); + return CreateRequantizationRange(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Requantize: { + auto ptr = reinterpret_cast(value); + return CreateRequantize(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Reshape: { + auto ptr = reinterpret_cast(value); + return CreateReshape(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Resize: { + auto ptr = reinterpret_cast(value); + return CreateResize(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_RoiPooling: { + auto ptr = reinterpret_cast(value); + return CreateRoiPooling(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Scale: { + auto ptr = reinterpret_cast(value); + return CreateScale(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Selu: { + auto ptr = 
reinterpret_cast(value); + return CreateSelu(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Size: { + auto ptr = reinterpret_cast(value); + return CreateSize(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Slice: { + auto ptr = reinterpret_cast(value); + return CreateSlice(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_SliceTf: { + auto ptr = reinterpret_cast(value); + return CreateSliceTf(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_SpaceBatch: { + auto ptr = reinterpret_cast(value); + return CreateSpaceBatch(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_SqueezeParam: { + auto ptr = reinterpret_cast(value); + return CreateSqueezeParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_StridedSliceParam: { + auto ptr = reinterpret_cast(value); + return CreateStridedSliceParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_TensorConvertInfo: { + auto ptr = reinterpret_cast(value); + return CreateTensorConvertInfo(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_TfQuantizedConv2D: { + auto ptr = reinterpret_cast(value); + return CreateTfQuantizedConv2D(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_TopKV2: { + auto ptr = reinterpret_cast(value); + return CreateTopKV2(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Transpose: { + auto ptr = reinterpret_cast(value); + return CreateTranspose(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_UnaryOp: { + auto ptr = reinterpret_cast(value); + return CreateUnaryOp(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_MomentsParam: { + auto ptr = reinterpret_cast(value); + return CreateMomentsParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_RNNParam: { + auto ptr = reinterpret_cast(value); + return CreateRNNParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_BatchMatMulParam: { + auto ptr = reinterpret_cast(value); + return CreateBatchMatMulParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_QuantizedFloatParam: { + auto ptr = 
reinterpret_cast(value); + return CreateQuantizedFloatParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_DepthSpaceParam: { + auto ptr = reinterpret_cast(value); + return CreateDepthSpaceParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_EltwiseInt8: { + auto ptr = reinterpret_cast(value); + return CreateEltwiseInt8(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_ReverseSequenceParam: { + auto ptr = reinterpret_cast(value); + return CreateReverseSequenceParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Extra: { + auto ptr = reinterpret_cast(value); + return CreateExtra(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Pool3D: { + auto ptr = reinterpret_cast(value); + return CreatePool3D(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_Convolution3D: { + auto ptr = reinterpret_cast(value); + return CreateConvolution3D(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_ELU: { + auto ptr = reinterpret_cast(value); + return CreateELU(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_DetectionPostProcessParam: { + auto ptr = reinterpret_cast(value); + return CreateDetectionPostProcessParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_OneHotParam: { + auto ptr = reinterpret_cast(value); + return CreateOneHotParam(_fbb, ptr, _rehasher).Union(); + } + case OpParameter_PadParam: { + auto ptr = reinterpret_cast(value); + return CreatePadParam(_fbb, ptr, _rehasher).Union(); + } + default: return 0; + } +} + +inline OpParameterUnion::OpParameterUnion(const OpParameterUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { + switch (type) { + case OpParameter_QuantizedAdd: { + FLATBUFFERS_ASSERT(false); // QuantizedAddT not copyable. 
+ break; + } + case OpParameter_ArgMax: { + value = new ArgMaxT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_AsString: { + value = new AsStringT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Axis: { + value = new AxisT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_BatchNorm: { + value = new BatchNormT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_BinaryOp: { + value = new BinaryOpT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Blob: { + value = new BlobT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_CastParam: { + value = new CastParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Convolution2D: { + FLATBUFFERS_ASSERT(false); // Convolution2DT not copyable. + break; + } + case OpParameter_Crop: { + value = new CropT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_CropAndResize: { + value = new CropAndResizeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Dequantize: { + FLATBUFFERS_ASSERT(false); // DequantizeT not copyable. + break; + } + case OpParameter_DetectionOutput: { + value = new DetectionOutputT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Eltwise: { + value = new EltwiseT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_ExpandDims: { + value = new ExpandDimsT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Fill: { + value = new FillT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Flatten: { + value = new FlattenT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Gather: { + value = new GatherT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_GatherV2: { + value = new GatherV2T(*reinterpret_cast(u.value)); + break; + } + case OpParameter_InnerProduct: { + FLATBUFFERS_ASSERT(false); // InnerProductT not copyable. 
+ break; + } + case OpParameter_Input: { + value = new InputT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Interp: { + value = new InterpT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_LRN: { + value = new LRNT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_LSTM: { + FLATBUFFERS_ASSERT(false); // LSTMT not copyable. + break; + } + case OpParameter_MatMul: { + value = new MatMulT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_NonMaxSuppressionV2: { + value = new NonMaxSuppressionV2T(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Normalize: { + value = new NormalizeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_PackParam: { + value = new PackParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Permute: { + value = new PermuteT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Plugin: { + FLATBUFFERS_ASSERT(false); // PluginT not copyable. + break; + } + case OpParameter_Pool: { + value = new PoolT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_PRelu: { + value = new PReluT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_PriorBox: { + value = new PriorBoxT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Proposal: { + FLATBUFFERS_ASSERT(false); // ProposalT not copyable. + break; + } + case OpParameter_QuantizedAvgPool: { + value = new QuantizedAvgPoolT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizedBiasAdd: { + value = new QuantizedBiasAddT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizedConcat: { + FLATBUFFERS_ASSERT(false); // QuantizedConcatT not copyable. + break; + } + case OpParameter_QuantizedLogistic: { + FLATBUFFERS_ASSERT(false); // QuantizedLogisticT not copyable. 
+ break; + } + case OpParameter_QuantizedMatMul: { + value = new QuantizedMatMulT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizedMaxPool: { + value = new QuantizedMaxPoolT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizedRelu: { + value = new QuantizedReluT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizedRelu6: { + value = new QuantizedRelu6T(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizedReshape: { + value = new QuantizedReshapeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizedSoftmax: { + value = new QuantizedSoftmaxT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizeMaxMin: { + value = new QuantizeMaxMinT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizeV2: { + value = new QuantizeV2T(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Range: { + value = new RangeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Rank: { + value = new RankT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_ReduceJoin: { + value = new ReduceJoinT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_ReductionParam: { + value = new ReductionParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Relu: { + value = new ReluT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Relu6: { + value = new Relu6T(*reinterpret_cast(u.value)); + break; + } + case OpParameter_RequantizationRange: { + value = new RequantizationRangeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Requantize: { + value = new RequantizeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Reshape: { + value = new ReshapeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Resize: { + value = new ResizeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_RoiPooling: { + value = new RoiPoolingT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Scale: { + 
value = new ScaleT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Selu: { + value = new SeluT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Size: { + value = new SizeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Slice: { + value = new SliceT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_SliceTf: { + value = new SliceTfT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_SpaceBatch: { + FLATBUFFERS_ASSERT(false); // SpaceBatchT not copyable. + break; + } + case OpParameter_SqueezeParam: { + value = new SqueezeParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_StridedSliceParam: { + value = new StridedSliceParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_TensorConvertInfo: { + value = new TensorConvertInfoT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_TfQuantizedConv2D: { + FLATBUFFERS_ASSERT(false); // TfQuantizedConv2DT not copyable. + break; + } + case OpParameter_TopKV2: { + value = new TopKV2T(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Transpose: { + value = new TransposeT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_UnaryOp: { + value = new UnaryOpT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_MomentsParam: { + value = new MomentsParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_RNNParam: { + FLATBUFFERS_ASSERT(false); // RNNParamT not copyable. + break; + } + case OpParameter_BatchMatMulParam: { + value = new BatchMatMulParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_QuantizedFloatParam: { + value = new QuantizedFloatParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_DepthSpaceParam: { + value = new DepthSpaceParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_EltwiseInt8: { + FLATBUFFERS_ASSERT(false); // EltwiseInt8T not copyable. 
+ break; + } + case OpParameter_ReverseSequenceParam: { + value = new ReverseSequenceParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Extra: { + FLATBUFFERS_ASSERT(false); // ExtraT not copyable. + break; + } + case OpParameter_Pool3D: { + value = new Pool3DT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_Convolution3D: { + FLATBUFFERS_ASSERT(false); // Convolution3DT not copyable. + break; + } + case OpParameter_ELU: { + value = new ELUT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_DetectionPostProcessParam: { + value = new DetectionPostProcessParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_OneHotParam: { + value = new OneHotParamT(*reinterpret_cast(u.value)); + break; + } + case OpParameter_PadParam: { + value = new PadParamT(*reinterpret_cast(u.value)); + break; + } + default: + break; + } +} + +inline void OpParameterUnion::Reset() { + switch (type) { + case OpParameter_QuantizedAdd: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_ArgMax: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_AsString: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Axis: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_BatchNorm: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_BinaryOp: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Blob: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_CastParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Convolution2D: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Crop: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_CropAndResize: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + 
case OpParameter_Dequantize: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_DetectionOutput: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Eltwise: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_ExpandDims: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Fill: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Flatten: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Gather: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_GatherV2: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_InnerProduct: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Input: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Interp: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_LRN: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_LSTM: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_MatMul: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_NonMaxSuppressionV2: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Normalize: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_PackParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Permute: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Plugin: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Pool: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_PRelu: { + auto ptr = reinterpret_cast(value); + delete ptr; + 
break; + } + case OpParameter_PriorBox: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Proposal: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedAvgPool: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedBiasAdd: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedConcat: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedLogistic: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedMatMul: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedMaxPool: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedRelu: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedRelu6: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedReshape: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedSoftmax: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizeMaxMin: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizeV2: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Range: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Rank: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_ReduceJoin: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_ReductionParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Relu: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Relu6: { + auto ptr = reinterpret_cast(value); + delete ptr; + 
break; + } + case OpParameter_RequantizationRange: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Requantize: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Reshape: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Resize: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_RoiPooling: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Scale: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Selu: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Size: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Slice: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_SliceTf: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_SpaceBatch: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_SqueezeParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_StridedSliceParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_TensorConvertInfo: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_TfQuantizedConv2D: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_TopKV2: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Transpose: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_UnaryOp: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_MomentsParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_RNNParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_BatchMatMulParam: 
{ + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_QuantizedFloatParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_DepthSpaceParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_EltwiseInt8: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_ReverseSequenceParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Extra: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Pool3D: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_Convolution3D: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_ELU: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_DetectionPostProcessParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_OneHotParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case OpParameter_PadParam: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + default: break; + } + value = nullptr; + type = OpParameter_NONE; +} + +inline const flatbuffers::TypeTable *OpTypeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { 
flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { 
flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { 
flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + OpTypeTypeTable + }; + static const int64_t values[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 128, 129, 130, 131, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 512, 513, 514, 515, 516, 517, 518 }; + static const char * const names[] = { + "AbsVal", + "QuantizedAdd", + "ArgMax", + "AsString", + "InstanceNorm", + "BatchToSpaceND", + "Bias", + "BinaryOp", + "Bnll", + "Cast", + "Concat", + "Const", + "Convolution", + "ConvolutionDepthwise", + "Crop", + "CropAndResize", + "Cubic", + "Deconvolution", + "DeconvolutionDepthwise", + "Dequantize", + "DetectionOutput", + "Dropout", + "Eltwise", + "ELU", + "Embed", + "Exp", + "ExpandDims", + "Fill", + "Flatten", + "FloorMod", + "Gather", + "GatherV2", + "Im2Seq", + "InnerProduct", + "Input", + "Interp", + "Log", + "LRN", + "LSTM", + "MatMul", + "MVN", + "NonMaxSuppression", + "NonMaxSuppressionV2", + "Normalize", + "Pack", + "Padding", + "Permute", + "Pooling", + "Power", + "PReLU", + "PriorBox", + "Proposal", + "QuantizedAvgPool", + "QuantizedBiasAdd", + "QuantizedConcat", + "QuantizedDepthwiseConv2D", + "QuantizedLogistic", + "QuantizedMatMul", + "QuantizedMaxPool", + "QuantizedRelu", + "QuantizedRelu6", + "QuantizedReshape", + "QuantizedSoftmax", + "QuantizeMaxMin", + "QuantizeV2", + "Range", + "Rank", + 
"ReduceJoin", + "Reduction", + "ReLU", + "ReLU6", + "RequantizationRange", + "Requantize", + "Reshape", + "Resize", + "RNN", + "ROIPooling", + "Scale", + "Selu", + "Seq2Out", + "Shape", + "Sigmoid", + "Size", + "Slice", + "SliceTf", + "Softmax", + "SpaceToBatchND", + "SpatialProduct", + "Split", + "SPP", + "Squeeze", + "StridedSlice", + "StringJoin", + "StringSplit", + "StringToNumber", + "TanH", + "TfQuantizedConv2D", + "Threshold", + "Tile", + "TopKV2", + "Transpose", + "UnaryOp", + "Unpack", + "Where", + "Moments", + "RNNSequenceGRU", + "BatchMatMul", + "Unsqueeze", + "CosineSimilarity", + "DepthToSpace", + "SpaceToDepth", + "ReverseSequence", + "Pooling3D", + "Convolution3D", + "MatrixBandPart", + "GatherND", + "DetectionPostProcess", + "UnravelIndex", + "ScatterNd", + "OneHot", + "BroadcastTo", + "Dilation2D", + "MaxLayerCount", + "ConvertTensor", + "ArgMin", + "LinSpace", + "PLUGIN", + "Select", + "ZerosLike", + "Broastcast", + "SetDiff1D", + "ReluGrad", + "Relu6Grad", + "PoolGrad", + "SoftmaxGrad", + "Conv2DBackPropFilter", + "TrainableParam", + "BatchNorm", + "Extra", + "ConvInt8", + "Int8ToFloat", + "DepthwiseConvInt8", + "PoolInt8", + "FloatToInt8", + "EltwiseInt8" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 145, type_codes, type_refs, values, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *OpParameterTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 1 }, + { flatbuffers::ET_SEQUENCE, 0, 2 }, + { flatbuffers::ET_SEQUENCE, 0, 3 }, + { flatbuffers::ET_SEQUENCE, 0, 4 }, + { flatbuffers::ET_SEQUENCE, 0, 5 }, + { flatbuffers::ET_SEQUENCE, 0, 6 }, + { flatbuffers::ET_SEQUENCE, 0, 7 }, + { flatbuffers::ET_SEQUENCE, 0, 8 }, + { flatbuffers::ET_SEQUENCE, 0, 9 }, + { flatbuffers::ET_SEQUENCE, 0, 10 }, + { flatbuffers::ET_SEQUENCE, 0, 11 }, + { flatbuffers::ET_SEQUENCE, 0, 12 }, + { 
flatbuffers::ET_SEQUENCE, 0, 13 }, + { flatbuffers::ET_SEQUENCE, 0, 14 }, + { flatbuffers::ET_SEQUENCE, 0, 15 }, + { flatbuffers::ET_SEQUENCE, 0, 16 }, + { flatbuffers::ET_SEQUENCE, 0, 17 }, + { flatbuffers::ET_SEQUENCE, 0, 18 }, + { flatbuffers::ET_SEQUENCE, 0, 19 }, + { flatbuffers::ET_SEQUENCE, 0, 20 }, + { flatbuffers::ET_SEQUENCE, 0, 21 }, + { flatbuffers::ET_SEQUENCE, 0, 22 }, + { flatbuffers::ET_SEQUENCE, 0, 23 }, + { flatbuffers::ET_SEQUENCE, 0, 24 }, + { flatbuffers::ET_SEQUENCE, 0, 25 }, + { flatbuffers::ET_SEQUENCE, 0, 26 }, + { flatbuffers::ET_SEQUENCE, 0, 27 }, + { flatbuffers::ET_SEQUENCE, 0, 28 }, + { flatbuffers::ET_SEQUENCE, 0, 29 }, + { flatbuffers::ET_SEQUENCE, 0, 30 }, + { flatbuffers::ET_SEQUENCE, 0, 31 }, + { flatbuffers::ET_SEQUENCE, 0, 32 }, + { flatbuffers::ET_SEQUENCE, 0, 33 }, + { flatbuffers::ET_SEQUENCE, 0, 34 }, + { flatbuffers::ET_SEQUENCE, 0, 35 }, + { flatbuffers::ET_SEQUENCE, 0, 36 }, + { flatbuffers::ET_SEQUENCE, 0, 37 }, + { flatbuffers::ET_SEQUENCE, 0, 38 }, + { flatbuffers::ET_SEQUENCE, 0, 39 }, + { flatbuffers::ET_SEQUENCE, 0, 40 }, + { flatbuffers::ET_SEQUENCE, 0, 41 }, + { flatbuffers::ET_SEQUENCE, 0, 42 }, + { flatbuffers::ET_SEQUENCE, 0, 43 }, + { flatbuffers::ET_SEQUENCE, 0, 44 }, + { flatbuffers::ET_SEQUENCE, 0, 45 }, + { flatbuffers::ET_SEQUENCE, 0, 46 }, + { flatbuffers::ET_SEQUENCE, 0, 47 }, + { flatbuffers::ET_SEQUENCE, 0, 48 }, + { flatbuffers::ET_SEQUENCE, 0, 49 }, + { flatbuffers::ET_SEQUENCE, 0, 50 }, + { flatbuffers::ET_SEQUENCE, 0, 51 }, + { flatbuffers::ET_SEQUENCE, 0, 52 }, + { flatbuffers::ET_SEQUENCE, 0, 53 }, + { flatbuffers::ET_SEQUENCE, 0, 54 }, + { flatbuffers::ET_SEQUENCE, 0, 55 }, + { flatbuffers::ET_SEQUENCE, 0, 56 }, + { flatbuffers::ET_SEQUENCE, 0, 57 }, + { flatbuffers::ET_SEQUENCE, 0, 58 }, + { flatbuffers::ET_SEQUENCE, 0, 59 }, + { flatbuffers::ET_SEQUENCE, 0, 60 }, + { flatbuffers::ET_SEQUENCE, 0, 61 }, + { flatbuffers::ET_SEQUENCE, 0, 62 }, + { flatbuffers::ET_SEQUENCE, 0, 63 }, + { 
flatbuffers::ET_SEQUENCE, 0, 64 }, + { flatbuffers::ET_SEQUENCE, 0, 65 }, + { flatbuffers::ET_SEQUENCE, 0, 66 }, + { flatbuffers::ET_SEQUENCE, 0, 67 }, + { flatbuffers::ET_SEQUENCE, 0, 68 }, + { flatbuffers::ET_SEQUENCE, 0, 69 }, + { flatbuffers::ET_SEQUENCE, 0, 70 }, + { flatbuffers::ET_SEQUENCE, 0, 71 }, + { flatbuffers::ET_SEQUENCE, 0, 72 }, + { flatbuffers::ET_SEQUENCE, 0, 73 }, + { flatbuffers::ET_SEQUENCE, 0, 74 }, + { flatbuffers::ET_SEQUENCE, 0, 75 }, + { flatbuffers::ET_SEQUENCE, 0, 76 }, + { flatbuffers::ET_SEQUENCE, 0, 77 }, + { flatbuffers::ET_SEQUENCE, 0, 78 }, + { flatbuffers::ET_SEQUENCE, 0, 79 }, + { flatbuffers::ET_SEQUENCE, 0, 80 }, + { flatbuffers::ET_SEQUENCE, 0, 81 }, + { flatbuffers::ET_SEQUENCE, 0, 82 }, + { flatbuffers::ET_SEQUENCE, 0, 83 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + QuantizedAddTypeTable, + ArgMaxTypeTable, + AsStringTypeTable, + AxisTypeTable, + BatchNormTypeTable, + BinaryOpTypeTable, + BlobTypeTable, + CastParamTypeTable, + Convolution2DTypeTable, + CropTypeTable, + CropAndResizeTypeTable, + DequantizeTypeTable, + DetectionOutputTypeTable, + EltwiseTypeTable, + ExpandDimsTypeTable, + FillTypeTable, + FlattenTypeTable, + GatherTypeTable, + GatherV2TypeTable, + InnerProductTypeTable, + InputTypeTable, + InterpTypeTable, + LRNTypeTable, + LSTMTypeTable, + MatMulTypeTable, + NonMaxSuppressionV2TypeTable, + NormalizeTypeTable, + PackParamTypeTable, + PermuteTypeTable, + PluginTypeTable, + PoolTypeTable, + PReluTypeTable, + PriorBoxTypeTable, + ProposalTypeTable, + QuantizedAvgPoolTypeTable, + QuantizedBiasAddTypeTable, + QuantizedConcatTypeTable, + QuantizedLogisticTypeTable, + QuantizedMatMulTypeTable, + QuantizedMaxPoolTypeTable, + QuantizedReluTypeTable, + QuantizedRelu6TypeTable, + QuantizedReshapeTypeTable, + QuantizedSoftmaxTypeTable, + QuantizeMaxMinTypeTable, + QuantizeV2TypeTable, + RangeTypeTable, + RankTypeTable, + ReduceJoinTypeTable, + ReductionParamTypeTable, + ReluTypeTable, + 
Relu6TypeTable, + RequantizationRangeTypeTable, + RequantizeTypeTable, + ReshapeTypeTable, + ResizeTypeTable, + RoiPoolingTypeTable, + ScaleTypeTable, + SeluTypeTable, + SizeTypeTable, + SliceTypeTable, + SliceTfTypeTable, + SpaceBatchTypeTable, + SqueezeParamTypeTable, + StridedSliceParamTypeTable, + TensorConvertInfoTypeTable, + TfQuantizedConv2DTypeTable, + TopKV2TypeTable, + TransposeTypeTable, + UnaryOpTypeTable, + MomentsParamTypeTable, + RNNParamTypeTable, + BatchMatMulParamTypeTable, + QuantizedFloatParamTypeTable, + DepthSpaceParamTypeTable, + EltwiseInt8TypeTable, + ReverseSequenceParamTypeTable, + ExtraTypeTable, + Pool3DTypeTable, + Convolution3DTypeTable, + ELUTypeTable, + DetectionPostProcessParamTypeTable, + OneHotParamTypeTable, + PadParamTypeTable + }; + static const char * const names[] = { + "NONE", + "QuantizedAdd", + "ArgMax", + "AsString", + "Axis", + "BatchNorm", + "BinaryOp", + "Blob", + "CastParam", + "Convolution2D", + "Crop", + "CropAndResize", + "Dequantize", + "DetectionOutput", + "Eltwise", + "ExpandDims", + "Fill", + "Flatten", + "Gather", + "GatherV2", + "InnerProduct", + "Input", + "Interp", + "LRN", + "LSTM", + "MatMul", + "NonMaxSuppressionV2", + "Normalize", + "PackParam", + "Permute", + "Plugin", + "Pool", + "PRelu", + "PriorBox", + "Proposal", + "QuantizedAvgPool", + "QuantizedBiasAdd", + "QuantizedConcat", + "QuantizedLogistic", + "QuantizedMatMul", + "QuantizedMaxPool", + "QuantizedRelu", + "QuantizedRelu6", + "QuantizedReshape", + "QuantizedSoftmax", + "QuantizeMaxMin", + "QuantizeV2", + "Range", + "Rank", + "ReduceJoin", + "ReductionParam", + "Relu", + "Relu6", + "RequantizationRange", + "Requantize", + "Reshape", + "Resize", + "RoiPooling", + "Scale", + "Selu", + "Size", + "Slice", + "SliceTf", + "SpaceBatch", + "SqueezeParam", + "StridedSliceParam", + "TensorConvertInfo", + "TfQuantizedConv2D", + "TopKV2", + "Transpose", + "UnaryOp", + "MomentsParam", + "RNNParam", + "BatchMatMulParam", + "QuantizedFloatParam", + 
"DepthSpaceParam", + "EltwiseInt8", + "ReverseSequenceParam", + "Extra", + "Pool3D", + "Convolution3D", + "ELU", + "DetectionPostProcessParam", + "OneHotParam", + "PadParam" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_UNION, 85, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ForwardTypeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ForwardTypeTypeTable + }; + static const char * const names[] = { + "CPU", + "METAL", + "OPENCL", + "OPENGLES", + "VULKAN" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *UsageTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + UsageTypeTable + }; + static const char * const names[] = { + "INFERENCE", + "TRAIN" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PluginTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 1, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + BlobTypeTable + }; + static const char * const names[] = { + "type", + "buffer" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ExtraTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { 
flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_CHAR, 1, -1 }, + { flatbuffers::ET_SEQUENCE, 1, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + AttributeTypeTable + }; + static const char * const names[] = { + "type", + "engine", + "info", + "attr" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *OpTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_UTYPE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 0, 1 }, + { flatbuffers::ET_CHAR, 0, 2 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + OpParameterTypeTable, + OpTypeTypeTable, + MNN_DATA_FORMATTypeTable + }; + static const char * const names[] = { + "inputIndexes", + "main_type", + "main", + "name", + "outputIndexes", + "type", + "defaultDimentionFormat" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 7, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *TensorDescribeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_STRING, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + BlobTypeTable + }; + static const char * const names[] = { + "blob", + "index", + "name" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *NetTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 1, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 1 }, + { 
flatbuffers::ET_SEQUENCE, 1, 2 }, + { flatbuffers::ET_STRING, 1, -1 }, + { flatbuffers::ET_CHAR, 0, 3 }, + { flatbuffers::ET_CHAR, 0, 4 }, + { flatbuffers::ET_STRING, 1, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 5 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + TensorDescribeTypeTable, + GpuLibraryTypeTable, + OpTypeTable, + ForwardTypeTypeTable, + NetSourceTypeTable, + UsageTypeTable + }; + static const char * const names[] = { + "bizCode", + "extraTensorDescribe", + "gpulibrary", + "oplists", + "outputName", + "preferForwardType", + "sourceType", + "tensorName", + "tensorNumber", + "usage" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 10, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const MNN::Net *GetNet(const void *buf) { + return flatbuffers::GetRoot(buf); +} + +inline const MNN::Net *GetSizePrefixedNet(const void *buf) { + return flatbuffers::GetSizePrefixedRoot(buf); +} + +inline bool VerifyNetBuffer( + flatbuffers::Verifier &verifier) { + return verifier.VerifyBuffer(nullptr); +} + +inline bool VerifySizePrefixedNetBuffer( + flatbuffers::Verifier &verifier) { + return verifier.VerifySizePrefixedBuffer(nullptr); +} + +inline void FinishNetBuffer( + flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset root) { + fbb.Finish(root); +} + +inline void FinishSizePrefixedNetBuffer( + flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset root) { + fbb.FinishSizePrefixed(root); +} + +inline std::unique_ptr UnPackNet( + const void *buf, + const flatbuffers::resolver_function_t *res = nullptr) { + return std::unique_ptr(GetNet(buf)->UnPack(res)); +} + +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_MNN_MNN_H_ diff --git a/schema/current/TFQuantizeOp_generated.h b/schema/current/TFQuantizeOp_generated.h new file mode 100644 index 000000000..aede81f16 --- /dev/null +++ b/schema/current/TFQuantizeOp_generated.h @@ -0,0 +1,2979 @@ +// automatically generated 
by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_ +#define FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "CaffeOp_generated.h" +#include "Tensor_generated.h" +#include "Type_generated.h" + +namespace MNN { + +struct QuantizedParam; +struct QuantizedParamT; + +struct QuantizedAdd; +struct QuantizedAddT; + +struct Dequantize; +struct DequantizeT; + +struct QuantizedAvgPool; +struct QuantizedAvgPoolT; + +struct QuantizedBiasAdd; +struct QuantizedBiasAddT; + +struct QuantizedConcat; +struct QuantizedConcatT; + +struct QuantizedLogistic; +struct QuantizedLogisticT; + +struct QuantizedMatMul; +struct QuantizedMatMulT; + +struct QuantizedMaxPool; +struct QuantizedMaxPoolT; + +struct QuantizedRelu; +struct QuantizedReluT; + +struct QuantizedRelu6; +struct QuantizedRelu6T; + +struct QuantizedReshape; +struct QuantizedReshapeT; + +struct QuantizedSoftmax; +struct QuantizedSoftmaxT; + +struct QuantizeV2; +struct QuantizeV2T; + +struct RequantizationRange; +struct RequantizationRangeT; + +struct Requantize; +struct RequantizeT; + +struct TfQuantizedConv2D; +struct TfQuantizedConv2DT; + +inline const flatbuffers::TypeTable *QuantizedParamTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedAddTypeTable(); + +inline const flatbuffers::TypeTable *DequantizeTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedAvgPoolTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedBiasAddTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedConcatTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedLogisticTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedMatMulTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedMaxPoolTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedReluTypeTable(); + +inline const flatbuffers::TypeTable *QuantizedRelu6TypeTable(); + +inline const flatbuffers::TypeTable *QuantizedReshapeTypeTable(); + 
+inline const flatbuffers::TypeTable *QuantizedSoftmaxTypeTable(); + +inline const flatbuffers::TypeTable *QuantizeV2TypeTable(); + +inline const flatbuffers::TypeTable *RequantizationRangeTypeTable(); + +inline const flatbuffers::TypeTable *RequantizeTypeTable(); + +inline const flatbuffers::TypeTable *TfQuantizedConv2DTypeTable(); + +enum FusedActivation { + FusedActivation_kTfLiteActNone = 0, + FusedActivation_kTfLiteActRelu = 1, + FusedActivation_kTfLiteActRelu1 = 2, + FusedActivation_kTfLiteActRelu6 = 3, + FusedActivation_kTfLiteActTanh = 4, + FusedActivation_kTfLiteActSignBit = 5, + FusedActivation_kTfLiteActSigmoid = 6, + FusedActivation_MIN = FusedActivation_kTfLiteActNone, + FusedActivation_MAX = FusedActivation_kTfLiteActSigmoid +}; + +inline const FusedActivation (&EnumValuesFusedActivation())[7] { + static const FusedActivation values[] = { + FusedActivation_kTfLiteActNone, + FusedActivation_kTfLiteActRelu, + FusedActivation_kTfLiteActRelu1, + FusedActivation_kTfLiteActRelu6, + FusedActivation_kTfLiteActTanh, + FusedActivation_kTfLiteActSignBit, + FusedActivation_kTfLiteActSigmoid + }; + return values; +} + +inline const char * const *EnumNamesFusedActivation() { + static const char * const names[] = { + "kTfLiteActNone", + "kTfLiteActRelu", + "kTfLiteActRelu1", + "kTfLiteActRelu6", + "kTfLiteActTanh", + "kTfLiteActSignBit", + "kTfLiteActSigmoid", + nullptr + }; + return names; +} + +inline const char *EnumNameFusedActivation(FusedActivation e) { + if (e < FusedActivation_kTfLiteActNone || e > FusedActivation_kTfLiteActSigmoid) return ""; + const size_t index = static_cast(e); + return EnumNamesFusedActivation()[index]; +} + +enum ModeFormat { + ModeFormat_TENSORFLOW = 0, + ModeFormat_TFLITE = 1, + ModeFormat_MIN = ModeFormat_TENSORFLOW, + ModeFormat_MAX = ModeFormat_TFLITE +}; + +inline const ModeFormat (&EnumValuesModeFormat())[2] { + static const ModeFormat values[] = { + ModeFormat_TENSORFLOW, + ModeFormat_TFLITE + }; + return values; +} + +inline 
const char * const *EnumNamesModeFormat() { + static const char * const names[] = { + "TENSORFLOW", + "TFLITE", + nullptr + }; + return names; +} + +inline const char *EnumNameModeFormat(ModeFormat e) { + if (e < ModeFormat_TENSORFLOW || e > ModeFormat_TFLITE) return ""; + const size_t index = static_cast(e); + return EnumNamesModeFormat()[index]; +} + +enum QuantizeMode { + QuantizeMode_MIN_COMBINED = 0, + QuantizeMode_MIN_FIRST = 1, + QuantizeMode_SCALED = 2, + QuantizeMode_MIN = QuantizeMode_MIN_COMBINED, + QuantizeMode_MAX = QuantizeMode_SCALED +}; + +inline const QuantizeMode (&EnumValuesQuantizeMode())[3] { + static const QuantizeMode values[] = { + QuantizeMode_MIN_COMBINED, + QuantizeMode_MIN_FIRST, + QuantizeMode_SCALED + }; + return values; +} + +inline const char * const *EnumNamesQuantizeMode() { + static const char * const names[] = { + "MIN_COMBINED", + "MIN_FIRST", + "SCALED", + nullptr + }; + return names; +} + +inline const char *EnumNameQuantizeMode(QuantizeMode e) { + if (e < QuantizeMode_MIN_COMBINED || e > QuantizeMode_SCALED) return ""; + const size_t index = static_cast(e); + return EnumNamesQuantizeMode()[index]; +} + +enum QuantizeRoundMode { + QuantizeRoundMode_HALF_AWAY_FROM_ZERO = 0, + QuantizeRoundMode_HALF_TO_EVEN = 1, + QuantizeRoundMode_MIN = QuantizeRoundMode_HALF_AWAY_FROM_ZERO, + QuantizeRoundMode_MAX = QuantizeRoundMode_HALF_TO_EVEN +}; + +inline const QuantizeRoundMode (&EnumValuesQuantizeRoundMode())[2] { + static const QuantizeRoundMode values[] = { + QuantizeRoundMode_HALF_AWAY_FROM_ZERO, + QuantizeRoundMode_HALF_TO_EVEN + }; + return values; +} + +inline const char * const *EnumNamesQuantizeRoundMode() { + static const char * const names[] = { + "HALF_AWAY_FROM_ZERO", + "HALF_TO_EVEN", + nullptr + }; + return names; +} + +inline const char *EnumNameQuantizeRoundMode(QuantizeRoundMode e) { + if (e < QuantizeRoundMode_HALF_AWAY_FROM_ZERO || e > QuantizeRoundMode_HALF_TO_EVEN) return ""; + const size_t index = static_cast(e); + 
return EnumNamesQuantizeRoundMode()[index]; +} + +struct QuantizedParamT : public flatbuffers::NativeTable { + typedef QuantizedParam TableType; + int32_t zeroPoint; + float scale; + QuantizedParamT() + : zeroPoint(0), + scale(0.0f) { + } +}; + +struct QuantizedParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ZEROPOINT = 4, + VT_SCALE = 6 + }; + int32_t zeroPoint() const { + return GetField(VT_ZEROPOINT, 0); + } + float scale() const { + return GetField(VT_SCALE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ZEROPOINT) && + VerifyField(verifier, VT_SCALE) && + verifier.EndTable(); + } + QuantizedParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_zeroPoint(int32_t zeroPoint) { + fbb_.AddElement(QuantizedParam::VT_ZEROPOINT, zeroPoint, 0); + } + void add_scale(float scale) { + fbb_.AddElement(QuantizedParam::VT_SCALE, scale, 0.0f); + } + explicit QuantizedParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedParamBuilder &operator=(const QuantizedParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedParam( + flatbuffers::FlatBufferBuilder &_fbb, + 
int32_t zeroPoint = 0, + float scale = 0.0f) { + QuantizedParamBuilder builder_(_fbb); + builder_.add_scale(scale); + builder_.add_zeroPoint(zeroPoint); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizedParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedAddT : public flatbuffers::NativeTable { + typedef QuantizedAdd TableType; + FusedActivation activationType; + std::unique_ptr input1QuantizedParam; + std::unique_ptr input2QuantizedParam; + std::unique_ptr outputQuantizedParam; + QuantizedAddT() + : activationType(FusedActivation_kTfLiteActNone) { + } +}; + +struct QuantizedAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedAddT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedAddTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACTIVATIONTYPE = 4, + VT_INPUT1QUANTIZEDPARAM = 6, + VT_INPUT2QUANTIZEDPARAM = 8, + VT_OUTPUTQUANTIZEDPARAM = 10 + }; + FusedActivation activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + const QuantizedParam *input1QuantizedParam() const { + return GetPointer(VT_INPUT1QUANTIZEDPARAM); + } + const QuantizedParam *input2QuantizedParam() const { + return GetPointer(VT_INPUT2QUANTIZEDPARAM); + } + const QuantizedParam *outputQuantizedParam() const { + return GetPointer(VT_OUTPUTQUANTIZEDPARAM); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + VerifyOffset(verifier, VT_INPUT1QUANTIZEDPARAM) && + verifier.VerifyTable(input1QuantizedParam()) && + VerifyOffset(verifier, VT_INPUT2QUANTIZEDPARAM) && + verifier.VerifyTable(input2QuantizedParam()) && + VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) && + verifier.VerifyTable(outputQuantizedParam()) && + verifier.EndTable(); + } + 
QuantizedAddT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedAddT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedAddBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_activationType(FusedActivation activationType) { + fbb_.AddElement(QuantizedAdd::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + void add_input1QuantizedParam(flatbuffers::Offset input1QuantizedParam) { + fbb_.AddOffset(QuantizedAdd::VT_INPUT1QUANTIZEDPARAM, input1QuantizedParam); + } + void add_input2QuantizedParam(flatbuffers::Offset input2QuantizedParam) { + fbb_.AddOffset(QuantizedAdd::VT_INPUT2QUANTIZEDPARAM, input2QuantizedParam); + } + void add_outputQuantizedParam(flatbuffers::Offset outputQuantizedParam) { + fbb_.AddOffset(QuantizedAdd::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam); + } + explicit QuantizedAddBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedAddBuilder &operator=(const QuantizedAddBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedAdd( + flatbuffers::FlatBufferBuilder &_fbb, + FusedActivation activationType = FusedActivation_kTfLiteActNone, + flatbuffers::Offset input1QuantizedParam = 0, + flatbuffers::Offset input2QuantizedParam = 0, + flatbuffers::Offset outputQuantizedParam = 0) { + QuantizedAddBuilder builder_(_fbb); + builder_.add_outputQuantizedParam(outputQuantizedParam); + builder_.add_input2QuantizedParam(input2QuantizedParam); + builder_.add_input1QuantizedParam(input1QuantizedParam); + builder_.add_activationType(activationType); + return builder_.Finish(); +} + 
+flatbuffers::Offset CreateQuantizedAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DequantizeT : public flatbuffers::NativeTable { + typedef Dequantize TableType; + std::unique_ptr inputQuantizedParam; + QuantizeMode mode; + ModeFormat modelFormat; + DataType type; + DequantizeT() + : mode(QuantizeMode_MIN_COMBINED), + modelFormat(ModeFormat_TENSORFLOW), + type(DataType_DT_INVALID) { + } +}; + +struct Dequantize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DequantizeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return DequantizeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUTQUANTIZEDPARAM = 4, + VT_MODE = 6, + VT_MODELFORMAT = 8, + VT_TYPE = 10 + }; + const QuantizedParam *inputQuantizedParam() const { + return GetPointer(VT_INPUTQUANTIZEDPARAM); + } + QuantizeMode mode() const { + return static_cast(GetField(VT_MODE, 0)); + } + ModeFormat modelFormat() const { + return static_cast(GetField(VT_MODELFORMAT, 0)); + } + DataType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) && + verifier.VerifyTable(inputQuantizedParam()) && + VerifyField(verifier, VT_MODE) && + VerifyField(verifier, VT_MODELFORMAT) && + VerifyField(verifier, VT_TYPE) && + verifier.EndTable(); + } + DequantizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DequantizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DequantizeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + 
void add_inputQuantizedParam(flatbuffers::Offset inputQuantizedParam) { + fbb_.AddOffset(Dequantize::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam); + } + void add_mode(QuantizeMode mode) { + fbb_.AddElement(Dequantize::VT_MODE, static_cast(mode), 0); + } + void add_modelFormat(ModeFormat modelFormat) { + fbb_.AddElement(Dequantize::VT_MODELFORMAT, static_cast(modelFormat), 0); + } + void add_type(DataType type) { + fbb_.AddElement(Dequantize::VT_TYPE, static_cast(type), 0); + } + explicit DequantizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DequantizeBuilder &operator=(const DequantizeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDequantize( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset inputQuantizedParam = 0, + QuantizeMode mode = QuantizeMode_MIN_COMBINED, + ModeFormat modelFormat = ModeFormat_TENSORFLOW, + DataType type = DataType_DT_INVALID) { + DequantizeBuilder builder_(_fbb); + builder_.add_type(type); + builder_.add_inputQuantizedParam(inputQuantizedParam); + builder_.add_modelFormat(modelFormat); + builder_.add_mode(mode); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDequantize(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedAvgPoolT : public flatbuffers::NativeTable { + typedef QuantizedAvgPool TableType; + int32_t kernelX; + int32_t kernelY; + ModeFormat modelFormat; + int32_t outputActivationMax; + int32_t outputActivationMin; + PoolPadType padType; + int32_t padX; + int32_t padY; + int32_t strideX; + int32_t strideY; + DataType type; + QuantizedAvgPoolT() + : kernelX(0), + kernelY(0), + modelFormat(ModeFormat_TENSORFLOW), + outputActivationMax(0), + outputActivationMin(0), + padType(PoolPadType_CAFFE), + padX(0), + padY(0), + strideX(0), + 
strideY(0), + type(DataType_DT_INVALID) { + } +}; + +struct QuantizedAvgPool FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedAvgPoolT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedAvgPoolTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KERNELX = 4, + VT_KERNELY = 6, + VT_MODELFORMAT = 8, + VT_OUTPUTACTIVATIONMAX = 10, + VT_OUTPUTACTIVATIONMIN = 12, + VT_PADTYPE = 14, + VT_PADX = 16, + VT_PADY = 18, + VT_STRIDEX = 20, + VT_STRIDEY = 22, + VT_TYPE = 24 + }; + int32_t kernelX() const { + return GetField(VT_KERNELX, 0); + } + int32_t kernelY() const { + return GetField(VT_KERNELY, 0); + } + ModeFormat modelFormat() const { + return static_cast(GetField(VT_MODELFORMAT, 0)); + } + int32_t outputActivationMax() const { + return GetField(VT_OUTPUTACTIVATIONMAX, 0); + } + int32_t outputActivationMin() const { + return GetField(VT_OUTPUTACTIVATIONMIN, 0); + } + PoolPadType padType() const { + return static_cast(GetField(VT_PADTYPE, 0)); + } + int32_t padX() const { + return GetField(VT_PADX, 0); + } + int32_t padY() const { + return GetField(VT_PADY, 0); + } + int32_t strideX() const { + return GetField(VT_STRIDEX, 0); + } + int32_t strideY() const { + return GetField(VT_STRIDEY, 0); + } + DataType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KERNELX) && + VerifyField(verifier, VT_KERNELY) && + VerifyField(verifier, VT_MODELFORMAT) && + VerifyField(verifier, VT_OUTPUTACTIVATIONMAX) && + VerifyField(verifier, VT_OUTPUTACTIVATIONMIN) && + VerifyField(verifier, VT_PADTYPE) && + VerifyField(verifier, VT_PADX) && + VerifyField(verifier, VT_PADY) && + VerifyField(verifier, VT_STRIDEX) && + VerifyField(verifier, VT_STRIDEY) && + VerifyField(verifier, VT_TYPE) && + verifier.EndTable(); + } + QuantizedAvgPoolT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedAvgPoolT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedAvgPoolBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_kernelX(int32_t kernelX) { + fbb_.AddElement(QuantizedAvgPool::VT_KERNELX, kernelX, 0); + } + void add_kernelY(int32_t kernelY) { + fbb_.AddElement(QuantizedAvgPool::VT_KERNELY, kernelY, 0); + } + void add_modelFormat(ModeFormat modelFormat) { + fbb_.AddElement(QuantizedAvgPool::VT_MODELFORMAT, static_cast(modelFormat), 0); + } + void add_outputActivationMax(int32_t outputActivationMax) { + fbb_.AddElement(QuantizedAvgPool::VT_OUTPUTACTIVATIONMAX, outputActivationMax, 0); + } + void add_outputActivationMin(int32_t outputActivationMin) { + fbb_.AddElement(QuantizedAvgPool::VT_OUTPUTACTIVATIONMIN, outputActivationMin, 0); + } + void add_padType(PoolPadType padType) { + fbb_.AddElement(QuantizedAvgPool::VT_PADTYPE, static_cast(padType), 0); + } + void add_padX(int32_t padX) { + fbb_.AddElement(QuantizedAvgPool::VT_PADX, padX, 0); + } + void add_padY(int32_t padY) { + fbb_.AddElement(QuantizedAvgPool::VT_PADY, padY, 0); + } + void add_strideX(int32_t strideX) { + fbb_.AddElement(QuantizedAvgPool::VT_STRIDEX, strideX, 0); + } + void add_strideY(int32_t strideY) { + fbb_.AddElement(QuantizedAvgPool::VT_STRIDEY, strideY, 0); + } + void add_type(DataType type) { + fbb_.AddElement(QuantizedAvgPool::VT_TYPE, static_cast(type), 0); + } + explicit QuantizedAvgPoolBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedAvgPoolBuilder &operator=(const QuantizedAvgPoolBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedAvgPool( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t kernelX = 0, + int32_t kernelY = 0, + ModeFormat modelFormat = ModeFormat_TENSORFLOW, + int32_t outputActivationMax = 0, + int32_t outputActivationMin = 0, + PoolPadType padType = PoolPadType_CAFFE, + int32_t padX = 0, + int32_t padY = 0, + int32_t strideX = 0, + int32_t strideY = 0, + DataType type = DataType_DT_INVALID) { + QuantizedAvgPoolBuilder builder_(_fbb); + builder_.add_type(type); + builder_.add_strideY(strideY); + builder_.add_strideX(strideX); + builder_.add_padY(padY); + builder_.add_padX(padX); + builder_.add_outputActivationMin(outputActivationMin); + builder_.add_outputActivationMax(outputActivationMax); + builder_.add_kernelY(kernelY); + builder_.add_kernelX(kernelX); + builder_.add_padType(padType); + builder_.add_modelFormat(modelFormat); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizedAvgPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedBiasAddT : public flatbuffers::NativeTable { + typedef QuantizedBiasAdd TableType; + std::vector bias; + DataType inputType; + int32_t max; + int32_t min; + DataType outputType; + QuantizedBiasAddT() + : inputType(DataType_DT_INVALID), + max(0), + min(0), + outputType(DataType_DT_INVALID) { + } +}; + +struct QuantizedBiasAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedBiasAddT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedBiasAddTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BIAS = 4, + VT_INPUTTYPE = 6, + VT_MAX = 8, + VT_MIN = 10, + VT_OUTPUTTYPE = 12 + }; + const flatbuffers::Vector *bias() const { + return GetPointer *>(VT_BIAS); + } + DataType inputType() const { + return static_cast(GetField(VT_INPUTTYPE, 0)); + } 
+ int32_t max() const { + return GetField(VT_MAX, 0); + } + int32_t min() const { + return GetField(VT_MIN, 0); + } + DataType outputType() const { + return static_cast(GetField(VT_OUTPUTTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BIAS) && + verifier.VerifyVector(bias()) && + VerifyField(verifier, VT_INPUTTYPE) && + VerifyField(verifier, VT_MAX) && + VerifyField(verifier, VT_MIN) && + VerifyField(verifier, VT_OUTPUTTYPE) && + verifier.EndTable(); + } + QuantizedBiasAddT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedBiasAddT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedBiasAddBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_bias(flatbuffers::Offset> bias) { + fbb_.AddOffset(QuantizedBiasAdd::VT_BIAS, bias); + } + void add_inputType(DataType inputType) { + fbb_.AddElement(QuantizedBiasAdd::VT_INPUTTYPE, static_cast(inputType), 0); + } + void add_max(int32_t max) { + fbb_.AddElement(QuantizedBiasAdd::VT_MAX, max, 0); + } + void add_min(int32_t min) { + fbb_.AddElement(QuantizedBiasAdd::VT_MIN, min, 0); + } + void add_outputType(DataType outputType) { + fbb_.AddElement(QuantizedBiasAdd::VT_OUTPUTTYPE, static_cast(outputType), 0); + } + explicit QuantizedBiasAddBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedBiasAddBuilder &operator=(const QuantizedBiasAddBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedBiasAdd( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> 
bias = 0, + DataType inputType = DataType_DT_INVALID, + int32_t max = 0, + int32_t min = 0, + DataType outputType = DataType_DT_INVALID) { + QuantizedBiasAddBuilder builder_(_fbb); + builder_.add_outputType(outputType); + builder_.add_min(min); + builder_.add_max(max); + builder_.add_inputType(inputType); + builder_.add_bias(bias); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateQuantizedBiasAddDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *bias = nullptr, + DataType inputType = DataType_DT_INVALID, + int32_t max = 0, + int32_t min = 0, + DataType outputType = DataType_DT_INVALID) { + auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; + return MNN::CreateQuantizedBiasAdd( + _fbb, + bias__, + inputType, + max, + min, + outputType); +} + +flatbuffers::Offset CreateQuantizedBiasAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedConcatT : public flatbuffers::NativeTable { + typedef QuantizedConcat TableType; + FusedActivation activationType; + int32_t axis; + std::vector inputScale; + std::vector inputZeroPoint; + std::unique_ptr outputQuantizedParam; + QuantizedConcatT() + : activationType(FusedActivation_kTfLiteActNone), + axis(0) { + } +}; + +struct QuantizedConcat FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedConcatT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedConcatTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ACTIVATIONTYPE = 4, + VT_AXIS = 6, + VT_INPUTSCALE = 8, + VT_INPUTZEROPOINT = 10, + VT_OUTPUTQUANTIZEDPARAM = 12 + }; + FusedActivation activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + const flatbuffers::Vector *inputScale() const { + return GetPointer *>(VT_INPUTSCALE); + } + const 
flatbuffers::Vector *inputZeroPoint() const { + return GetPointer *>(VT_INPUTZEROPOINT); + } + const QuantizedParam *outputQuantizedParam() const { + return GetPointer(VT_OUTPUTQUANTIZEDPARAM); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + VerifyField(verifier, VT_AXIS) && + VerifyOffset(verifier, VT_INPUTSCALE) && + verifier.VerifyVector(inputScale()) && + VerifyOffset(verifier, VT_INPUTZEROPOINT) && + verifier.VerifyVector(inputZeroPoint()) && + VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) && + verifier.VerifyTable(outputQuantizedParam()) && + verifier.EndTable(); + } + QuantizedConcatT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedConcatT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedConcatBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_activationType(FusedActivation activationType) { + fbb_.AddElement(QuantizedConcat::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(QuantizedConcat::VT_AXIS, axis, 0); + } + void add_inputScale(flatbuffers::Offset> inputScale) { + fbb_.AddOffset(QuantizedConcat::VT_INPUTSCALE, inputScale); + } + void add_inputZeroPoint(flatbuffers::Offset> inputZeroPoint) { + fbb_.AddOffset(QuantizedConcat::VT_INPUTZEROPOINT, inputZeroPoint); + } + void add_outputQuantizedParam(flatbuffers::Offset outputQuantizedParam) { + fbb_.AddOffset(QuantizedConcat::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam); + } + explicit QuantizedConcatBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedConcatBuilder &operator=(const 
QuantizedConcatBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedConcat( + flatbuffers::FlatBufferBuilder &_fbb, + FusedActivation activationType = FusedActivation_kTfLiteActNone, + int32_t axis = 0, + flatbuffers::Offset> inputScale = 0, + flatbuffers::Offset> inputZeroPoint = 0, + flatbuffers::Offset outputQuantizedParam = 0) { + QuantizedConcatBuilder builder_(_fbb); + builder_.add_outputQuantizedParam(outputQuantizedParam); + builder_.add_inputZeroPoint(inputZeroPoint); + builder_.add_inputScale(inputScale); + builder_.add_axis(axis); + builder_.add_activationType(activationType); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateQuantizedConcatDirect( + flatbuffers::FlatBufferBuilder &_fbb, + FusedActivation activationType = FusedActivation_kTfLiteActNone, + int32_t axis = 0, + const std::vector *inputScale = nullptr, + const std::vector *inputZeroPoint = nullptr, + flatbuffers::Offset outputQuantizedParam = 0) { + auto inputScale__ = inputScale ? _fbb.CreateVector(*inputScale) : 0; + auto inputZeroPoint__ = inputZeroPoint ? 
_fbb.CreateVector(*inputZeroPoint) : 0; + return MNN::CreateQuantizedConcat( + _fbb, + activationType, + axis, + inputScale__, + inputZeroPoint__, + outputQuantizedParam); +} + +flatbuffers::Offset CreateQuantizedConcat(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedLogisticT : public flatbuffers::NativeTable { + typedef QuantizedLogistic TableType; + std::unique_ptr inputQuantizedParam; + std::unique_ptr outputQuantizedParam; + QuantizedLogisticT() { + } +}; + +struct QuantizedLogistic FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedLogisticT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedLogisticTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUTQUANTIZEDPARAM = 4, + VT_OUTPUTQUANTIZEDPARAM = 6 + }; + const QuantizedParam *inputQuantizedParam() const { + return GetPointer(VT_INPUTQUANTIZEDPARAM); + } + const QuantizedParam *outputQuantizedParam() const { + return GetPointer(VT_OUTPUTQUANTIZEDPARAM); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) && + verifier.VerifyTable(inputQuantizedParam()) && + VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) && + verifier.VerifyTable(outputQuantizedParam()) && + verifier.EndTable(); + } + QuantizedLogisticT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedLogisticT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedLogisticBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_inputQuantizedParam(flatbuffers::Offset 
inputQuantizedParam) { + fbb_.AddOffset(QuantizedLogistic::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam); + } + void add_outputQuantizedParam(flatbuffers::Offset outputQuantizedParam) { + fbb_.AddOffset(QuantizedLogistic::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam); + } + explicit QuantizedLogisticBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedLogisticBuilder &operator=(const QuantizedLogisticBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedLogistic( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset inputQuantizedParam = 0, + flatbuffers::Offset outputQuantizedParam = 0) { + QuantizedLogisticBuilder builder_(_fbb); + builder_.add_outputQuantizedParam(outputQuantizedParam); + builder_.add_inputQuantizedParam(inputQuantizedParam); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizedLogistic(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedMatMulT : public flatbuffers::NativeTable { + typedef QuantizedMatMul TableType; + bool transposeA; + bool transposeB; + QuantizedMatMulT() + : transposeA(false), + transposeB(false) { + } +}; + +struct QuantizedMatMul FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedMatMulT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedMatMulTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TRANSPOSEA = 4, + VT_TRANSPOSEB = 6 + }; + bool transposeA() const { + return GetField(VT_TRANSPOSEA, 0) != 0; + } + bool transposeB() const { + return GetField(VT_TRANSPOSEB, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TRANSPOSEA) 
&& + VerifyField(verifier, VT_TRANSPOSEB) && + verifier.EndTable(); + } + QuantizedMatMulT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedMatMulT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedMatMulBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_transposeA(bool transposeA) { + fbb_.AddElement(QuantizedMatMul::VT_TRANSPOSEA, static_cast(transposeA), 0); + } + void add_transposeB(bool transposeB) { + fbb_.AddElement(QuantizedMatMul::VT_TRANSPOSEB, static_cast(transposeB), 0); + } + explicit QuantizedMatMulBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedMatMulBuilder &operator=(const QuantizedMatMulBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedMatMul( + flatbuffers::FlatBufferBuilder &_fbb, + bool transposeA = false, + bool transposeB = false) { + QuantizedMatMulBuilder builder_(_fbb); + builder_.add_transposeB(transposeB); + builder_.add_transposeA(transposeA); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizedMatMul(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedMaxPoolT : public flatbuffers::NativeTable { + typedef QuantizedMaxPool TableType; + int32_t kernelX; + int32_t kernelY; + ModeFormat modelFormat; + int32_t outputActivationMax; + int32_t outputActivationMin; + PoolPadType padType; + int32_t padX; + int32_t padY; + int32_t strideX; + int32_t strideY; + DataType type; + QuantizedMaxPoolT() + : kernelX(0), + kernelY(0), + 
modelFormat(ModeFormat_TENSORFLOW), + outputActivationMax(0), + outputActivationMin(0), + padType(PoolPadType_CAFFE), + padX(0), + padY(0), + strideX(0), + strideY(0), + type(DataType_DT_INVALID) { + } +}; + +struct QuantizedMaxPool FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedMaxPoolT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedMaxPoolTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KERNELX = 4, + VT_KERNELY = 6, + VT_MODELFORMAT = 8, + VT_OUTPUTACTIVATIONMAX = 10, + VT_OUTPUTACTIVATIONMIN = 12, + VT_PADTYPE = 14, + VT_PADX = 16, + VT_PADY = 18, + VT_STRIDEX = 20, + VT_STRIDEY = 22, + VT_TYPE = 24 + }; + int32_t kernelX() const { + return GetField(VT_KERNELX, 0); + } + int32_t kernelY() const { + return GetField(VT_KERNELY, 0); + } + ModeFormat modelFormat() const { + return static_cast(GetField(VT_MODELFORMAT, 0)); + } + int32_t outputActivationMax() const { + return GetField(VT_OUTPUTACTIVATIONMAX, 0); + } + int32_t outputActivationMin() const { + return GetField(VT_OUTPUTACTIVATIONMIN, 0); + } + PoolPadType padType() const { + return static_cast(GetField(VT_PADTYPE, 0)); + } + int32_t padX() const { + return GetField(VT_PADX, 0); + } + int32_t padY() const { + return GetField(VT_PADY, 0); + } + int32_t strideX() const { + return GetField(VT_STRIDEX, 0); + } + int32_t strideY() const { + return GetField(VT_STRIDEY, 0); + } + DataType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KERNELX) && + VerifyField(verifier, VT_KERNELY) && + VerifyField(verifier, VT_MODELFORMAT) && + VerifyField(verifier, VT_OUTPUTACTIVATIONMAX) && + VerifyField(verifier, VT_OUTPUTACTIVATIONMIN) && + VerifyField(verifier, VT_PADTYPE) && + VerifyField(verifier, VT_PADX) && + VerifyField(verifier, VT_PADY) && + VerifyField(verifier, 
VT_STRIDEX) && + VerifyField(verifier, VT_STRIDEY) && + VerifyField(verifier, VT_TYPE) && + verifier.EndTable(); + } + QuantizedMaxPoolT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedMaxPoolT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedMaxPoolBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_kernelX(int32_t kernelX) { + fbb_.AddElement(QuantizedMaxPool::VT_KERNELX, kernelX, 0); + } + void add_kernelY(int32_t kernelY) { + fbb_.AddElement(QuantizedMaxPool::VT_KERNELY, kernelY, 0); + } + void add_modelFormat(ModeFormat modelFormat) { + fbb_.AddElement(QuantizedMaxPool::VT_MODELFORMAT, static_cast(modelFormat), 0); + } + void add_outputActivationMax(int32_t outputActivationMax) { + fbb_.AddElement(QuantizedMaxPool::VT_OUTPUTACTIVATIONMAX, outputActivationMax, 0); + } + void add_outputActivationMin(int32_t outputActivationMin) { + fbb_.AddElement(QuantizedMaxPool::VT_OUTPUTACTIVATIONMIN, outputActivationMin, 0); + } + void add_padType(PoolPadType padType) { + fbb_.AddElement(QuantizedMaxPool::VT_PADTYPE, static_cast(padType), 0); + } + void add_padX(int32_t padX) { + fbb_.AddElement(QuantizedMaxPool::VT_PADX, padX, 0); + } + void add_padY(int32_t padY) { + fbb_.AddElement(QuantizedMaxPool::VT_PADY, padY, 0); + } + void add_strideX(int32_t strideX) { + fbb_.AddElement(QuantizedMaxPool::VT_STRIDEX, strideX, 0); + } + void add_strideY(int32_t strideY) { + fbb_.AddElement(QuantizedMaxPool::VT_STRIDEY, strideY, 0); + } + void add_type(DataType type) { + fbb_.AddElement(QuantizedMaxPool::VT_TYPE, static_cast(type), 0); + } + explicit QuantizedMaxPoolBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + 
QuantizedMaxPoolBuilder &operator=(const QuantizedMaxPoolBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedMaxPool( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t kernelX = 0, + int32_t kernelY = 0, + ModeFormat modelFormat = ModeFormat_TENSORFLOW, + int32_t outputActivationMax = 0, + int32_t outputActivationMin = 0, + PoolPadType padType = PoolPadType_CAFFE, + int32_t padX = 0, + int32_t padY = 0, + int32_t strideX = 0, + int32_t strideY = 0, + DataType type = DataType_DT_INVALID) { + QuantizedMaxPoolBuilder builder_(_fbb); + builder_.add_type(type); + builder_.add_strideY(strideY); + builder_.add_strideX(strideX); + builder_.add_padY(padY); + builder_.add_padX(padX); + builder_.add_outputActivationMin(outputActivationMin); + builder_.add_outputActivationMax(outputActivationMax); + builder_.add_kernelY(kernelY); + builder_.add_kernelX(kernelX); + builder_.add_padType(padType); + builder_.add_modelFormat(modelFormat); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizedMaxPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedReluT : public flatbuffers::NativeTable { + typedef QuantizedRelu TableType; + DataType type; + QuantizedReluT() + : type(DataType_DT_INVALID) { + } +}; + +struct QuantizedRelu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedReluT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedReluTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4 + }; + DataType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE) && + verifier.EndTable(); + } + 
QuantizedReluT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedReluT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedReluBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(DataType type) { + fbb_.AddElement(QuantizedRelu::VT_TYPE, static_cast(type), 0); + } + explicit QuantizedReluBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedReluBuilder &operator=(const QuantizedReluBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedRelu( + flatbuffers::FlatBufferBuilder &_fbb, + DataType type = DataType_DT_INVALID) { + QuantizedReluBuilder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizedRelu(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedRelu6T : public flatbuffers::NativeTable { + typedef QuantizedRelu6 TableType; + DataType type; + QuantizedRelu6T() + : type(DataType_DT_INVALID) { + } +}; + +struct QuantizedRelu6 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedRelu6T NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedRelu6TypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4 + }; + DataType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE) && + verifier.EndTable(); 
+ } + QuantizedRelu6T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedRelu6T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedRelu6Builder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(DataType type) { + fbb_.AddElement(QuantizedRelu6::VT_TYPE, static_cast(type), 0); + } + explicit QuantizedRelu6Builder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedRelu6Builder &operator=(const QuantizedRelu6Builder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedRelu6( + flatbuffers::FlatBufferBuilder &_fbb, + DataType type = DataType_DT_INVALID) { + QuantizedRelu6Builder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizedRelu6(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedReshapeT : public flatbuffers::NativeTable { + typedef QuantizedReshape TableType; + std::vector dims; + ModeFormat modelFormat; + QuantizedReshapeT() + : modelFormat(ModeFormat_TENSORFLOW) { + } +}; + +struct QuantizedReshape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedReshapeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedReshapeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMS = 4, + VT_MODELFORMAT = 6 + }; + const flatbuffers::Vector *dims() const { + return GetPointer *>(VT_DIMS); + } + ModeFormat modelFormat() const { + return 
static_cast(GetField(VT_MODELFORMAT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIMS) && + verifier.VerifyVector(dims()) && + VerifyField(verifier, VT_MODELFORMAT) && + verifier.EndTable(); + } + QuantizedReshapeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedReshapeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedReshapeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dims(flatbuffers::Offset> dims) { + fbb_.AddOffset(QuantizedReshape::VT_DIMS, dims); + } + void add_modelFormat(ModeFormat modelFormat) { + fbb_.AddElement(QuantizedReshape::VT_MODELFORMAT, static_cast(modelFormat), 0); + } + explicit QuantizedReshapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedReshapeBuilder &operator=(const QuantizedReshapeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedReshape( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dims = 0, + ModeFormat modelFormat = ModeFormat_TENSORFLOW) { + QuantizedReshapeBuilder builder_(_fbb); + builder_.add_dims(dims); + builder_.add_modelFormat(modelFormat); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateQuantizedReshapeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dims = nullptr, + ModeFormat modelFormat = ModeFormat_TENSORFLOW) { + auto dims__ = dims ? 
_fbb.CreateVector(*dims) : 0; + return MNN::CreateQuantizedReshape( + _fbb, + dims__, + modelFormat); +} + +flatbuffers::Offset CreateQuantizedReshape(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizedSoftmaxT : public flatbuffers::NativeTable { + typedef QuantizedSoftmax TableType; + float beta; + float inputScale; + QuantizedSoftmaxT() + : beta(0.0f), + inputScale(0.0f) { + } +}; + +struct QuantizedSoftmax FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizedSoftmaxT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizedSoftmaxTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BETA = 4, + VT_INPUTSCALE = 6 + }; + float beta() const { + return GetField(VT_BETA, 0.0f); + } + float inputScale() const { + return GetField(VT_INPUTSCALE, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BETA) && + VerifyField(verifier, VT_INPUTSCALE) && + verifier.EndTable(); + } + QuantizedSoftmaxT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizedSoftmaxT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizedSoftmaxBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_beta(float beta) { + fbb_.AddElement(QuantizedSoftmax::VT_BETA, beta, 0.0f); + } + void add_inputScale(float inputScale) { + fbb_.AddElement(QuantizedSoftmax::VT_INPUTSCALE, inputScale, 0.0f); + } + explicit QuantizedSoftmaxBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizedSoftmaxBuilder 
&operator=(const QuantizedSoftmaxBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizedSoftmax( + flatbuffers::FlatBufferBuilder &_fbb, + float beta = 0.0f, + float inputScale = 0.0f) { + QuantizedSoftmaxBuilder builder_(_fbb); + builder_.add_inputScale(inputScale); + builder_.add_beta(beta); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizedSoftmax(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizeV2T : public flatbuffers::NativeTable { + typedef QuantizeV2 TableType; + DataType type; + QuantizeMode mode; + QuantizeRoundMode roundMode; + QuantizeV2T() + : type(DataType_DT_INVALID), + mode(QuantizeMode_MIN_COMBINED), + roundMode(QuantizeRoundMode_HALF_AWAY_FROM_ZERO) { + } +}; + +struct QuantizeV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizeV2T NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizeV2TypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4, + VT_MODE = 6, + VT_ROUNDMODE = 8 + }; + DataType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + QuantizeMode mode() const { + return static_cast(GetField(VT_MODE, 0)); + } + QuantizeRoundMode roundMode() const { + return static_cast(GetField(VT_ROUNDMODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE) && + VerifyField(verifier, VT_MODE) && + VerifyField(verifier, VT_ROUNDMODE) && + verifier.EndTable(); + } + QuantizeV2T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizeV2T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizeV2Builder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(DataType type) { + fbb_.AddElement(QuantizeV2::VT_TYPE, static_cast(type), 0); + } + void add_mode(QuantizeMode mode) { + fbb_.AddElement(QuantizeV2::VT_MODE, static_cast(mode), 0); + } + void add_roundMode(QuantizeRoundMode roundMode) { + fbb_.AddElement(QuantizeV2::VT_ROUNDMODE, static_cast(roundMode), 0); + } + explicit QuantizeV2Builder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizeV2Builder &operator=(const QuantizeV2Builder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizeV2( + flatbuffers::FlatBufferBuilder &_fbb, + DataType type = DataType_DT_INVALID, + QuantizeMode mode = QuantizeMode_MIN_COMBINED, + QuantizeRoundMode roundMode = QuantizeRoundMode_HALF_AWAY_FROM_ZERO) { + QuantizeV2Builder builder_(_fbb); + builder_.add_type(type); + builder_.add_roundMode(roundMode); + builder_.add_mode(mode); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizeV2(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RequantizationRangeT : public flatbuffers::NativeTable { + typedef RequantizationRange TableType; + RequantizationRangeT() { + } +}; + +struct RequantizationRange FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RequantizationRangeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return RequantizationRangeTypeTable(); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + RequantizationRangeT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RequantizationRangeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RequantizationRangeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RequantizationRangeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RequantizationRangeBuilder &operator=(const RequantizationRangeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRequantizationRange( + flatbuffers::FlatBufferBuilder &_fbb) { + RequantizationRangeBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRequantizationRange(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RequantizeT : public flatbuffers::NativeTable { + typedef Requantize TableType; + RequantizeT() { + } +}; + +struct Requantize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RequantizeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return RequantizeTypeTable(); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + RequantizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RequantizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RequantizeBuilder { + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RequantizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RequantizeBuilder &operator=(const RequantizeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRequantize( + flatbuffers::FlatBufferBuilder &_fbb) { + RequantizeBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRequantize(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TfQuantizedConv2DT : public flatbuffers::NativeTable { + typedef TfQuantizedConv2D TableType; + std::vector bias; + bool biasflag; + std::unique_ptr common; + std::vector weight; + FusedActivation activationType; + int32_t multiplier; + int32_t outMax; + int32_t outMin; + int32_t shift; + std::unique_ptr biasQuantizedParam; + int32_t depthMultiplier; + std::unique_ptr filterQuantizedParam; + std::unique_ptr inputQuantizedParam; + ModeFormat modelFormat; + std::unique_ptr outputQuantizedParam; + TfQuantizedConv2DT() + : biasflag(false), + activationType(FusedActivation_kTfLiteActNone), + multiplier(0), + outMax(0), + outMin(0), + shift(0), + depthMultiplier(0), + modelFormat(ModeFormat_TENSORFLOW) { + } +}; + +struct TfQuantizedConv2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TfQuantizedConv2DT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return TfQuantizedConv2DTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BIAS = 4, + VT_BIASFLAG = 6, + VT_COMMON = 8, + VT_WEIGHT = 10, + VT_ACTIVATIONTYPE = 12, + VT_MULTIPLIER = 14, + VT_OUTMAX = 16, + VT_OUTMIN = 18, + VT_SHIFT = 20, + VT_BIASQUANTIZEDPARAM = 22, + VT_DEPTHMULTIPLIER = 24, + VT_FILTERQUANTIZEDPARAM = 26, + 
VT_INPUTQUANTIZEDPARAM = 28, + VT_MODELFORMAT = 30, + VT_OUTPUTQUANTIZEDPARAM = 32 + }; + const flatbuffers::Vector *bias() const { + return GetPointer *>(VT_BIAS); + } + bool biasflag() const { + return GetField(VT_BIASFLAG, 0) != 0; + } + const Convolution2DCommon *common() const { + return GetPointer(VT_COMMON); + } + const flatbuffers::Vector *weight() const { + return GetPointer *>(VT_WEIGHT); + } + FusedActivation activationType() const { + return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); + } + int32_t multiplier() const { + return GetField(VT_MULTIPLIER, 0); + } + int32_t outMax() const { + return GetField(VT_OUTMAX, 0); + } + int32_t outMin() const { + return GetField(VT_OUTMIN, 0); + } + int32_t shift() const { + return GetField(VT_SHIFT, 0); + } + const QuantizedParam *biasQuantizedParam() const { + return GetPointer(VT_BIASQUANTIZEDPARAM); + } + int32_t depthMultiplier() const { + return GetField(VT_DEPTHMULTIPLIER, 0); + } + const QuantizedParam *filterQuantizedParam() const { + return GetPointer(VT_FILTERQUANTIZEDPARAM); + } + const QuantizedParam *inputQuantizedParam() const { + return GetPointer(VT_INPUTQUANTIZEDPARAM); + } + ModeFormat modelFormat() const { + return static_cast(GetField(VT_MODELFORMAT, 0)); + } + const QuantizedParam *outputQuantizedParam() const { + return GetPointer(VT_OUTPUTQUANTIZEDPARAM); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BIAS) && + verifier.VerifyVector(bias()) && + VerifyField(verifier, VT_BIASFLAG) && + VerifyOffset(verifier, VT_COMMON) && + verifier.VerifyTable(common()) && + VerifyOffset(verifier, VT_WEIGHT) && + verifier.VerifyVector(weight()) && + VerifyField(verifier, VT_ACTIVATIONTYPE) && + VerifyField(verifier, VT_MULTIPLIER) && + VerifyField(verifier, VT_OUTMAX) && + VerifyField(verifier, VT_OUTMIN) && + VerifyField(verifier, VT_SHIFT) && + VerifyOffset(verifier, VT_BIASQUANTIZEDPARAM) && + 
verifier.VerifyTable(biasQuantizedParam()) && + VerifyField(verifier, VT_DEPTHMULTIPLIER) && + VerifyOffset(verifier, VT_FILTERQUANTIZEDPARAM) && + verifier.VerifyTable(filterQuantizedParam()) && + VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) && + verifier.VerifyTable(inputQuantizedParam()) && + VerifyField(verifier, VT_MODELFORMAT) && + VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) && + verifier.VerifyTable(outputQuantizedParam()) && + verifier.EndTable(); + } + TfQuantizedConv2DT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TfQuantizedConv2DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TfQuantizedConv2DBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_bias(flatbuffers::Offset> bias) { + fbb_.AddOffset(TfQuantizedConv2D::VT_BIAS, bias); + } + void add_biasflag(bool biasflag) { + fbb_.AddElement(TfQuantizedConv2D::VT_BIASFLAG, static_cast(biasflag), 0); + } + void add_common(flatbuffers::Offset common) { + fbb_.AddOffset(TfQuantizedConv2D::VT_COMMON, common); + } + void add_weight(flatbuffers::Offset> weight) { + fbb_.AddOffset(TfQuantizedConv2D::VT_WEIGHT, weight); + } + void add_activationType(FusedActivation activationType) { + fbb_.AddElement(TfQuantizedConv2D::VT_ACTIVATIONTYPE, static_cast(activationType), 0); + } + void add_multiplier(int32_t multiplier) { + fbb_.AddElement(TfQuantizedConv2D::VT_MULTIPLIER, multiplier, 0); + } + void add_outMax(int32_t outMax) { + fbb_.AddElement(TfQuantizedConv2D::VT_OUTMAX, outMax, 0); + } + void add_outMin(int32_t outMin) { + fbb_.AddElement(TfQuantizedConv2D::VT_OUTMIN, outMin, 0); + } + void add_shift(int32_t shift) { + fbb_.AddElement(TfQuantizedConv2D::VT_SHIFT, shift, 0); + } + void 
add_biasQuantizedParam(flatbuffers::Offset biasQuantizedParam) { + fbb_.AddOffset(TfQuantizedConv2D::VT_BIASQUANTIZEDPARAM, biasQuantizedParam); + } + void add_depthMultiplier(int32_t depthMultiplier) { + fbb_.AddElement(TfQuantizedConv2D::VT_DEPTHMULTIPLIER, depthMultiplier, 0); + } + void add_filterQuantizedParam(flatbuffers::Offset filterQuantizedParam) { + fbb_.AddOffset(TfQuantizedConv2D::VT_FILTERQUANTIZEDPARAM, filterQuantizedParam); + } + void add_inputQuantizedParam(flatbuffers::Offset inputQuantizedParam) { + fbb_.AddOffset(TfQuantizedConv2D::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam); + } + void add_modelFormat(ModeFormat modelFormat) { + fbb_.AddElement(TfQuantizedConv2D::VT_MODELFORMAT, static_cast(modelFormat), 0); + } + void add_outputQuantizedParam(flatbuffers::Offset outputQuantizedParam) { + fbb_.AddOffset(TfQuantizedConv2D::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam); + } + explicit TfQuantizedConv2DBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TfQuantizedConv2DBuilder &operator=(const TfQuantizedConv2DBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTfQuantizedConv2D( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> bias = 0, + bool biasflag = false, + flatbuffers::Offset common = 0, + flatbuffers::Offset> weight = 0, + FusedActivation activationType = FusedActivation_kTfLiteActNone, + int32_t multiplier = 0, + int32_t outMax = 0, + int32_t outMin = 0, + int32_t shift = 0, + flatbuffers::Offset biasQuantizedParam = 0, + int32_t depthMultiplier = 0, + flatbuffers::Offset filterQuantizedParam = 0, + flatbuffers::Offset inputQuantizedParam = 0, + ModeFormat modelFormat = ModeFormat_TENSORFLOW, + flatbuffers::Offset outputQuantizedParam = 0) { + TfQuantizedConv2DBuilder builder_(_fbb); + builder_.add_outputQuantizedParam(outputQuantizedParam); + 
builder_.add_inputQuantizedParam(inputQuantizedParam); + builder_.add_filterQuantizedParam(filterQuantizedParam); + builder_.add_depthMultiplier(depthMultiplier); + builder_.add_biasQuantizedParam(biasQuantizedParam); + builder_.add_shift(shift); + builder_.add_outMin(outMin); + builder_.add_outMax(outMax); + builder_.add_multiplier(multiplier); + builder_.add_weight(weight); + builder_.add_common(common); + builder_.add_bias(bias); + builder_.add_modelFormat(modelFormat); + builder_.add_activationType(activationType); + builder_.add_biasflag(biasflag); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateTfQuantizedConv2DDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *bias = nullptr, + bool biasflag = false, + flatbuffers::Offset common = 0, + const std::vector *weight = nullptr, + FusedActivation activationType = FusedActivation_kTfLiteActNone, + int32_t multiplier = 0, + int32_t outMax = 0, + int32_t outMin = 0, + int32_t shift = 0, + flatbuffers::Offset biasQuantizedParam = 0, + int32_t depthMultiplier = 0, + flatbuffers::Offset filterQuantizedParam = 0, + flatbuffers::Offset inputQuantizedParam = 0, + ModeFormat modelFormat = ModeFormat_TENSORFLOW, + flatbuffers::Offset outputQuantizedParam = 0) { + auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; + auto weight__ = weight ? 
_fbb.CreateVector(*weight) : 0; + return MNN::CreateTfQuantizedConv2D( + _fbb, + bias__, + biasflag, + common, + weight__, + activationType, + multiplier, + outMax, + outMin, + shift, + biasQuantizedParam, + depthMultiplier, + filterQuantizedParam, + inputQuantizedParam, + modelFormat, + outputQuantizedParam); +} + +flatbuffers::Offset CreateTfQuantizedConv2D(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline QuantizedParamT *QuantizedParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedParam::UnPackTo(QuantizedParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = zeroPoint(); _o->zeroPoint = _e; }; + { auto _e = scale(); _o->scale = _e; }; +} + +inline flatbuffers::Offset QuantizedParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _zeroPoint = _o->zeroPoint; + auto _scale = _o->scale; + return MNN::CreateQuantizedParam( + _fbb, + _zeroPoint, + _scale); +} + +inline QuantizedAddT *QuantizedAdd::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedAddT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedAdd::UnPackTo(QuantizedAddT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + 
{ auto _e = activationType(); _o->activationType = _e; }; + { auto _e = input1QuantizedParam(); if (_e) _o->input1QuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = input2QuantizedParam(); if (_e) _o->input2QuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset QuantizedAdd::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedAdd(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedAddT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _activationType = _o->activationType; + auto _input1QuantizedParam = _o->input1QuantizedParam ? CreateQuantizedParam(_fbb, _o->input1QuantizedParam.get(), _rehasher) : 0; + auto _input2QuantizedParam = _o->input2QuantizedParam ? CreateQuantizedParam(_fbb, _o->input2QuantizedParam.get(), _rehasher) : 0; + auto _outputQuantizedParam = _o->outputQuantizedParam ? 
CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0; + return MNN::CreateQuantizedAdd( + _fbb, + _activationType, + _input1QuantizedParam, + _input2QuantizedParam, + _outputQuantizedParam); +} + +inline DequantizeT *Dequantize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new DequantizeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Dequantize::UnPackTo(DequantizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = mode(); _o->mode = _e; }; + { auto _e = modelFormat(); _o->modelFormat = _e; }; + { auto _e = type(); _o->type = _e; }; +} + +inline flatbuffers::Offset Dequantize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDequantize(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDequantize(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _inputQuantizedParam = _o->inputQuantizedParam ? 
CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0; + auto _mode = _o->mode; + auto _modelFormat = _o->modelFormat; + auto _type = _o->type; + return MNN::CreateDequantize( + _fbb, + _inputQuantizedParam, + _mode, + _modelFormat, + _type); +} + +inline QuantizedAvgPoolT *QuantizedAvgPool::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedAvgPoolT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedAvgPool::UnPackTo(QuantizedAvgPoolT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = kernelX(); _o->kernelX = _e; }; + { auto _e = kernelY(); _o->kernelY = _e; }; + { auto _e = modelFormat(); _o->modelFormat = _e; }; + { auto _e = outputActivationMax(); _o->outputActivationMax = _e; }; + { auto _e = outputActivationMin(); _o->outputActivationMin = _e; }; + { auto _e = padType(); _o->padType = _e; }; + { auto _e = padX(); _o->padX = _e; }; + { auto _e = padY(); _o->padY = _e; }; + { auto _e = strideX(); _o->strideX = _e; }; + { auto _e = strideY(); _o->strideY = _e; }; + { auto _e = type(); _o->type = _e; }; +} + +inline flatbuffers::Offset QuantizedAvgPool::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedAvgPool(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedAvgPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedAvgPoolT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _kernelX = _o->kernelX; + auto _kernelY = _o->kernelY; + auto _modelFormat = _o->modelFormat; + auto _outputActivationMax = _o->outputActivationMax; + auto _outputActivationMin = _o->outputActivationMin; + auto 
_padType = _o->padType; + auto _padX = _o->padX; + auto _padY = _o->padY; + auto _strideX = _o->strideX; + auto _strideY = _o->strideY; + auto _type = _o->type; + return MNN::CreateQuantizedAvgPool( + _fbb, + _kernelX, + _kernelY, + _modelFormat, + _outputActivationMax, + _outputActivationMin, + _padType, + _padX, + _padY, + _strideX, + _strideY, + _type); +} + +inline QuantizedBiasAddT *QuantizedBiasAdd::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedBiasAddT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedBiasAdd::UnPackTo(QuantizedBiasAddT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; + { auto _e = inputType(); _o->inputType = _e; }; + { auto _e = max(); _o->max = _e; }; + { auto _e = min(); _o->min = _e; }; + { auto _e = outputType(); _o->outputType = _e; }; +} + +inline flatbuffers::Offset QuantizedBiasAdd::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedBiasAdd(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedBiasAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedBiasAddT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _bias = _o->bias.size() ? 
_fbb.CreateVector(_o->bias) : 0; + auto _inputType = _o->inputType; + auto _max = _o->max; + auto _min = _o->min; + auto _outputType = _o->outputType; + return MNN::CreateQuantizedBiasAdd( + _fbb, + _bias, + _inputType, + _max, + _min, + _outputType); +} + +inline QuantizedConcatT *QuantizedConcat::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedConcatT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedConcat::UnPackTo(QuantizedConcatT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = activationType(); _o->activationType = _e; }; + { auto _e = axis(); _o->axis = _e; }; + { auto _e = inputScale(); if (_e) { _o->inputScale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputScale[_i] = _e->Get(_i); } } }; + { auto _e = inputZeroPoint(); if (_e) { _o->inputZeroPoint.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputZeroPoint[_i] = _e->Get(_i); } } }; + { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset QuantizedConcat::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedConcat(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedConcat(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedConcatT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _activationType = _o->activationType; + auto _axis = _o->axis; + auto _inputScale = _o->inputScale.size() ? 
_fbb.CreateVector(_o->inputScale) : 0; + auto _inputZeroPoint = _o->inputZeroPoint.size() ? _fbb.CreateVector(_o->inputZeroPoint) : 0; + auto _outputQuantizedParam = _o->outputQuantizedParam ? CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0; + return MNN::CreateQuantizedConcat( + _fbb, + _activationType, + _axis, + _inputScale, + _inputZeroPoint, + _outputQuantizedParam); +} + +inline QuantizedLogisticT *QuantizedLogistic::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedLogisticT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedLogistic::UnPackTo(QuantizedLogisticT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset QuantizedLogistic::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedLogistic(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedLogistic(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedLogisticT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _inputQuantizedParam = _o->inputQuantizedParam ? CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0; + auto _outputQuantizedParam = _o->outputQuantizedParam ? 
CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0; + return MNN::CreateQuantizedLogistic( + _fbb, + _inputQuantizedParam, + _outputQuantizedParam); +} + +inline QuantizedMatMulT *QuantizedMatMul::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedMatMulT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedMatMul::UnPackTo(QuantizedMatMulT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = transposeA(); _o->transposeA = _e; }; + { auto _e = transposeB(); _o->transposeB = _e; }; +} + +inline flatbuffers::Offset QuantizedMatMul::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedMatMul(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedMatMul(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedMatMulT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _transposeA = _o->transposeA; + auto _transposeB = _o->transposeB; + return MNN::CreateQuantizedMatMul( + _fbb, + _transposeA, + _transposeB); +} + +inline QuantizedMaxPoolT *QuantizedMaxPool::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedMaxPoolT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedMaxPool::UnPackTo(QuantizedMaxPoolT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = kernelX(); _o->kernelX = _e; }; + { auto _e = kernelY(); _o->kernelY = _e; }; + { auto _e = modelFormat(); _o->modelFormat = _e; }; + { auto _e = outputActivationMax(); _o->outputActivationMax = _e; }; + { auto _e = outputActivationMin(); 
_o->outputActivationMin = _e; }; + { auto _e = padType(); _o->padType = _e; }; + { auto _e = padX(); _o->padX = _e; }; + { auto _e = padY(); _o->padY = _e; }; + { auto _e = strideX(); _o->strideX = _e; }; + { auto _e = strideY(); _o->strideY = _e; }; + { auto _e = type(); _o->type = _e; }; +} + +inline flatbuffers::Offset QuantizedMaxPool::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedMaxPool(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedMaxPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedMaxPoolT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _kernelX = _o->kernelX; + auto _kernelY = _o->kernelY; + auto _modelFormat = _o->modelFormat; + auto _outputActivationMax = _o->outputActivationMax; + auto _outputActivationMin = _o->outputActivationMin; + auto _padType = _o->padType; + auto _padX = _o->padX; + auto _padY = _o->padY; + auto _strideX = _o->strideX; + auto _strideY = _o->strideY; + auto _type = _o->type; + return MNN::CreateQuantizedMaxPool( + _fbb, + _kernelX, + _kernelY, + _modelFormat, + _outputActivationMax, + _outputActivationMin, + _padType, + _padX, + _padY, + _strideX, + _strideY, + _type); +} + +inline QuantizedReluT *QuantizedRelu::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedReluT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedRelu::UnPackTo(QuantizedReluT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); _o->type = _e; }; +} + +inline flatbuffers::Offset QuantizedRelu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT* _o, const 
flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedRelu(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedRelu(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedReluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type; + return MNN::CreateQuantizedRelu( + _fbb, + _type); +} + +inline QuantizedRelu6T *QuantizedRelu6::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedRelu6T(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedRelu6::UnPackTo(QuantizedRelu6T *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); _o->type = _e; }; +} + +inline flatbuffers::Offset QuantizedRelu6::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedRelu6(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedRelu6(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedRelu6T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type; + return MNN::CreateQuantizedRelu6( + _fbb, + _type); +} + +inline QuantizedReshapeT *QuantizedReshape::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedReshapeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedReshape::UnPackTo(QuantizedReshapeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = 
dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; + { auto _e = modelFormat(); _o->modelFormat = _e; }; +} + +inline flatbuffers::Offset QuantizedReshape::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedReshape(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedReshape(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedReshapeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dims = _o->dims.size() ? _fbb.CreateVector(_o->dims) : 0; + auto _modelFormat = _o->modelFormat; + return MNN::CreateQuantizedReshape( + _fbb, + _dims, + _modelFormat); +} + +inline QuantizedSoftmaxT *QuantizedSoftmax::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizedSoftmaxT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizedSoftmax::UnPackTo(QuantizedSoftmaxT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = beta(); _o->beta = _e; }; + { auto _e = inputScale(); _o->inputScale = _e; }; +} + +inline flatbuffers::Offset QuantizedSoftmax::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizedSoftmax(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizedSoftmax(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedSoftmaxT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _beta = _o->beta; + auto _inputScale = _o->inputScale; + return MNN::CreateQuantizedSoftmax( + _fbb, + _beta, + _inputScale); +} + +inline QuantizeV2T *QuantizeV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizeV2T(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizeV2::UnPackTo(QuantizeV2T *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); _o->type = _e; }; + { auto _e = mode(); _o->mode = _e; }; + { auto _e = roundMode(); _o->roundMode = _e; }; +} + +inline flatbuffers::Offset QuantizeV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizeV2(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizeV2(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type; + auto _mode = _o->mode; + auto _roundMode = _o->roundMode; + return MNN::CreateQuantizeV2( + _fbb, + _type, + _mode, + _roundMode); +} + +inline RequantizationRangeT *RequantizationRange::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RequantizationRangeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void RequantizationRange::UnPackTo(RequantizationRangeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset RequantizationRange::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return 
CreateRequantizationRange(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRequantizationRange(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RequantizationRangeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return MNN::CreateRequantizationRange( + _fbb); +} + +inline RequantizeT *Requantize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RequantizeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Requantize::UnPackTo(RequantizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset Requantize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRequantize(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRequantize(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RequantizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return MNN::CreateRequantize( + _fbb); +} + +inline TfQuantizedConv2DT *TfQuantizedConv2D::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TfQuantizedConv2DT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void TfQuantizedConv2D::UnPackTo(TfQuantizedConv2DT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; + { auto _e = biasflag(); 
_o->biasflag = _e; }; + { auto _e = common(); if (_e) _o->common = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; + { auto _e = activationType(); _o->activationType = _e; }; + { auto _e = multiplier(); _o->multiplier = _e; }; + { auto _e = outMax(); _o->outMax = _e; }; + { auto _e = outMin(); _o->outMin = _e; }; + { auto _e = shift(); _o->shift = _e; }; + { auto _e = biasQuantizedParam(); if (_e) _o->biasQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = depthMultiplier(); _o->depthMultiplier = _e; }; + { auto _e = filterQuantizedParam(); if (_e) _o->filterQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = modelFormat(); _o->modelFormat = _e; }; + { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset TfQuantizedConv2D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTfQuantizedConv2D(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTfQuantizedConv2D(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TfQuantizedConv2DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; + auto _biasflag = _o->biasflag; + auto _common = _o->common ? CreateConvolution2DCommon(_fbb, _o->common.get(), _rehasher) : 0; + auto _weight = _o->weight.size() ? 
_fbb.CreateVector(_o->weight) : 0; + auto _activationType = _o->activationType; + auto _multiplier = _o->multiplier; + auto _outMax = _o->outMax; + auto _outMin = _o->outMin; + auto _shift = _o->shift; + auto _biasQuantizedParam = _o->biasQuantizedParam ? CreateQuantizedParam(_fbb, _o->biasQuantizedParam.get(), _rehasher) : 0; + auto _depthMultiplier = _o->depthMultiplier; + auto _filterQuantizedParam = _o->filterQuantizedParam ? CreateQuantizedParam(_fbb, _o->filterQuantizedParam.get(), _rehasher) : 0; + auto _inputQuantizedParam = _o->inputQuantizedParam ? CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0; + auto _modelFormat = _o->modelFormat; + auto _outputQuantizedParam = _o->outputQuantizedParam ? CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0; + return MNN::CreateTfQuantizedConv2D( + _fbb, + _bias, + _biasflag, + _common, + _weight, + _activationType, + _multiplier, + _outMax, + _outMin, + _shift, + _biasQuantizedParam, + _depthMultiplier, + _filterQuantizedParam, + _inputQuantizedParam, + _modelFormat, + _outputQuantizedParam); +} + +inline const flatbuffers::TypeTable *FusedActivationTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + FusedActivationTypeTable + }; + static const char * const names[] = { + "kTfLiteActNone", + "kTfLiteActRelu", + "kTfLiteActRelu1", + "kTfLiteActRelu6", + "kTfLiteActTanh", + "kTfLiteActSignBit", + "kTfLiteActSigmoid" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 7, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ModeFormatTypeTable() { + static const flatbuffers::TypeCode type_codes[] = 
{ + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ModeFormatTypeTable + }; + static const char * const names[] = { + "TENSORFLOW", + "TFLITE" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizeModeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + QuantizeModeTypeTable + }; + static const char * const names[] = { + "MIN_COMBINED", + "MIN_FIRST", + "SCALED" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizeRoundModeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + QuantizeRoundModeTypeTable + }; + static const char * const names[] = { + "HALF_AWAY_FROM_ZERO", + "HALF_TO_EVEN" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "zeroPoint", + "scale" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedAddTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 1 }, + { 
flatbuffers::ET_SEQUENCE, 0, 1 }, + { flatbuffers::ET_SEQUENCE, 0, 1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + FusedActivationTypeTable, + QuantizedParamTypeTable + }; + static const char * const names[] = { + "activationType", + "input1QuantizedParam", + "input2QuantizedParam", + "outputQuantizedParam" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *DequantizeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 1 }, + { flatbuffers::ET_CHAR, 0, 2 }, + { flatbuffers::ET_INT, 0, 3 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + QuantizedParamTypeTable, + QuantizeModeTypeTable, + ModeFormatTypeTable, + DataTypeTypeTable + }; + static const char * const names[] = { + "inputQuantizedParam", + "mode", + "modelFormat", + "type" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedAvgPoolTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, 2 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ModeFormatTypeTable, + PoolPadTypeTypeTable, + DataTypeTypeTable + }; + static const char * const names[] = { + "kernelX", + "kernelY", + "modelFormat", + "outputActivationMax", + "outputActivationMin", + "padType", + "padX", + "padY", + "strideX", + "strideY", + "type" + }; + static const flatbuffers::TypeTable 
tt = { + flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedBiasAddTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "bias", + "inputType", + "max", + "min", + "outputType" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedConcatTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + FusedActivationTypeTable, + QuantizedParamTypeTable + }; + static const char * const names[] = { + "activationType", + "axis", + "inputScale", + "inputZeroPoint", + "outputQuantizedParam" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedLogisticTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + QuantizedParamTypeTable + }; + static const char * const names[] = { + "inputQuantizedParam", + "outputQuantizedParam" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedMatMulTypeTable() { + static 
const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const char * const names[] = { + "transposeA", + "transposeB" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedMaxPoolTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, 2 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ModeFormatTypeTable, + PoolPadTypeTypeTable, + DataTypeTypeTable + }; + static const char * const names[] = { + "kernelX", + "kernelY", + "modelFormat", + "outputActivationMax", + "outputActivationMin", + "padType", + "padX", + "padY", + "strideX", + "strideY", + "type" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedReluTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "type" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedRelu6TypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] 
= { + "type" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedReshapeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ModeFormatTypeTable + }; + static const char * const names[] = { + "dims", + "modelFormat" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizedSoftmaxTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "beta", + "inputScale" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizeV2TypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 1 }, + { flatbuffers::ET_CHAR, 0, 2 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable, + QuantizeModeTypeTable, + QuantizeRoundModeTypeTable + }; + static const char * const names[] = { + "type", + "mode", + "roundMode" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *RequantizationRangeTypeTable() { + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr + }; + return &tt; +} + +inline const flatbuffers::TypeTable *RequantizeTypeTable() { + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr + }; + return 
&tt; +} + +inline const flatbuffers::TypeTable *TfQuantizedConv2DTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_UCHAR, 1, -1 }, + { flatbuffers::ET_CHAR, 0, 1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 2 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 2 }, + { flatbuffers::ET_SEQUENCE, 0, 2 }, + { flatbuffers::ET_CHAR, 0, 3 }, + { flatbuffers::ET_SEQUENCE, 0, 2 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + Convolution2DCommonTypeTable, + FusedActivationTypeTable, + QuantizedParamTypeTable, + ModeFormatTypeTable + }; + static const char * const names[] = { + "bias", + "biasflag", + "common", + "weight", + "activationType", + "multiplier", + "outMax", + "outMin", + "shift", + "biasQuantizedParam", + "depthMultiplier", + "filterQuantizedParam", + "inputQuantizedParam", + "modelFormat", + "outputQuantizedParam" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 15, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_ diff --git a/schema/current/Tensor_generated.h b/schema/current/Tensor_generated.h new file mode 100644 index 000000000..e66d4d077 --- /dev/null +++ b/schema/current/Tensor_generated.h @@ -0,0 +1,794 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_TENSOR_MNN_H_ +#define FLATBUFFERS_GENERATED_TENSOR_MNN_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "Type_generated.h" + +namespace MNN { + +struct Blob; +struct BlobT; + +struct ListValue; +struct ListValueT; + +struct Attribute; +struct AttributeT; + +inline const flatbuffers::TypeTable *BlobTypeTable(); + +inline const 
flatbuffers::TypeTable *ListValueTypeTable(); + +inline const flatbuffers::TypeTable *AttributeTypeTable(); + +enum MNN_DATA_FORMAT { + MNN_DATA_FORMAT_NCHW = 0, + MNN_DATA_FORMAT_NHWC = 1, + MNN_DATA_FORMAT_NC4HW4 = 2, + MNN_DATA_FORMAT_NHWC4 = 3, + MNN_DATA_FORMAT_UNKNOWN = 4, + MNN_DATA_FORMAT_MIN = MNN_DATA_FORMAT_NCHW, + MNN_DATA_FORMAT_MAX = MNN_DATA_FORMAT_UNKNOWN +}; + +inline const MNN_DATA_FORMAT (&EnumValuesMNN_DATA_FORMAT())[5] { + static const MNN_DATA_FORMAT values[] = { + MNN_DATA_FORMAT_NCHW, + MNN_DATA_FORMAT_NHWC, + MNN_DATA_FORMAT_NC4HW4, + MNN_DATA_FORMAT_NHWC4, + MNN_DATA_FORMAT_UNKNOWN + }; + return values; +} + +inline const char * const *EnumNamesMNN_DATA_FORMAT() { + static const char * const names[] = { + "NCHW", + "NHWC", + "NC4HW4", + "NHWC4", + "UNKNOWN", + nullptr + }; + return names; +} + +inline const char *EnumNameMNN_DATA_FORMAT(MNN_DATA_FORMAT e) { + if (e < MNN_DATA_FORMAT_NCHW || e > MNN_DATA_FORMAT_UNKNOWN) return ""; + const size_t index = static_cast(e); + return EnumNamesMNN_DATA_FORMAT()[index]; +} + +struct BlobT : public flatbuffers::NativeTable { + typedef Blob TableType; + std::vector dims; + MNN_DATA_FORMAT dataFormat; + DataType dataType; + std::vector uint8s; + std::vector int8s; + std::vector int32s; + std::vector int64s; + std::vector float32s; + std::vector strings; + BlobT() + : dataFormat(MNN_DATA_FORMAT_NCHW), + dataType(DataType_DT_FLOAT) { + } +}; + +struct Blob FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BlobT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return BlobTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIMS = 4, + VT_DATAFORMAT = 6, + VT_DATATYPE = 8, + VT_UINT8S = 10, + VT_INT8S = 12, + VT_INT32S = 14, + VT_INT64S = 16, + VT_FLOAT32S = 18, + VT_STRINGS = 20 + }; + const flatbuffers::Vector *dims() const { + return GetPointer *>(VT_DIMS); + } + MNN_DATA_FORMAT dataFormat() const { + return 
static_cast(GetField(VT_DATAFORMAT, 0)); + } + DataType dataType() const { + return static_cast(GetField(VT_DATATYPE, 1)); + } + const flatbuffers::Vector *uint8s() const { + return GetPointer *>(VT_UINT8S); + } + const flatbuffers::Vector *int8s() const { + return GetPointer *>(VT_INT8S); + } + const flatbuffers::Vector *int32s() const { + return GetPointer *>(VT_INT32S); + } + const flatbuffers::Vector *int64s() const { + return GetPointer *>(VT_INT64S); + } + const flatbuffers::Vector *float32s() const { + return GetPointer *>(VT_FLOAT32S); + } + const flatbuffers::Vector> *strings() const { + return GetPointer> *>(VT_STRINGS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIMS) && + verifier.VerifyVector(dims()) && + VerifyField(verifier, VT_DATAFORMAT) && + VerifyField(verifier, VT_DATATYPE) && + VerifyOffset(verifier, VT_UINT8S) && + verifier.VerifyVector(uint8s()) && + VerifyOffset(verifier, VT_INT8S) && + verifier.VerifyVector(int8s()) && + VerifyOffset(verifier, VT_INT32S) && + verifier.VerifyVector(int32s()) && + VerifyOffset(verifier, VT_INT64S) && + verifier.VerifyVector(int64s()) && + VerifyOffset(verifier, VT_FLOAT32S) && + verifier.VerifyVector(float32s()) && + VerifyOffset(verifier, VT_STRINGS) && + verifier.VerifyVector(strings()) && + verifier.VerifyVectorOfStrings(strings()) && + verifier.EndTable(); + } + BlobT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BlobT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BlobT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BlobBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dims(flatbuffers::Offset> dims) { + fbb_.AddOffset(Blob::VT_DIMS, dims); + } + void add_dataFormat(MNN_DATA_FORMAT dataFormat) { + 
fbb_.AddElement(Blob::VT_DATAFORMAT, static_cast(dataFormat), 0); + } + void add_dataType(DataType dataType) { + fbb_.AddElement(Blob::VT_DATATYPE, static_cast(dataType), 1); + } + void add_uint8s(flatbuffers::Offset> uint8s) { + fbb_.AddOffset(Blob::VT_UINT8S, uint8s); + } + void add_int8s(flatbuffers::Offset> int8s) { + fbb_.AddOffset(Blob::VT_INT8S, int8s); + } + void add_int32s(flatbuffers::Offset> int32s) { + fbb_.AddOffset(Blob::VT_INT32S, int32s); + } + void add_int64s(flatbuffers::Offset> int64s) { + fbb_.AddOffset(Blob::VT_INT64S, int64s); + } + void add_float32s(flatbuffers::Offset> float32s) { + fbb_.AddOffset(Blob::VT_FLOAT32S, float32s); + } + void add_strings(flatbuffers::Offset>> strings) { + fbb_.AddOffset(Blob::VT_STRINGS, strings); + } + explicit BlobBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BlobBuilder &operator=(const BlobBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBlob( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dims = 0, + MNN_DATA_FORMAT dataFormat = MNN_DATA_FORMAT_NCHW, + DataType dataType = DataType_DT_FLOAT, + flatbuffers::Offset> uint8s = 0, + flatbuffers::Offset> int8s = 0, + flatbuffers::Offset> int32s = 0, + flatbuffers::Offset> int64s = 0, + flatbuffers::Offset> float32s = 0, + flatbuffers::Offset>> strings = 0) { + BlobBuilder builder_(_fbb); + builder_.add_strings(strings); + builder_.add_float32s(float32s); + builder_.add_int64s(int64s); + builder_.add_int32s(int32s); + builder_.add_int8s(int8s); + builder_.add_uint8s(uint8s); + builder_.add_dataType(dataType); + builder_.add_dims(dims); + builder_.add_dataFormat(dataFormat); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBlobDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dims = nullptr, + MNN_DATA_FORMAT dataFormat = 
MNN_DATA_FORMAT_NCHW, + DataType dataType = DataType_DT_FLOAT, + const std::vector *uint8s = nullptr, + const std::vector *int8s = nullptr, + const std::vector *int32s = nullptr, + const std::vector *int64s = nullptr, + const std::vector *float32s = nullptr, + const std::vector> *strings = nullptr) { + auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; + auto uint8s__ = uint8s ? _fbb.CreateVector(*uint8s) : 0; + auto int8s__ = int8s ? _fbb.CreateVector(*int8s) : 0; + auto int32s__ = int32s ? _fbb.CreateVector(*int32s) : 0; + auto int64s__ = int64s ? _fbb.CreateVector(*int64s) : 0; + auto float32s__ = float32s ? _fbb.CreateVector(*float32s) : 0; + auto strings__ = strings ? _fbb.CreateVector>(*strings) : 0; + return MNN::CreateBlob( + _fbb, + dims__, + dataFormat, + dataType, + uint8s__, + int8s__, + int32s__, + int64s__, + float32s__, + strings__); +} + +flatbuffers::Offset CreateBlob(flatbuffers::FlatBufferBuilder &_fbb, const BlobT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ListValueT : public flatbuffers::NativeTable { + typedef ListValue TableType; + std::vector s; + std::vector i; + std::vector f; + std::vector b; + std::vector type; + ListValueT() { + } +}; + +struct ListValue FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ListValueT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ListValueTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_S = 4, + VT_I = 6, + VT_F = 8, + VT_B = 10, + VT_TYPE = 12 + }; + const flatbuffers::Vector> *s() const { + return GetPointer> *>(VT_S); + } + const flatbuffers::Vector *i() const { + return GetPointer *>(VT_I); + } + const flatbuffers::Vector *f() const { + return GetPointer *>(VT_F); + } + const flatbuffers::Vector *b() const { + return GetPointer *>(VT_B); + } + const flatbuffers::Vector *type() const { + return GetPointer *>(VT_TYPE); + } + bool Verify(flatbuffers::Verifier &verifier) 
const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_S) && + verifier.VerifyVector(s()) && + verifier.VerifyVectorOfStrings(s()) && + VerifyOffset(verifier, VT_I) && + verifier.VerifyVector(i()) && + VerifyOffset(verifier, VT_F) && + verifier.VerifyVector(f()) && + VerifyOffset(verifier, VT_B) && + verifier.VerifyVector(b()) && + VerifyOffset(verifier, VT_TYPE) && + verifier.VerifyVector(type()) && + verifier.EndTable(); + } + ListValueT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ListValueT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ListValueT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ListValueBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_s(flatbuffers::Offset>> s) { + fbb_.AddOffset(ListValue::VT_S, s); + } + void add_i(flatbuffers::Offset> i) { + fbb_.AddOffset(ListValue::VT_I, i); + } + void add_f(flatbuffers::Offset> f) { + fbb_.AddOffset(ListValue::VT_F, f); + } + void add_b(flatbuffers::Offset> b) { + fbb_.AddOffset(ListValue::VT_B, b); + } + void add_type(flatbuffers::Offset> type) { + fbb_.AddOffset(ListValue::VT_TYPE, type); + } + explicit ListValueBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ListValueBuilder &operator=(const ListValueBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateListValue( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset>> s = 0, + flatbuffers::Offset> i = 0, + flatbuffers::Offset> f = 0, + flatbuffers::Offset> b = 0, + flatbuffers::Offset> type = 0) { + ListValueBuilder builder_(_fbb); + builder_.add_type(type); + builder_.add_b(b); + builder_.add_f(f); + builder_.add_i(i); + 
builder_.add_s(s); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateListValueDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector> *s = nullptr, + const std::vector *i = nullptr, + const std::vector *f = nullptr, + const std::vector *b = nullptr, + const std::vector *type = nullptr) { + auto s__ = s ? _fbb.CreateVector>(*s) : 0; + auto i__ = i ? _fbb.CreateVector(*i) : 0; + auto f__ = f ? _fbb.CreateVector(*f) : 0; + auto b__ = b ? _fbb.CreateVector(*b) : 0; + auto type__ = type ? _fbb.CreateVector(*type) : 0; + return MNN::CreateListValue( + _fbb, + s__, + i__, + f__, + b__, + type__); +} + +flatbuffers::Offset CreateListValue(flatbuffers::FlatBufferBuilder &_fbb, const ListValueT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AttributeT : public flatbuffers::NativeTable { + typedef Attribute TableType; + std::string s; + int32_t i; + bool b; + std::string key; + DataType type; + float f; + std::unique_ptr tensor; + std::unique_ptr list; + AttributeT() + : i(0), + b(false), + type(DataType_DT_INVALID), + f(0.0f) { + } +}; + +struct Attribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AttributeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return AttributeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_S = 4, + VT_I = 6, + VT_B = 8, + VT_KEY = 10, + VT_TYPE = 12, + VT_F = 14, + VT_TENSOR = 16, + VT_LIST = 18 + }; + const flatbuffers::String *s() const { + return GetPointer(VT_S); + } + int32_t i() const { + return GetField(VT_I, 0); + } + bool b() const { + return GetField(VT_B, 0) != 0; + } + const flatbuffers::String *key() const { + return GetPointer(VT_KEY); + } + DataType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + float f() const { + return GetField(VT_F, 0.0f); + } + const Blob *tensor() const { + return GetPointer(VT_TENSOR); + } + const ListValue *list() const { + return 
GetPointer(VT_LIST); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_S) && + verifier.VerifyString(s()) && + VerifyField(verifier, VT_I) && + VerifyField(verifier, VT_B) && + VerifyOffset(verifier, VT_KEY) && + verifier.VerifyString(key()) && + VerifyField(verifier, VT_TYPE) && + VerifyField(verifier, VT_F) && + VerifyOffset(verifier, VT_TENSOR) && + verifier.VerifyTable(tensor()) && + VerifyOffset(verifier, VT_LIST) && + verifier.VerifyTable(list()) && + verifier.EndTable(); + } + AttributeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AttributeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AttributeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AttributeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_s(flatbuffers::Offset s) { + fbb_.AddOffset(Attribute::VT_S, s); + } + void add_i(int32_t i) { + fbb_.AddElement(Attribute::VT_I, i, 0); + } + void add_b(bool b) { + fbb_.AddElement(Attribute::VT_B, static_cast(b), 0); + } + void add_key(flatbuffers::Offset key) { + fbb_.AddOffset(Attribute::VT_KEY, key); + } + void add_type(DataType type) { + fbb_.AddElement(Attribute::VT_TYPE, static_cast(type), 0); + } + void add_f(float f) { + fbb_.AddElement(Attribute::VT_F, f, 0.0f); + } + void add_tensor(flatbuffers::Offset tensor) { + fbb_.AddOffset(Attribute::VT_TENSOR, tensor); + } + void add_list(flatbuffers::Offset list) { + fbb_.AddOffset(Attribute::VT_LIST, list); + } + explicit AttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AttributeBuilder &operator=(const AttributeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + 
} +}; + +inline flatbuffers::Offset CreateAttribute( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset s = 0, + int32_t i = 0, + bool b = false, + flatbuffers::Offset key = 0, + DataType type = DataType_DT_INVALID, + float f = 0.0f, + flatbuffers::Offset tensor = 0, + flatbuffers::Offset list = 0) { + AttributeBuilder builder_(_fbb); + builder_.add_list(list); + builder_.add_tensor(tensor); + builder_.add_f(f); + builder_.add_type(type); + builder_.add_key(key); + builder_.add_i(i); + builder_.add_s(s); + builder_.add_b(b); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateAttributeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *s = nullptr, + int32_t i = 0, + bool b = false, + const char *key = nullptr, + DataType type = DataType_DT_INVALID, + float f = 0.0f, + flatbuffers::Offset tensor = 0, + flatbuffers::Offset list = 0) { + auto s__ = s ? _fbb.CreateString(s) : 0; + auto key__ = key ? _fbb.CreateString(key) : 0; + return MNN::CreateAttribute( + _fbb, + s__, + i, + b, + key__, + type, + f, + tensor, + list); +} + +flatbuffers::Offset CreateAttribute(flatbuffers::FlatBufferBuilder &_fbb, const AttributeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline BlobT *Blob::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BlobT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Blob::UnPackTo(BlobT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; + { auto _e = dataFormat(); _o->dataFormat = _e; }; + { auto _e = dataType(); _o->dataType = _e; }; + { auto _e = uint8s(); if (_e) { _o->uint8s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->uint8s[_i] = _e->Get(_i); } } }; + { auto _e = int8s(); if (_e) { 
_o->int8s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->int8s[_i] = _e->Get(_i); } } }; + { auto _e = int32s(); if (_e) { _o->int32s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->int32s[_i] = _e->Get(_i); } } }; + { auto _e = int64s(); if (_e) { _o->int64s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->int64s[_i] = _e->Get(_i); } } }; + { auto _e = float32s(); if (_e) { _o->float32s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->float32s[_i] = _e->Get(_i); } } }; + { auto _e = strings(); if (_e) { _o->strings.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->strings[_i] = _e->Get(_i)->str(); } } }; +} + +inline flatbuffers::Offset Blob::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BlobT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBlob(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBlob(flatbuffers::FlatBufferBuilder &_fbb, const BlobT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BlobT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dims = _o->dims.size() ? _fbb.CreateVector(_o->dims) : 0; + auto _dataFormat = _o->dataFormat; + auto _dataType = _o->dataType; + auto _uint8s = _o->uint8s.size() ? _fbb.CreateVector(_o->uint8s) : 0; + auto _int8s = _o->int8s.size() ? _fbb.CreateVector(_o->int8s) : 0; + auto _int32s = _o->int32s.size() ? _fbb.CreateVector(_o->int32s) : 0; + auto _int64s = _o->int64s.size() ? _fbb.CreateVector(_o->int64s) : 0; + auto _float32s = _o->float32s.size() ? _fbb.CreateVector(_o->float32s) : 0; + auto _strings = _o->strings.size() ? 
_fbb.CreateVectorOfStrings(_o->strings) : 0; + return MNN::CreateBlob( + _fbb, + _dims, + _dataFormat, + _dataType, + _uint8s, + _int8s, + _int32s, + _int64s, + _float32s, + _strings); +} + +inline ListValueT *ListValue::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ListValueT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ListValue::UnPackTo(ListValueT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = s(); if (_e) { _o->s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->s[_i] = _e->Get(_i)->str(); } } }; + { auto _e = i(); if (_e) { _o->i.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->i[_i] = _e->Get(_i); } } }; + { auto _e = f(); if (_e) { _o->f.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->f[_i] = _e->Get(_i); } } }; + { auto _e = b(); if (_e) { _o->b.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->b[_i] = _e->Get(_i) != 0; } } }; + { auto _e = type(); if (_e) { _o->type.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->type[_i] = static_cast(_e->Get(_i)); } } }; +} + +inline flatbuffers::Offset ListValue::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ListValueT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateListValue(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateListValue(flatbuffers::FlatBufferBuilder &_fbb, const ListValueT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ListValueT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _s = _o->s.size() ? _fbb.CreateVectorOfStrings(_o->s) : 0; + auto _i = _o->i.size() ? 
_fbb.CreateVector(_o->i) : 0; + auto _f = _o->f.size() ? _fbb.CreateVector(_o->f) : 0; + auto _b = _o->b.size() ? _fbb.CreateVector(_o->b) : 0; + auto _type = _o->type.size() ? _fbb.CreateVectorScalarCast(flatbuffers::data(_o->type), _o->type.size()) : 0; + return MNN::CreateListValue( + _fbb, + _s, + _i, + _f, + _b, + _type); +} + +inline AttributeT *Attribute::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new AttributeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Attribute::UnPackTo(AttributeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = s(); if (_e) _o->s = _e->str(); }; + { auto _e = i(); _o->i = _e; }; + { auto _e = b(); _o->b = _e; }; + { auto _e = key(); if (_e) _o->key = _e->str(); }; + { auto _e = type(); _o->type = _e; }; + { auto _e = f(); _o->f = _e; }; + { auto _e = tensor(); if (_e) _o->tensor = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = list(); if (_e) _o->list = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset Attribute::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AttributeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAttribute(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAttribute(flatbuffers::FlatBufferBuilder &_fbb, const AttributeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AttributeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _s = _o->s.empty() ? 0 : _fbb.CreateString(_o->s); + auto _i = _o->i; + auto _b = _o->b; + auto _key = _o->key.empty() ? 0 : _fbb.CreateString(_o->key); + auto _type = _o->type; + auto _f = _o->f; + auto _tensor = _o->tensor ? CreateBlob(_fbb, _o->tensor.get(), _rehasher) : 0; + auto _list = _o->list ? 
CreateListValue(_fbb, _o->list.get(), _rehasher) : 0; + return MNN::CreateAttribute( + _fbb, + _s, + _i, + _b, + _key, + _type, + _f, + _tensor, + _list); +} + +inline const flatbuffers::TypeTable *MNN_DATA_FORMATTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + MNN_DATA_FORMATTypeTable + }; + static const char * const names[] = { + "NCHW", + "NHWC", + "NC4HW4", + "NHWC4", + "UNKNOWN" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *BlobTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_INT, 0, 1 }, + { flatbuffers::ET_UCHAR, 1, -1 }, + { flatbuffers::ET_CHAR, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_LONG, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_STRING, 1, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + MNN_DATA_FORMATTypeTable, + DataTypeTypeTable + }; + static const char * const names[] = { + "dims", + "dataFormat", + "dataType", + "uint8s", + "int8s", + "int32s", + "int64s", + "float32s", + "strings" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ListValueTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_STRING, 1, -1 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_BOOL, 1, -1 }, + { flatbuffers::ET_INT, 1, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const 
char * const names[] = { + "s", + "i", + "f", + "b", + "type" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *AttributeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_STRING, 0, -1 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 1 }, + { flatbuffers::ET_SEQUENCE, 0, 2 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable, + BlobTypeTable, + ListValueTypeTable + }; + static const char * const names[] = { + "s", + "i", + "b", + "key", + "type", + "f", + "tensor", + "list" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 8, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_TENSOR_MNN_H_ diff --git a/schema/current/TensorflowOp_generated.h b/schema/current/TensorflowOp_generated.h new file mode 100644 index 000000000..14a259777 --- /dev/null +++ b/schema/current/TensorflowOp_generated.h @@ -0,0 +1,5076 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_TENSORFLOWOP_MNN_H_ +#define FLATBUFFERS_GENERATED_TENSORFLOWOP_MNN_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "Tensor_generated.h" +#include "Type_generated.h" + +namespace MNN { + +struct BinaryOp; +struct BinaryOpT; + +struct PackParam; +struct PackParamT; + +struct StridedSliceParam; +struct StridedSliceParamT; + +struct SqueezeParam; +struct SqueezeParamT; + +struct CastParam; +struct CastParamT; + +struct ReductionParam; +struct ReductionParamT; + +struct Gather; +struct GatherT; + +struct ExpandDims; +struct ExpandDimsT; + +struct Selu; +struct SeluT; + +struct AsString; +struct AsStringT; + 
+struct ReduceJoin; +struct ReduceJoinT; + +struct UnaryOp; +struct UnaryOpT; + +struct TopKV2; +struct TopKV2T; + +struct CropAndResize; +struct CropAndResizeT; + +struct Fill; +struct FillT; + +struct GatherV2; +struct GatherV2T; + +struct NonMaxSuppressionV2; +struct NonMaxSuppressionV2T; + +struct Range; +struct RangeT; + +struct Rank; +struct RankT; + +struct Size; +struct SizeT; + +struct Transpose; +struct TransposeT; + +struct SliceTf; +struct SliceTfT; + +struct QuantizeMaxMin; +struct QuantizeMaxMinT; + +struct Crop; +struct CropT; + +struct SpaceBatch; +struct SpaceBatchT; + +struct MatMul; +struct MatMulT; + +struct MomentsParam; +struct MomentsParamT; + +struct RNNParam; +struct RNNParamT; + +struct BatchMatMulParam; +struct BatchMatMulParamT; + +struct DepthSpaceParam; +struct DepthSpaceParamT; + +struct ReverseSequenceParam; +struct ReverseSequenceParamT; + +struct DetectionPostProcessParam; +struct DetectionPostProcessParamT; + +struct OneHotParam; +struct OneHotParamT; + +struct PadParam; +struct PadParamT; + +inline const flatbuffers::TypeTable *BinaryOpTypeTable(); + +inline const flatbuffers::TypeTable *PackParamTypeTable(); + +inline const flatbuffers::TypeTable *StridedSliceParamTypeTable(); + +inline const flatbuffers::TypeTable *SqueezeParamTypeTable(); + +inline const flatbuffers::TypeTable *CastParamTypeTable(); + +inline const flatbuffers::TypeTable *ReductionParamTypeTable(); + +inline const flatbuffers::TypeTable *GatherTypeTable(); + +inline const flatbuffers::TypeTable *ExpandDimsTypeTable(); + +inline const flatbuffers::TypeTable *SeluTypeTable(); + +inline const flatbuffers::TypeTable *AsStringTypeTable(); + +inline const flatbuffers::TypeTable *ReduceJoinTypeTable(); + +inline const flatbuffers::TypeTable *UnaryOpTypeTable(); + +inline const flatbuffers::TypeTable *TopKV2TypeTable(); + +inline const flatbuffers::TypeTable *CropAndResizeTypeTable(); + +inline const flatbuffers::TypeTable *FillTypeTable(); + +inline const 
flatbuffers::TypeTable *GatherV2TypeTable(); + +inline const flatbuffers::TypeTable *NonMaxSuppressionV2TypeTable(); + +inline const flatbuffers::TypeTable *RangeTypeTable(); + +inline const flatbuffers::TypeTable *RankTypeTable(); + +inline const flatbuffers::TypeTable *SizeTypeTable(); + +inline const flatbuffers::TypeTable *TransposeTypeTable(); + +inline const flatbuffers::TypeTable *SliceTfTypeTable(); + +inline const flatbuffers::TypeTable *QuantizeMaxMinTypeTable(); + +inline const flatbuffers::TypeTable *CropTypeTable(); + +inline const flatbuffers::TypeTable *SpaceBatchTypeTable(); + +inline const flatbuffers::TypeTable *MatMulTypeTable(); + +inline const flatbuffers::TypeTable *MomentsParamTypeTable(); + +inline const flatbuffers::TypeTable *RNNParamTypeTable(); + +inline const flatbuffers::TypeTable *BatchMatMulParamTypeTable(); + +inline const flatbuffers::TypeTable *DepthSpaceParamTypeTable(); + +inline const flatbuffers::TypeTable *ReverseSequenceParamTypeTable(); + +inline const flatbuffers::TypeTable *DetectionPostProcessParamTypeTable(); + +inline const flatbuffers::TypeTable *OneHotParamTypeTable(); + +inline const flatbuffers::TypeTable *PadParamTypeTable(); + +enum BinaryOpOperation { + BinaryOpOperation_ADD = 0, + BinaryOpOperation_SUB = 1, + BinaryOpOperation_MUL = 2, + BinaryOpOperation_DIV = 3, + BinaryOpOperation_MAX_TEMP = 4, + BinaryOpOperation_MIN_TEMP = 5, + BinaryOpOperation_POW = 6, + BinaryOpOperation_REALDIV = 7, + BinaryOpOperation_MINIMUM = 8, + BinaryOpOperation_MAXIMUM = 9, + BinaryOpOperation_GREATER = 10, + BinaryOpOperation_GREATER_EQUAL = 11, + BinaryOpOperation_LESS = 12, + BinaryOpOperation_FLOORDIV = 13, + BinaryOpOperation_SquaredDifference = 14, + BinaryOpOperation_EQUAL = 15, + BinaryOpOperation_LESS_EQUAL = 16, + BinaryOpOperation_FLOORMOD = 17, + BinaryOpOperation_MOD = 19, + BinaryOpOperation_ATAN2 = 20, + BinaryOpOperation_LOGICALOR = 21, + BinaryOpOperation_NOTEQUAL = 22, + BinaryOpOperation_MIN = 
BinaryOpOperation_ADD, + BinaryOpOperation_MAX = BinaryOpOperation_NOTEQUAL +}; + +inline const BinaryOpOperation (&EnumValuesBinaryOpOperation())[22] { + static const BinaryOpOperation values[] = { + BinaryOpOperation_ADD, + BinaryOpOperation_SUB, + BinaryOpOperation_MUL, + BinaryOpOperation_DIV, + BinaryOpOperation_MAX_TEMP, + BinaryOpOperation_MIN_TEMP, + BinaryOpOperation_POW, + BinaryOpOperation_REALDIV, + BinaryOpOperation_MINIMUM, + BinaryOpOperation_MAXIMUM, + BinaryOpOperation_GREATER, + BinaryOpOperation_GREATER_EQUAL, + BinaryOpOperation_LESS, + BinaryOpOperation_FLOORDIV, + BinaryOpOperation_SquaredDifference, + BinaryOpOperation_EQUAL, + BinaryOpOperation_LESS_EQUAL, + BinaryOpOperation_FLOORMOD, + BinaryOpOperation_MOD, + BinaryOpOperation_ATAN2, + BinaryOpOperation_LOGICALOR, + BinaryOpOperation_NOTEQUAL + }; + return values; +} + +inline const char * const *EnumNamesBinaryOpOperation() { + static const char * const names[] = { + "ADD", + "SUB", + "MUL", + "DIV", + "MAX_TEMP", + "MIN_TEMP", + "POW", + "REALDIV", + "MINIMUM", + "MAXIMUM", + "GREATER", + "GREATER_EQUAL", + "LESS", + "FLOORDIV", + "SquaredDifference", + "EQUAL", + "LESS_EQUAL", + "FLOORMOD", + "", + "MOD", + "ATAN2", + "LOGICALOR", + "NOTEQUAL", + nullptr + }; + return names; +} + +inline const char *EnumNameBinaryOpOperation(BinaryOpOperation e) { + if (e < BinaryOpOperation_ADD || e > BinaryOpOperation_NOTEQUAL) return ""; + const size_t index = static_cast(e); + return EnumNamesBinaryOpOperation()[index]; +} + +enum ReductionType { + ReductionType_SUM = 0, + ReductionType_ASUM = 1, + ReductionType_SUMSQ = 2, + ReductionType_MEAN = 3, + ReductionType_MAXIMUM = 4, + ReductionType_MINIMUM = 5, + ReductionType_PROD = 6, + ReductionType_ANY = 7, + ReductionType_ALL = 8, + ReductionType_MIN = ReductionType_SUM, + ReductionType_MAX = ReductionType_ALL +}; + +inline const ReductionType (&EnumValuesReductionType())[9] { + static const ReductionType values[] = { + ReductionType_SUM, + 
ReductionType_ASUM, + ReductionType_SUMSQ, + ReductionType_MEAN, + ReductionType_MAXIMUM, + ReductionType_MINIMUM, + ReductionType_PROD, + ReductionType_ANY, + ReductionType_ALL + }; + return values; +} + +inline const char * const *EnumNamesReductionType() { + static const char * const names[] = { + "SUM", + "ASUM", + "SUMSQ", + "MEAN", + "MAXIMUM", + "MINIMUM", + "PROD", + "ANY", + "ALL", + nullptr + }; + return names; +} + +inline const char *EnumNameReductionType(ReductionType e) { + if (e < ReductionType_SUM || e > ReductionType_ALL) return ""; + const size_t index = static_cast(e); + return EnumNamesReductionType()[index]; +} + +enum UnaryOpOperation { + UnaryOpOperation_ABS = 0, + UnaryOpOperation_NEG = 1, + UnaryOpOperation_FLOOR = 2, + UnaryOpOperation_CEIL = 3, + UnaryOpOperation_SQUARE = 4, + UnaryOpOperation_SQRT = 5, + UnaryOpOperation_RSQRT = 6, + UnaryOpOperation_EXP = 7, + UnaryOpOperation_LOG = 8, + UnaryOpOperation_SIN = 9, + UnaryOpOperation_COS = 10, + UnaryOpOperation_TAN = 11, + UnaryOpOperation_ASIN = 12, + UnaryOpOperation_ACOS = 13, + UnaryOpOperation_ATAN = 14, + UnaryOpOperation_RECIPROCAL = 15, + UnaryOpOperation_LOG1P = 16, + UnaryOpOperation_BNLL = 17, + UnaryOpOperation_ACOSH = 18, + UnaryOpOperation_SINH = 19, + UnaryOpOperation_ASINH = 20, + UnaryOpOperation_ATANH = 21, + UnaryOpOperation_SIGN = 22, + UnaryOpOperation_ROUND = 23, + UnaryOpOperation_COSH = 24, + UnaryOpOperation_ERF = 25, + UnaryOpOperation_ERFC = 26, + UnaryOpOperation_ERFINV = 27, + UnaryOpOperation_EXPM1 = 28, + UnaryOpOperation_MIN = UnaryOpOperation_ABS, + UnaryOpOperation_MAX = UnaryOpOperation_EXPM1 +}; + +inline const UnaryOpOperation (&EnumValuesUnaryOpOperation())[29] { + static const UnaryOpOperation values[] = { + UnaryOpOperation_ABS, + UnaryOpOperation_NEG, + UnaryOpOperation_FLOOR, + UnaryOpOperation_CEIL, + UnaryOpOperation_SQUARE, + UnaryOpOperation_SQRT, + UnaryOpOperation_RSQRT, + UnaryOpOperation_EXP, + UnaryOpOperation_LOG, + 
UnaryOpOperation_SIN, + UnaryOpOperation_COS, + UnaryOpOperation_TAN, + UnaryOpOperation_ASIN, + UnaryOpOperation_ACOS, + UnaryOpOperation_ATAN, + UnaryOpOperation_RECIPROCAL, + UnaryOpOperation_LOG1P, + UnaryOpOperation_BNLL, + UnaryOpOperation_ACOSH, + UnaryOpOperation_SINH, + UnaryOpOperation_ASINH, + UnaryOpOperation_ATANH, + UnaryOpOperation_SIGN, + UnaryOpOperation_ROUND, + UnaryOpOperation_COSH, + UnaryOpOperation_ERF, + UnaryOpOperation_ERFC, + UnaryOpOperation_ERFINV, + UnaryOpOperation_EXPM1 + }; + return values; +} + +inline const char * const *EnumNamesUnaryOpOperation() { + static const char * const names[] = { + "ABS", + "NEG", + "FLOOR", + "CEIL", + "SQUARE", + "SQRT", + "RSQRT", + "EXP", + "LOG", + "SIN", + "COS", + "TAN", + "ASIN", + "ACOS", + "ATAN", + "RECIPROCAL", + "LOG1P", + "BNLL", + "ACOSH", + "SINH", + "ASINH", + "ATANH", + "SIGN", + "ROUND", + "COSH", + "ERF", + "ERFC", + "ERFINV", + "EXPM1", + nullptr + }; + return names; +} + +inline const char *EnumNameUnaryOpOperation(UnaryOpOperation e) { + if (e < UnaryOpOperation_ABS || e > UnaryOpOperation_EXPM1) return ""; + const size_t index = static_cast(e); + return EnumNamesUnaryOpOperation()[index]; +} + +enum CropAndResizeMethod { + CropAndResizeMethod_BILINEAR = 0, + CropAndResizeMethod_NEAREST = 1, + CropAndResizeMethod_MIN = CropAndResizeMethod_BILINEAR, + CropAndResizeMethod_MAX = CropAndResizeMethod_NEAREST +}; + +inline const CropAndResizeMethod (&EnumValuesCropAndResizeMethod())[2] { + static const CropAndResizeMethod values[] = { + CropAndResizeMethod_BILINEAR, + CropAndResizeMethod_NEAREST + }; + return values; +} + +inline const char * const *EnumNamesCropAndResizeMethod() { + static const char * const names[] = { + "BILINEAR", + "NEAREST", + nullptr + }; + return names; +} + +inline const char *EnumNameCropAndResizeMethod(CropAndResizeMethod e) { + if (e < CropAndResizeMethod_BILINEAR || e > CropAndResizeMethod_NEAREST) return ""; + const size_t index = static_cast(e); + return 
EnumNamesCropAndResizeMethod()[index]; +} + +enum PadValueMode { + PadValueMode_CONSTANT = 0, + PadValueMode_REFLECT = 1, + PadValueMode_SYMMETRIC = 2, + PadValueMode_MIN = PadValueMode_CONSTANT, + PadValueMode_MAX = PadValueMode_SYMMETRIC +}; + +inline const PadValueMode (&EnumValuesPadValueMode())[3] { + static const PadValueMode values[] = { + PadValueMode_CONSTANT, + PadValueMode_REFLECT, + PadValueMode_SYMMETRIC + }; + return values; +} + +inline const char * const *EnumNamesPadValueMode() { + static const char * const names[] = { + "CONSTANT", + "REFLECT", + "SYMMETRIC", + nullptr + }; + return names; +} + +inline const char *EnumNamePadValueMode(PadValueMode e) { + if (e < PadValueMode_CONSTANT || e > PadValueMode_SYMMETRIC) return ""; + const size_t index = static_cast(e); + return EnumNamesPadValueMode()[index]; +} + +struct BinaryOpT : public flatbuffers::NativeTable { + typedef BinaryOp TableType; + int32_t opType; + DataType T; + BinaryOpT() + : opType(0), + T(DataType_DT_FLOAT) { + } +}; + +struct BinaryOp FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BinaryOpT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return BinaryOpTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OPTYPE = 4, + VT_T = 6 + }; + int32_t opType() const { + return GetField(VT_OPTYPE, 0); + } + DataType T() const { + return static_cast(GetField(VT_T, 1)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OPTYPE) && + VerifyField(verifier, VT_T) && + verifier.EndTable(); + } + BinaryOpT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BinaryOpT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BinaryOpT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + 
+struct BinaryOpBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_opType(int32_t opType) { + fbb_.AddElement(BinaryOp::VT_OPTYPE, opType, 0); + } + void add_T(DataType T) { + fbb_.AddElement(BinaryOp::VT_T, static_cast(T), 1); + } + explicit BinaryOpBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BinaryOpBuilder &operator=(const BinaryOpBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBinaryOp( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t opType = 0, + DataType T = DataType_DT_FLOAT) { + BinaryOpBuilder builder_(_fbb); + builder_.add_T(T); + builder_.add_opType(opType); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBinaryOp(flatbuffers::FlatBufferBuilder &_fbb, const BinaryOpT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PackParamT : public flatbuffers::NativeTable { + typedef PackParam TableType; + DataType dataType; + int32_t axis; + PackParamT() + : dataType(DataType_DT_INVALID), + axis(0) { + } +}; + +struct PackParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PackParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return PackParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATATYPE = 4, + VT_AXIS = 6 + }; + DataType dataType() const { + return static_cast(GetField(VT_DATATYPE, 0)); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DATATYPE) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } + PackParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PackParamT *_o, const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PackParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dataType(DataType dataType) { + fbb_.AddElement(PackParam::VT_DATATYPE, static_cast(dataType), 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(PackParam::VT_AXIS, axis, 0); + } + explicit PackParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PackParamBuilder &operator=(const PackParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePackParam( + flatbuffers::FlatBufferBuilder &_fbb, + DataType dataType = DataType_DT_INVALID, + int32_t axis = 0) { + PackParamBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_dataType(dataType); + return builder_.Finish(); +} + +flatbuffers::Offset CreatePackParam(flatbuffers::FlatBufferBuilder &_fbb, const PackParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StridedSliceParamT : public flatbuffers::NativeTable { + typedef StridedSliceParam TableType; + DataType Index; + DataType T; + int32_t beginMask; + int32_t endMask; + int32_t ellipsisMask; + int32_t newAxisMask; + int32_t shrinkAxisMask; + StridedSliceParamT() + : Index(DataType_DT_INVALID), + T(DataType_DT_INVALID), + beginMask(0), + endMask(0), + ellipsisMask(0), + newAxisMask(0), + shrinkAxisMask(0) { + } +}; + +struct StridedSliceParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef StridedSliceParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return StridedSliceParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { 
+ VT_INDEX = 4, + VT_T = 6, + VT_BEGINMASK = 8, + VT_ENDMASK = 10, + VT_ELLIPSISMASK = 12, + VT_NEWAXISMASK = 14, + VT_SHRINKAXISMASK = 16 + }; + DataType Index() const { + return static_cast(GetField(VT_INDEX, 0)); + } + DataType T() const { + return static_cast(GetField(VT_T, 0)); + } + int32_t beginMask() const { + return GetField(VT_BEGINMASK, 0); + } + int32_t endMask() const { + return GetField(VT_ENDMASK, 0); + } + int32_t ellipsisMask() const { + return GetField(VT_ELLIPSISMASK, 0); + } + int32_t newAxisMask() const { + return GetField(VT_NEWAXISMASK, 0); + } + int32_t shrinkAxisMask() const { + return GetField(VT_SHRINKAXISMASK, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_INDEX) && + VerifyField(verifier, VT_T) && + VerifyField(verifier, VT_BEGINMASK) && + VerifyField(verifier, VT_ENDMASK) && + VerifyField(verifier, VT_ELLIPSISMASK) && + VerifyField(verifier, VT_NEWAXISMASK) && + VerifyField(verifier, VT_SHRINKAXISMASK) && + verifier.EndTable(); + } + StridedSliceParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StridedSliceParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StridedSliceParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_Index(DataType Index) { + fbb_.AddElement(StridedSliceParam::VT_INDEX, static_cast(Index), 0); + } + void add_T(DataType T) { + fbb_.AddElement(StridedSliceParam::VT_T, static_cast(T), 0); + } + void add_beginMask(int32_t beginMask) { + fbb_.AddElement(StridedSliceParam::VT_BEGINMASK, beginMask, 0); + } + void add_endMask(int32_t endMask) { + fbb_.AddElement(StridedSliceParam::VT_ENDMASK, endMask, 0); + } + void add_ellipsisMask(int32_t 
ellipsisMask) { + fbb_.AddElement(StridedSliceParam::VT_ELLIPSISMASK, ellipsisMask, 0); + } + void add_newAxisMask(int32_t newAxisMask) { + fbb_.AddElement(StridedSliceParam::VT_NEWAXISMASK, newAxisMask, 0); + } + void add_shrinkAxisMask(int32_t shrinkAxisMask) { + fbb_.AddElement(StridedSliceParam::VT_SHRINKAXISMASK, shrinkAxisMask, 0); + } + explicit StridedSliceParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + StridedSliceParamBuilder &operator=(const StridedSliceParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateStridedSliceParam( + flatbuffers::FlatBufferBuilder &_fbb, + DataType Index = DataType_DT_INVALID, + DataType T = DataType_DT_INVALID, + int32_t beginMask = 0, + int32_t endMask = 0, + int32_t ellipsisMask = 0, + int32_t newAxisMask = 0, + int32_t shrinkAxisMask = 0) { + StridedSliceParamBuilder builder_(_fbb); + builder_.add_shrinkAxisMask(shrinkAxisMask); + builder_.add_newAxisMask(newAxisMask); + builder_.add_ellipsisMask(ellipsisMask); + builder_.add_endMask(endMask); + builder_.add_beginMask(beginMask); + builder_.add_T(T); + builder_.add_Index(Index); + return builder_.Finish(); +} + +flatbuffers::Offset CreateStridedSliceParam(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SqueezeParamT : public flatbuffers::NativeTable { + typedef SqueezeParam TableType; + std::vector squeezeDims; + SqueezeParamT() { + } +}; + +struct SqueezeParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SqueezeParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return SqueezeParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SQUEEZEDIMS = 4 + }; + const flatbuffers::Vector *squeezeDims() const { + 
return GetPointer *>(VT_SQUEEZEDIMS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SQUEEZEDIMS) && + verifier.VerifyVector(squeezeDims()) && + verifier.EndTable(); + } + SqueezeParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SqueezeParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SqueezeParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_squeezeDims(flatbuffers::Offset> squeezeDims) { + fbb_.AddOffset(SqueezeParam::VT_SQUEEZEDIMS, squeezeDims); + } + explicit SqueezeParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SqueezeParamBuilder &operator=(const SqueezeParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSqueezeParam( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> squeezeDims = 0) { + SqueezeParamBuilder builder_(_fbb); + builder_.add_squeezeDims(squeezeDims); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSqueezeParamDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *squeezeDims = nullptr) { + auto squeezeDims__ = squeezeDims ? 
_fbb.CreateVector(*squeezeDims) : 0; + return MNN::CreateSqueezeParam( + _fbb, + squeezeDims__); +} + +flatbuffers::Offset CreateSqueezeParam(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CastParamT : public flatbuffers::NativeTable { + typedef CastParam TableType; + DataType srcT; + DataType dstT; + CastParamT() + : srcT(DataType_DT_INVALID), + dstT(DataType_DT_INVALID) { + } +}; + +struct CastParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CastParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return CastParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SRCT = 4, + VT_DSTT = 6 + }; + DataType srcT() const { + return static_cast(GetField(VT_SRCT, 0)); + } + DataType dstT() const { + return static_cast(GetField(VT_DSTT, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SRCT) && + VerifyField(verifier, VT_DSTT) && + verifier.EndTable(); + } + CastParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CastParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CastParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_srcT(DataType srcT) { + fbb_.AddElement(CastParam::VT_SRCT, static_cast(srcT), 0); + } + void add_dstT(DataType dstT) { + fbb_.AddElement(CastParam::VT_DSTT, static_cast(dstT), 0); + } + explicit CastParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CastParamBuilder &operator=(const CastParamBuilder &); + flatbuffers::Offset Finish() { + const auto end 
= fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCastParam( + flatbuffers::FlatBufferBuilder &_fbb, + DataType srcT = DataType_DT_INVALID, + DataType dstT = DataType_DT_INVALID) { + CastParamBuilder builder_(_fbb); + builder_.add_dstT(dstT); + builder_.add_srcT(srcT); + return builder_.Finish(); +} + +flatbuffers::Offset CreateCastParam(flatbuffers::FlatBufferBuilder &_fbb, const CastParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReductionParamT : public flatbuffers::NativeTable { + typedef ReductionParam TableType; + ReductionType operation; + std::vector dim; + float coeff; + bool keepDims; + DataType dType; + ReductionParamT() + : operation(ReductionType_SUM), + coeff(0.0f), + keepDims(false), + dType(DataType_DT_FLOAT) { + } +}; + +struct ReductionParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReductionParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ReductionParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OPERATION = 4, + VT_DIM = 6, + VT_COEFF = 8, + VT_KEEPDIMS = 10, + VT_DTYPE = 12 + }; + ReductionType operation() const { + return static_cast(GetField(VT_OPERATION, 0)); + } + const flatbuffers::Vector *dim() const { + return GetPointer *>(VT_DIM); + } + float coeff() const { + return GetField(VT_COEFF, 0.0f); + } + bool keepDims() const { + return GetField(VT_KEEPDIMS, 0) != 0; + } + DataType dType() const { + return static_cast(GetField(VT_DTYPE, 1)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OPERATION) && + VerifyOffset(verifier, VT_DIM) && + verifier.VerifyVector(dim()) && + VerifyField(verifier, VT_COEFF) && + VerifyField(verifier, VT_KEEPDIMS) && + VerifyField(verifier, VT_DTYPE) && + verifier.EndTable(); + } + ReductionParamT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReductionParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReductionParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReductionParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_operation(ReductionType operation) { + fbb_.AddElement(ReductionParam::VT_OPERATION, static_cast(operation), 0); + } + void add_dim(flatbuffers::Offset> dim) { + fbb_.AddOffset(ReductionParam::VT_DIM, dim); + } + void add_coeff(float coeff) { + fbb_.AddElement(ReductionParam::VT_COEFF, coeff, 0.0f); + } + void add_keepDims(bool keepDims) { + fbb_.AddElement(ReductionParam::VT_KEEPDIMS, static_cast(keepDims), 0); + } + void add_dType(DataType dType) { + fbb_.AddElement(ReductionParam::VT_DTYPE, static_cast(dType), 1); + } + explicit ReductionParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ReductionParamBuilder &operator=(const ReductionParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReductionParam( + flatbuffers::FlatBufferBuilder &_fbb, + ReductionType operation = ReductionType_SUM, + flatbuffers::Offset> dim = 0, + float coeff = 0.0f, + bool keepDims = false, + DataType dType = DataType_DT_FLOAT) { + ReductionParamBuilder builder_(_fbb); + builder_.add_dType(dType); + builder_.add_coeff(coeff); + builder_.add_dim(dim); + builder_.add_keepDims(keepDims); + builder_.add_operation(operation); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateReductionParamDirect( + flatbuffers::FlatBufferBuilder &_fbb, + ReductionType operation = ReductionType_SUM, + const std::vector *dim = nullptr, + float coeff = 0.0f, + bool 
keepDims = false, + DataType dType = DataType_DT_FLOAT) { + auto dim__ = dim ? _fbb.CreateVector(*dim) : 0; + return MNN::CreateReductionParam( + _fbb, + operation, + dim__, + coeff, + keepDims, + dType); +} + +flatbuffers::Offset CreateReductionParam(flatbuffers::FlatBufferBuilder &_fbb, const ReductionParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GatherT : public flatbuffers::NativeTable { + typedef Gather TableType; + DataType Tindices; + DataType Tparams; + bool validateIndices; + int32_t axis; + GatherT() + : Tindices(DataType_DT_INVALID), + Tparams(DataType_DT_INVALID), + validateIndices(false), + axis(0) { + } +}; + +struct Gather FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GatherT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return GatherTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TINDICES = 4, + VT_TPARAMS = 6, + VT_VALIDATEINDICES = 8, + VT_AXIS = 10 + }; + DataType Tindices() const { + return static_cast(GetField(VT_TINDICES, 0)); + } + DataType Tparams() const { + return static_cast(GetField(VT_TPARAMS, 0)); + } + bool validateIndices() const { + return GetField(VT_VALIDATEINDICES, 0) != 0; + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TINDICES) && + VerifyField(verifier, VT_TPARAMS) && + VerifyField(verifier, VT_VALIDATEINDICES) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } + GatherT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GatherT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GatherBuilder { + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_Tindices(DataType Tindices) { + fbb_.AddElement(Gather::VT_TINDICES, static_cast(Tindices), 0); + } + void add_Tparams(DataType Tparams) { + fbb_.AddElement(Gather::VT_TPARAMS, static_cast(Tparams), 0); + } + void add_validateIndices(bool validateIndices) { + fbb_.AddElement(Gather::VT_VALIDATEINDICES, static_cast(validateIndices), 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(Gather::VT_AXIS, axis, 0); + } + explicit GatherBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GatherBuilder &operator=(const GatherBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGather( + flatbuffers::FlatBufferBuilder &_fbb, + DataType Tindices = DataType_DT_INVALID, + DataType Tparams = DataType_DT_INVALID, + bool validateIndices = false, + int32_t axis = 0) { + GatherBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_Tparams(Tparams); + builder_.add_Tindices(Tindices); + builder_.add_validateIndices(validateIndices); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGather(flatbuffers::FlatBufferBuilder &_fbb, const GatherT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ExpandDimsT : public flatbuffers::NativeTable { + typedef ExpandDims TableType; + DataType T; + DataType Tdim; + int32_t axis; + ExpandDimsT() + : T(DataType_DT_INVALID), + Tdim(DataType_DT_INVALID), + axis(0) { + } +}; + +struct ExpandDims FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ExpandDimsT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ExpandDimsTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_T = 4, + VT_TDIM = 6, + VT_AXIS = 8 + }; + DataType T() const { + return 
static_cast(GetField(VT_T, 0)); + } + DataType Tdim() const { + return static_cast(GetField(VT_TDIM, 0)); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_T) && + VerifyField(verifier, VT_TDIM) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } + ExpandDimsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ExpandDimsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ExpandDimsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_T(DataType T) { + fbb_.AddElement(ExpandDims::VT_T, static_cast(T), 0); + } + void add_Tdim(DataType Tdim) { + fbb_.AddElement(ExpandDims::VT_TDIM, static_cast(Tdim), 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(ExpandDims::VT_AXIS, axis, 0); + } + explicit ExpandDimsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ExpandDimsBuilder &operator=(const ExpandDimsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateExpandDims( + flatbuffers::FlatBufferBuilder &_fbb, + DataType T = DataType_DT_INVALID, + DataType Tdim = DataType_DT_INVALID, + int32_t axis = 0) { + ExpandDimsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_Tdim(Tdim); + builder_.add_T(T); + return builder_.Finish(); +} + +flatbuffers::Offset CreateExpandDims(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SeluT : public flatbuffers::NativeTable { + typedef Selu TableType; 
+ float scale; + float alpha; + SeluT() + : scale(0.0f), + alpha(0.0f) { + } +}; + +struct Selu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SeluT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return SeluTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SCALE = 4, + VT_ALPHA = 6 + }; + float scale() const { + return GetField(VT_SCALE, 0.0f); + } + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SCALE) && + VerifyField(verifier, VT_ALPHA) && + verifier.EndTable(); + } + SeluT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SeluT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SeluT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SeluBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_scale(float scale) { + fbb_.AddElement(Selu::VT_SCALE, scale, 0.0f); + } + void add_alpha(float alpha) { + fbb_.AddElement(Selu::VT_ALPHA, alpha, 0.0f); + } + explicit SeluBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SeluBuilder &operator=(const SeluBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSelu( + flatbuffers::FlatBufferBuilder &_fbb, + float scale = 0.0f, + float alpha = 0.0f) { + SeluBuilder builder_(_fbb); + builder_.add_alpha(alpha); + builder_.add_scale(scale); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSelu(flatbuffers::FlatBufferBuilder &_fbb, const SeluT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + 
+struct AsStringT : public flatbuffers::NativeTable { + typedef AsString TableType; + DataType T; + int32_t precision; + bool scientific; + bool shortest; + int32_t width; + std::string fillString; + AsStringT() + : T(DataType_DT_INVALID), + precision(0), + scientific(false), + shortest(false), + width(0) { + } +}; + +struct AsString FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AsStringT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return AsStringTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_T = 4, + VT_PRECISION = 6, + VT_SCIENTIFIC = 8, + VT_SHORTEST = 10, + VT_WIDTH = 12, + VT_FILLSTRING = 14 + }; + DataType T() const { + return static_cast(GetField(VT_T, 0)); + } + int32_t precision() const { + return GetField(VT_PRECISION, 0); + } + bool scientific() const { + return GetField(VT_SCIENTIFIC, 0) != 0; + } + bool shortest() const { + return GetField(VT_SHORTEST, 0) != 0; + } + int32_t width() const { + return GetField(VT_WIDTH, 0); + } + const flatbuffers::String *fillString() const { + return GetPointer(VT_FILLSTRING); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_T) && + VerifyField(verifier, VT_PRECISION) && + VerifyField(verifier, VT_SCIENTIFIC) && + VerifyField(verifier, VT_SHORTEST) && + VerifyField(verifier, VT_WIDTH) && + VerifyOffset(verifier, VT_FILLSTRING) && + verifier.VerifyString(fillString()) && + verifier.EndTable(); + } + AsStringT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AsStringT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AsStringT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AsStringBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_T(DataType T) { + fbb_.AddElement(AsString::VT_T, static_cast(T), 0); + } + void add_precision(int32_t precision) { + fbb_.AddElement(AsString::VT_PRECISION, precision, 0); + } + void add_scientific(bool scientific) { + fbb_.AddElement(AsString::VT_SCIENTIFIC, static_cast(scientific), 0); + } + void add_shortest(bool shortest) { + fbb_.AddElement(AsString::VT_SHORTEST, static_cast(shortest), 0); + } + void add_width(int32_t width) { + fbb_.AddElement(AsString::VT_WIDTH, width, 0); + } + void add_fillString(flatbuffers::Offset fillString) { + fbb_.AddOffset(AsString::VT_FILLSTRING, fillString); + } + explicit AsStringBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AsStringBuilder &operator=(const AsStringBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAsString( + flatbuffers::FlatBufferBuilder &_fbb, + DataType T = DataType_DT_INVALID, + int32_t precision = 0, + bool scientific = false, + bool shortest = false, + int32_t width = 0, + flatbuffers::Offset fillString = 0) { + AsStringBuilder builder_(_fbb); + builder_.add_fillString(fillString); + builder_.add_width(width); + builder_.add_precision(precision); + builder_.add_T(T); + builder_.add_shortest(shortest); + builder_.add_scientific(scientific); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateAsStringDirect( + flatbuffers::FlatBufferBuilder &_fbb, + DataType T = DataType_DT_INVALID, + int32_t precision = 0, + bool scientific = false, + bool shortest = false, + int32_t width = 0, + const char *fillString = nullptr) { + auto fillString__ = fillString ? 
_fbb.CreateString(fillString) : 0; + return MNN::CreateAsString( + _fbb, + T, + precision, + scientific, + shortest, + width, + fillString__); +} + +flatbuffers::Offset CreateAsString(flatbuffers::FlatBufferBuilder &_fbb, const AsStringT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReduceJoinT : public flatbuffers::NativeTable { + typedef ReduceJoin TableType; + bool keepDims; + std::string separator; + ReduceJoinT() + : keepDims(false) { + } +}; + +struct ReduceJoin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReduceJoinT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ReduceJoinTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEEPDIMS = 4, + VT_SEPARATOR = 6 + }; + bool keepDims() const { + return GetField(VT_KEEPDIMS, 0) != 0; + } + const flatbuffers::String *separator() const { + return GetPointer(VT_SEPARATOR); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KEEPDIMS) && + VerifyOffset(verifier, VT_SEPARATOR) && + verifier.VerifyString(separator()) && + verifier.EndTable(); + } + ReduceJoinT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReduceJoinT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReduceJoinT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReduceJoinBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_keepDims(bool keepDims) { + fbb_.AddElement(ReduceJoin::VT_KEEPDIMS, static_cast(keepDims), 0); + } + void add_separator(flatbuffers::Offset separator) { + fbb_.AddOffset(ReduceJoin::VT_SEPARATOR, separator); + } + explicit ReduceJoinBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = 
fbb_.StartTable(); + } + ReduceJoinBuilder &operator=(const ReduceJoinBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReduceJoin( + flatbuffers::FlatBufferBuilder &_fbb, + bool keepDims = false, + flatbuffers::Offset separator = 0) { + ReduceJoinBuilder builder_(_fbb); + builder_.add_separator(separator); + builder_.add_keepDims(keepDims); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateReduceJoinDirect( + flatbuffers::FlatBufferBuilder &_fbb, + bool keepDims = false, + const char *separator = nullptr) { + auto separator__ = separator ? _fbb.CreateString(separator) : 0; + return MNN::CreateReduceJoin( + _fbb, + keepDims, + separator__); +} + +flatbuffers::Offset CreateReduceJoin(flatbuffers::FlatBufferBuilder &_fbb, const ReduceJoinT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UnaryOpT : public flatbuffers::NativeTable { + typedef UnaryOp TableType; + UnaryOpOperation opType; + DataType T; + UnaryOpT() + : opType(UnaryOpOperation_ABS), + T(DataType_DT_INVALID) { + } +}; + +struct UnaryOp FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnaryOpT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return UnaryOpTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OPTYPE = 4, + VT_T = 6 + }; + UnaryOpOperation opType() const { + return static_cast(GetField(VT_OPTYPE, 0)); + } + DataType T() const { + return static_cast(GetField(VT_T, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OPTYPE) && + VerifyField(verifier, VT_T) && + verifier.EndTable(); + } + UnaryOpT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnaryOpT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) 
const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnaryOpT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnaryOpBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_opType(UnaryOpOperation opType) { + fbb_.AddElement(UnaryOp::VT_OPTYPE, static_cast(opType), 0); + } + void add_T(DataType T) { + fbb_.AddElement(UnaryOp::VT_T, static_cast(T), 0); + } + explicit UnaryOpBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + UnaryOpBuilder &operator=(const UnaryOpBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnaryOp( + flatbuffers::FlatBufferBuilder &_fbb, + UnaryOpOperation opType = UnaryOpOperation_ABS, + DataType T = DataType_DT_INVALID) { + UnaryOpBuilder builder_(_fbb); + builder_.add_T(T); + builder_.add_opType(opType); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUnaryOp(flatbuffers::FlatBufferBuilder &_fbb, const UnaryOpT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TopKV2T : public flatbuffers::NativeTable { + typedef TopKV2 TableType; + DataType T; + bool sorted; + TopKV2T() + : T(DataType_DT_FLOAT), + sorted(false) { + } +}; + +struct TopKV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TopKV2T NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return TopKV2TypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_T = 4, + VT_SORTED = 6 + }; + DataType T() const { + return static_cast(GetField(VT_T, 1)); + } + bool sorted() const { + return GetField(VT_SORTED, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_T) && + VerifyField(verifier, VT_SORTED) && + 
verifier.EndTable(); + } + TopKV2T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TopKV2T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TopKV2Builder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_T(DataType T) { + fbb_.AddElement(TopKV2::VT_T, static_cast(T), 1); + } + void add_sorted(bool sorted) { + fbb_.AddElement(TopKV2::VT_SORTED, static_cast(sorted), 0); + } + explicit TopKV2Builder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TopKV2Builder &operator=(const TopKV2Builder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTopKV2( + flatbuffers::FlatBufferBuilder &_fbb, + DataType T = DataType_DT_FLOAT, + bool sorted = false) { + TopKV2Builder builder_(_fbb); + builder_.add_T(T); + builder_.add_sorted(sorted); + return builder_.Finish(); +} + +flatbuffers::Offset CreateTopKV2(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CropAndResizeT : public flatbuffers::NativeTable { + typedef CropAndResize TableType; + float extrapolationValue; + CropAndResizeMethod method; + CropAndResizeT() + : extrapolationValue(0.0f), + method(CropAndResizeMethod_BILINEAR) { + } +}; + +struct CropAndResize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CropAndResizeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return CropAndResizeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EXTRAPOLATIONVALUE = 4, + VT_METHOD = 6 + }; + float extrapolationValue() const { + return 
GetField(VT_EXTRAPOLATIONVALUE, 0.0f); + } + CropAndResizeMethod method() const { + return static_cast(GetField(VT_METHOD, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EXTRAPOLATIONVALUE) && + VerifyField(verifier, VT_METHOD) && + verifier.EndTable(); + } + CropAndResizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CropAndResizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CropAndResizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CropAndResizeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_extrapolationValue(float extrapolationValue) { + fbb_.AddElement(CropAndResize::VT_EXTRAPOLATIONVALUE, extrapolationValue, 0.0f); + } + void add_method(CropAndResizeMethod method) { + fbb_.AddElement(CropAndResize::VT_METHOD, static_cast(method), 0); + } + explicit CropAndResizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CropAndResizeBuilder &operator=(const CropAndResizeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCropAndResize( + flatbuffers::FlatBufferBuilder &_fbb, + float extrapolationValue = 0.0f, + CropAndResizeMethod method = CropAndResizeMethod_BILINEAR) { + CropAndResizeBuilder builder_(_fbb); + builder_.add_extrapolationValue(extrapolationValue); + builder_.add_method(method); + return builder_.Finish(); +} + +flatbuffers::Offset CreateCropAndResize(flatbuffers::FlatBufferBuilder &_fbb, const CropAndResizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FillT : public flatbuffers::NativeTable { + typedef Fill TableType; + FillT() { + } 
+}; + +struct Fill FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FillT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return FillTypeTable(); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + FillT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FillT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct FillBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FillBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + FillBuilder &operator=(const FillBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFill( + flatbuffers::FlatBufferBuilder &_fbb) { + FillBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateFill(flatbuffers::FlatBufferBuilder &_fbb, const FillT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GatherV2T : public flatbuffers::NativeTable { + typedef GatherV2 TableType; + DataType Taxis; + DataType Tindices; + DataType Tparams; + GatherV2T() + : Taxis(DataType_DT_INVALID), + Tindices(DataType_DT_INVALID), + Tparams(DataType_DT_INVALID) { + } +}; + +struct GatherV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GatherV2T NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return GatherV2TypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TAXIS = 4, + VT_TINDICES = 6, + VT_TPARAMS = 8 + }; + DataType Taxis() const { + return 
static_cast(GetField(VT_TAXIS, 0)); + } + DataType Tindices() const { + return static_cast(GetField(VT_TINDICES, 0)); + } + DataType Tparams() const { + return static_cast(GetField(VT_TPARAMS, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TAXIS) && + VerifyField(verifier, VT_TINDICES) && + VerifyField(verifier, VT_TPARAMS) && + verifier.EndTable(); + } + GatherV2T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GatherV2T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherV2T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GatherV2Builder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_Taxis(DataType Taxis) { + fbb_.AddElement(GatherV2::VT_TAXIS, static_cast(Taxis), 0); + } + void add_Tindices(DataType Tindices) { + fbb_.AddElement(GatherV2::VT_TINDICES, static_cast(Tindices), 0); + } + void add_Tparams(DataType Tparams) { + fbb_.AddElement(GatherV2::VT_TPARAMS, static_cast(Tparams), 0); + } + explicit GatherV2Builder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + GatherV2Builder &operator=(const GatherV2Builder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGatherV2( + flatbuffers::FlatBufferBuilder &_fbb, + DataType Taxis = DataType_DT_INVALID, + DataType Tindices = DataType_DT_INVALID, + DataType Tparams = DataType_DT_INVALID) { + GatherV2Builder builder_(_fbb); + builder_.add_Tparams(Tparams); + builder_.add_Tindices(Tindices); + builder_.add_Taxis(Taxis); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGatherV2(flatbuffers::FlatBufferBuilder &_fbb, const GatherV2T *_o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NonMaxSuppressionV2T : public flatbuffers::NativeTable { + typedef NonMaxSuppressionV2 TableType; + NonMaxSuppressionV2T() { + } +}; + +struct NonMaxSuppressionV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NonMaxSuppressionV2T NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return NonMaxSuppressionV2TypeTable(); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + NonMaxSuppressionV2T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NonMaxSuppressionV2T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV2T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NonMaxSuppressionV2Builder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NonMaxSuppressionV2Builder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + NonMaxSuppressionV2Builder &operator=(const NonMaxSuppressionV2Builder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNonMaxSuppressionV2( + flatbuffers::FlatBufferBuilder &_fbb) { + NonMaxSuppressionV2Builder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateNonMaxSuppressionV2(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV2T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RangeT : public flatbuffers::NativeTable { + typedef Range TableType; + DataType Tidx; + RangeT() + : Tidx(DataType_DT_INVALID) { + } +}; + +struct Range FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RangeT NativeTableType; + 
static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return RangeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TIDX = 4 + }; + DataType Tidx() const { + return static_cast(GetField(VT_TIDX, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TIDX) && + verifier.EndTable(); + } + RangeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RangeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RangeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_Tidx(DataType Tidx) { + fbb_.AddElement(Range::VT_TIDX, static_cast(Tidx), 0); + } + explicit RangeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RangeBuilder &operator=(const RangeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRange( + flatbuffers::FlatBufferBuilder &_fbb, + DataType Tidx = DataType_DT_INVALID) { + RangeBuilder builder_(_fbb); + builder_.add_Tidx(Tidx); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRange(flatbuffers::FlatBufferBuilder &_fbb, const RangeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RankT : public flatbuffers::NativeTable { + typedef Rank TableType; + RankT() { + } +}; + +struct Rank FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RankT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return RankTypeTable(); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + 
verifier.EndTable(); + } + RankT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RankT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RankBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RankBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RankBuilder &operator=(const RankBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRank( + flatbuffers::FlatBufferBuilder &_fbb) { + RankBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRank(flatbuffers::FlatBufferBuilder &_fbb, const RankT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SizeT : public flatbuffers::NativeTable { + typedef Size TableType; + DataType outputDataType; + SizeT() + : outputDataType(DataType_DT_INVALID) { + } +}; + +struct Size FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SizeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return SizeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUTDATATYPE = 4 + }; + DataType outputDataType() const { + return static_cast(GetField(VT_OUTPUTDATATYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUTDATATYPE) && + verifier.EndTable(); + } + SizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const SizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SizeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_outputDataType(DataType outputDataType) { + fbb_.AddElement(Size::VT_OUTPUTDATATYPE, static_cast(outputDataType), 0); + } + explicit SizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SizeBuilder &operator=(const SizeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSize( + flatbuffers::FlatBufferBuilder &_fbb, + DataType outputDataType = DataType_DT_INVALID) { + SizeBuilder builder_(_fbb); + builder_.add_outputDataType(outputDataType); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSize(flatbuffers::FlatBufferBuilder &_fbb, const SizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TransposeT : public flatbuffers::NativeTable { + typedef Transpose TableType; + DataType Tperm; + TransposeT() + : Tperm(DataType_DT_INVALID) { + } +}; + +struct Transpose FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TransposeT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return TransposeTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TPERM = 4 + }; + DataType Tperm() const { + return static_cast(GetField(VT_TPERM, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TPERM) && + verifier.EndTable(); + } + TransposeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TransposeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, 
const TransposeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TransposeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_Tperm(DataType Tperm) { + fbb_.AddElement(Transpose::VT_TPERM, static_cast(Tperm), 0); + } + explicit TransposeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TransposeBuilder &operator=(const TransposeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTranspose( + flatbuffers::FlatBufferBuilder &_fbb, + DataType Tperm = DataType_DT_INVALID) { + TransposeBuilder builder_(_fbb); + builder_.add_Tperm(Tperm); + return builder_.Finish(); +} + +flatbuffers::Offset CreateTranspose(flatbuffers::FlatBufferBuilder &_fbb, const TransposeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SliceTfT : public flatbuffers::NativeTable { + typedef SliceTf TableType; + DataType T; + SliceTfT() + : T(DataType_DT_INVALID) { + } +}; + +struct SliceTf FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SliceTfT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return SliceTfTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_T = 4 + }; + DataType T() const { + return static_cast(GetField(VT_T, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_T) && + verifier.EndTable(); + } + SliceTfT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SliceTfT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceTfT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct 
SliceTfBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_T(DataType T) { + fbb_.AddElement(SliceTf::VT_T, static_cast(T), 0); + } + explicit SliceTfBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SliceTfBuilder &operator=(const SliceTfBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSliceTf( + flatbuffers::FlatBufferBuilder &_fbb, + DataType T = DataType_DT_INVALID) { + SliceTfBuilder builder_(_fbb); + builder_.add_T(T); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSliceTf(flatbuffers::FlatBufferBuilder &_fbb, const SliceTfT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizeMaxMinT : public flatbuffers::NativeTable { + typedef QuantizeMaxMin TableType; + DataType T; + QuantizeMaxMinT() + : T(DataType_DT_INVALID) { + } +}; + +struct QuantizeMaxMin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizeMaxMinT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return QuantizeMaxMinTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_T = 4 + }; + DataType T() const { + return static_cast(GetField(VT_T, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_T) && + verifier.EndTable(); + } + QuantizeMaxMinT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizeMaxMinT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeMaxMinT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizeMaxMinBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + 
flatbuffers::uoffset_t start_; + void add_T(DataType T) { + fbb_.AddElement(QuantizeMaxMin::VT_T, static_cast(T), 0); + } + explicit QuantizeMaxMinBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + QuantizeMaxMinBuilder &operator=(const QuantizeMaxMinBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizeMaxMin( + flatbuffers::FlatBufferBuilder &_fbb, + DataType T = DataType_DT_INVALID) { + QuantizeMaxMinBuilder builder_(_fbb); + builder_.add_T(T); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizeMaxMin(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeMaxMinT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CropT : public flatbuffers::NativeTable { + typedef Crop TableType; + int32_t axis; + std::vector offset; + CropT() + : axis(2) { + } +}; + +struct Crop FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CropT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return CropTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_OFFSET = 6 + }; + int32_t axis() const { + return GetField(VT_AXIS, 2); + } + const flatbuffers::Vector *offset() const { + return GetPointer *>(VT_OFFSET); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS) && + VerifyOffset(verifier, VT_OFFSET) && + verifier.VerifyVector(offset()) && + verifier.EndTable(); + } + CropT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CropT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CropT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + 
+struct CropBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(Crop::VT_AXIS, axis, 2); + } + void add_offset(flatbuffers::Offset> offset) { + fbb_.AddOffset(Crop::VT_OFFSET, offset); + } + explicit CropBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CropBuilder &operator=(const CropBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCrop( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 2, + flatbuffers::Offset> offset = 0) { + CropBuilder builder_(_fbb); + builder_.add_offset(offset); + builder_.add_axis(axis); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateCropDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 2, + const std::vector *offset = nullptr) { + auto offset__ = offset ? _fbb.CreateVector(*offset) : 0; + return MNN::CreateCrop( + _fbb, + axis, + offset__); +} + +flatbuffers::Offset CreateCrop(flatbuffers::FlatBufferBuilder &_fbb, const CropT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SpaceBatchT : public flatbuffers::NativeTable { + typedef SpaceBatch TableType; + std::unique_ptr blockShape; + std::unique_ptr padding; + SpaceBatchT() { + } +}; + +struct SpaceBatch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SpaceBatchT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return SpaceBatchTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCKSHAPE = 4, + VT_PADDING = 6 + }; + const Blob *blockShape() const { + return GetPointer(VT_BLOCKSHAPE); + } + const Blob *padding() const { + return GetPointer(VT_PADDING); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, 
VT_BLOCKSHAPE) && + verifier.VerifyTable(blockShape()) && + VerifyOffset(verifier, VT_PADDING) && + verifier.VerifyTable(padding()) && + verifier.EndTable(); + } + SpaceBatchT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SpaceBatchT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceBatchT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SpaceBatchBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_blockShape(flatbuffers::Offset blockShape) { + fbb_.AddOffset(SpaceBatch::VT_BLOCKSHAPE, blockShape); + } + void add_padding(flatbuffers::Offset padding) { + fbb_.AddOffset(SpaceBatch::VT_PADDING, padding); + } + explicit SpaceBatchBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SpaceBatchBuilder &operator=(const SpaceBatchBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSpaceBatch( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset blockShape = 0, + flatbuffers::Offset padding = 0) { + SpaceBatchBuilder builder_(_fbb); + builder_.add_padding(padding); + builder_.add_blockShape(blockShape); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSpaceBatch(flatbuffers::FlatBufferBuilder &_fbb, const SpaceBatchT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MatMulT : public flatbuffers::NativeTable { + typedef MatMul TableType; + DataType T; + bool transposeA; + bool transposeB; + std::vector weight; + std::vector bias; + MatMulT() + : T(DataType_DT_INVALID), + transposeA(false), + transposeB(false) { + } +}; + +struct MatMul FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MatMulT NativeTableType; + static 
const flatbuffers::TypeTable *MiniReflectTypeTable() { + return MatMulTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_T = 4, + VT_TRANSPOSEA = 6, + VT_TRANSPOSEB = 8, + VT_WEIGHT = 10, + VT_BIAS = 12 + }; + DataType T() const { + return static_cast(GetField(VT_T, 0)); + } + bool transposeA() const { + return GetField(VT_TRANSPOSEA, 0) != 0; + } + bool transposeB() const { + return GetField(VT_TRANSPOSEB, 0) != 0; + } + const flatbuffers::Vector *weight() const { + return GetPointer *>(VT_WEIGHT); + } + const flatbuffers::Vector *bias() const { + return GetPointer *>(VT_BIAS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_T) && + VerifyField(verifier, VT_TRANSPOSEA) && + VerifyField(verifier, VT_TRANSPOSEB) && + VerifyOffset(verifier, VT_WEIGHT) && + verifier.VerifyVector(weight()) && + VerifyOffset(verifier, VT_BIAS) && + verifier.VerifyVector(bias()) && + verifier.EndTable(); + } + MatMulT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MatMulT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MatMulBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_T(DataType T) { + fbb_.AddElement(MatMul::VT_T, static_cast(T), 0); + } + void add_transposeA(bool transposeA) { + fbb_.AddElement(MatMul::VT_TRANSPOSEA, static_cast(transposeA), 0); + } + void add_transposeB(bool transposeB) { + fbb_.AddElement(MatMul::VT_TRANSPOSEB, static_cast(transposeB), 0); + } + void add_weight(flatbuffers::Offset> weight) { + fbb_.AddOffset(MatMul::VT_WEIGHT, weight); + } + void add_bias(flatbuffers::Offset> bias) { + fbb_.AddOffset(MatMul::VT_BIAS, bias); + } + explicit 
MatMulBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MatMulBuilder &operator=(const MatMulBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMatMul( + flatbuffers::FlatBufferBuilder &_fbb, + DataType T = DataType_DT_INVALID, + bool transposeA = false, + bool transposeB = false, + flatbuffers::Offset> weight = 0, + flatbuffers::Offset> bias = 0) { + MatMulBuilder builder_(_fbb); + builder_.add_bias(bias); + builder_.add_weight(weight); + builder_.add_T(T); + builder_.add_transposeB(transposeB); + builder_.add_transposeA(transposeA); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateMatMulDirect( + flatbuffers::FlatBufferBuilder &_fbb, + DataType T = DataType_DT_INVALID, + bool transposeA = false, + bool transposeB = false, + const std::vector *weight = nullptr, + const std::vector *bias = nullptr) { + auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; + auto bias__ = bias ? 
_fbb.CreateVector(*bias) : 0; + return MNN::CreateMatMul( + _fbb, + T, + transposeA, + transposeB, + weight__, + bias__); +} + +flatbuffers::Offset CreateMatMul(flatbuffers::FlatBufferBuilder &_fbb, const MatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MomentsParamT : public flatbuffers::NativeTable { + typedef MomentsParam TableType; + std::vector dim; + bool keepDims; + DataType dType; + MomentsParamT() + : keepDims(true), + dType(DataType_DT_FLOAT) { + } +}; + +struct MomentsParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MomentsParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return MomentsParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DIM = 4, + VT_KEEPDIMS = 6, + VT_DTYPE = 8 + }; + const flatbuffers::Vector *dim() const { + return GetPointer *>(VT_DIM); + } + bool keepDims() const { + return GetField(VT_KEEPDIMS, 1) != 0; + } + DataType dType() const { + return static_cast(GetField(VT_DTYPE, 1)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DIM) && + verifier.VerifyVector(dim()) && + VerifyField(verifier, VT_KEEPDIMS) && + VerifyField(verifier, VT_DTYPE) && + verifier.EndTable(); + } + MomentsParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MomentsParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MomentsParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MomentsParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dim(flatbuffers::Offset> dim) { + fbb_.AddOffset(MomentsParam::VT_DIM, dim); + } + void add_keepDims(bool keepDims) { + fbb_.AddElement(MomentsParam::VT_KEEPDIMS, static_cast(keepDims), 
1); + } + void add_dType(DataType dType) { + fbb_.AddElement(MomentsParam::VT_DTYPE, static_cast(dType), 1); + } + explicit MomentsParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MomentsParamBuilder &operator=(const MomentsParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMomentsParam( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> dim = 0, + bool keepDims = true, + DataType dType = DataType_DT_FLOAT) { + MomentsParamBuilder builder_(_fbb); + builder_.add_dType(dType); + builder_.add_dim(dim); + builder_.add_keepDims(keepDims); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateMomentsParamDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *dim = nullptr, + bool keepDims = true, + DataType dType = DataType_DT_FLOAT) { + auto dim__ = dim ? _fbb.CreateVector(*dim) : 0; + return MNN::CreateMomentsParam( + _fbb, + dim__, + keepDims, + dType); +} + +flatbuffers::Offset CreateMomentsParam(flatbuffers::FlatBufferBuilder &_fbb, const MomentsParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RNNParamT : public flatbuffers::NativeTable { + typedef RNNParam TableType; + int32_t numUnits; + bool isBidirectionalRNN; + bool keepAllOutputs; + std::unique_ptr fwGateWeight; + std::unique_ptr fwGateBias; + std::unique_ptr fwCandidateWeight; + std::unique_ptr fwCandidateBias; + std::unique_ptr bwGateWeight; + std::unique_ptr bwGateBias; + std::unique_ptr bwCandidateWeight; + std::unique_ptr bwCandidateBias; + RNNParamT() + : numUnits(0), + isBidirectionalRNN(false), + keepAllOutputs(false) { + } +}; + +struct RNNParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RNNParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return RNNParamTypeTable(); + } + enum 
FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUMUNITS = 4, + VT_ISBIDIRECTIONALRNN = 6, + VT_KEEPALLOUTPUTS = 8, + VT_FWGATEWEIGHT = 10, + VT_FWGATEBIAS = 12, + VT_FWCANDIDATEWEIGHT = 14, + VT_FWCANDIDATEBIAS = 16, + VT_BWGATEWEIGHT = 18, + VT_BWGATEBIAS = 20, + VT_BWCANDIDATEWEIGHT = 22, + VT_BWCANDIDATEBIAS = 24 + }; + int32_t numUnits() const { + return GetField(VT_NUMUNITS, 0); + } + bool isBidirectionalRNN() const { + return GetField(VT_ISBIDIRECTIONALRNN, 0) != 0; + } + bool keepAllOutputs() const { + return GetField(VT_KEEPALLOUTPUTS, 0) != 0; + } + const Blob *fwGateWeight() const { + return GetPointer(VT_FWGATEWEIGHT); + } + const Blob *fwGateBias() const { + return GetPointer(VT_FWGATEBIAS); + } + const Blob *fwCandidateWeight() const { + return GetPointer(VT_FWCANDIDATEWEIGHT); + } + const Blob *fwCandidateBias() const { + return GetPointer(VT_FWCANDIDATEBIAS); + } + const Blob *bwGateWeight() const { + return GetPointer(VT_BWGATEWEIGHT); + } + const Blob *bwGateBias() const { + return GetPointer(VT_BWGATEBIAS); + } + const Blob *bwCandidateWeight() const { + return GetPointer(VT_BWCANDIDATEWEIGHT); + } + const Blob *bwCandidateBias() const { + return GetPointer(VT_BWCANDIDATEBIAS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUMUNITS) && + VerifyField(verifier, VT_ISBIDIRECTIONALRNN) && + VerifyField(verifier, VT_KEEPALLOUTPUTS) && + VerifyOffset(verifier, VT_FWGATEWEIGHT) && + verifier.VerifyTable(fwGateWeight()) && + VerifyOffset(verifier, VT_FWGATEBIAS) && + verifier.VerifyTable(fwGateBias()) && + VerifyOffset(verifier, VT_FWCANDIDATEWEIGHT) && + verifier.VerifyTable(fwCandidateWeight()) && + VerifyOffset(verifier, VT_FWCANDIDATEBIAS) && + verifier.VerifyTable(fwCandidateBias()) && + VerifyOffset(verifier, VT_BWGATEWEIGHT) && + verifier.VerifyTable(bwGateWeight()) && + VerifyOffset(verifier, VT_BWGATEBIAS) && + verifier.VerifyTable(bwGateBias()) && + 
VerifyOffset(verifier, VT_BWCANDIDATEWEIGHT) && + verifier.VerifyTable(bwCandidateWeight()) && + VerifyOffset(verifier, VT_BWCANDIDATEBIAS) && + verifier.VerifyTable(bwCandidateBias()) && + verifier.EndTable(); + } + RNNParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RNNParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RNNParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_numUnits(int32_t numUnits) { + fbb_.AddElement(RNNParam::VT_NUMUNITS, numUnits, 0); + } + void add_isBidirectionalRNN(bool isBidirectionalRNN) { + fbb_.AddElement(RNNParam::VT_ISBIDIRECTIONALRNN, static_cast(isBidirectionalRNN), 0); + } + void add_keepAllOutputs(bool keepAllOutputs) { + fbb_.AddElement(RNNParam::VT_KEEPALLOUTPUTS, static_cast(keepAllOutputs), 0); + } + void add_fwGateWeight(flatbuffers::Offset fwGateWeight) { + fbb_.AddOffset(RNNParam::VT_FWGATEWEIGHT, fwGateWeight); + } + void add_fwGateBias(flatbuffers::Offset fwGateBias) { + fbb_.AddOffset(RNNParam::VT_FWGATEBIAS, fwGateBias); + } + void add_fwCandidateWeight(flatbuffers::Offset fwCandidateWeight) { + fbb_.AddOffset(RNNParam::VT_FWCANDIDATEWEIGHT, fwCandidateWeight); + } + void add_fwCandidateBias(flatbuffers::Offset fwCandidateBias) { + fbb_.AddOffset(RNNParam::VT_FWCANDIDATEBIAS, fwCandidateBias); + } + void add_bwGateWeight(flatbuffers::Offset bwGateWeight) { + fbb_.AddOffset(RNNParam::VT_BWGATEWEIGHT, bwGateWeight); + } + void add_bwGateBias(flatbuffers::Offset bwGateBias) { + fbb_.AddOffset(RNNParam::VT_BWGATEBIAS, bwGateBias); + } + void add_bwCandidateWeight(flatbuffers::Offset bwCandidateWeight) { + fbb_.AddOffset(RNNParam::VT_BWCANDIDATEWEIGHT, bwCandidateWeight); + } + void add_bwCandidateBias(flatbuffers::Offset 
bwCandidateBias) { + fbb_.AddOffset(RNNParam::VT_BWCANDIDATEBIAS, bwCandidateBias); + } + explicit RNNParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RNNParamBuilder &operator=(const RNNParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRNNParam( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t numUnits = 0, + bool isBidirectionalRNN = false, + bool keepAllOutputs = false, + flatbuffers::Offset fwGateWeight = 0, + flatbuffers::Offset fwGateBias = 0, + flatbuffers::Offset fwCandidateWeight = 0, + flatbuffers::Offset fwCandidateBias = 0, + flatbuffers::Offset bwGateWeight = 0, + flatbuffers::Offset bwGateBias = 0, + flatbuffers::Offset bwCandidateWeight = 0, + flatbuffers::Offset bwCandidateBias = 0) { + RNNParamBuilder builder_(_fbb); + builder_.add_bwCandidateBias(bwCandidateBias); + builder_.add_bwCandidateWeight(bwCandidateWeight); + builder_.add_bwGateBias(bwGateBias); + builder_.add_bwGateWeight(bwGateWeight); + builder_.add_fwCandidateBias(fwCandidateBias); + builder_.add_fwCandidateWeight(fwCandidateWeight); + builder_.add_fwGateBias(fwGateBias); + builder_.add_fwGateWeight(fwGateWeight); + builder_.add_numUnits(numUnits); + builder_.add_keepAllOutputs(keepAllOutputs); + builder_.add_isBidirectionalRNN(isBidirectionalRNN); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRNNParam(flatbuffers::FlatBufferBuilder &_fbb, const RNNParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BatchMatMulParamT : public flatbuffers::NativeTable { + typedef BatchMatMulParam TableType; + bool adjX; + bool adjY; + BatchMatMulParamT() + : adjX(false), + adjY(false) { + } +}; + +struct BatchMatMulParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BatchMatMulParamT NativeTableType; + static const flatbuffers::TypeTable 
*MiniReflectTypeTable() { + return BatchMatMulParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ADJX = 4, + VT_ADJY = 6 + }; + bool adjX() const { + return GetField(VT_ADJX, 0) != 0; + } + bool adjY() const { + return GetField(VT_ADJY, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ADJX) && + VerifyField(verifier, VT_ADJY) && + verifier.EndTable(); + } + BatchMatMulParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BatchMatMulParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BatchMatMulParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_adjX(bool adjX) { + fbb_.AddElement(BatchMatMulParam::VT_ADJX, static_cast(adjX), 0); + } + void add_adjY(bool adjY) { + fbb_.AddElement(BatchMatMulParam::VT_ADJY, static_cast(adjY), 0); + } + explicit BatchMatMulParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BatchMatMulParamBuilder &operator=(const BatchMatMulParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBatchMatMulParam( + flatbuffers::FlatBufferBuilder &_fbb, + bool adjX = false, + bool adjY = false) { + BatchMatMulParamBuilder builder_(_fbb); + builder_.add_adjY(adjY); + builder_.add_adjX(adjX); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBatchMatMulParam(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DepthSpaceParamT : public flatbuffers::NativeTable { + 
typedef DepthSpaceParam TableType; + int32_t blockSize; + DepthSpaceParamT() + : blockSize(0) { + } +}; + +struct DepthSpaceParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DepthSpaceParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return DepthSpaceParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCKSIZE = 4 + }; + int32_t blockSize() const { + return GetField(VT_BLOCKSIZE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCKSIZE) && + verifier.EndTable(); + } + DepthSpaceParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DepthSpaceParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthSpaceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DepthSpaceParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_blockSize(int32_t blockSize) { + fbb_.AddElement(DepthSpaceParam::VT_BLOCKSIZE, blockSize, 0); + } + explicit DepthSpaceParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DepthSpaceParamBuilder &operator=(const DepthSpaceParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDepthSpaceParam( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t blockSize = 0) { + DepthSpaceParamBuilder builder_(_fbb); + builder_.add_blockSize(blockSize); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDepthSpaceParam(flatbuffers::FlatBufferBuilder &_fbb, const DepthSpaceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct 
ReverseSequenceParamT : public flatbuffers::NativeTable { + typedef ReverseSequenceParam TableType; + int32_t batchDim; + int32_t seqDim; + ReverseSequenceParamT() + : batchDim(0), + seqDim(0) { + } +}; + +struct ReverseSequenceParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReverseSequenceParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return ReverseSequenceParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BATCHDIM = 4, + VT_SEQDIM = 6 + }; + int32_t batchDim() const { + return GetField(VT_BATCHDIM, 0); + } + int32_t seqDim() const { + return GetField(VT_SEQDIM, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BATCHDIM) && + VerifyField(verifier, VT_SEQDIM) && + verifier.EndTable(); + } + ReverseSequenceParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReverseSequenceParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReverseSequenceParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_batchDim(int32_t batchDim) { + fbb_.AddElement(ReverseSequenceParam::VT_BATCHDIM, batchDim, 0); + } + void add_seqDim(int32_t seqDim) { + fbb_.AddElement(ReverseSequenceParam::VT_SEQDIM, seqDim, 0); + } + explicit ReverseSequenceParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ReverseSequenceParamBuilder &operator=(const ReverseSequenceParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReverseSequenceParam( + 
flatbuffers::FlatBufferBuilder &_fbb, + int32_t batchDim = 0, + int32_t seqDim = 0) { + ReverseSequenceParamBuilder builder_(_fbb); + builder_.add_seqDim(seqDim); + builder_.add_batchDim(batchDim); + return builder_.Finish(); +} + +flatbuffers::Offset CreateReverseSequenceParam(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DetectionPostProcessParamT : public flatbuffers::NativeTable { + typedef DetectionPostProcessParam TableType; + int32_t maxDetections; + int32_t maxClassesPerDetection; + int32_t detectionsPerClass; + float nmsScoreThreshold; + float iouThreshold; + int32_t numClasses; + bool useRegularNMS; + std::vector centerSizeEncoding; + DetectionPostProcessParamT() + : maxDetections(0), + maxClassesPerDetection(0), + detectionsPerClass(0), + nmsScoreThreshold(0.0f), + iouThreshold(0.0f), + numClasses(0), + useRegularNMS(false) { + } +}; + +struct DetectionPostProcessParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DetectionPostProcessParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return DetectionPostProcessParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MAXDETECTIONS = 4, + VT_MAXCLASSESPERDETECTION = 6, + VT_DETECTIONSPERCLASS = 8, + VT_NMSSCORETHRESHOLD = 10, + VT_IOUTHRESHOLD = 12, + VT_NUMCLASSES = 14, + VT_USEREGULARNMS = 16, + VT_CENTERSIZEENCODING = 18 + }; + int32_t maxDetections() const { + return GetField(VT_MAXDETECTIONS, 0); + } + int32_t maxClassesPerDetection() const { + return GetField(VT_MAXCLASSESPERDETECTION, 0); + } + int32_t detectionsPerClass() const { + return GetField(VT_DETECTIONSPERCLASS, 0); + } + float nmsScoreThreshold() const { + return GetField(VT_NMSSCORETHRESHOLD, 0.0f); + } + float iouThreshold() const { + return GetField(VT_IOUTHRESHOLD, 0.0f); + } + int32_t numClasses() const { + return GetField(VT_NUMCLASSES, 0); + } 
+ bool useRegularNMS() const { + return GetField(VT_USEREGULARNMS, 0) != 0; + } + const flatbuffers::Vector *centerSizeEncoding() const { + return GetPointer *>(VT_CENTERSIZEENCODING); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MAXDETECTIONS) && + VerifyField(verifier, VT_MAXCLASSESPERDETECTION) && + VerifyField(verifier, VT_DETECTIONSPERCLASS) && + VerifyField(verifier, VT_NMSSCORETHRESHOLD) && + VerifyField(verifier, VT_IOUTHRESHOLD) && + VerifyField(verifier, VT_NUMCLASSES) && + VerifyField(verifier, VT_USEREGULARNMS) && + VerifyOffset(verifier, VT_CENTERSIZEENCODING) && + verifier.VerifyVector(centerSizeEncoding()) && + verifier.EndTable(); + } + DetectionPostProcessParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DetectionPostProcessParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DetectionPostProcessParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DetectionPostProcessParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_maxDetections(int32_t maxDetections) { + fbb_.AddElement(DetectionPostProcessParam::VT_MAXDETECTIONS, maxDetections, 0); + } + void add_maxClassesPerDetection(int32_t maxClassesPerDetection) { + fbb_.AddElement(DetectionPostProcessParam::VT_MAXCLASSESPERDETECTION, maxClassesPerDetection, 0); + } + void add_detectionsPerClass(int32_t detectionsPerClass) { + fbb_.AddElement(DetectionPostProcessParam::VT_DETECTIONSPERCLASS, detectionsPerClass, 0); + } + void add_nmsScoreThreshold(float nmsScoreThreshold) { + fbb_.AddElement(DetectionPostProcessParam::VT_NMSSCORETHRESHOLD, nmsScoreThreshold, 0.0f); + } + void add_iouThreshold(float iouThreshold) { + fbb_.AddElement(DetectionPostProcessParam::VT_IOUTHRESHOLD, iouThreshold, 0.0f); 
+ } + void add_numClasses(int32_t numClasses) { + fbb_.AddElement(DetectionPostProcessParam::VT_NUMCLASSES, numClasses, 0); + } + void add_useRegularNMS(bool useRegularNMS) { + fbb_.AddElement(DetectionPostProcessParam::VT_USEREGULARNMS, static_cast(useRegularNMS), 0); + } + void add_centerSizeEncoding(flatbuffers::Offset> centerSizeEncoding) { + fbb_.AddOffset(DetectionPostProcessParam::VT_CENTERSIZEENCODING, centerSizeEncoding); + } + explicit DetectionPostProcessParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DetectionPostProcessParamBuilder &operator=(const DetectionPostProcessParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDetectionPostProcessParam( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t maxDetections = 0, + int32_t maxClassesPerDetection = 0, + int32_t detectionsPerClass = 0, + float nmsScoreThreshold = 0.0f, + float iouThreshold = 0.0f, + int32_t numClasses = 0, + bool useRegularNMS = false, + flatbuffers::Offset> centerSizeEncoding = 0) { + DetectionPostProcessParamBuilder builder_(_fbb); + builder_.add_centerSizeEncoding(centerSizeEncoding); + builder_.add_numClasses(numClasses); + builder_.add_iouThreshold(iouThreshold); + builder_.add_nmsScoreThreshold(nmsScoreThreshold); + builder_.add_detectionsPerClass(detectionsPerClass); + builder_.add_maxClassesPerDetection(maxClassesPerDetection); + builder_.add_maxDetections(maxDetections); + builder_.add_useRegularNMS(useRegularNMS); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateDetectionPostProcessParamDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t maxDetections = 0, + int32_t maxClassesPerDetection = 0, + int32_t detectionsPerClass = 0, + float nmsScoreThreshold = 0.0f, + float iouThreshold = 0.0f, + int32_t numClasses = 0, + bool useRegularNMS = false, + const std::vector 
*centerSizeEncoding = nullptr) { + auto centerSizeEncoding__ = centerSizeEncoding ? _fbb.CreateVector(*centerSizeEncoding) : 0; + return MNN::CreateDetectionPostProcessParam( + _fbb, + maxDetections, + maxClassesPerDetection, + detectionsPerClass, + nmsScoreThreshold, + iouThreshold, + numClasses, + useRegularNMS, + centerSizeEncoding__); +} + +flatbuffers::Offset CreateDetectionPostProcessParam(flatbuffers::FlatBufferBuilder &_fbb, const DetectionPostProcessParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct OneHotParamT : public flatbuffers::NativeTable { + typedef OneHotParam TableType; + DataType dType; + int32_t axis; + OneHotParamT() + : dType(DataType_DT_FLOAT), + axis(-1) { + } +}; + +struct OneHotParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OneHotParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return OneHotParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DTYPE = 4, + VT_AXIS = 6 + }; + DataType dType() const { + return static_cast(GetField(VT_DTYPE, 1)); + } + int32_t axis() const { + return GetField(VT_AXIS, -1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DTYPE) && + VerifyField(verifier, VT_AXIS) && + verifier.EndTable(); + } + OneHotParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OneHotParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct OneHotParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_dType(DataType dType) { + fbb_.AddElement(OneHotParam::VT_DTYPE, static_cast(dType), 1); + } + void add_axis(int32_t axis) { + 
fbb_.AddElement(OneHotParam::VT_AXIS, axis, -1); + } + explicit OneHotParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + OneHotParamBuilder &operator=(const OneHotParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateOneHotParam( + flatbuffers::FlatBufferBuilder &_fbb, + DataType dType = DataType_DT_FLOAT, + int32_t axis = -1) { + OneHotParamBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_dType(dType); + return builder_.Finish(); +} + +flatbuffers::Offset CreateOneHotParam(flatbuffers::FlatBufferBuilder &_fbb, const OneHotParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PadParamT : public flatbuffers::NativeTable { + typedef PadParam TableType; + PadValueMode mode; + PadParamT() + : mode(PadValueMode_CONSTANT) { + } +}; + +struct PadParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PadParamT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return PadParamTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MODE = 4 + }; + PadValueMode mode() const { + return static_cast(GetField(VT_MODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MODE) && + verifier.EndTable(); + } + PadParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PadParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PadParamBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_mode(PadValueMode mode) { + 
fbb_.AddElement(PadParam::VT_MODE, static_cast(mode), 0); + } + explicit PadParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + PadParamBuilder &operator=(const PadParamBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePadParam( + flatbuffers::FlatBufferBuilder &_fbb, + PadValueMode mode = PadValueMode_CONSTANT) { + PadParamBuilder builder_(_fbb); + builder_.add_mode(mode); + return builder_.Finish(); +} + +flatbuffers::Offset CreatePadParam(flatbuffers::FlatBufferBuilder &_fbb, const PadParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline BinaryOpT *BinaryOp::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BinaryOpT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void BinaryOp::UnPackTo(BinaryOpT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = opType(); _o->opType = _e; }; + { auto _e = T(); _o->T = _e; }; +} + +inline flatbuffers::Offset BinaryOp::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BinaryOpT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBinaryOp(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBinaryOp(flatbuffers::FlatBufferBuilder &_fbb, const BinaryOpT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BinaryOpT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _opType = _o->opType; + auto _T = _o->T; + return MNN::CreateBinaryOp( + _fbb, + _opType, + _T); +} + +inline PackParamT *PackParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PackParamT(); + UnPackTo(_o, _resolver); + return _o; +} + 
+inline void PackParam::UnPackTo(PackParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dataType(); _o->dataType = _e; }; + { auto _e = axis(); _o->axis = _e; }; +} + +inline flatbuffers::Offset PackParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePackParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePackParam(flatbuffers::FlatBufferBuilder &_fbb, const PackParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dataType = _o->dataType; + auto _axis = _o->axis; + return MNN::CreatePackParam( + _fbb, + _dataType, + _axis); +} + +inline StridedSliceParamT *StridedSliceParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new StridedSliceParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void StridedSliceParam::UnPackTo(StridedSliceParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = Index(); _o->Index = _e; }; + { auto _e = T(); _o->T = _e; }; + { auto _e = beginMask(); _o->beginMask = _e; }; + { auto _e = endMask(); _o->endMask = _e; }; + { auto _e = ellipsisMask(); _o->ellipsisMask = _e; }; + { auto _e = newAxisMask(); _o->newAxisMask = _e; }; + { auto _e = shrinkAxisMask(); _o->shrinkAxisMask = _e; }; +} + +inline flatbuffers::Offset StridedSliceParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateStridedSliceParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateStridedSliceParam(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceParamT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _Index = _o->Index; + auto _T = _o->T; + auto _beginMask = _o->beginMask; + auto _endMask = _o->endMask; + auto _ellipsisMask = _o->ellipsisMask; + auto _newAxisMask = _o->newAxisMask; + auto _shrinkAxisMask = _o->shrinkAxisMask; + return MNN::CreateStridedSliceParam( + _fbb, + _Index, + _T, + _beginMask, + _endMask, + _ellipsisMask, + _newAxisMask, + _shrinkAxisMask); +} + +inline SqueezeParamT *SqueezeParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SqueezeParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void SqueezeParam::UnPackTo(SqueezeParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = squeezeDims(); if (_e) { _o->squeezeDims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeezeDims[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset SqueezeParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSqueezeParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSqueezeParam(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _squeezeDims = _o->squeezeDims.size() ? 
_fbb.CreateVector(_o->squeezeDims) : 0; + return MNN::CreateSqueezeParam( + _fbb, + _squeezeDims); +} + +inline CastParamT *CastParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new CastParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void CastParam::UnPackTo(CastParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = srcT(); _o->srcT = _e; }; + { auto _e = dstT(); _o->dstT = _e; }; +} + +inline flatbuffers::Offset CastParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCastParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCastParam(flatbuffers::FlatBufferBuilder &_fbb, const CastParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _srcT = _o->srcT; + auto _dstT = _o->dstT; + return MNN::CreateCastParam( + _fbb, + _srcT, + _dstT); +} + +inline ReductionParamT *ReductionParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReductionParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ReductionParam::UnPackTo(ReductionParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = operation(); _o->operation = _e; }; + { auto _e = dim(); if (_e) { _o->dim.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dim[_i] = _e->Get(_i); } } }; + { auto _e = coeff(); _o->coeff = _e; }; + { auto _e = keepDims(); _o->keepDims = _e; }; + { auto _e = dType(); _o->dType = _e; }; +} + +inline flatbuffers::Offset ReductionParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReductionParamT* _o, const 
flatbuffers::rehasher_function_t *_rehasher) { + return CreateReductionParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReductionParam(flatbuffers::FlatBufferBuilder &_fbb, const ReductionParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReductionParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _operation = _o->operation; + auto _dim = _o->dim.size() ? _fbb.CreateVector(_o->dim) : 0; + auto _coeff = _o->coeff; + auto _keepDims = _o->keepDims; + auto _dType = _o->dType; + return MNN::CreateReductionParam( + _fbb, + _operation, + _dim, + _coeff, + _keepDims, + _dType); +} + +inline GatherT *Gather::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GatherT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Gather::UnPackTo(GatherT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = Tindices(); _o->Tindices = _e; }; + { auto _e = Tparams(); _o->Tparams = _e; }; + { auto _e = validateIndices(); _o->validateIndices = _e; }; + { auto _e = axis(); _o->axis = _e; }; +} + +inline flatbuffers::Offset Gather::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGather(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGather(flatbuffers::FlatBufferBuilder &_fbb, const GatherT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _Tindices = _o->Tindices; + auto _Tparams = _o->Tparams; + auto _validateIndices = _o->validateIndices; + auto _axis = _o->axis; + return MNN::CreateGather( 
+ _fbb, + _Tindices, + _Tparams, + _validateIndices, + _axis); +} + +inline ExpandDimsT *ExpandDims::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ExpandDimsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ExpandDims::UnPackTo(ExpandDimsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = T(); _o->T = _e; }; + { auto _e = Tdim(); _o->Tdim = _e; }; + { auto _e = axis(); _o->axis = _e; }; +} + +inline flatbuffers::Offset ExpandDims::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateExpandDims(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateExpandDims(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _T = _o->T; + auto _Tdim = _o->Tdim; + auto _axis = _o->axis; + return MNN::CreateExpandDims( + _fbb, + _T, + _Tdim, + _axis); +} + +inline SeluT *Selu::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SeluT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Selu::UnPackTo(SeluT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = scale(); _o->scale = _e; }; + { auto _e = alpha(); _o->alpha = _e; }; +} + +inline flatbuffers::Offset Selu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SeluT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSelu(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSelu(flatbuffers::FlatBufferBuilder &_fbb, const SeluT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const SeluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _scale = _o->scale; + auto _alpha = _o->alpha; + return MNN::CreateSelu( + _fbb, + _scale, + _alpha); +} + +inline AsStringT *AsString::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new AsStringT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void AsString::UnPackTo(AsStringT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = T(); _o->T = _e; }; + { auto _e = precision(); _o->precision = _e; }; + { auto _e = scientific(); _o->scientific = _e; }; + { auto _e = shortest(); _o->shortest = _e; }; + { auto _e = width(); _o->width = _e; }; + { auto _e = fillString(); if (_e) _o->fillString = _e->str(); }; +} + +inline flatbuffers::Offset AsString::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AsStringT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAsString(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAsString(flatbuffers::FlatBufferBuilder &_fbb, const AsStringT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AsStringT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _T = _o->T; + auto _precision = _o->precision; + auto _scientific = _o->scientific; + auto _shortest = _o->shortest; + auto _width = _o->width; + auto _fillString = _o->fillString.empty() ? 
0 : _fbb.CreateString(_o->fillString); + return MNN::CreateAsString( + _fbb, + _T, + _precision, + _scientific, + _shortest, + _width, + _fillString); +} + +inline ReduceJoinT *ReduceJoin::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReduceJoinT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ReduceJoin::UnPackTo(ReduceJoinT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = keepDims(); _o->keepDims = _e; }; + { auto _e = separator(); if (_e) _o->separator = _e->str(); }; +} + +inline flatbuffers::Offset ReduceJoin::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReduceJoinT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReduceJoin(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReduceJoin(flatbuffers::FlatBufferBuilder &_fbb, const ReduceJoinT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReduceJoinT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _keepDims = _o->keepDims; + auto _separator = _o->separator.empty() ? 
0 : _fbb.CreateString(_o->separator); + return MNN::CreateReduceJoin( + _fbb, + _keepDims, + _separator); +} + +inline UnaryOpT *UnaryOp::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new UnaryOpT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void UnaryOp::UnPackTo(UnaryOpT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = opType(); _o->opType = _e; }; + { auto _e = T(); _o->T = _e; }; +} + +inline flatbuffers::Offset UnaryOp::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnaryOpT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnaryOp(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUnaryOp(flatbuffers::FlatBufferBuilder &_fbb, const UnaryOpT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnaryOpT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _opType = _o->opType; + auto _T = _o->T; + return MNN::CreateUnaryOp( + _fbb, + _opType, + _T); +} + +inline TopKV2T *TopKV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TopKV2T(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void TopKV2::UnPackTo(TopKV2T *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = T(); _o->T = _e; }; + { auto _e = sorted(); _o->sorted = _e; }; +} + +inline flatbuffers::Offset TopKV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2T* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTopKV2(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTopKV2(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const 
TopKV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _T = _o->T; + auto _sorted = _o->sorted; + return MNN::CreateTopKV2( + _fbb, + _T, + _sorted); +} + +inline CropAndResizeT *CropAndResize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new CropAndResizeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void CropAndResize::UnPackTo(CropAndResizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = extrapolationValue(); _o->extrapolationValue = _e; }; + { auto _e = method(); _o->method = _e; }; +} + +inline flatbuffers::Offset CropAndResize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CropAndResizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCropAndResize(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCropAndResize(flatbuffers::FlatBufferBuilder &_fbb, const CropAndResizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CropAndResizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _extrapolationValue = _o->extrapolationValue; + auto _method = _o->method; + return MNN::CreateCropAndResize( + _fbb, + _extrapolationValue, + _method); +} + +inline FillT *Fill::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new FillT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Fill::UnPackTo(FillT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset Fill::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFill(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFill(flatbuffers::FlatBufferBuilder &_fbb, const 
FillT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return MNN::CreateFill( + _fbb); +} + +inline GatherV2T *GatherV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GatherV2T(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void GatherV2::UnPackTo(GatherV2T *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = Taxis(); _o->Taxis = _e; }; + { auto _e = Tindices(); _o->Tindices = _e; }; + { auto _e = Tparams(); _o->Tparams = _e; }; +} + +inline flatbuffers::Offset GatherV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherV2T* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGatherV2(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGatherV2(flatbuffers::FlatBufferBuilder &_fbb, const GatherV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _Taxis = _o->Taxis; + auto _Tindices = _o->Tindices; + auto _Tparams = _o->Tparams; + return MNN::CreateGatherV2( + _fbb, + _Taxis, + _Tindices, + _Tparams); +} + +inline NonMaxSuppressionV2T *NonMaxSuppressionV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new NonMaxSuppressionV2T(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void NonMaxSuppressionV2::UnPackTo(NonMaxSuppressionV2T *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset NonMaxSuppressionV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV2T* _o, const 
flatbuffers::rehasher_function_t *_rehasher) { + return CreateNonMaxSuppressionV2(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNonMaxSuppressionV2(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return MNN::CreateNonMaxSuppressionV2( + _fbb); +} + +inline RangeT *Range::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RangeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Range::UnPackTo(RangeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = Tidx(); _o->Tidx = _e; }; +} + +inline flatbuffers::Offset Range::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRange(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRange(flatbuffers::FlatBufferBuilder &_fbb, const RangeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _Tidx = _o->Tidx; + return MNN::CreateRange( + _fbb, + _Tidx); +} + +inline RankT *Rank::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RankT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Rank::UnPackTo(RankT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset Rank::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRank(_fbb, _o, 
_rehasher); +} + +inline flatbuffers::Offset CreateRank(flatbuffers::FlatBufferBuilder &_fbb, const RankT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return MNN::CreateRank( + _fbb); +} + +inline SizeT *Size::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SizeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Size::UnPackTo(SizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = outputDataType(); _o->outputDataType = _e; }; +} + +inline flatbuffers::Offset Size::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSize(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSize(flatbuffers::FlatBufferBuilder &_fbb, const SizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _outputDataType = _o->outputDataType; + return MNN::CreateSize( + _fbb, + _outputDataType); +} + +inline TransposeT *Transpose::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TransposeT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Transpose::UnPackTo(TransposeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = Tperm(); _o->Tperm = _e; }; +} + +inline flatbuffers::Offset Transpose::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTranspose(_fbb, _o, _rehasher); +} + +inline 
flatbuffers::Offset CreateTranspose(flatbuffers::FlatBufferBuilder &_fbb, const TransposeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _Tperm = _o->Tperm; + return MNN::CreateTranspose( + _fbb, + _Tperm); +} + +inline SliceTfT *SliceTf::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SliceTfT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void SliceTf::UnPackTo(SliceTfT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = T(); _o->T = _e; }; +} + +inline flatbuffers::Offset SliceTf::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceTfT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSliceTf(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSliceTf(flatbuffers::FlatBufferBuilder &_fbb, const SliceTfT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceTfT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _T = _o->T; + return MNN::CreateSliceTf( + _fbb, + _T); +} + +inline QuantizeMaxMinT *QuantizeMaxMin::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizeMaxMinT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void QuantizeMaxMin::UnPackTo(QuantizeMaxMinT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = T(); _o->T = _e; }; +} + +inline flatbuffers::Offset QuantizeMaxMin::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeMaxMinT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizeMaxMin(_fbb, _o, 
_rehasher); +} + +inline flatbuffers::Offset CreateQuantizeMaxMin(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeMaxMinT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeMaxMinT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _T = _o->T; + return MNN::CreateQuantizeMaxMin( + _fbb, + _T); +} + +inline CropT *Crop::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new CropT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Crop::UnPackTo(CropT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; }; + { auto _e = offset(); if (_e) { _o->offset.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->offset[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset Crop::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CropT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCrop(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCrop(flatbuffers::FlatBufferBuilder &_fbb, const CropT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CropT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + auto _offset = _o->offset.size() ? 
_fbb.CreateVector(_o->offset) : 0; + return MNN::CreateCrop( + _fbb, + _axis, + _offset); +} + +inline SpaceBatchT *SpaceBatch::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SpaceBatchT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void SpaceBatch::UnPackTo(SpaceBatchT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = blockShape(); if (_e) _o->blockShape = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = padding(); if (_e) _o->padding = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset SpaceBatch::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceBatchT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSpaceBatch(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSpaceBatch(flatbuffers::FlatBufferBuilder &_fbb, const SpaceBatchT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceBatchT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _blockShape = _o->blockShape ? CreateBlob(_fbb, _o->blockShape.get(), _rehasher) : 0; + auto _padding = _o->padding ? 
CreateBlob(_fbb, _o->padding.get(), _rehasher) : 0; + return MNN::CreateSpaceBatch( + _fbb, + _blockShape, + _padding); +} + +inline MatMulT *MatMul::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MatMulT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void MatMul::UnPackTo(MatMulT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = T(); _o->T = _e; }; + { auto _e = transposeA(); _o->transposeA = _e; }; + { auto _e = transposeB(); _o->transposeB = _e; }; + { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; + { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset MatMul::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMatMul(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMatMul(flatbuffers::FlatBufferBuilder &_fbb, const MatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatMulT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _T = _o->T; + auto _transposeA = _o->transposeA; + auto _transposeB = _o->transposeB; + auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; + auto _bias = _o->bias.size() ? 
_fbb.CreateVector(_o->bias) : 0; + return MNN::CreateMatMul( + _fbb, + _T, + _transposeA, + _transposeB, + _weight, + _bias); +} + +inline MomentsParamT *MomentsParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MomentsParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void MomentsParam::UnPackTo(MomentsParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dim(); if (_e) { _o->dim.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dim[_i] = _e->Get(_i); } } }; + { auto _e = keepDims(); _o->keepDims = _e; }; + { auto _e = dType(); _o->dType = _e; }; +} + +inline flatbuffers::Offset MomentsParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MomentsParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMomentsParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMomentsParam(flatbuffers::FlatBufferBuilder &_fbb, const MomentsParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MomentsParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dim = _o->dim.size() ? 
_fbb.CreateVector(_o->dim) : 0; + auto _keepDims = _o->keepDims; + auto _dType = _o->dType; + return MNN::CreateMomentsParam( + _fbb, + _dim, + _keepDims, + _dType); +} + +inline RNNParamT *RNNParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RNNParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void RNNParam::UnPackTo(RNNParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = numUnits(); _o->numUnits = _e; }; + { auto _e = isBidirectionalRNN(); _o->isBidirectionalRNN = _e; }; + { auto _e = keepAllOutputs(); _o->keepAllOutputs = _e; }; + { auto _e = fwGateWeight(); if (_e) _o->fwGateWeight = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = fwGateBias(); if (_e) _o->fwGateBias = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = fwCandidateWeight(); if (_e) _o->fwCandidateWeight = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = fwCandidateBias(); if (_e) _o->fwCandidateBias = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = bwGateWeight(); if (_e) _o->bwGateWeight = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = bwGateBias(); if (_e) _o->bwGateBias = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = bwCandidateWeight(); if (_e) _o->bwCandidateWeight = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = bwCandidateBias(); if (_e) _o->bwCandidateBias = std::unique_ptr(_e->UnPack(_resolver)); }; +} + +inline flatbuffers::Offset RNNParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRNNParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRNNParam(flatbuffers::FlatBufferBuilder &_fbb, const RNNParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNParamT* __o; const flatbuffers::rehasher_function_t 
*__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _numUnits = _o->numUnits; + auto _isBidirectionalRNN = _o->isBidirectionalRNN; + auto _keepAllOutputs = _o->keepAllOutputs; + auto _fwGateWeight = _o->fwGateWeight ? CreateBlob(_fbb, _o->fwGateWeight.get(), _rehasher) : 0; + auto _fwGateBias = _o->fwGateBias ? CreateBlob(_fbb, _o->fwGateBias.get(), _rehasher) : 0; + auto _fwCandidateWeight = _o->fwCandidateWeight ? CreateBlob(_fbb, _o->fwCandidateWeight.get(), _rehasher) : 0; + auto _fwCandidateBias = _o->fwCandidateBias ? CreateBlob(_fbb, _o->fwCandidateBias.get(), _rehasher) : 0; + auto _bwGateWeight = _o->bwGateWeight ? CreateBlob(_fbb, _o->bwGateWeight.get(), _rehasher) : 0; + auto _bwGateBias = _o->bwGateBias ? CreateBlob(_fbb, _o->bwGateBias.get(), _rehasher) : 0; + auto _bwCandidateWeight = _o->bwCandidateWeight ? CreateBlob(_fbb, _o->bwCandidateWeight.get(), _rehasher) : 0; + auto _bwCandidateBias = _o->bwCandidateBias ? CreateBlob(_fbb, _o->bwCandidateBias.get(), _rehasher) : 0; + return MNN::CreateRNNParam( + _fbb, + _numUnits, + _isBidirectionalRNN, + _keepAllOutputs, + _fwGateWeight, + _fwGateBias, + _fwCandidateWeight, + _fwCandidateBias, + _bwGateWeight, + _bwGateBias, + _bwCandidateWeight, + _bwCandidateBias); +} + +inline BatchMatMulParamT *BatchMatMulParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BatchMatMulParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void BatchMatMulParam::UnPackTo(BatchMatMulParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = adjX(); _o->adjX = _e; }; + { auto _e = adjY(); _o->adjY = _e; }; +} + +inline flatbuffers::Offset BatchMatMulParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBatchMatMulParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset 
CreateBatchMatMulParam(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _adjX = _o->adjX; + auto _adjY = _o->adjY; + return MNN::CreateBatchMatMulParam( + _fbb, + _adjX, + _adjY); +} + +inline DepthSpaceParamT *DepthSpaceParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new DepthSpaceParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void DepthSpaceParam::UnPackTo(DepthSpaceParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = blockSize(); _o->blockSize = _e; }; +} + +inline flatbuffers::Offset DepthSpaceParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthSpaceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDepthSpaceParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDepthSpaceParam(flatbuffers::FlatBufferBuilder &_fbb, const DepthSpaceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthSpaceParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _blockSize = _o->blockSize; + return MNN::CreateDepthSpaceParam( + _fbb, + _blockSize); +} + +inline ReverseSequenceParamT *ReverseSequenceParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReverseSequenceParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ReverseSequenceParam::UnPackTo(ReverseSequenceParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = batchDim(); _o->batchDim = 
_e; }; + { auto _e = seqDim(); _o->seqDim = _e; }; +} + +inline flatbuffers::Offset ReverseSequenceParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReverseSequenceParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReverseSequenceParam(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _batchDim = _o->batchDim; + auto _seqDim = _o->seqDim; + return MNN::CreateReverseSequenceParam( + _fbb, + _batchDim, + _seqDim); +} + +inline DetectionPostProcessParamT *DetectionPostProcessParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new DetectionPostProcessParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void DetectionPostProcessParam::UnPackTo(DetectionPostProcessParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = maxDetections(); _o->maxDetections = _e; }; + { auto _e = maxClassesPerDetection(); _o->maxClassesPerDetection = _e; }; + { auto _e = detectionsPerClass(); _o->detectionsPerClass = _e; }; + { auto _e = nmsScoreThreshold(); _o->nmsScoreThreshold = _e; }; + { auto _e = iouThreshold(); _o->iouThreshold = _e; }; + { auto _e = numClasses(); _o->numClasses = _e; }; + { auto _e = useRegularNMS(); _o->useRegularNMS = _e; }; + { auto _e = centerSizeEncoding(); if (_e) { _o->centerSizeEncoding.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->centerSizeEncoding[_i] = _e->Get(_i); } } }; +} + +inline flatbuffers::Offset DetectionPostProcessParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
DetectionPostProcessParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDetectionPostProcessParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDetectionPostProcessParam(flatbuffers::FlatBufferBuilder &_fbb, const DetectionPostProcessParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DetectionPostProcessParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _maxDetections = _o->maxDetections; + auto _maxClassesPerDetection = _o->maxClassesPerDetection; + auto _detectionsPerClass = _o->detectionsPerClass; + auto _nmsScoreThreshold = _o->nmsScoreThreshold; + auto _iouThreshold = _o->iouThreshold; + auto _numClasses = _o->numClasses; + auto _useRegularNMS = _o->useRegularNMS; + auto _centerSizeEncoding = _o->centerSizeEncoding.size() ? _fbb.CreateVector(_o->centerSizeEncoding) : 0; + return MNN::CreateDetectionPostProcessParam( + _fbb, + _maxDetections, + _maxClassesPerDetection, + _detectionsPerClass, + _nmsScoreThreshold, + _iouThreshold, + _numClasses, + _useRegularNMS, + _centerSizeEncoding); +} + +inline OneHotParamT *OneHotParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new OneHotParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void OneHotParam::UnPackTo(OneHotParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = dType(); _o->dType = _e; }; + { auto _e = axis(); _o->axis = _e; }; +} + +inline flatbuffers::Offset OneHotParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateOneHotParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateOneHotParam(flatbuffers::FlatBufferBuilder &_fbb, const OneHotParamT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _dType = _o->dType; + auto _axis = _o->axis; + return MNN::CreateOneHotParam( + _fbb, + _dType, + _axis); +} + +inline PadParamT *PadParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PadParamT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void PadParam::UnPackTo(PadParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = mode(); _o->mode = _e; }; +} + +inline flatbuffers::Offset PadParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePadParam(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePadParam(flatbuffers::FlatBufferBuilder &_fbb, const PadParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _mode = _o->mode; + return MNN::CreatePadParam( + _fbb, + _mode); +} + +inline const flatbuffers::TypeTable *BinaryOpOperationTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { 
flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + BinaryOpOperationTypeTable + }; + static const int64_t values[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20, 21, 22 }; + static const char * const names[] = { + "ADD", + "SUB", + "MUL", + "DIV", + "MAX_TEMP", + "MIN_TEMP", + "POW", + "REALDIV", + "MINIMUM", + "MAXIMUM", + "GREATER", + "GREATER_EQUAL", + "LESS", + "FLOORDIV", + "SquaredDifference", + "EQUAL", + "LESS_EQUAL", + "FLOORMOD", + "MOD", + "ATAN2", + "LOGICALOR", + "NOTEQUAL" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 22, type_codes, type_refs, values, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ReductionTypeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ReductionTypeTypeTable + }; + static const char * const names[] = { + "SUM", + "ASUM", + "SUMSQ", + "MEAN", + "MAXIMUM", + "MINIMUM", + "PROD", + "ANY", + "ALL" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 9, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *UnaryOpOperationTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { 
flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + UnaryOpOperationTypeTable + }; + static const char * const names[] = { + "ABS", + "NEG", + "FLOOR", + "CEIL", + "SQUARE", + "SQRT", + "RSQRT", + "EXP", + "LOG", + "SIN", + "COS", + "TAN", + "ASIN", + "ACOS", + "ATAN", + "RECIPROCAL", + "LOG1P", + "BNLL", + "ACOSH", + "SINH", + "ASINH", + "ATANH", + "SIGN", + "ROUND", + "COSH", + "ERF", + "ERFC", + "ERFINV", + "EXPM1" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 29, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *CropAndResizeMethodTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + CropAndResizeMethodTypeTable + }; + static const char * const names[] = { + "BILINEAR", + "NEAREST" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PadValueModeTypeTable() { + static const flatbuffers::TypeCode 
type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + PadValueModeTypeTable + }; + static const char * const names[] = { + "CONSTANT", + "REFLECT", + "SYMMETRIC" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *BinaryOpTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "opType", + "T" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PackParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "dataType", + "axis" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *StridedSliceParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "Index", + "T", + "beginMask", + "endMask", + "ellipsisMask", + "newAxisMask", + "shrinkAxisMask" + }; + static const flatbuffers::TypeTable tt = { 
+ flatbuffers::ST_TABLE, 7, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *SqueezeParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 } + }; + static const char * const names[] = { + "squeezeDims" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *CastParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "srcT", + "dstT" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ReductionParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_INT, 0, 1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + ReductionTypeTypeTable, + DataTypeTypeTable + }; + static const char * const names[] = { + "operation", + "dim", + "coeff", + "keepDims", + "dType" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *GatherTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "Tindices", + "Tparams", + "validateIndices", + "axis" + }; + static const 
flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ExpandDimsTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "T", + "Tdim", + "axis" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *SeluTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 } + }; + static const char * const names[] = { + "scale", + "alpha" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *AsStringTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_STRING, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "T", + "precision", + "scientific", + "shortest", + "width", + "fillString" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ReduceJoinTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_STRING, 0, -1 } + }; + static const char * const names[] = { + "keepDims", + "separator" + }; + static const flatbuffers::TypeTable tt = { + 
flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *UnaryOpTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + UnaryOpOperationTypeTable, + DataTypeTypeTable + }; + static const char * const names[] = { + "opType", + "T" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *TopKV2TypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "T", + "sorted" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *CropAndResizeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + CropAndResizeMethodTypeTable + }; + static const char * const names[] = { + "extrapolationValue", + "method" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *FillTypeTable() { + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr + }; + return &tt; +} + +inline const flatbuffers::TypeTable *GatherV2TypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction 
type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "Taxis", + "Tindices", + "Tparams" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *NonMaxSuppressionV2TypeTable() { + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr + }; + return &tt; +} + +inline const flatbuffers::TypeTable *RangeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "Tidx" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *RankTypeTable() { + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr + }; + return &tt; +} + +inline const flatbuffers::TypeTable *SizeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "outputDataType" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *TransposeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "Tperm" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *SliceTfTypeTable() { + static const 
flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "T" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *QuantizeMaxMinTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "T" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *CropTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 1, -1 } + }; + static const char * const names[] = { + "axis", + "offset" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *SpaceBatchTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + BlobTypeTable + }; + static const char * const names[] = { + "blockShape", + "padding" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *MatMulTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable 
+ }; + static const char * const names[] = { + "T", + "transposeA", + "transposeB", + "weight", + "bias" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *MomentsParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 1, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "dim", + "keepDims", + "dType" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *RNNParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 }, + { flatbuffers::ET_SEQUENCE, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + BlobTypeTable + }; + static const char * const names[] = { + "numUnits", + "isBidirectionalRNN", + "keepAllOutputs", + "fwGateWeight", + "fwGateBias", + "fwCandidateWeight", + "fwCandidateBias", + "bwGateWeight", + "bwGateBias", + "bwCandidateWeight", + "bwCandidateBias" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *BatchMatMulParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 } + }; + static const char * const names[] = { + 
"adjX", + "adjY" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *DepthSpaceParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 } + }; + static const char * const names[] = { + "blockSize" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *ReverseSequenceParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const char * const names[] = { + "batchDim", + "seqDim" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *DetectionPostProcessParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_FLOAT, 0, -1 }, + { flatbuffers::ET_INT, 0, -1 }, + { flatbuffers::ET_BOOL, 0, -1 }, + { flatbuffers::ET_FLOAT, 1, -1 } + }; + static const char * const names[] = { + "maxDetections", + "maxClassesPerDetection", + "detectionsPerClass", + "nmsScoreThreshold", + "iouThreshold", + "numClasses", + "useRegularNMS", + "centerSizeEncoding" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 8, type_codes, nullptr, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *OneHotParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, -1 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "dType", + "axis" 
+ }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *PadParamTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + PadValueModeTypeTable + }; + static const char * const names[] = { + "mode" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_TENSORFLOWOP_MNN_H_ diff --git a/schema/current/Type_generated.h b/schema/current/Type_generated.h new file mode 100644 index 000000000..e68bb8ff1 --- /dev/null +++ b/schema/current/Type_generated.h @@ -0,0 +1,219 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_TYPE_MNN_H_ +#define FLATBUFFERS_GENERATED_TYPE_MNN_H_ + +#include "flatbuffers/flatbuffers.h" + +namespace MNN { + +enum NetSource { + NetSource_CAFFE = 0, + NetSource_TENSORFLOW = 1, + NetSource_TFLITE = 2, + NetSource_ONNX = 3, + NetSource_MIN = NetSource_CAFFE, + NetSource_MAX = NetSource_ONNX +}; + +inline const NetSource (&EnumValuesNetSource())[4] { + static const NetSource values[] = { + NetSource_CAFFE, + NetSource_TENSORFLOW, + NetSource_TFLITE, + NetSource_ONNX + }; + return values; +} + +inline const char * const *EnumNamesNetSource() { + static const char * const names[] = { + "CAFFE", + "TENSORFLOW", + "TFLITE", + "ONNX", + nullptr + }; + return names; +} + +inline const char *EnumNameNetSource(NetSource e) { + if (e < NetSource_CAFFE || e > NetSource_ONNX) return ""; + const size_t index = static_cast(e); + return EnumNamesNetSource()[index]; +} + +enum DataType { + DataType_DT_INVALID = 0, + DataType_DT_FLOAT = 1, + DataType_DT_DOUBLE = 2, + DataType_DT_INT32 = 3, + DataType_DT_UINT8 = 4, + DataType_DT_INT16 = 5, + 
DataType_DT_INT8 = 6, + DataType_DT_STRING = 7, + DataType_DT_COMPLEX64 = 8, + DataType_DT_INT64 = 9, + DataType_DT_BOOL = 10, + DataType_DT_QINT8 = 11, + DataType_DT_QUINT8 = 12, + DataType_DT_QINT32 = 13, + DataType_DT_BFLOAT16 = 14, + DataType_DT_QINT16 = 15, + DataType_DT_QUINT16 = 16, + DataType_DT_UINT16 = 17, + DataType_DT_COMPLEX128 = 18, + DataType_DT_HALF = 19, + DataType_DT_RESOURCE = 20, + DataType_DT_VARIANT = 21, + DataType_MIN = DataType_DT_INVALID, + DataType_MAX = DataType_DT_VARIANT +}; + +inline const DataType (&EnumValuesDataType())[22] { + static const DataType values[] = { + DataType_DT_INVALID, + DataType_DT_FLOAT, + DataType_DT_DOUBLE, + DataType_DT_INT32, + DataType_DT_UINT8, + DataType_DT_INT16, + DataType_DT_INT8, + DataType_DT_STRING, + DataType_DT_COMPLEX64, + DataType_DT_INT64, + DataType_DT_BOOL, + DataType_DT_QINT8, + DataType_DT_QUINT8, + DataType_DT_QINT32, + DataType_DT_BFLOAT16, + DataType_DT_QINT16, + DataType_DT_QUINT16, + DataType_DT_UINT16, + DataType_DT_COMPLEX128, + DataType_DT_HALF, + DataType_DT_RESOURCE, + DataType_DT_VARIANT + }; + return values; +} + +inline const char * const *EnumNamesDataType() { + static const char * const names[] = { + "DT_INVALID", + "DT_FLOAT", + "DT_DOUBLE", + "DT_INT32", + "DT_UINT8", + "DT_INT16", + "DT_INT8", + "DT_STRING", + "DT_COMPLEX64", + "DT_INT64", + "DT_BOOL", + "DT_QINT8", + "DT_QUINT8", + "DT_QINT32", + "DT_BFLOAT16", + "DT_QINT16", + "DT_QUINT16", + "DT_UINT16", + "DT_COMPLEX128", + "DT_HALF", + "DT_RESOURCE", + "DT_VARIANT", + nullptr + }; + return names; +} + +inline const char *EnumNameDataType(DataType e) { + if (e < DataType_DT_INVALID || e > DataType_DT_VARIANT) return ""; + const size_t index = static_cast(e); + return EnumNamesDataType()[index]; +} + +inline const flatbuffers::TypeTable *NetSourceTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 }, + { 
flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + NetSourceTypeTable + }; + static const char * const names[] = { + "CAFFE", + "TENSORFLOW", + "TFLITE", + "ONNX" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 4, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +inline const flatbuffers::TypeTable *DataTypeTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 }, + { flatbuffers::ET_INT, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + DataTypeTypeTable + }; + static const char * const names[] = { + "DT_INVALID", + "DT_FLOAT", + "DT_DOUBLE", + "DT_INT32", + "DT_UINT8", + "DT_INT16", + "DT_INT8", + "DT_STRING", + "DT_COMPLEX64", + "DT_INT64", + "DT_BOOL", + "DT_QINT8", + "DT_QUINT8", + "DT_QINT32", + "DT_BFLOAT16", + "DT_QINT16", + "DT_QUINT16", + "DT_UINT16", + "DT_COMPLEX128", + "DT_HALF", + "DT_RESOURCE", + "DT_VARIANT" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_ENUM, 22, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_TYPE_MNN_H_ diff --git a/schema/current/UserDefine_generated.h b/schema/current/UserDefine_generated.h new file mode 100644 index 000000000..10e461c90 --- 
/dev/null +++ b/schema/current/UserDefine_generated.h @@ -0,0 +1,137 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_USERDEFINE_MNN_H_ +#define FLATBUFFERS_GENERATED_USERDEFINE_MNN_H_ + +#include "flatbuffers/flatbuffers.h" + +#include "Tensor_generated.h" +#include "Type_generated.h" + +namespace MNN { + +struct TensorConvertInfo; +struct TensorConvertInfoT; + +inline const flatbuffers::TypeTable *TensorConvertInfoTypeTable(); + +struct TensorConvertInfoT : public flatbuffers::NativeTable { + typedef TensorConvertInfo TableType; + MNN_DATA_FORMAT source; + MNN_DATA_FORMAT dest; + TensorConvertInfoT() + : source(MNN_DATA_FORMAT_NCHW), + dest(MNN_DATA_FORMAT_NCHW) { + } +}; + +struct TensorConvertInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TensorConvertInfoT NativeTableType; + static const flatbuffers::TypeTable *MiniReflectTypeTable() { + return TensorConvertInfoTypeTable(); + } + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SOURCE = 4, + VT_DEST = 6 + }; + MNN_DATA_FORMAT source() const { + return static_cast(GetField(VT_SOURCE, 0)); + } + MNN_DATA_FORMAT dest() const { + return static_cast(GetField(VT_DEST, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SOURCE) && + VerifyField(verifier, VT_DEST) && + verifier.EndTable(); + } + TensorConvertInfoT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TensorConvertInfoT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorConvertInfoT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TensorConvertInfoBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_source(MNN_DATA_FORMAT source) { + 
fbb_.AddElement(TensorConvertInfo::VT_SOURCE, static_cast(source), 0); + } + void add_dest(MNN_DATA_FORMAT dest) { + fbb_.AddElement(TensorConvertInfo::VT_DEST, static_cast(dest), 0); + } + explicit TensorConvertInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + TensorConvertInfoBuilder &operator=(const TensorConvertInfoBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensorConvertInfo( + flatbuffers::FlatBufferBuilder &_fbb, + MNN_DATA_FORMAT source = MNN_DATA_FORMAT_NCHW, + MNN_DATA_FORMAT dest = MNN_DATA_FORMAT_NCHW) { + TensorConvertInfoBuilder builder_(_fbb); + builder_.add_dest(dest); + builder_.add_source(source); + return builder_.Finish(); +} + +flatbuffers::Offset CreateTensorConvertInfo(flatbuffers::FlatBufferBuilder &_fbb, const TensorConvertInfoT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline TensorConvertInfoT *TensorConvertInfo::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TensorConvertInfoT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void TensorConvertInfo::UnPackTo(TensorConvertInfoT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = source(); _o->source = _e; }; + { auto _e = dest(); _o->dest = _e; }; +} + +inline flatbuffers::Offset TensorConvertInfo::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorConvertInfoT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTensorConvertInfo(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTensorConvertInfo(flatbuffers::FlatBufferBuilder &_fbb, const TensorConvertInfoT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorConvertInfoT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _source = _o->source; + auto _dest = _o->dest; + return MNN::CreateTensorConvertInfo( + _fbb, + _source, + _dest); +} + +inline const flatbuffers::TypeTable *TensorConvertInfoTypeTable() { + static const flatbuffers::TypeCode type_codes[] = { + { flatbuffers::ET_CHAR, 0, 0 }, + { flatbuffers::ET_CHAR, 0, 0 } + }; + static const flatbuffers::TypeFunction type_refs[] = { + MNN_DATA_FORMATTypeTable + }; + static const char * const names[] = { + "source", + "dest" + }; + static const flatbuffers::TypeTable tt = { + flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names + }; + return &tt; +} + +} // namespace MNN + +#endif // FLATBUFFERS_GENERATED_USERDEFINE_MNN_H_ diff --git a/schema/default/MNN.fbs b/schema/default/MNN.fbs index 096ba9e37..5412f7372 100644 --- a/schema/default/MNN.fbs +++ b/schema/default/MNN.fbs @@ -19,7 +19,7 @@ enum OpType : int { QuantizedAdd, ArgMax, AsString, - BatchNorm, + InstanceNorm, BatchToSpaceND, Bias, BinaryOp, @@ -133,8 +133,16 @@ enum OpType : int { MatrixBandPart, GatherND, DetectionPostProcess, + UnravelIndex, + ScatterNd, + OneHot, + BroadcastTo, + Dilation2D, MaxLayerCount = 128, // this count must bigger than the layer id of last layer ConvertTensor = 129, + ArgMin = 130, + LinSpace = 131, + PLUGIN = 256, //The Type load from plugin //Training Op Start from 257 Select = 257, @@ -146,6 +154,8 @@ enum OpType : int { PoolGrad, SoftmaxGrad, Conv2DBackPropFilter, + TrainableParam, + BatchNorm, Extra = 512, // quantization @@ -251,7 +261,9 @@ union OpParameter { Pool3D, Convolution3D, ELU, - DetectionPostProcessParam + DetectionPostProcessParam, + OneHotParam, + PadParam } table Op { @@ -276,7 +288,10 @@ enum ForwardType : byte { OPENGLES, VULKAN, } - +enum Usage : byte { + INFERENCE = 0, + TRAIN = 1 +} table Net { bizCode: string; extraTensorDescribe: [TensorDescribe]; @@ -287,6 +302,7 @@ table Net { sourceType: NetSource = CAFFE; 
tensorName: [string]; tensorNumber: int = 0; + usage:Usage = INFERENCE; // used to more compatibility in future } root_type Net; diff --git a/schema/default/TensorflowOp.fbs b/schema/default/TensorflowOp.fbs index 83c4bfa61..cc279ae01 100644 --- a/schema/default/TensorflowOp.fbs +++ b/schema/default/TensorflowOp.fbs @@ -20,6 +20,10 @@ enum BinaryOpOperation : byte { EQUAL = 15, LESS_EQUAL = 16, FLOORMOD = 17, + MOD = 19, + ATAN2 = 20, + LOGICALOR = 21, + NOTEQUAL = 22, } table BinaryOp { @@ -121,6 +125,18 @@ enum UnaryOpOperation : int { ATAN = 14, RECIPROCAL = 15, LOG1P = 16, + BNLL = 17, + ACOSH = 18, + SINH = 19, + ASINH = 20, + ATANH = 21, + SIGN = 22, + ROUND = 23, + COSH = 24, + ERF = 25, + ERFC = 26, + ERFINV = 27, + EXPM1 = 28, } table UnaryOp { @@ -245,3 +261,18 @@ table DetectionPostProcessParam{ // always size == 4 centerSizeEncoding:[float]; } + +table OneHotParam{ + dType:DataType=DT_FLOAT; + axis:int=-1; +} + +enum PadValueMode : byte{ + CONSTANT = 0, + REFLECT = 1, + SYMMETRIC = 2 +} + +table PadParam{ + mode: PadValueMode = CONSTANT; +} diff --git a/schema/generate.ps1 b/schema/generate.ps1 index 50c7feafc..1a90105ac 100644 --- a/schema/generate.ps1 +++ b/schema/generate.ps1 @@ -44,7 +44,7 @@ rm -force current\*.h # flatc all fbs pushd current echo "*** generating fbs under $DIR ***" -Get-ChildItem ..\$DIR\*.fbs | %{Invoke-Expression "..\$FLATC -c -b --gen-object-api $_"} +Get-ChildItem ..\$DIR\*.fbs | %{Invoke-Expression "..\$FLATC -c -b --gen-object-api --reflect-names $_"} popd # finish diff --git a/source/backend/arm82/Arm82Backend.cpp b/source/backend/arm82/Arm82Backend.cpp index a3e70f819..4e0c7eff5 100644 --- a/source/backend/arm82/Arm82Backend.cpp +++ b/source/backend/arm82/Arm82Backend.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Arm82Backend.hpp" +#include "backend/arm82/Arm82Backend.hpp" #include -#include "Arm82Convolution1x1.hpp" +#include "backend/arm82/Arm82Convolution1x1.hpp" #include 
"MNN_generated.h" namespace MNN { static const MNNForwardType gForwardType = MNN_FORWARD_USER_1; diff --git a/source/backend/arm82/Arm82Backend.hpp b/source/backend/arm82/Arm82Backend.hpp index 99df7fa84..df98a50a8 100644 --- a/source/backend/arm82/Arm82Backend.hpp +++ b/source/backend/arm82/Arm82Backend.hpp @@ -9,7 +9,7 @@ #ifndef Arm82Backend_hpp #define Arm82Backend_hpp -#include "Backend.hpp" +#include "core/Backend.hpp" namespace MNN { class Arm82Backend : public Backend { public: diff --git a/source/backend/arm82/Arm82Convolution1x1.cpp b/source/backend/arm82/Arm82Convolution1x1.cpp index 5db924cf7..11de7df5d 100644 --- a/source/backend/arm82/Arm82Convolution1x1.cpp +++ b/source/backend/arm82/Arm82Convolution1x1.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Arm82Convolution1x1.hpp" -#include "Arm82Backend.hpp" +#include "backend/arm82/Arm82Convolution1x1.hpp" +#include "backend/arm82/Arm82Backend.hpp" #include "MNN_generated.h" -#include "Macro.h" +#include "core/Macro.h" #define SRC_Z_UNIT 4 #define DST_Z_UNIT 8 #define DST_X_UNIT 24 diff --git a/source/backend/arm82/Arm82Convolution1x1.hpp b/source/backend/arm82/Arm82Convolution1x1.hpp index 12ab53e47..e8210cf4a 100644 --- a/source/backend/arm82/Arm82Convolution1x1.hpp +++ b/source/backend/arm82/Arm82Convolution1x1.hpp @@ -8,8 +8,8 @@ #ifndef Arm82Convolution1x1_hpp #define Arm82Convolution1x1_hpp -#include "CPUConvolution.hpp" -#include "Execution.hpp" +#include "backend/cpu/CPUConvolution.hpp" +#include "core/Execution.hpp" namespace MNN { class Arm82Convolution1x1 : public Execution { public: diff --git a/source/backend/arm82/CMakeLists.txt b/source/backend/arm82/CMakeLists.txt index f07e077df..af841ea12 100644 --- a/source/backend/arm82/CMakeLists.txt +++ b/source/backend/arm82/CMakeLists.txt @@ -1,42 +1,18 @@ -cmake_minimum_required(VERSION 2.8) -project(MNN_Arm82) -file(GLOB SRCS "*.hpp" "*.cpp") -include_directories("../../include/") 
-include_directories("../../source/core") -include_directories("../../source/backend/cpu") -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) -enable_language(ASM) -if(SYSTEM.Android) - if(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv7") - file(GLOB SRCS_ASM "asm/arm32/*") - add_definitions(-mfloat-abi=softfp -mfpu=neon) - add_definitions(-march=armv8.2a+fp16) - elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64") - add_definitions(-march=armv8.2a+fp16) - file(GLOB SRCS_ASM "asm/arm64/*") - endif() - add_definitions(-DMNN_BUILD_FOR_ANDROID) -else() - file(GLOB SRCS_ASM "asm/arm32/*" "asm/arm64/*") -endif() - - -if(SYSTEM.Android AND NOT MNN_BUILD_FOR_ANDROID_COMMAND) - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${NATIVE_LIBRARY_OUTPUT}/${ANDROID_ABI}) -endif() -add_library( - MNN_Arm82 - SHARED - ${SRCS} - ${SRCS_ASM} -) -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti -fno-exceptions") -add_definitions(-fvisibility=hidden) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") - -target_link_libraries(MNN_Arm82 - MNN +if(MNN_ARM82) + if(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64" OR IOS_ARCH STREQUAL "arm64") + file(GLOB MNN_ARM82_SRCS "${CMAKE_CURRENT_LIST_DIR}/*.cpp") + file(GLOB MNN_ARM82_SRCS_ASM "${CMAKE_CURRENT_LIST_DIR}/asm/arm64/*") + add_definitions(-march=armv8.2a+fp16) + add_library( + MNNARM82 + OBJECT + ${MNN_ARM82_SRCS} + ${MNN_ARM82_SRCS_ASM} ) - + target_include_directories(MNNARM82 PRIVATE ${CMAKE_CURRENT_LIST_DIR}/asm/) + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNARM82) + SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) + SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) + endif() +endif() diff --git a/source/backend/cpu/CPUArgMax.cpp b/source/backend/cpu/CPUArgMax.cpp index 
1eabff9be..46e48b9cc 100644 --- a/source/backend/cpu/CPUArgMax.cpp +++ b/source/backend/cpu/CPUArgMax.cpp @@ -6,16 +6,16 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUArgMax.hpp" +#include "backend/cpu/CPUArgMax.hpp" #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/TensorUtils.hpp" namespace MNN { -CPUArgMax::CPUArgMax(Backend *backend, int topk, int outMaxVal, int softmaxThreshold, int axis) - : Execution(backend), mTopk(topk), mOutMaxVal(outMaxVal), mSoftmaxThreshold(softmaxThreshold), mAxis(axis) { +CPUArgMax::CPUArgMax(Backend *backend, ArgMinOrMax mode, int topk, int outMaxVal, int softmaxThreshold, int axis) + : Execution(backend), mMode(mode), mTopk(topk), mOutMaxVal(outMaxVal), mSoftmaxThreshold(softmaxThreshold), mAxis(axis) { // nothing to do } @@ -25,7 +25,7 @@ ErrorCode CPUArgMax::onResize(const std::vector &inputs, const std::ve auto output = outputs[0]; auto inputDimensionFromat = TensorUtils::getDescribe(input)->dimensionFormat; - mFromNHWC = inputDimensionFromat == MNN_DATA_FORMAT_NHWC; + mFromNHWC = inputDimensionFromat != MNN_DATA_FORMAT_NC4HW4; if (!mFromNHWC) { // if the input format is NC4HW4, convert to be NCHW from NC4HW4 firstly @@ -44,7 +44,7 @@ ErrorCode CPUArgMax::onResize(const std::vector &inputs, const std::ve mNum = 1; mDim = 1; mKeyExtent = 1; - + if(mAxis < 0){ mAxis = mAxis + input->dimensions(); } @@ -95,24 +95,50 @@ ErrorCode CPUArgMax::onExecute(const std::vector &inputs, const std::v }; if (mFromNHWC) { - auto srcOrigin = input->host(); - auto dstOrigin = output->host(); - for (int i = 0; i < mNum; ++i) { - auto iptr = srcOrigin + i * mDim * mKeyExtent; - auto optr = dstOrigin + i * mKeyExtent; - - int index = 0; - float maxValue = -FLT_MAX; - for (int j = 0; j < mDim; ++j) { - auto val = iptr[j * mKeyExtent]; - if (val > maxValue) { - maxValue = val; 
- index = j; + if (mMode == ARGMAX) { + auto srcOrigin = input->host(); + auto dstOrigin = output->host(); + for (int i = 0; i < mNum; ++i) { + auto iptr = srcOrigin + i * mDim * mKeyExtent; + auto optr = dstOrigin + i * mKeyExtent; + + for(int k = 0; k < mKeyExtent; ++k){ + int index = 0; + float maxValue = -FLT_MAX; + for (int j = 0; j < mDim; ++j) { + auto val = iptr[k + j * mKeyExtent]; + if (val > maxValue) { + maxValue = val; + index = j; + } + } + optr[k] = index; + } + } + } else { + auto srcOrigin = input->host(); + auto dstOrigin = output->host(); + for (int i = 0; i < mNum; ++i) { + auto iptr = srcOrigin + i * mDim * mKeyExtent; + auto optr = dstOrigin + i * mKeyExtent; + + for(int k = 0; k < mKeyExtent; ++k){ + int index = 0; + float minValue = FLT_MAX; + for (int j = 0; j < mDim; ++j) { + auto val = iptr[k + j * mKeyExtent]; + if (val < minValue) { + minValue = val; + index = j; + } + } + optr[k] = index; } } - optr[0] = index; } + } else { + MNN_ASSERT(mMode == ARGMAX); // caffe does not have argmin layer // Legacy code for CAFFE backend()->onCopyBuffer(input, &mInputBuffer); @@ -177,8 +203,15 @@ class CPUArgMaxCreator : public CPUBackend::Creator { virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const { auto argMax = op->main_as_ArgMax(); - return new CPUArgMax(backend, argMax->topK(), argMax->outMaxVal(), argMax->softmaxThreshold(), argMax->axis()); + if (op->type() == OpType_ArgMin) { + return new CPUArgMax(backend, CPUArgMax::ArgMinOrMax::ARGMIN, + argMax->topK(), argMax->outMaxVal(), argMax->softmaxThreshold(), argMax->axis()); + } else { + return new CPUArgMax(backend, CPUArgMax::ArgMinOrMax::ARGMAX, + argMax->topK(), argMax->outMaxVal(), argMax->softmaxThreshold(), argMax->axis()); + } } }; REGISTER_CPU_OP_CREATOR(CPUArgMaxCreator, OpType_ArgMax); +REGISTER_CPU_OP_CREATOR(CPUArgMaxCreator, OpType_ArgMin); } // namespace MNN diff --git a/source/backend/cpu/CPUArgMax.hpp 
b/source/backend/cpu/CPUArgMax.hpp index 1482fff42..0314091bd 100644 --- a/source/backend/cpu/CPUArgMax.hpp +++ b/source/backend/cpu/CPUArgMax.hpp @@ -9,13 +9,17 @@ #ifndef CPUArgMax_hpp #define CPUArgMax_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUArgMax : public Execution { public: - CPUArgMax(Backend *backend, int topk, int outMaxVal, int softmaxThreshold, int axis); + enum ArgMinOrMax { + ARGMIN, + ARGMAX + }; + CPUArgMax(Backend *backend, ArgMinOrMax mode, int topk, int outMaxVal, int softmaxThreshold, int axis); virtual ~CPUArgMax() = default; virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; @@ -31,6 +35,7 @@ class CPUArgMax : public Execution { int mDim; int mKeyExtent; bool mFromNHWC; + ArgMinOrMax mMode; }; } // namespace MNN diff --git a/source/backend/cpu/CPUAsString.cpp b/source/backend/cpu/CPUAsString.cpp index 76cae659f..1b8119e8f 100644 --- a/source/backend/cpu/CPUAsString.cpp +++ b/source/backend/cpu/CPUAsString.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUAsString.hpp" +#include "backend/cpu/CPUAsString.hpp" #include #include -#include "Macro.h" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { #define INT_CAPACITY 10 diff --git a/source/backend/cpu/CPUAsString.hpp b/source/backend/cpu/CPUAsString.hpp index 9f0f2d888..c61402ba1 100644 --- a/source/backend/cpu/CPUAsString.hpp +++ b/source/backend/cpu/CPUAsString.hpp @@ -9,7 +9,7 @@ #ifndef CPUAsString_hpp #define CPUAsString_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUAsStringCreator : public CPUBackend::Creator { diff --git a/source/backend/cpu/CPUBackend.cpp b/source/backend/cpu/CPUBackend.cpp index 1eefc38c3..b000dad14 100644 --- a/source/backend/cpu/CPUBackend.cpp +++ 
b/source/backend/cpu/CPUBackend.cpp @@ -6,20 +6,20 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" #include #include -#include "BufferAllocator.hpp" -#include "CPUConcat.hpp" -#include "CPUTensorConvert.hpp" -#include "CommonOptFunction.h" -#include "TensorUtils.hpp" -#include "ThreadPool.hpp" -#include "SizeComputer.hpp" +#include "core/BufferAllocator.hpp" +#include "backend/cpu/CPUConcat.hpp" +#include "backend/cpu/CPUTensorConvert.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/TensorUtils.hpp" +#include "backend/cpu/ThreadPool.hpp" +#include "core/SizeComputer.hpp" #ifdef _OPENMP #include #endif // _OPENMP -#include "CPURuntime.hpp" +#include "backend/cpu/CPURuntime.hpp" #define MAX_THREAD_NUMBER 32 @@ -117,6 +117,9 @@ void CPUBackend::onExecuteEnd() const { } bool CPUBackend::onAcquireBuffer(const MNN::Tensor* nativeTensorConst, StorageType storageType) { + if (nativeTensorConst == nullptr) { + return false; + } auto nativeTensor = (Tensor*)nativeTensorConst; auto& buffer = nativeTensor->buffer(); @@ -154,6 +157,9 @@ bool CPUBackend::onAcquireBuffer(const MNN::Tensor* nativeTensorConst, StorageTy } bool CPUBackend::onReleaseBuffer(const MNN::Tensor* nativeTensor, StorageType storageType) { + if (nativeTensor == nullptr) { + return false; + } if (nullptr == nativeTensor->buffer().host) { return false; } @@ -279,7 +285,10 @@ void CPUBackend::onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) return; } - CPUTensorConverter::convert(srcTensor, dstTensor); + auto code = CPUTensorConverter::convert(srcTensor, dstTensor); + if (NO_ERROR != code) { + MNN_ERROR("Error in CPUBackend::onCopyBuffer\n"); + } } struct CPUBackendCreator : BackendCreator { diff --git a/source/backend/cpu/CPUBackend.hpp b/source/backend/cpu/CPUBackend.hpp index 8967eecdd..3ce6b3866 100644 --- a/source/backend/cpu/CPUBackend.hpp +++ b/source/backend/cpu/CPUBackend.hpp @@ -12,8 +12,8 
@@ #include #include #include -#include "Backend.hpp" -#include "Execution.hpp" +#include "core/Backend.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { @@ -38,7 +38,6 @@ class CPUBackend final : public Backend { const MNN::Op* op) override; virtual void onExecuteBegin() const override; virtual void onExecuteEnd() const override; - public: class Creator { public: diff --git a/source/backend/cpu/CPUBatchMatMul.cpp b/source/backend/cpu/CPUBatchMatMul.cpp index dfdce28a7..f8d24eb69 100644 --- a/source/backend/cpu/CPUBatchMatMul.cpp +++ b/source/backend/cpu/CPUBatchMatMul.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUBatchMatMul.hpp" -#include "CPUBackend.hpp" -#include "Matrix.hpp" +#include "backend/cpu/CPUBatchMatMul.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "math/Matrix.hpp" namespace MNN { @@ -29,7 +29,7 @@ ErrorCode CPUBatchMatMul::onResize(const std::vector& inputs, const std auto res = backend()->onAcquireBuffer(mMatrixA.get(), Backend::DYNAMIC); res = res && backend()->onAcquireBuffer(mMatrixB.get(), Backend::DYNAMIC); res = res && backend()->onAcquireBuffer(mMatrixC.get(), Backend::DYNAMIC); - + if (!res) { return OUT_OF_MEMORY; } diff --git a/source/backend/cpu/CPUBatchMatMul.hpp b/source/backend/cpu/CPUBatchMatMul.hpp index f72030ea2..2e8922e95 100644 --- a/source/backend/cpu/CPUBatchMatMul.hpp +++ b/source/backend/cpu/CPUBatchMatMul.hpp @@ -9,7 +9,7 @@ #ifndef CPUBatchMatMul_hpp #define CPUBatchMatMul_hpp -#include "CPUMatMul.hpp" +#include "backend/cpu/CPUMatMul.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUBatchToSpaceND.cpp b/source/backend/cpu/CPUBatchToSpaceND.cpp index 234067627..92cebe303 100644 --- a/source/backend/cpu/CPUBatchToSpaceND.cpp +++ b/source/backend/cpu/CPUBatchToSpaceND.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUBatchToSpaceND.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" 
-#include "Macro.h" +#include "backend/cpu/CPUBatchToSpaceND.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUBatchToSpaceND.hpp b/source/backend/cpu/CPUBatchToSpaceND.hpp index 0356b4424..fdfa58a67 100644 --- a/source/backend/cpu/CPUBatchToSpaceND.hpp +++ b/source/backend/cpu/CPUBatchToSpaceND.hpp @@ -10,7 +10,7 @@ #define CPUBatchToSpaceND_hpp #include -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUBatchToSpaceND : public Execution { diff --git a/source/backend/cpu/CPUBinary.cpp b/source/backend/cpu/CPUBinary.cpp index ba2a129c6..4f9e0e5e6 100644 --- a/source/backend/cpu/CPUBinary.cpp +++ b/source/backend/cpu/CPUBinary.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUBinary.hpp" +#include "backend/cpu/CPUBinary.hpp" #include #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "CPUEltwise.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "backend/cpu/CPUEltwise.hpp" namespace MNN { template @@ -32,7 +32,7 @@ ErrorCode CPUBinary::onResize(const std::vector& inputs, const std:: mEltWise.reset(new CPUEltwise(backend(), EltwiseType_SUM, {})); break; case BinaryOpOperation_MAXIMUM: - mEltWise.reset(new CPUEltwise(backend(), EltwiseType_MAX, {})); + mEltWise.reset(new CPUEltwise(backend(), EltwiseType_MAXIMUM, {})); break; case BinaryOpOperation_SUB: mEltWise.reset(new CPUEltwise(backend(), EltwiseType_SUB, {})); @@ -174,6 +174,13 @@ struct BinaryRealDiv : std::binary_function<_Arg1, _Arg2, _ErrorCode> { } }; +template +struct BinaryMod : std::binary_function<_Arg1, _Arg2, _ErrorCode> { + _ErrorCode operator()(const _Arg1& x, const _Arg2& y) const { + return x - x / y; + } +}; + template struct BinaryGreater : std::binary_function<_Arg1, _Arg2, 
_ErrorCode> { _ErrorCode operator()(const _Arg1& x, const _Arg2& y) const { @@ -232,6 +239,27 @@ struct BinaryPow : std::binary_function<_Arg1, _Arg2, _ErrorCode> { } }; +template +struct BinaryAtan2 : std::binary_function<_Arg1, _Arg2, _ErrorCode> { + _ErrorCode operator()(const _Arg1& x, const _Arg2& y) const { + return atan(x / y); + } +}; + +template +struct BinaryLogicalOr : std::binary_function<_Arg1, _Arg2, _ErrorCode> { + _ErrorCode operator()(const _Arg1& x, const _Arg2& y) const { + return (_ErrorCode)((x || y) ? 1 : 0); + } +}; + +template +struct BinaryNotEqual : std::binary_function<_Arg1, _Arg2, _ErrorCode> { + _ErrorCode operator()(const _Arg1& x, const _Arg2& y) const { + return (_ErrorCode)((x != y) ? 1 : 0); + } +}; + template ErrorCode CPUBinary::onExecute(const std::vector& inputs, const std::vector& outputs) { if (nullptr != mEltWise.get()) { @@ -288,6 +316,18 @@ ErrorCode CPUBinary::onExecute(const std::vector& inputs, const std: case BinaryOpOperation_SquaredDifference: _binaryOp>(input, input1, output); break; + case BinaryOpOperation_ATAN2: + _binaryOp>(input, input1, output); + break; + case BinaryOpOperation_LOGICALOR: + _binaryOp>(input, input1, output); + break; + case BinaryOpOperation_NOTEQUAL: + _binaryOp>(input, input1, output); + break; + case BinaryOpOperation_MOD: + _binaryOp>(input, input1, output); + break; default: MNN_ASSERT(false); break; diff --git a/source/backend/cpu/CPUBinary.hpp b/source/backend/cpu/CPUBinary.hpp index d33a67d54..924362167 100644 --- a/source/backend/cpu/CPUBinary.hpp +++ b/source/backend/cpu/CPUBinary.hpp @@ -9,7 +9,7 @@ #ifndef CPUBinary_hpp #define CPUBinary_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUBroadcastTo.cpp b/source/backend/cpu/CPUBroadcastTo.cpp new file mode 100644 index 000000000..58ac7dc09 --- /dev/null +++ b/source/backend/cpu/CPUBroadcastTo.cpp @@ -0,0 +1,77 @@ +// +// CPUBroadcastTo.cpp +// MNN +// +// Created by MNN 
on 2019/12/2. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/cpu/CPUBroadcastTo.hpp" +#include "backend/cpu/CPUBackend.hpp" + +namespace MNN { + +static void bcastImpl(int curDim, int* flag, const std::vector& dimElements, const int bytes, const Tensor* input, + Tensor* output) { + if (curDim < 0) { + return; + } + int bcastNum = output->length(curDim) / input->length(curDim); + if (bcastNum == 1) { + bcastImpl(curDim - 1, flag, dimElements, bytes, input, output); + return; + } + + const auto srcStart = input->host(); + auto dstStart = output->host(); + + // flag == 0, represent the first broadcast + for (int i = 0; i < dimElements[curDim]; ++i) { + int k = 0; + if (*flag) { + k = 1; + } + auto dstCurStart = dstStart + i * output->length(curDim) * output->stride(curDim) * bytes; + + for (; k < bcastNum; ++k) { + auto copyedPtr = dstCurStart + k * output->stride(curDim) * bytes; + if (*flag == 0) { + memcpy(copyedPtr, srcStart + i * input->stride(curDim) * bytes, input->stride(curDim) * bytes); + } else { + memcpy(copyedPtr, dstCurStart, output->stride(curDim) * bytes); + } + } + } + *flag = 1; + + bcastImpl(curDim - 1, flag, dimElements, bytes, input, output); +} + +ErrorCode CPUBroadcastTo::onExecute(const std::vector& inputs, const std::vector& outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + const int dimension = input->dimensions(); + + auto bytes = input->getType().bytes(); + + std::vector dimElements(dimension, 1); + for (int i = 1; i < dimension; ++i) { + dimElements[i] = dimElements[i - 1] * input->length(i - 1); + } + + int flag = 0; + bcastImpl(dimension - 1, &flag, dimElements, bytes, input, output); + return NO_ERROR; +} + +class CPUBroadcastToCreator : public CPUBackend::Creator { +public: + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + const MNN::Op* op, Backend* backend) const override { + return new CPUBroadcastTo(backend); + } +}; + 
+REGISTER_CPU_OP_CREATOR(CPUBroadcastToCreator, OpType_BroadcastTo); + +} // namespace MNN diff --git a/source/backend/cpu/CPUBroadcastTo.hpp b/source/backend/cpu/CPUBroadcastTo.hpp new file mode 100644 index 000000000..cda3ff54b --- /dev/null +++ b/source/backend/cpu/CPUBroadcastTo.hpp @@ -0,0 +1,27 @@ +// +// CPUBroadcastTo.hpp +// MNN +// +// Created by MNN on 2019/12/2. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef CPUBroadcastTo_hpp +#define CPUBroadcastTo_hpp + + +#include "core/Execution.hpp" + +namespace MNN { + +class CPUBroadcastTo : public Execution{ +public: + CPUBroadcastTo(Backend *b):Execution(b){ + } + virtual ~CPUBroadcastTo() = default; + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; +}; + +} // namespace MNN + +#endif /* CPUBroadcastTo_hpp */ diff --git a/source/backend/cpu/CPUCast.cpp b/source/backend/cpu/CPUCast.cpp index 19824df6d..a8b9e7d49 100644 --- a/source/backend/cpu/CPUCast.cpp +++ b/source/backend/cpu/CPUCast.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUCast.hpp" -#include "Macro.h" +#include "backend/cpu/CPUCast.hpp" +#include "core/Macro.h" namespace MNN { @@ -114,6 +114,9 @@ Execution *CPUCastCreator::onCreate(const std::vector &inputs, const s if (dstT == MNN::DataType_DT_FLOAT && halide_type_of() == inputDataType) { return new CastDataType(backend); } + if (dstT == MNN::DataType_DT_INT32 && halide_type_of() == inputDataType) { + return new CastDataType(backend); + } MNN_PRINT("Don't support cast form %d to %d\n", cast->srcT(), cast->dstT()); return nullptr; } diff --git a/source/backend/cpu/CPUCast.hpp b/source/backend/cpu/CPUCast.hpp index 7f873e810..92b7b717d 100644 --- a/source/backend/cpu/CPUCast.hpp +++ b/source/backend/cpu/CPUCast.hpp @@ -9,7 +9,7 @@ #ifndef CPUCast_hpp #define CPUCast_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUCastCreator : public 
CPUBackend::Creator { diff --git a/source/backend/cpu/CPUConcat.cpp b/source/backend/cpu/CPUConcat.cpp index d04cc8c8c..d9b29c859 100644 --- a/source/backend/cpu/CPUConcat.cpp +++ b/source/backend/cpu/CPUConcat.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUConcat.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUConcat.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" using namespace std; namespace MNN { diff --git a/source/backend/cpu/CPUConcat.hpp b/source/backend/cpu/CPUConcat.hpp index 23a66814a..8b7c5876f 100644 --- a/source/backend/cpu/CPUConcat.hpp +++ b/source/backend/cpu/CPUConcat.hpp @@ -9,7 +9,7 @@ #ifndef CPUConcat_hpp #define CPUConcat_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUConcat : public Execution { diff --git a/source/backend/cpu/CPUConst.cpp b/source/backend/cpu/CPUConst.cpp index 8b9cacbbc..025cc0eaf 100644 --- a/source/backend/cpu/CPUConst.cpp +++ b/source/backend/cpu/CPUConst.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUConst.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUConst.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { // get data pointer from blob @@ -58,5 +58,6 @@ class CPUConstCreator : public CPUBackend::Creator { }; REGISTER_CPU_OP_CREATOR(CPUConstCreator, OpType_Const); +REGISTER_CPU_OP_CREATOR(CPUConstCreator, OpType_TrainableParam); } // namespace MNN diff --git a/source/backend/cpu/CPUConst.hpp b/source/backend/cpu/CPUConst.hpp index 247f7f474..28ddd1366 100644 --- a/source/backend/cpu/CPUConst.hpp +++ b/source/backend/cpu/CPUConst.hpp @@ -9,7 +9,7 @@ #ifndef 
CPUConst_hpp #define CPUConst_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUConst : public Execution { diff --git a/source/backend/cpu/CPUConv2DBackPropFilter.cpp b/source/backend/cpu/CPUConv2DBackPropFilter.cpp index e2898102d..bd257d80d 100644 --- a/source/backend/cpu/CPUConv2DBackPropFilter.cpp +++ b/source/backend/cpu/CPUConv2DBackPropFilter.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUConv2DBackPropFilter.hpp" -#include "CPUMatMul.hpp" -#include "Concurrency.h" -#include "Macro.h" -#include "Vec4.hpp" -#include "compute/CommonOptFunction.h" -#include "BufferAllocator.hpp" +#include "backend/cpu/CPUConv2DBackPropFilter.hpp" +#include "backend/cpu/CPUMatMul.hpp" +#include "core/Concurrency.h" +#include "core/Macro.h" +#include "math/Vec4.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/BufferAllocator.hpp" using namespace MNN::Math; namespace MNN { CPUConv2DBackPropFilter::CPUConv2DBackPropFilter(const Convolution2DCommon *convOp, Backend *bn) @@ -90,7 +90,7 @@ ErrorCode CPUConv2DBackPropFilter::onResize(const std::vector &inputs, continue; } auto dst = dstX + kx + ky*kw; - auto src = srcBatch + (sy * ih + sx) * ic; + auto src = srcBatch + (sy * iw + sx) * ic; for (int sz=0; sz namespace MNN { class CPUConv2DBackPropFilter : public CPUConvolution { diff --git a/source/backend/cpu/CPUConvInt8.cpp b/source/backend/cpu/CPUConvInt8.cpp index 12509c6c0..115ce0039 100644 --- a/source/backend/cpu/CPUConvInt8.cpp +++ b/source/backend/cpu/CPUConvInt8.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUConvInt8.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUConvInt8.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include 
"core/Macro.h" +#include "core/TensorUtils.hpp" #include #define UNIT 4 @@ -279,7 +279,7 @@ ErrorCode CPUConvInt8::onResize(const std::vector& inputs, const std::v CPUConvolution::onResize(inputs, outputs); auto input = inputs[0]; auto output = outputs[0]; - + mIm2ColParamter.padX = mPadX; mIm2ColParamter.padY = mPadY; diff --git a/source/backend/cpu/CPUConvInt8.hpp b/source/backend/cpu/CPUConvInt8.hpp index 0ea77c432..b9aeea456 100644 --- a/source/backend/cpu/CPUConvInt8.hpp +++ b/source/backend/cpu/CPUConvInt8.hpp @@ -9,7 +9,7 @@ #ifndef CPUConvInt8_hpp #define CPUConvInt8_hpp -#include "CPUConvolution.hpp" +#include "backend/cpu/CPUConvolution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUConvolution.cpp b/source/backend/cpu/CPUConvolution.cpp index 0d51303eb..b71715a3c 100644 --- a/source/backend/cpu/CPUConvolution.cpp +++ b/source/backend/cpu/CPUConvolution.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUConvolution.hpp" +#include "backend/cpu/CPUConvolution.hpp" #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "compute/ConvolutionFloatFactory.h" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "backend/cpu/compute/ConvolutionFloatFactory.h" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include namespace MNN { diff --git a/source/backend/cpu/CPUConvolution.hpp b/source/backend/cpu/CPUConvolution.hpp index eded21136..5882da08d 100644 --- a/source/backend/cpu/CPUConvolution.hpp +++ b/source/backend/cpu/CPUConvolution.hpp @@ -9,7 +9,7 @@ #ifndef CPUConvolution_hpp #define CPUConvolution_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUConvolution3D.cpp b/source/backend/cpu/CPUConvolution3D.cpp index 0308c71dc..b553b38f0 100644 --- a/source/backend/cpu/CPUConvolution3D.cpp +++ 
b/source/backend/cpu/CPUConvolution3D.cpp @@ -7,18 +7,18 @@ // #include -#include "CPUConvolution3D.hpp" -#include "ConvolutionWinograd.hpp" -#include "ConvolutionWinograd3D.hpp" -#include "Convolution1x1Strassen.hpp" -#include "ConvolutionTiledExecutor.hpp" -#include "Convolution3D3x3.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "CPUBackend.hpp" -#include "compute/ConvolutionFloatFactory.h" -#include "Vec4.hpp" +#include "backend/cpu/CPUConvolution3D.hpp" +#include "backend/cpu/compute/ConvolutionWinograd.hpp" +#include "backend/cpu/compute/ConvolutionWinograd3D.hpp" +#include "backend/cpu/compute/Convolution1x1Strassen.hpp" +#include "backend/cpu/compute/ConvolutionTiledExecutor.hpp" +#include "backend/cpu/compute/Convolution3D3x3.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/ConvolutionFloatFactory.h" +#include "math/Vec4.hpp" #define MIN_CON_PLANESIZE 256 @@ -27,11 +27,7 @@ namespace MNN { // when C4 == true, NC4DHW4 --> DNC4HW4 // when C4 == false, NCDHW --> DNCHW, used by kernel transform. 
void CPUConvolution3D::convertToDepthMajor(float* dst, const float* src, uint32_t planeNumber, - uint32_t depth, uint32_t outsideNumber, bool C4) { - if (C4) { - outsideNumber = UP_DIV(outsideNumber, 4); - planeNumber *= 4; - } + uint32_t depth, uint32_t outsideNumber) { if (depth == 1 && planeNumber == 1) { memcpy(dst, src, outsideNumber * sizeof(float)); return; @@ -47,8 +43,6 @@ namespace MNN { // outsideNumber = N*C, planeNumber = H*W void CPUConvolution3D::convertDNC4HW4toNC4DHW4(float* dst, const float* src, uint32_t planeNumber, uint32_t depth, uint32_t outsideNumber, bool add) { - outsideNumber = UP_DIV(outsideNumber, 4); - planeNumber *= 4; const int threadNumber = ((CPUBackend*)backend())->threadNumber(); for (uint32_t o = 0; o < outsideNumber; ++o) { auto dstData = dst + o * depth * planeNumber; @@ -74,7 +68,7 @@ namespace MNN { } } } - + static Convolution2DCommon* createConvolution2DCommon(flatbuffers::FlatBufferBuilder& fbb, int kernelY, int kernelX, PadMode padMode, int padY, int padX, int inputChannel, int outputChannel) { auto builder = Convolution2DCommonBuilder(fbb); @@ -88,7 +82,7 @@ namespace MNN { auto offset = builder.Finish(); return reinterpret_cast(fbb.GetCurrentBufferPointer() + fbb.GetSize() - offset.o); } - + CPUConvolution3D::CPUConvolution3D(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *b) : MNN::Execution(b) { auto convOp = op->main_as_Convolution3D(); @@ -113,7 +107,7 @@ namespace MNN { mInputCount = mCommon->inputCount(); mOutputCount = mCommon->outputCount(); mPostFunction = getPostFunction(mCommon); - + int kernelDepth = mKernels[0]; mWeights.reset(Tensor::createDevice({kernelDepth, (int)convOp->weight()->size() / kernelDepth})); mBias.reset(Tensor::createDevice({ALIGN_UP4(mOutputCount)})); @@ -122,20 +116,20 @@ namespace MNN { if (!valid) { return; } - convertToDepthMajor(mWeights->host(), convOp->weight()->data(), mKernels[1] * mKernels[2], kernelDepth, mInputCount * mOutputCount, false); + 
convertToDepthMajor(mWeights->host(), convOp->weight()->data(), mKernels[1] * mKernels[2], kernelDepth, mInputCount * mOutputCount); memset(mBias->host(), 0, mBias->size()); memcpy(mBias->host(), convOp->bias()->data(), convOp->bias()->size() * sizeof(float)); } - + CPUConvolution3D::~CPUConvolution3D() { backend()->onReleaseBuffer(mWeights.get(), Backend::STATIC); backend()->onReleaseBuffer(mBias.get(), Backend::STATIC); } - + ErrorCode CPUConvolution3D::onResize(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; - + if (mPadMode == PadMode_SAME) { mPads.clear(); for (int i = 0; i < 3; ++i) { @@ -143,17 +137,17 @@ namespace MNN { mPads.push_back((inputNeeded - input->length(i + 2)) / 2); } } - + const int batch = input->length(0), inputChannel = input->length(1), outputChannel = output->length(1); const int inputDepth = input->length(2), inputHeight = input->length(3), inputWidth = input->length(4); const int outputDepth = output->length(2), outputHeight = output->length(3), outputWidth = output->length(4); const int depthPad = mPads[0], kernelDepth = mKernels[0], kernelHeight = mKernels[1], kernelWidth = mKernels[2]; auto cpuBackend = (CPUBackend*)backend(); - + mBreakDown = true; mSubInputTensors.clear(); mSubExecution.clear(); - + do { bool useWinograd = ConvolutionWinograd3D::canUseWinograd(mCommon) || cpuBackend->memoryMode() != BackendConfig::Memory_Low; if (!useWinograd) { @@ -174,10 +168,10 @@ namespace MNN { mBreakDown = false; return NO_ERROR; } while(0); - - const bool crossDepth = (kernelDepth != 1 || depthPad != 0 || mPads[1] != 0); - - if (!crossDepth) { + + mCrossDepth = (kernelDepth != 1 || kernelHeight != 1 || depthPad != 0 || mPads[1] != 0); + + if (!mCrossDepth) { mSubInputTensors.emplace_back(Tensor::create({batch, inputChannel, inputDepth * inputHeight, inputWidth}, (void*)(input->host()), Tensor::CAFFE_C4)); mSubOutputTensor.reset(Tensor::create({batch, outputChannel, outputDepth * 
outputHeight, outputWidth}, @@ -193,11 +187,11 @@ namespace MNN { } const float* data = mInputStorage->host(); for (int d = 0; d < kernelDepth; ++d) { - mSubInputTensors.emplace_back(Tensor::create({inputDepth * batch, inputChannel, inputHeight, inputWidth}, (void*)data, Tensor::CAFFE_C4)); + mSubInputTensors.emplace_back(Tensor::create({outputDepth * batch, inputChannel, inputHeight, inputWidth}, (void*)data, Tensor::CAFFE_C4)); data += mInputStorage->stride(0); } } - + { std::shared_ptr zerosLikeBias(Tensor::createDevice({mOutputCount})); bool valid = backend()->onAcquireBuffer(zerosLikeBias.get(), Backend::DYNAMIC); @@ -207,10 +201,10 @@ namespace MNN { memset(zerosLikeBias->host(), 0, mOutputCount * sizeof(float)); for (int d = 0; d < kernelDepth; ++d) { flatbuffers::FlatBufferBuilder fbb; - auto common = createConvolution2DCommon(fbb, mKernels[1], mKernels[2], mPadMode, mPads[1], mPads[2], inputChannel, outputChannel); + auto common = createConvolution2DCommon(fbb, kernelHeight, kernelWidth, mPadMode, mPads[1], mPads[2], inputChannel, outputChannel); auto originWeightSize = mWeights->stride(0), biasSize = mOutputCount; auto originWeight = mWeights->host() + d * originWeightSize, bias = zerosLikeBias->host(); - + Execution* subExec = nullptr; if (common->kernelX() == 1 && common->kernelY() == 1) { subExec = new Convolution1x1Strassen(common, backend(), originWeight, originWeightSize, bias, biasSize); @@ -218,56 +212,57 @@ namespace MNN { subExec = new ConvolutionTiledExecutor(common, backend(), originWeight, originWeightSize, bias, biasSize); } mSubExecution.emplace_back(subExec); - } - backend()->onReleaseBuffer(zerosLikeBias.get(), Backend::DYNAMIC); - for (int d = 0; d < kernelDepth; ++d) { mSubExecution[d]->onResize({mSubInputTensors[d].get()}, {mSubOutputTensor.get()}); } + backend()->onReleaseBuffer(zerosLikeBias.get(), Backend::DYNAMIC); } - - if (crossDepth) { + + if (mCrossDepth) { backend()->onReleaseBuffer(mInputStorage.get(), Backend::DYNAMIC); 
backend()->onReleaseBuffer(mSubOutputTensor.get(), Backend::DYNAMIC); } return NO_ERROR; } - + ErrorCode CPUConvolution3D::onExecute(const std::vector &inputs, const std::vector &outputs) { if (!mBreakDown) { auto code = mSubExecution[0]->onExecute(inputs, outputs); return code; } - + auto input = inputs[0]; auto output = outputs[0]; const int batch = input->length(0), inputChannel = input->length(1), outputChannel = output->length(1); const int inputDepth = input->length(2), inputHeight = input->length(3), inputWidth = input->length(4); const int outputDepth = output->length(2), outputHeight = output->length(3), outputWidth = output->length(4); const int depthPad = mPads[0], kernelDepth = mKernels[0]; - - if (kernelDepth != 1 || depthPad != 0) { + + if (mCrossDepth) { float* data = mInputStorage->host(); const int stride = mInputStorage->stride(0); memset(data, 0, depthPad * stride * sizeof(float)); data += depthPad * stride; - convertToDepthMajor(data, input->host(), inputHeight * inputWidth, inputDepth, batch * inputChannel, true); + convertToDepthMajor(data, input->host(), 4 * inputHeight * inputWidth, inputDepth, batch * UP_DIV(inputChannel, 4)); data += inputDepth * stride; memset(data, 0, depthPad * stride * sizeof(float)); } - + for (unsigned int d = 0; d < kernelDepth; ++d) { mSubExecution[d]->onExecute({mSubInputTensors[d].get()}, {mSubOutputTensor.get()}); - if (kernelDepth != 1 || depthPad != 0) { + if (mCrossDepth) { convertDNC4HW4toNC4DHW4(output->host(), mSubOutputTensor->host(), - outputHeight * outputWidth, outputDepth, batch * outputChannel, d != 0); + 4 * outputHeight * outputWidth, outputDepth, batch * UP_DIV(outputChannel, 4), d != 0); } } - - mPostFunction(output->host(), mBias->host(), outputDepth * outputHeight * outputWidth, UP_DIV(outputChannel, 4)); - + + for (int b = 0; b < batch; ++b) { + mPostFunction(output->host() + b * output->stride(0), mBias->host(), + outputDepth * outputHeight * outputWidth, UP_DIV(outputChannel, 4)); + } + 
return NO_ERROR; } - + CPUConvolution3D::POSTFUNCTION CPUConvolution3D::getPostFunction(const Convolution3DCommon* common) { if (common->relu()) { return MNNAddBiasRelu; @@ -277,7 +272,7 @@ namespace MNN { } return MNNAddBias; } - + class Convolution3DCreator : public CPUBackend::Creator { public: virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, @@ -285,6 +280,6 @@ namespace MNN { return new CPUConvolution3D(inputs, outputs, op, backend); } }; - + REGISTER_CPU_OP_CREATOR(Convolution3DCreator, OpType_Convolution3D); } // namespace MNN diff --git a/source/backend/cpu/CPUConvolution3D.hpp b/source/backend/cpu/CPUConvolution3D.hpp index 40bd47e8b..786cd5682 100644 --- a/source/backend/cpu/CPUConvolution3D.hpp +++ b/source/backend/cpu/CPUConvolution3D.hpp @@ -10,7 +10,7 @@ #define CPUConvolution3D_hpp #include -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { @@ -25,7 +25,7 @@ namespace MNN { static POSTFUNCTION getPostFunction(const Convolution3DCommon* common); private: - void convertToDepthMajor(float* dst, const float* src, uint32_t planeNumber, uint32_t depth, uint32_t outsideNumber, bool C4); + void convertToDepthMajor(float* dst, const float* src, uint32_t planeNumber, uint32_t depth, uint32_t outsideNumber); void convertDNC4HW4toNC4DHW4(float* dst, const float* src, uint32_t planeNumber, uint32_t depth, uint32_t outsideNumber, bool add); const Convolution3DCommon* mCommon; @@ -44,6 +44,7 @@ namespace MNN { std::vector> mSubInputTensors; std::vector> mSubExecution; bool mBreakDown; + bool mCrossDepth; }; } // namespace MNN diff --git a/source/backend/cpu/CPUConvolutionDepthwise.cpp b/source/backend/cpu/CPUConvolutionDepthwise.cpp index 1a0205f8d..35a637f82 100644 --- a/source/backend/cpu/CPUConvolutionDepthwise.cpp +++ b/source/backend/cpu/CPUConvolutionDepthwise.cpp @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUConvolutionDepthwise.hpp" 
+#include "backend/cpu/CPUConvolutionDepthwise.hpp" #include -#include "Concurrency.h" -#include "Int8FunctionsOpt.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "compute/CommonOptFunction.h" -#include "compute/ConvOpt.h" -#include "compute/ConvolutionDepthwise3x3.hpp" +#include "core/Concurrency.h" +#include "backend/cpu/compute/Int8FunctionsOpt.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "backend/cpu/compute/ConvolutionDepthwise3x3.hpp" static const int gIntUnit = 4; extern "C" { void MNNConvRunForLineDepthWiseInt8(float* dst, const int8_t* src, const int8_t* weight, size_t width, @@ -153,7 +153,9 @@ ErrorCode CPUConvolutionDepthwise::MultiInputFloatExecution::onExecute(const std auto kh = mWeight->length(1); auto kw = mWeight->length(2); ::memset(mBias->host(), 0, mBias->size()); - ::memcpy(mBias->host(), inputs[2]->host(), inputs[2]->size()); + if (inputs.size() > 2) { + ::memcpy(mBias->host(), inputs[2]->host(), inputs[2]->size()); + } // Reorder weight from whc -> pwhc4 ::memset(mWeight->host(), 0, mWeight->size()); auto outputCount = inputs[0]->channel(); @@ -353,7 +355,7 @@ ErrorCode CPUConvolutionDepthwise::Int8Execution::onResize(const std::vector src_height && b > t; b--) { // do nothing } - + auto postFunction = getPostFunction(); for (int i=0; i<4; ++i) { mQuanScale[i] = mQuan->quantScale(); } - + auto runBasic = [=](float* dst_z, const int8_t* src_z, const int8_t* weight_dz, const float* alpha_z, int L, int T, int R, int B) { for (int dy = T; dy < B; ++dy) { @@ -400,16 +402,16 @@ ErrorCode CPUConvolutionDepthwise::Int8Execution::onResize(const std::vectorbatch(); ++batchIndex) { const float* srcOrigin = inputTensor->host() + batchIndex * src_z_step * dst_depth_quad; float* dstOrigin = outputTensor->host() + batchIndex * dst_z_step * dst_depth_quad; - + MNN_CONCURRENCY_BEGIN(dz, dst_depth_quad) { float* dst_z_float = 
dstOrigin + dst_z_step * dz; const float* src_z_float = srcOrigin + src_z_step * dz; - + auto dst_z = dst_z_float; auto src_z = (int8_t*)mInputTempBuffer.buffer().host + dz * mInputTempBuffer.buffer().dim[0].stride; - + MNNFloat2Int8(src_z_float, src_z, src_z_step / 4, mQuanScale, aMin, aMax); - + const float* bias_z = mBias.get() + gIntUnit * dz; const float* alpha_z = mAlpha.get() + gIntUnit * dz; const int8_t* weight_dz = mWeight.get() + dz * weight_z_step; @@ -427,7 +429,7 @@ ErrorCode CPUConvolutionDepthwise::Int8Execution::onResize(const std::vectormain_as_Convolution2D(); auto conv = op->main_as_Convolution2D()->common(); - if (3 == inputs.size()) { + if (1 < inputs.size()) { return new CPUConvolutionDepthwise::MultiInputFloatExecution(conv, backend); } if (conv->dilateX() == 1 && conv->dilateY() == 1 && conv->strideX() == 1 && conv->strideY() == 1 && diff --git a/source/backend/cpu/CPUConvolutionDepthwise.hpp b/source/backend/cpu/CPUConvolutionDepthwise.hpp index 69de9aa0f..88a2f9e1d 100644 --- a/source/backend/cpu/CPUConvolutionDepthwise.hpp +++ b/source/backend/cpu/CPUConvolutionDepthwise.hpp @@ -9,9 +9,9 @@ #ifndef CPUConvolutionDepthwise_hpp #define CPUConvolutionDepthwise_hpp -#include "AutoStorage.h" -#include "CPUConvolution.hpp" -#include "compute/ConvolutionIntFactory.hpp" +#include "core/AutoStorage.h" +#include "backend/cpu/CPUConvolution.hpp" +#include "backend/cpu/compute/ConvolutionIntFactory.hpp" namespace MNN { class CPUConvolutionDepthwise : public Execution { diff --git a/source/backend/cpu/CPUCosineSimilarity.cpp b/source/backend/cpu/CPUCosineSimilarity.cpp index 11757ffb2..2f88f7646 100644 --- a/source/backend/cpu/CPUCosineSimilarity.cpp +++ b/source/backend/cpu/CPUCosineSimilarity.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUCosineSimilarity.hpp" +#include "backend/cpu/CPUCosineSimilarity.hpp" #include -#include "CPUBackend.hpp" -#include "Macro.h" -#include "Vec4.hpp" +#include 
"backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" +#include "math/Vec4.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUCosineSimilarity.hpp b/source/backend/cpu/CPUCosineSimilarity.hpp index 0bda6d6dc..9bbd29fe0 100644 --- a/source/backend/cpu/CPUCosineSimilarity.hpp +++ b/source/backend/cpu/CPUCosineSimilarity.hpp @@ -9,7 +9,7 @@ #ifndef CPUCosineSimilarity_hpp #define CPUCosineSimilarity_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUCrop.cpp b/source/backend/cpu/CPUCrop.cpp index b48ca25dd..e6de2a218 100644 --- a/source/backend/cpu/CPUCrop.cpp +++ b/source/backend/cpu/CPUCrop.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUCrop.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUCrop.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUCrop.hpp b/source/backend/cpu/CPUCrop.hpp index 12c9baeb6..5d8edd8d1 100644 --- a/source/backend/cpu/CPUCrop.hpp +++ b/source/backend/cpu/CPUCrop.hpp @@ -9,7 +9,7 @@ #ifndef CPUCrop_hpp #define CPUCrop_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUCrop : public Execution { diff --git a/source/backend/cpu/CPUCropAndResize.cpp b/source/backend/cpu/CPUCropAndResize.cpp index 77d7b4f47..ad6100b5c 100644 --- a/source/backend/cpu/CPUCropAndResize.cpp +++ b/source/backend/cpu/CPUCropAndResize.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUCropAndResize.hpp" +#include "backend/cpu/CPUCropAndResize.hpp" #include -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUCropAndResize.hpp b/source/backend/cpu/CPUCropAndResize.hpp index 1f421a2db..1e5caadbf 100644 --- a/source/backend/cpu/CPUCropAndResize.hpp +++ b/source/backend/cpu/CPUCropAndResize.hpp @@ -9,7 +9,7 @@ #ifndef CPUCropAndResize_hpp 
#define CPUCropAndResize_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUDeconvolution.cpp b/source/backend/cpu/CPUDeconvolution.cpp index ffb97007c..6a8a319bc 100644 --- a/source/backend/cpu/CPUDeconvolution.cpp +++ b/source/backend/cpu/CPUDeconvolution.cpp @@ -6,19 +6,19 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUDeconvolution.hpp" -#include "BufferAllocator.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Macro.h" -#include "Matrix.hpp" -#include "TensorUtils.hpp" -#include "Vec4.hpp" -#include "compute/CommonOptFunction.h" -#include "compute/ConvOpt.h" -#include "compute/DeconvolutionWithStride.hpp" +#include "backend/cpu/CPUDeconvolution.hpp" +#include "core/BufferAllocator.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "core/Macro.h" +#include "math/Matrix.hpp" +#include "core/TensorUtils.hpp" +#include "math/Vec4.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "backend/cpu/compute/DeconvolutionWithStride.hpp" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include using namespace MNN::Math; namespace MNN { @@ -125,7 +125,9 @@ ErrorCode CPUDeconvolutionMultiInput::onExecute(const std::vector& inpu _transformWeight(inputs[1]->host(), mWeight->host(), outputCount, srcCount, fh, fw, mCacheWeight->host()); ::memset(mBias->host(), 0, mBias->size()); - ::memcpy(mBias->host(), inputs[2]->host(), inputs[2]->size()); + if (inputs.size() > 2) { + ::memcpy(mBias->host(), inputs[2]->host(), inputs[2]->size()); + } return mOrigin->onExecute(mTempInputs, outputs); } ErrorCode CPUDeconvolutionMultiInput::onResize(const std::vector& inputs, @@ -278,7 +280,7 @@ class CPUDeconvolutionCreator : public CPUBackend::Creator { public: virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, const MNN::Op* op, 
Backend* backend) const { - if (inputs.size() == 3) { + if (inputs.size() > 1) { return new CPUDeconvolutionMultiInput(inputs[0], op, backend); } auto convOp = op->main_as_Convolution2D(); diff --git a/source/backend/cpu/CPUDeconvolution.hpp b/source/backend/cpu/CPUDeconvolution.hpp index 0d077920a..a603e39e2 100644 --- a/source/backend/cpu/CPUDeconvolution.hpp +++ b/source/backend/cpu/CPUDeconvolution.hpp @@ -9,8 +9,8 @@ #ifndef CPUDeconvolution_hpp #define CPUDeconvolution_hpp -#include "CPUConvolution.hpp" -#include "compute/StrassenMatmulComputor.hpp" +#include "backend/cpu/CPUConvolution.hpp" +#include "backend/cpu/compute/StrassenMatmulComputor.hpp" namespace MNN { class CPUDeconvolutionBasic : public CPUConvolution { diff --git a/source/backend/cpu/CPUDeconvolutionDepthwise.cpp b/source/backend/cpu/CPUDeconvolutionDepthwise.cpp index 33615ec67..0698bcd38 100644 --- a/source/backend/cpu/CPUDeconvolutionDepthwise.cpp +++ b/source/backend/cpu/CPUDeconvolutionDepthwise.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUDeconvolutionDepthwise.hpp" +#include "backend/cpu/CPUDeconvolutionDepthwise.hpp" #include -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" #include "MNN_generated.h" -#include "Macro.h" -#include "compute/ConvOpt.h" +#include "core/Macro.h" +#include "backend/cpu/compute/ConvOpt.h" namespace MNN { CPUDeconvolutionDepthwise::CPUDeconvolutionDepthwise(const Tensor* input, const Op* convOp, Backend* b) @@ -70,7 +70,9 @@ ErrorCode CPUDeconvolutionDepthwiseMultiInput::onResize(const std::vector& inputs, const std::vector& outputs) { ::memset(mBias->host(), 0, mBias->size()); - ::memcpy(mBias->host(), inputs[2]->host(), inputs[2]->size()); + if (inputs.size() > 2) { + ::memcpy(mBias->host(), inputs[2]->host(), inputs[2]->size()); + } ::memset(mWeight->host(), 0, mWeight->size()); auto weight = mWeight->host(); auto outputCount = inputs[0]->channel(); @@ -203,7 +205,7 @@ class 
CPUDeconvolutionDepthwiseCreator : public CPUBackend::Creator { public: virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, const MNN::Op* op, Backend* backend) const { - if (3 == inputs.size()) { + if (1 < inputs.size()) { return new CPUDeconvolutionDepthwiseMultiInput(inputs[0], op, backend); } return new CPUDeconvolutionDepthwise(inputs[0], op, backend); diff --git a/source/backend/cpu/CPUDeconvolutionDepthwise.hpp b/source/backend/cpu/CPUDeconvolutionDepthwise.hpp index 3d8d77a97..1ddf23d5c 100644 --- a/source/backend/cpu/CPUDeconvolutionDepthwise.hpp +++ b/source/backend/cpu/CPUDeconvolutionDepthwise.hpp @@ -9,7 +9,7 @@ #ifndef CPUDeconvolutionDepthwise_hpp #define CPUDeconvolutionDepthwise_hpp -#include "CPUDeconvolution.hpp" +#include "backend/cpu/CPUDeconvolution.hpp" namespace MNN { class CPUDeconvolutionDepthwiseBasic : public CPUDeconvolutionBasic { diff --git a/source/backend/cpu/CPUDepthToSpace.cpp b/source/backend/cpu/CPUDepthToSpace.cpp index 73b952533..9674de340 100644 --- a/source/backend/cpu/CPUDepthToSpace.cpp +++ b/source/backend/cpu/CPUDepthToSpace.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUDepthToSpace.hpp" -#include "Backend.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUDepthToSpace.hpp" +#include "core/Backend.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { @@ -56,7 +56,7 @@ ErrorCode CPUDepthToSpace::onExecute(const std::vector &inputs, cons const int offsetC = (offsetH * blockSize + offsetW) * outputChannels; for (int c = 0; c < outputChannels; c++) { const int ic = c + offsetC; - const int offsetO = b * outputHeight * outputWidth * outputChannels + const int offsetO = b * outputHeight * outputWidth * outputChannels + h * outputWidth * outputChannels + w * outputChannels + c; const int offsetI = b * inputHeight * inputWidth * inputChannels + ih * inputWidth * inputChannels + iw * inputChannels + 
ic; @@ -65,13 +65,13 @@ ErrorCode CPUDepthToSpace::onExecute(const std::vector &inputs, cons } } } - + return NO_ERROR; } class DepthToSpaceCreator : public CPUBackend::Creator { public: - virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, const MNN::Op* op, Backend* backend) const override { auto dataType = inputs[0]->getType(); if (dataType.bits == 32) { diff --git a/source/backend/cpu/CPUDepthToSpace.hpp b/source/backend/cpu/CPUDepthToSpace.hpp index 68e2b8228..20d68b08c 100644 --- a/source/backend/cpu/CPUDepthToSpace.hpp +++ b/source/backend/cpu/CPUDepthToSpace.hpp @@ -9,7 +9,7 @@ #ifndef CPUDepthToSpace_hpp #define CPUDepthToSpace_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUDepthwiseConvInt8.cpp b/source/backend/cpu/CPUDepthwiseConvInt8.cpp index 4e7d3e7df..4be7cb1e5 100644 --- a/source/backend/cpu/CPUDepthwiseConvInt8.cpp +++ b/source/backend/cpu/CPUDepthwiseConvInt8.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUDepthwiseConvInt8.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "Macro.h" +#include "backend/cpu/CPUDepthwiseConvInt8.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "core/Macro.h" #include #define UNIT 4 diff --git a/source/backend/cpu/CPUDepthwiseConvInt8.hpp b/source/backend/cpu/CPUDepthwiseConvInt8.hpp index 565d305b2..d401bb10a 100644 --- a/source/backend/cpu/CPUDepthwiseConvInt8.hpp +++ b/source/backend/cpu/CPUDepthwiseConvInt8.hpp @@ -9,7 +9,7 @@ #ifndef CPUDepthwiseConvInt8_hpp #define CPUDepthwiseConvInt8_hpp -#include "CPUConvolution.hpp" +#include "backend/cpu/CPUConvolution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUDequantize.cpp 
b/source/backend/cpu/CPUDequantize.cpp index a0822ff19..fd7c3e925 100644 --- a/source/backend/cpu/CPUDequantize.cpp +++ b/source/backend/cpu/CPUDequantize.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUDequantize.hpp" +#include "backend/cpu/CPUDequantize.hpp" #include -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" #define UNIT 4 #define TILE 2 diff --git a/source/backend/cpu/CPUDequantize.hpp b/source/backend/cpu/CPUDequantize.hpp index 4d765f715..3484efbe6 100644 --- a/source/backend/cpu/CPUDequantize.hpp +++ b/source/backend/cpu/CPUDequantize.hpp @@ -9,7 +9,7 @@ #ifndef CPUDequantize_hpp #define CPUDequantize_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "TFQuantizeOp_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUDetectionOutput.cpp b/source/backend/cpu/CPUDetectionOutput.cpp index cf3a2813f..ebfb5dc85 100644 --- a/source/backend/cpu/CPUDetectionOutput.cpp +++ b/source/backend/cpu/CPUDetectionOutput.cpp @@ -11,13 +11,13 @@ #pragma optimize("", off) #endif -#include "CPUDetectionOutput.hpp" +#include "backend/cpu/CPUDetectionOutput.hpp" #include #include -#include "AutoTime.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "TensorUtils.hpp" +#include +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUDetectionOutput.hpp b/source/backend/cpu/CPUDetectionOutput.hpp index 2fd806584..5c1a15c19 100644 --- a/source/backend/cpu/CPUDetectionOutput.hpp +++ b/source/backend/cpu/CPUDetectionOutput.hpp @@ -9,7 +9,7 @@ #ifndef CPUDetectionOutput_hpp #define CPUDetectionOutput_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUDetectionPostProcess.cpp b/source/backend/cpu/CPUDetectionPostProcess.cpp index 09b8830c7..fcf6c6f21 
100644 --- a/source/backend/cpu/CPUDetectionPostProcess.cpp +++ b/source/backend/cpu/CPUDetectionPostProcess.cpp @@ -8,9 +8,9 @@ #include #include -#include "CPUBackend.hpp" -#include "CPUDetectionPostProcess.hpp" -#include "CPUNonMaxSuppressionV2.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUDetectionPostProcess.hpp" +#include "backend/cpu/CPUNonMaxSuppressionV2.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUDetectionPostProcess.hpp b/source/backend/cpu/CPUDetectionPostProcess.hpp index c51b005c8..a059c9ec5 100644 --- a/source/backend/cpu/CPUDetectionPostProcess.hpp +++ b/source/backend/cpu/CPUDetectionPostProcess.hpp @@ -8,7 +8,7 @@ #ifndef CPUDetectionPostProcess_hpp #define CPUDetectionPostProcess_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUDilation2D.cpp b/source/backend/cpu/CPUDilation2D.cpp new file mode 100644 index 000000000..c3d4bdff7 --- /dev/null +++ b/source/backend/cpu/CPUDilation2D.cpp @@ -0,0 +1,124 @@ +// +// CPUDilation2D.cpp +// MNN +// +// Created by MNN on 2018/08/01. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUDilation2D.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "core/Macro.h" + +#include "math/Vec4.hpp" +using MNN::Math::Vec4; + +namespace MNN { + +CPUDilation2D::CPUDilation2D(Backend *b, const MNN::Op *op) : Execution(b) { + auto convOp = op->main_as_Convolution2D(); + auto common = convOp->common(); + const int kh = common->kernelY(), kw = common->kernelX(); + const int depth = common->outputCount(); + mWeight.reset(Tensor::createDevice({UP_DIV(depth, 4), kh * kw * 4})); + bool succ = b->onAcquireBuffer(mWeight.get(), Backend::STATIC); + if (!succ) { + MNN_ERROR("Failed to acquire memory for filters\n"); + return; + } + MNNPackC4(mWeight->host(), convOp->weight()->data(), kh * kw, depth); + mPadMode = common->padMode(); + mKernelSize[0] = kh; + mKernelSize[1] = kw; + mStrides[0] = common->strideY(); + mStrides[1] = common->strideX(); + mDilations[0] = common->dilateY(); + mDilations[1] = common->dilateX(); +} + +CPUDilation2D::~CPUDilation2D() { + backend()->onReleaseBuffer(mWeight.get(), Backend::STATIC); +} + +ErrorCode CPUDilation2D::onResize(const std::vector &inputs, const std::vector &outputs) { + mPads[0] = mPads[1] = 0; + if (mPadMode == PadMode_SAME) { + int inputHeightNeed = (outputs[0]->height() - 1) * mStrides[0] + (mKernelSize[0] - 1) * mDilations[0] + 1; + int inputWidthNeed = (outputs[0]->width() - 1) * mStrides[1] + (mKernelSize[1] - 1) * mDilations[1] + 1; + mPads[0] = (inputHeightNeed - inputs[0]->height()) / 2; + mPads[1] = (inputWidthNeed - inputs[0]->height()) / 2; + } + return NO_ERROR; +} + +ErrorCode CPUDilation2D::onExecute(const std::vector &inputs, const std::vector &outputs) { + auto input = inputs[0], output = outputs[0]; + + const int threadNumber = reinterpret_cast(backend())->threadNumber(); + const int inputHeight = input->height(), inputWidth = 
input->width(); + const int outputHeight = output->height(), outputWidth = output->width(); + const int outputDepth4 = UP_DIV(output->channel(), 4), depthStep = UP_DIV(outputDepth4, threadNumber); + const int kernelY = mKernelSize[0], kernelX = mKernelSize[1]; + const int strideY = mStrides[0], strideX = mStrides[1]; + const int dilationY = mDilations[0], dilationX = mDilations[1]; + const int padY = mPads[0], padX = mPads[1]; + + auto computeFunc = [=](int tId, const float* inputOrigin, const float* weight, float* outputOrigin) { + const int depthFrom = tId * depthStep, depthEnd = ALIMIN(depthFrom + depthStep, outputDepth4); + if (depthFrom >= depthEnd) { + return; + } + for (int d = depthFrom; d < depthEnd; ++d) { + auto inputData = inputOrigin + d * inputHeight * inputWidth * 4; + auto weightData = weight + d * kernelY * kernelX * 4; + auto outputData = outputOrigin + d * outputHeight * outputWidth * 4; + for (int h = 0; h < outputHeight; ++h) { + const int hOffset = h * strideY - padY; + for (int w = 0; w < outputWidth; ++w) { + const int wOffset = w * strideX - padX; + Vec4 result = 0; + for (int kh = 0; kh < kernelY; ++kh) { + const int hOffset_ = hOffset + kh * dilationY; + if (hOffset_ < 0 || hOffset_ >= inputHeight) { + continue; + } + for (int kw = 0; kw < kernelX; ++kw) { + const int wOffset_ = wOffset + kw * dilationX; + if (wOffset_ < 0 || wOffset_ >= inputWidth) { + continue; + } + auto tmp = Vec4::load(inputData + (hOffset_ * inputWidth + wOffset_) * 4) + Vec4::load(weightData + (kh * kernelX + kw) * 4); + result = Vec4::max(result, tmp); + } + } + Vec4::save(outputData + (h * outputWidth + w) * 4, result); + } + } + } + }; + + for (int batch = 0; batch < output->batch(); ++batch) { + const float* inputOrigin = input->host() + batch * input->stride(0); + const float* weight = mWeight->host(); + float* outputOrigin = output->host() + batch * output->stride(0); + MNN_CONCURRENCY_BEGIN(tId, threadNumber) { + computeFunc((int)tId, inputOrigin, weight, 
outputOrigin); + } + MNN_CONCURRENCY_END() + } + return NO_ERROR; +} + +class CPUDilation2DCreator : public CPUBackend::Creator { +public: + virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, + const MNN::Op *op, Backend *backend) const override { + return new CPUDilation2D(backend, op); + } +}; + +REGISTER_CPU_OP_CREATOR(CPUDilation2DCreator, OpType_Dilation2D); + +} diff --git a/source/backend/cpu/CPUDilation2D.hpp b/source/backend/cpu/CPUDilation2D.hpp new file mode 100644 index 000000000..df69634a0 --- /dev/null +++ b/source/backend/cpu/CPUDilation2D.hpp @@ -0,0 +1,35 @@ +// +// CPUDilation2D.hpp +// MNN +// +// Created by MNN on 2019/11/29. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef CPUDilation2D_hpp +#define CPUDilation2D_hpp + +#include +#include "core/Execution.hpp" +#include "MNN_generated.h" + +namespace MNN { +class CPUDilation2D : public Execution { +public: + CPUDilation2D(Backend *b, const MNN::Op *op); + virtual ~CPUDilation2D(); + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; + +private: + std::shared_ptr mWeight; + std::array mKernelSize; + std::array mStrides; + std::array mDilations; + std::array mPads; + PadMode mPadMode; +}; + +} // namespace MNN + +#endif /* CPUDilation2D_hpp */ diff --git a/source/backend/cpu/CPUEltwise.cpp b/source/backend/cpu/CPUEltwise.cpp index 5293eb4db..c07a3adeb 100644 --- a/source/backend/cpu/CPUEltwise.cpp +++ b/source/backend/cpu/CPUEltwise.cpp @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUEltwise.hpp" +#include "backend/cpu/CPUEltwise.hpp" #include #include -#include "Concurrency.h" +#include "core/Concurrency.h" #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "ConvOpt.h" -#include "Macro.h" +#include "backend/cpu/CPUBackend.hpp" +#include 
"backend/cpu/compute/CommonOptFunction.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif diff --git a/source/backend/cpu/CPUEltwise.hpp b/source/backend/cpu/CPUEltwise.hpp index 3d8e12644..3e79681d5 100644 --- a/source/backend/cpu/CPUEltwise.hpp +++ b/source/backend/cpu/CPUEltwise.hpp @@ -9,7 +9,7 @@ #ifndef CPUEltwise_hpp #define CPUEltwise_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUEltwiseInt8.cpp b/source/backend/cpu/CPUEltwiseInt8.cpp index 2b2682404..95b699c8d 100644 --- a/source/backend/cpu/CPUEltwiseInt8.cpp +++ b/source/backend/cpu/CPUEltwiseInt8.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUEltwiseInt8.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Macro.h" +#include "backend/cpu/CPUEltwiseInt8.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "core/Macro.h" extern "C" { void MNNScaleAddInt8(int8_t* dst, const int8_t* src0, const int8_t* src1, const float* scale0, const float* scale1, diff --git a/source/backend/cpu/CPUEltwiseInt8.hpp b/source/backend/cpu/CPUEltwiseInt8.hpp index 6914ff71e..4dc8940f6 100644 --- a/source/backend/cpu/CPUEltwiseInt8.hpp +++ b/source/backend/cpu/CPUEltwiseInt8.hpp @@ -9,7 +9,7 @@ #ifndef CPUEltwiseInt8_hpp #define CPUEltwiseInt8_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUElu.cpp b/source/backend/cpu/CPUElu.cpp index 905c0f6cd..d546a7012 100644 --- a/source/backend/cpu/CPUElu.cpp +++ b/source/backend/cpu/CPUElu.cpp @@ -1,5 +1,5 @@ // -// CPURelu.cpp +// CPUElu.cpp // MNN // // Created by MNN on 2019/09/23. 
@@ -7,18 +7,18 @@ // #include -#include "CPUElu.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPUElu.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { ErrorCode CPUElu::onExecute(const std::vector& inputs, const std::vector& outputs) { auto input = inputs[0]; auto output = outputs[0]; - + const float* srcData = input->host(); float* dstData = output->host(); const int size = input->elementSize(); - + for (int i = 0; i < size; ++i) { if (srcData[i] >= 0) { dstData[i] = srcData[i]; diff --git a/source/backend/cpu/CPUElu.hpp b/source/backend/cpu/CPUElu.hpp index 5b04ee8ff..e721a2a6e 100644 --- a/source/backend/cpu/CPUElu.hpp +++ b/source/backend/cpu/CPUElu.hpp @@ -1,5 +1,5 @@ // -// CPURelu.hpp +// CPUElu.hpp // MNN // // Created by MNN on 2019/09/23. @@ -9,7 +9,7 @@ #ifndef CPUElu_hpp #define CPUElu_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUElu : public Execution { diff --git a/source/backend/cpu/CPUExpandDims.cpp b/source/backend/cpu/CPUExpandDims.cpp index cae9af268..28fb4b702 100644 --- a/source/backend/cpu/CPUExpandDims.cpp +++ b/source/backend/cpu/CPUExpandDims.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUExpandDims.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUExpandDims.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUExpandDims.hpp b/source/backend/cpu/CPUExpandDims.hpp index 4f16c8011..5ed4f2fb4 100644 --- a/source/backend/cpu/CPUExpandDims.hpp +++ b/source/backend/cpu/CPUExpandDims.hpp @@ -9,7 +9,7 @@ #ifndef CPUExpandDims_hpp #define CPUExpandDims_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUExpandDims : public Execution { diff --git a/source/backend/cpu/CPUFill.cpp b/source/backend/cpu/CPUFill.cpp index e5e22bb26..608bd6779 
100644 --- a/source/backend/cpu/CPUFill.cpp +++ b/source/backend/cpu/CPUFill.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUFill.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUFill.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { @@ -46,7 +46,7 @@ ErrorCode CPUFill::onExecute(const std::vector &inputs, const std::vec default: return INPUT_DATA_ERROR; } - + return NO_ERROR; } diff --git a/source/backend/cpu/CPUFill.hpp b/source/backend/cpu/CPUFill.hpp index de67a9c08..c3162a690 100644 --- a/source/backend/cpu/CPUFill.hpp +++ b/source/backend/cpu/CPUFill.hpp @@ -9,7 +9,7 @@ #ifndef CPUFill_hpp #define CPUFill_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUFill : public Execution { diff --git a/source/backend/cpu/CPUFixedPoint.hpp b/source/backend/cpu/CPUFixedPoint.hpp index d1f013bb7..a5c44f906 100644 --- a/source/backend/cpu/CPUFixedPoint.hpp +++ b/source/backend/cpu/CPUFixedPoint.hpp @@ -21,7 +21,7 @@ limitations under the License. 
#include #include #include -#include "Macro.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif diff --git a/source/backend/cpu/CPUFloatToInt8.cpp b/source/backend/cpu/CPUFloatToInt8.cpp index ff9477ab2..7591232b1 100644 --- a/source/backend/cpu/CPUFloatToInt8.cpp +++ b/source/backend/cpu/CPUFloatToInt8.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUFloatToInt8.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Int8FunctionsOpt.h" -#include "Macro.h" +#include "backend/cpu/CPUFloatToInt8.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "backend/cpu/compute/Int8FunctionsOpt.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUFloatToInt8.hpp b/source/backend/cpu/CPUFloatToInt8.hpp index 57c7498d1..ac2042c47 100644 --- a/source/backend/cpu/CPUFloatToInt8.hpp +++ b/source/backend/cpu/CPUFloatToInt8.hpp @@ -9,7 +9,7 @@ #ifndef CPUFloatToInt8_hpp #define CPUFloatToInt8_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUGather.cpp b/source/backend/cpu/CPUGather.cpp index df3adbec0..9dcda13cc 100644 --- a/source/backend/cpu/CPUGather.cpp +++ b/source/backend/cpu/CPUGather.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUGather.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUGather.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUGather.hpp b/source/backend/cpu/CPUGather.hpp index 7bf76bc9b..e3d8fb59f 100644 --- a/source/backend/cpu/CPUGather.hpp +++ b/source/backend/cpu/CPUGather.hpp @@ -9,7 +9,7 @@ #ifndef CPUGather_hpp #define CPUGather_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUGather : public Execution { diff 
--git a/source/backend/cpu/CPUGatherND.cpp b/source/backend/cpu/CPUGatherND.cpp index 61d03f5c2..8f039f01b 100644 --- a/source/backend/cpu/CPUGatherND.cpp +++ b/source/backend/cpu/CPUGatherND.cpp @@ -11,7 +11,7 @@ https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/kernels/internal/reference/reference_ops.h */ -#include "CPUGatherND.hpp" +#include "backend/cpu/CPUGatherND.hpp" #include namespace MNN { diff --git a/source/backend/cpu/CPUGatherND.hpp b/source/backend/cpu/CPUGatherND.hpp index 732322ccc..d957d426e 100644 --- a/source/backend/cpu/CPUGatherND.hpp +++ b/source/backend/cpu/CPUGatherND.hpp @@ -1,4 +1,4 @@ -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUGatherND : public Execution { public: diff --git a/source/backend/cpu/CPUGatherV2.cpp b/source/backend/cpu/CPUGatherV2.cpp index 2539a794d..445fc3234 100644 --- a/source/backend/cpu/CPUGatherV2.cpp +++ b/source/backend/cpu/CPUGatherV2.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUGatherV2.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUGatherV2.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUGatherV2.hpp b/source/backend/cpu/CPUGatherV2.hpp index 6994bf4ff..cf3fb8895 100644 --- a/source/backend/cpu/CPUGatherV2.hpp +++ b/source/backend/cpu/CPUGatherV2.hpp @@ -9,7 +9,7 @@ #ifndef CPUGatherV2_hpp #define CPUGatherV2_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUGatherV2 : public Execution { diff --git a/source/backend/cpu/CPUInnerProduct.cpp b/source/backend/cpu/CPUInnerProduct.cpp index f40b45f3a..03847b5c4 100644 --- a/source/backend/cpu/CPUInnerProduct.cpp +++ b/source/backend/cpu/CPUInnerProduct.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include 
"CPUInnerProduct.hpp" -#include "AutoStorage.h" -#include "CPUConvolution.hpp" -#include "CommonOptFunction.h" -#include "ConvOpt.h" -#include "Macro.h" +#include "backend/cpu/CPUInnerProduct.hpp" +#include "core/AutoStorage.h" +#include "backend/cpu/CPUConvolution.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUInnerProduct.hpp b/source/backend/cpu/CPUInnerProduct.hpp index 48b3b3acf..42dc81d17 100644 --- a/source/backend/cpu/CPUInnerProduct.hpp +++ b/source/backend/cpu/CPUInnerProduct.hpp @@ -9,7 +9,7 @@ #ifndef CPUInnerProduct_hpp #define CPUInnerProduct_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUInnerProductCreator : public CPUBackend::Creator { diff --git a/source/backend/cpu/CPUInstanceNorm.cpp b/source/backend/cpu/CPUInstanceNorm.cpp index 4b217ee81..851d97831 100644 --- a/source/backend/cpu/CPUInstanceNorm.cpp +++ b/source/backend/cpu/CPUInstanceNorm.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUInstanceNorm.hpp" +#include "backend/cpu/CPUInstanceNorm.hpp" #include -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "MNNDefine.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #ifdef MNN_USE_NEON #include @@ -107,6 +107,6 @@ class CPUInstanceNormCreator : public CPUBackend::Creator { } }; -REGISTER_CPU_OP_CREATOR(CPUInstanceNormCreator, OpType_BatchNorm); +REGISTER_CPU_OP_CREATOR(CPUInstanceNormCreator, OpType_InstanceNorm); } // namespace MNN diff --git a/source/backend/cpu/CPUInstanceNorm.hpp b/source/backend/cpu/CPUInstanceNorm.hpp index 5ef2557cd..af012d5a3 100644 --- a/source/backend/cpu/CPUInstanceNorm.hpp +++ b/source/backend/cpu/CPUInstanceNorm.hpp @@ -9,8 +9,8 @@ #ifndef 
CPUInstanceNorm_hpp #define CPUInstanceNorm_hpp -#include "AutoStorage.h" -#include "Execution.hpp" +#include "core/AutoStorage.h" +#include "core/Execution.hpp" namespace MNN { class CPUInstanceNorm : public Execution { diff --git a/source/backend/cpu/CPUInt8ToFloat.cpp b/source/backend/cpu/CPUInt8ToFloat.cpp index f48694bfa..ed15b9d02 100644 --- a/source/backend/cpu/CPUInt8ToFloat.cpp +++ b/source/backend/cpu/CPUInt8ToFloat.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUInt8ToFloat.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Macro.h" +#include "backend/cpu/CPUInt8ToFloat.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "core/Macro.h" extern "C" { void MNNInt8ScaleToFloat(float* dst, const int8_t* src, const float* scale, size_t size); diff --git a/source/backend/cpu/CPUInt8ToFloat.hpp b/source/backend/cpu/CPUInt8ToFloat.hpp index 4a9f49f32..23d567df7 100644 --- a/source/backend/cpu/CPUInt8ToFloat.hpp +++ b/source/backend/cpu/CPUInt8ToFloat.hpp @@ -9,8 +9,8 @@ #ifndef CPUInt8ToFloat_hpp #define CPUInt8ToFloat_hpp -#include "Execution.hpp" -#include "Tensor.hpp" +#include "core/Execution.hpp" +#include namespace MNN { diff --git a/source/backend/cpu/CPUInterp.cpp b/source/backend/cpu/CPUInterp.cpp index 755afaf48..79f448746 100644 --- a/source/backend/cpu/CPUInterp.cpp +++ b/source/backend/cpu/CPUInterp.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUInterp.hpp" +#include "backend/cpu/CPUInterp.hpp" #include -#include "CPUBackend.hpp" -#include "CPUResize.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUResize.hpp" namespace MNN { @@ -40,7 +40,7 @@ ErrorCode CPUInterp::onExecute(const std::vector &inputs, const std::v if (mResizeType == 1) { // Nearstneighbor - CPUReiseNearstneighborC4(input, output, mWidthScale, mHeightScale); + CPUResizeNearestneighborC4(input, output, mWidthScale, mHeightScale); } 
else if (mResizeType == 2) { // bilinear CPUResizeBilinearC4(input, output, mWidthPosition.host(), mWidthFactor.host(), diff --git a/source/backend/cpu/CPUInterp.hpp b/source/backend/cpu/CPUInterp.hpp index 382af3316..9a3b1524d 100644 --- a/source/backend/cpu/CPUInterp.hpp +++ b/source/backend/cpu/CPUInterp.hpp @@ -9,7 +9,7 @@ #ifndef CPUInterp_hpp #define CPUInterp_hpp -#include "CPUResize.hpp" +#include "backend/cpu/CPUResize.hpp" namespace MNN { diff --git a/source/backend/cpu/CPULRN.cpp b/source/backend/cpu/CPULRN.cpp index 621f97e7b..300946a02 100644 --- a/source/backend/cpu/CPULRN.cpp +++ b/source/backend/cpu/CPULRN.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPULRN.hpp" +#include "backend/cpu/CPULRN.hpp" #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "Macro.h" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include @@ -34,7 +34,7 @@ static void initPowfContext(float beta, float* powfParam) { } powfParam[6] = powf(1.5, -beta); } - + // dst = src^(-beta), src >= 1, beta > 0 /* f(x) = x^(-beta), x >= 1, beta > 0 @@ -134,7 +134,7 @@ void CPULRN::executeAcrossChannels(const float* srcData, float* dstData, const i float* outChannel = dstData + c * size; auto startChanenl = std::max((int)c - mLocalSize / 2, 0); auto endChannel = std::min((int)c + mLocalSize / 2, channels - 1); - + for (int lc = startChanenl; lc <= endChannel; lc++) { auto sqrtChannel = mSquare.host() + lc * size; int i = 0; @@ -173,7 +173,7 @@ void CPULRN::executeWithInChannels(const float* srcData, float* dstData, const i // clear square and output memset(mSquare.host(), 0, mSquare.size()); memset(dstData, 0, size * channels * sizeof(float)); - + // calc output auto outFactor = mAlpha / area; MNN_CONCURRENCY_BEGIN(tId, threadNum) { diff --git a/source/backend/cpu/CPULRN.hpp 
b/source/backend/cpu/CPULRN.hpp index e16bcda0e..2f954301c 100644 --- a/source/backend/cpu/CPULRN.hpp +++ b/source/backend/cpu/CPULRN.hpp @@ -9,7 +9,7 @@ #ifndef CPULRN_hpp #define CPULRN_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPULSTM.cpp b/source/backend/cpu/CPULSTM.cpp index 8a3fcda92..bc7d972ce 100644 --- a/source/backend/cpu/CPULSTM.cpp +++ b/source/backend/cpu/CPULSTM.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPULSTM.hpp" +#include "backend/cpu/CPULSTM.hpp" #include -#include "CPUBackend.hpp" -#include "BufferAllocator.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/BufferAllocator.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #ifdef MNN_USE_NEON #include @@ -56,7 +56,7 @@ static void copyWeightAlignUp4x4(float* dst, const float* src, int numUnits, int } } } - + CPULSTM::CPULSTM(Backend *backend, const LSTM *LSTM) : Execution(backend), mLSTM(LSTM) { const int hiddenSize = mLSTM->outputCount(); int biasLength = 0; @@ -102,7 +102,7 @@ ErrorCode CPULSTM::onResize(const std::vector &inputs, const std::vect memset(dst + 4 * numFeatures, 0, remainBytes); } }; - + // cont transform space if (inputs.size() > 1) { auto &cont = inputs[1]; @@ -132,7 +132,7 @@ ErrorCode CPULSTM::onResize(const std::vector &inputs, const std::vect if (!success) { return OUT_OF_MEMORY; } - + if (!mInit) { mInit = true; auto devide = weightI && !weightH && weightSize == 4 * numUnits * (numFeatures + numUnits + 2); @@ -180,13 +180,13 @@ ErrorCode CPULSTM::onResize(const std::vector &inputs, const std::vect ::memcpy(mWeightH->host(), mLSTM->weightH()->float32s()->data(), mWeightH->size()); } } - + if (inputs.size() > 1) { backend()->onReleaseBuffer(&mCont, 
Backend::DYNAMIC); } backend()->onReleaseBuffer(&mOutput, Backend::DYNAMIC); backend()->onReleaseBuffer(&mCell, Backend::DYNAMIC); - + const int maxDepth = 5; const bool cacheB = false; BufferAllocator* memoryPool = ((CPUBackend *)backend())->getBufferAllocator(); @@ -205,7 +205,7 @@ ErrorCode CPULSTM::onResize(const std::vector &inputs, const std::vect std::shared_ptr __b(nullptr, [memoryPool](void *) { memoryPool->endGroup(); }); mUnits[i].mStracssenComputor->onEncode(mUnits[i].mTempInputVector, mUnits[i].mTempOutputVector); } - + Tensor tempInternalTensor; // just for acquire memory efficiently tempInternalTensor.buffer().dim[0].extent = 4 * batch * ALIGN_UP4(timeSteps) * numUnits; tempInternalTensor.buffer().dimensions = 1; @@ -215,7 +215,7 @@ ErrorCode CPULSTM::onResize(const std::vector &inputs, const std::vect } float* tempData = tempInternalTensor.host(); backend()->onReleaseBuffer(&tempInternalTensor, Backend::DYNAMIC); - + mRetriveOutputFunction = [batch, timeSteps, numUnits, tempData](float* gateData, const float* bias) { const int itemSize = batch * ALIGN_UP4(timeSteps) * numUnits; for (int i = 0; i < 4; ++i) { @@ -238,10 +238,10 @@ ErrorCode CPULSTM::onResize(const std::vector &inputs, const std::vect } } }; - + backend()->onReleaseBuffer(&mInput, Backend::DYNAMIC); backend()->onReleaseBuffer(&mGates, Backend::DYNAMIC); - + return NO_ERROR; } @@ -252,24 +252,24 @@ ErrorCode CPULSTM::onExecute(const std::vector &inputs, const std::vec const int timeSteps = input->buffer().dim[1].extent; const int numUnits = output->buffer().dim[3].extent; const int threadNumber = ((CPUBackend*)backend())->threadNumber(); - + mTransposeInputFunction(input->host(), mInput.host()); MNN_CONCURRENCY_BEGIN(index, 4) { mUnits[index].mStracssenComputor->onExecute(); } MNN_CONCURRENCY_END(); - + float* biasStartPtr = mBiasC->host(); if(!mGateHaveBias){ biasStartPtr = nullptr; } mRetriveOutputFunction(mGates.host(), biasStartPtr); - + float* recurrenceBiasStartPtr = 
mBiasC->host(); if(mGateHaveBias){ recurrenceBiasStartPtr += 4 * numUnits; } - + // tranform const float *contData = nullptr; if (inputs.size() > 1) { @@ -277,7 +277,7 @@ ErrorCode CPULSTM::onExecute(const std::vector &inputs, const std::vec MNNUnpackC4(mCont.host(), cont->host(), cont->width() * cont->height(), cont->channel()); contData = mCont.host(); } - + // calc weightHC auto cellData = mCell.host(); memset(cellData, 0, numUnits * sizeof(float)); @@ -298,7 +298,7 @@ ErrorCode CPULSTM::onExecute(const std::vector &inputs, const std::vec auto weightHCO = weightHCF + hcStep; auto weightHCG = weightHCO + hcStep; auto hiddenPtr = mOutput.host() + (ic - 1) * numUnits; - + int i = 0; #ifdef MNN_USE_NEON float32x4_t Ix4 = vdupq_n_f32(0); @@ -328,7 +328,7 @@ ErrorCode CPULSTM::onExecute(const std::vector &inputs, const std::vec G += weightHCG[i] * hiddenData; } } - + // add bias auto biasPtr = recurrenceBiasStartPtr + oc; I = sigmoid(*biasPtr + I); @@ -338,7 +338,7 @@ ErrorCode CPULSTM::onExecute(const std::vector &inputs, const std::vec O = sigmoid(*biasPtr + O); biasPtr = biasPtr + numUnits; G = tanhf(*biasPtr + G); - + auto newCell = F * cellData[oc] + I * G; cellData[oc] = newCell; auto H = O * tanhf(newCell); diff --git a/source/backend/cpu/CPULSTM.hpp b/source/backend/cpu/CPULSTM.hpp index fb75c56e1..f1842a385 100644 --- a/source/backend/cpu/CPULSTM.hpp +++ b/source/backend/cpu/CPULSTM.hpp @@ -9,8 +9,8 @@ #ifndef CPULSTM_hpp #define CPULSTM_hpp -#include "StrassenMatmulComputor.hpp" -#include "Execution.hpp" +#include "backend/cpu/compute/StrassenMatmulComputor.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { @@ -38,7 +38,7 @@ class CPULSTM : public Execution { Tensor mGates; Tensor mCell; Tensor mOutput; - + struct Unit { std::shared_ptr mTempWeight; std::shared_ptr mTempGates; @@ -46,7 +46,7 @@ class CPULSTM : public Execution { std::vector mTempOutputVector; std::shared_ptr mStracssenComputor; }; - + Unit mUnits[4]; std::function 
mTransposeInputFunction; std::function mRetriveOutputFunction; diff --git a/source/backend/cpu/CPULinSpace.cpp b/source/backend/cpu/CPULinSpace.cpp new file mode 100644 index 000000000..4d6ae12ab --- /dev/null +++ b/source/backend/cpu/CPULinSpace.cpp @@ -0,0 +1,54 @@ +// +// CPULinSpace.cpp +// MNN +// +// Created by MNN on 2019/12/11. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/cpu/CPULinSpace.hpp" +#include "backend/cpu/CPUBackend.hpp" + +namespace MNN { +ErrorCode CPULinSpace::onExecute(const std::vector& inputs, const std::vector& outputs) { + MNN_ASSERT(inputs.size() == 3); + MNN_ASSERT(outputs.size() == 1); + const float start = inputs[0]->host()[0]; + const float stop = inputs[1]->host()[0]; + const int num = inputs[2]->host()[0]; + MNN_ASSERT(num > 0); + const float step = (stop - start) / num; + + float* outputData = outputs[0]->host(); + + if (num == 1) { + outputData[0] = start; + return NO_ERROR; + } + + if (num == 2) { + outputData[0] = start; + outputData[1] = stop; + return NO_ERROR; + } + + // make sure that start with the first and end with the last. + outputData[0] = start; + outputData[num - 1] = stop; + for (int i = 1; i < num - 1; ++i) { + outputData[i] = start + i * step; + } + + return NO_ERROR; +} + +class CPULinSpaceCreator : public CPUBackend::Creator { +public: + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + const MNN::Op* op, Backend* backend) const { + return new CPULinSpace(backend); + } +}; + +REGISTER_CPU_OP_CREATOR(CPULinSpaceCreator, OpType_LinSpace); +} // namespace MNN diff --git a/source/backend/cpu/CPULinSpace.hpp b/source/backend/cpu/CPULinSpace.hpp new file mode 100644 index 000000000..57d9aab1d --- /dev/null +++ b/source/backend/cpu/CPULinSpace.hpp @@ -0,0 +1,26 @@ +// +// CPULinSpace.hpp +// MNN +// +// Created by MNN on 2019/12/11. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef CPULinSpace_hpp +#define CPULinSpace_hpp + +#include "core/Execution.hpp" + +namespace MNN { +class CPULinSpace : public Execution { +public: + CPULinSpace(Backend *b) : Execution(b) { + // nothing to do + } + virtual ~CPULinSpace() = default; + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; +}; + +} // namespace MNN + +#endif /* CPULinSpace_hpp */ diff --git a/source/backend/cpu/CPUMatMul.cpp b/source/backend/cpu/CPUMatMul.cpp index 23f5d9c67..594e55945 100644 --- a/source/backend/cpu/CPUMatMul.cpp +++ b/source/backend/cpu/CPUMatMul.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUMatMul.hpp" -#include "CPUBackend.hpp" -#include "Matrix.hpp" -#include "compute/CommonOptFunction.h" -#include "compute/StrassenMatmulComputor.hpp" -#include "Macro.h" +#include "backend/cpu/CPUMatMul.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "math/Matrix.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "backend/cpu/compute/StrassenMatmulComputor.hpp" +#include "core/Macro.h" namespace MNN { CPUMatMul::CPUMatMul(Backend* backend, bool transposeA, bool transposeB) @@ -72,7 +72,7 @@ ErrorCode CPUMatMul::onResize(const std::vector& inputs, const std::vec auto dst = BTPtr + 16*lC4 * y; auto src = BTempPtr + 4 * l * y; ::memcpy(dst, src, 4*l*sizeof(float)); - ::memset(dst+4*l, 0, (lC4*4-l) * sizeof(float)); + ::memset(dst+4*l, 0, 4 * (lC4*4-l) * sizeof(float)); } }); backend()->onReleaseBuffer(BTemp.get(), Backend::DYNAMIC); diff --git a/source/backend/cpu/CPUMatMul.hpp b/source/backend/cpu/CPUMatMul.hpp index 722b580c2..71b889e25 100644 --- a/source/backend/cpu/CPUMatMul.hpp +++ b/source/backend/cpu/CPUMatMul.hpp @@ -10,7 +10,7 @@ #define CPUMATMUL_HPP #include -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUMatMul : public Execution { diff --git 
a/source/backend/cpu/CPUMatrixBandPart.cpp b/source/backend/cpu/CPUMatrixBandPart.cpp index 2ff39bb2f..97ef7bea8 100644 --- a/source/backend/cpu/CPUMatrixBandPart.cpp +++ b/source/backend/cpu/CPUMatrixBandPart.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUMatrixBandPart.hpp" -#include "ConvOpt.h" -#include "TensorUtils.hpp" -#include "Macro.h" +#include "backend/cpu/CPUMatrixBandPart.hpp" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/TensorUtils.hpp" +#include "core/Macro.h" namespace MNN { ErrorCode CPUMatrixBandPart::onResize(const std::vector &inputs, const std::vector &outputs) { MNN_ASSERT(3 == inputs.size()); diff --git a/source/backend/cpu/CPUMatrixBandPart.hpp b/source/backend/cpu/CPUMatrixBandPart.hpp index 8a20f47ca..df873d38a 100644 --- a/source/backend/cpu/CPUMatrixBandPart.hpp +++ b/source/backend/cpu/CPUMatrixBandPart.hpp @@ -8,7 +8,7 @@ #ifndef CPUMatrixBandPart_hpp #define CPUMatrixBandPart_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUMatrixBandPart : public Execution { diff --git a/source/backend/cpu/CPUMoments.cpp b/source/backend/cpu/CPUMoments.cpp index 79c28d50e..40c2cccf2 100644 --- a/source/backend/cpu/CPUMoments.cpp +++ b/source/backend/cpu/CPUMoments.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUMoments.hpp" +#include "backend/cpu/CPUMoments.hpp" #include -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "MNNDefine.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #ifdef MNN_USE_NEON #include diff --git a/source/backend/cpu/CPUMoments.hpp b/source/backend/cpu/CPUMoments.hpp index ef9d3c27f..c5478217e 100644 --- a/source/backend/cpu/CPUMoments.hpp +++ b/source/backend/cpu/CPUMoments.hpp @@ -9,7 +9,7 @@ #ifndef CPUMoments_hpp #define 
CPUMoments_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUNonMaxSuppressionV2.cpp b/source/backend/cpu/CPUNonMaxSuppressionV2.cpp index e83bbe7a3..8faa05c30 100644 --- a/source/backend/cpu/CPUNonMaxSuppressionV2.cpp +++ b/source/backend/cpu/CPUNonMaxSuppressionV2.cpp @@ -12,11 +12,11 @@ // edited from tensorflow - non_max_suppression_op.cc by MNN. -#include "CPUNonMaxSuppressionV2.hpp" +#include "backend/cpu/CPUNonMaxSuppressionV2.hpp" #include #include -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUNonMaxSuppressionV2.hpp b/source/backend/cpu/CPUNonMaxSuppressionV2.hpp index 3f8a3f786..cb9be6773 100644 --- a/source/backend/cpu/CPUNonMaxSuppressionV2.hpp +++ b/source/backend/cpu/CPUNonMaxSuppressionV2.hpp @@ -9,7 +9,7 @@ #ifndef CPUNonMaxSuppressionV2_hpp #define CPUNonMaxSuppressionV2_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUNormalize.cpp b/source/backend/cpu/CPUNormalize.cpp index 5ba7b6282..8d6556b5b 100644 --- a/source/backend/cpu/CPUNormalize.cpp +++ b/source/backend/cpu/CPUNormalize.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUNormalize.hpp" +#include "backend/cpu/CPUNormalize.hpp" #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" namespace MNN { CPUNormalize::CPUNormalize(Backend* b, const MNN::Op* op) : MNN::Execution(b) { diff --git a/source/backend/cpu/CPUNormalize.hpp b/source/backend/cpu/CPUNormalize.hpp index 88b33b925..e8147cdf2 100644 --- a/source/backend/cpu/CPUNormalize.hpp +++ b/source/backend/cpu/CPUNormalize.hpp @@ -9,8 +9,8 @@ #ifndef CPUNormalize_hpp #define CPUNormalize_hpp -#include "AutoStorage.h" -#include "Execution.hpp" +#include 
"core/AutoStorage.h" +#include "core/Execution.hpp" namespace MNN { class CPUNormalize : public Execution { diff --git a/source/backend/cpu/CPUOPRegister.cpp b/source/backend/cpu/CPUOPRegister.cpp index a68f1828f..653a5966d 100644 --- a/source/backend/cpu/CPUOPRegister.cpp +++ b/source/backend/cpu/CPUOPRegister.cpp @@ -2,13 +2,16 @@ namespace MNN { #ifdef MNN_CODEGEN_REGISTER extern void ___CPUArgMaxCreator__OpType_ArgMax__(); +extern void ___CPUArgMaxCreator__OpType_ArgMin__(); extern void ___CPUAsStringCreator__OpType_AsString__(); extern void ___CPUBatchMatMulCreator__OpType_BatchMatMul__(); extern void ___CPUBatchToSpaceNDCreator__OpType_BatchToSpaceND__(); extern void ___CPUBinaryCreator__OpType_BinaryOp__(); +extern void ___CPUBroadcastToCreator__OpType_BroadcastTo__(); extern void ___CPUCastCreator__OpType_Cast__(); extern void ___CPUConcatCreator__OpType_Concat__(); extern void ___CPUConstCreator__OpType_Const__(); +extern void ___CPUConstCreator__OpType_TrainableParam__(); extern void ___CPUConv2DBackPropFilterCreator__OpType_Conv2DBackPropFilter__(); extern void ___CPUConvInt8Creator__OpType_ConvInt8__(); extern void ___ConvolutionFactory__OpType_Convolution__(); @@ -24,6 +27,7 @@ extern void ___CPUDepthwiseConvInt8Creator__OpType_DepthwiseConvInt8__(); extern void ___CPUDequantizeCreator__OpType_Dequantize__(); extern void ___CPUDetectionOutputCreator__OpType_DetectionOutput__(); extern void ___CPUDetectionPostProcessCreator__OpType_DetectionPostProcess__(); +extern void ___CPUDilation2DCreator__OpType_Dilation2D__(); extern void ___CPUEltwiesCreator__OpType_Eltwise__(); extern void ___CPUEltwiseInt8Creator__OpType_EltwiseInt8__(); extern void ___CPUEluCreator__OpType_ELU__(); @@ -34,16 +38,18 @@ extern void ___CPUGatherCreator__OpType_Gather__(); extern void ___CPUGatherNDCreator__OpType_GatherND__(); extern void ___CPUGatherV2Creator__OpType_GatherV2__(); extern void ___CPUInnerProductCreator__OpType_InnerProduct__(); -extern void 
___CPUInstanceNormCreator__OpType_BatchNorm__(); +extern void ___CPUInstanceNormCreator__OpType_InstanceNorm__(); extern void ___CPUInt8ToFloatCreator__OpType_Int8ToFloat__(); extern void ___CPUInterpCreator__OpType_Interp__(); extern void ___CPULRNCreator__OpType_LRN__(); extern void ___CPULSTMCreator__OpType_LSTM__(); +extern void ___CPULinSpaceCreator__OpType_LinSpace__(); extern void ___CPUMatMulCreator__OpType_MatMul__(); extern void ___CPUMatrixBandPartCreator__OpType_MatrixBandPart__(); extern void ___CPUMomentsCreator__OpType_Moments__(); extern void ___CPUNonMaxSuppressionV2Creator__OpType_NonMaxSuppressionV2__(); extern void ___CPUNormalizeCreator__OpType_Normalize__(); +extern void ___CPUOneHotCreator__OpType_OneHot__(); extern void ___CPUPackCreator__OpType_Pack__(); extern void ___CPUPaddingCreator__OpType_Padding__(); extern void ___CPUPermuteCreator__OpType_Permute__(); @@ -76,6 +82,7 @@ extern void ___CPUReshapeCreator__OpType_Reshape__(); extern void ___CPUResizeCreator__OpType_Resize__(); extern void ___CPUReverseSequenceCreator__OpType_ReverseSequence__(); extern void ___CPUScaleCreator__OpType_Scale__(); +extern void ___CPUScatterNdCreator__OpType_ScatterNd__(); extern void ___CPUSelectCreator__OpType_Select__(); extern void ___CPUSeluCreator__OpType_Selu__(); extern void ___CPUSetDiff1DCreator__OpType_SetDiff1D__(); @@ -95,23 +102,28 @@ extern void ___CPUStridedSliceCreator__OpType_StridedSlice__(); extern void ___CPUTFQuantizedConv2DCreator__OpType_TfQuantizedConv2D__(); extern void ___CPUTanhCreator__OpType_TanH__(); extern void ___CPUTensorConvertFactory__OpType_ConvertTensor__(); +extern void ___CPUThresholdCreator__OpType_Threshold__(); extern void ___CPUTileCreator__OpType_Tile__(); extern void ___CPUTopKV2Creator__OpType_TopKV2__(); extern void ___CPUTransposeeCreator__OpType_Transpose__(); extern void ___CPUUnaryCreator__OpType_UnaryOp__(); extern void ___CPUUnpackCreator__OpType_Unpack__(); +extern void 
___CPUUnravelIndexCreator__OpType_UnravelIndex__(); extern void ___CPUWhereCreator__OpType_Where__(); extern void ___CPUZeroLikeCreator__OpType_ZerosLike__(); void registerCPUOps() { ___CPUArgMaxCreator__OpType_ArgMax__(); +___CPUArgMaxCreator__OpType_ArgMin__(); ___CPUAsStringCreator__OpType_AsString__(); ___CPUBatchMatMulCreator__OpType_BatchMatMul__(); ___CPUBatchToSpaceNDCreator__OpType_BatchToSpaceND__(); ___CPUBinaryCreator__OpType_BinaryOp__(); +___CPUBroadcastToCreator__OpType_BroadcastTo__(); ___CPUCastCreator__OpType_Cast__(); ___CPUConcatCreator__OpType_Concat__(); ___CPUConstCreator__OpType_Const__(); +___CPUConstCreator__OpType_TrainableParam__(); ___CPUConv2DBackPropFilterCreator__OpType_Conv2DBackPropFilter__(); ___CPUConvInt8Creator__OpType_ConvInt8__(); ___ConvolutionFactory__OpType_Convolution__(); @@ -127,6 +139,7 @@ ___CPUDepthwiseConvInt8Creator__OpType_DepthwiseConvInt8__(); ___CPUDequantizeCreator__OpType_Dequantize__(); ___CPUDetectionOutputCreator__OpType_DetectionOutput__(); ___CPUDetectionPostProcessCreator__OpType_DetectionPostProcess__(); +___CPUDilation2DCreator__OpType_Dilation2D__(); ___CPUEltwiesCreator__OpType_Eltwise__(); ___CPUEltwiseInt8Creator__OpType_EltwiseInt8__(); ___CPUEluCreator__OpType_ELU__(); @@ -137,16 +150,18 @@ ___CPUGatherCreator__OpType_Gather__(); ___CPUGatherNDCreator__OpType_GatherND__(); ___CPUGatherV2Creator__OpType_GatherV2__(); ___CPUInnerProductCreator__OpType_InnerProduct__(); -___CPUInstanceNormCreator__OpType_BatchNorm__(); +___CPUInstanceNormCreator__OpType_InstanceNorm__(); ___CPUInt8ToFloatCreator__OpType_Int8ToFloat__(); ___CPUInterpCreator__OpType_Interp__(); ___CPULRNCreator__OpType_LRN__(); ___CPULSTMCreator__OpType_LSTM__(); +___CPULinSpaceCreator__OpType_LinSpace__(); ___CPUMatMulCreator__OpType_MatMul__(); ___CPUMatrixBandPartCreator__OpType_MatrixBandPart__(); ___CPUMomentsCreator__OpType_Moments__(); ___CPUNonMaxSuppressionV2Creator__OpType_NonMaxSuppressionV2__(); 
___CPUNormalizeCreator__OpType_Normalize__(); +___CPUOneHotCreator__OpType_OneHot__(); ___CPUPackCreator__OpType_Pack__(); ___CPUPaddingCreator__OpType_Padding__(); ___CPUPermuteCreator__OpType_Permute__(); @@ -179,6 +194,7 @@ ___CPUReshapeCreator__OpType_Reshape__(); ___CPUResizeCreator__OpType_Resize__(); ___CPUReverseSequenceCreator__OpType_ReverseSequence__(); ___CPUScaleCreator__OpType_Scale__(); +___CPUScatterNdCreator__OpType_ScatterNd__(); ___CPUSelectCreator__OpType_Select__(); ___CPUSeluCreator__OpType_Selu__(); ___CPUSetDiff1DCreator__OpType_SetDiff1D__(); @@ -198,11 +214,13 @@ ___CPUStridedSliceCreator__OpType_StridedSlice__(); ___CPUTFQuantizedConv2DCreator__OpType_TfQuantizedConv2D__(); ___CPUTanhCreator__OpType_TanH__(); ___CPUTensorConvertFactory__OpType_ConvertTensor__(); +___CPUThresholdCreator__OpType_Threshold__(); ___CPUTileCreator__OpType_Tile__(); ___CPUTopKV2Creator__OpType_TopKV2__(); ___CPUTransposeeCreator__OpType_Transpose__(); ___CPUUnaryCreator__OpType_UnaryOp__(); ___CPUUnpackCreator__OpType_Unpack__(); +___CPUUnravelIndexCreator__OpType_UnravelIndex__(); ___CPUWhereCreator__OpType_Where__(); ___CPUZeroLikeCreator__OpType_ZerosLike__(); } diff --git a/source/backend/cpu/CPUOneHot.cpp b/source/backend/cpu/CPUOneHot.cpp new file mode 100644 index 000000000..bc08a7fe0 --- /dev/null +++ b/source/backend/cpu/CPUOneHot.cpp @@ -0,0 +1,72 @@ +// +// CPUOneHot.cpp +// MNN +// +// Created by MNN on 2019/11/29. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/cpu/CPUOneHot.hpp" +#include "backend/cpu/CPUBackend.hpp" + +namespace MNN { + +template +void OneHotImpl(int depth, int outerSize, int innerSize, const int* indices, const Tensor* onValueTensor, + const Tensor* offValueTensor, Tensor* outputTensor) { + const T onValue = onValueTensor->host()[0]; + const T offValue = offValueTensor->host()[0]; + T* outputPtr = outputTensor->host(); + + for (int i = 0; i < outerSize; ++i) { + for (int j = 0; j < depth; ++j) { + for (int k = 0; k < innerSize; ++k) { + *outputPtr = indices[i * innerSize + k] == j ? onValue : offValue; + outputPtr++; + } + } + } +} + +ErrorCode CPUOneHot::onExecute(const std::vector& inputs, const std::vector& outputs) { + auto indices = inputs[0]; + auto depthTensor = inputs[1]; + auto onValueTensor = inputs[2]; + auto offValueTensor = inputs[3]; + + if (mAxis == -1) { + mAxis = indices->dimensions(); + } + int outerSize = 1; + for (int i = 0; i < mAxis; ++i) { + outerSize *= indices->length(i); + } + const int depth = depthTensor->host()[0]; + const int innerSize = indices->elementSize() / outerSize; + const auto indicesPtr = indices->host(); + + auto dataType = onValueTensor->getType(); + auto offDataType = offValueTensor->getType(); + MNN_ASSERT(dataType == offDataType); + + if (dataType == halide_type_of()) { + OneHotImpl(depth, outerSize, innerSize, indicesPtr, onValueTensor, offValueTensor, outputs[0]); + } else if (dataType == halide_type_of()) { + OneHotImpl(depth, outerSize, innerSize, indicesPtr, onValueTensor, offValueTensor, outputs[0]); + } else { + return NOT_SUPPORT; + } + return NO_ERROR; +} + +class CPUOneHotCreator : public CPUBackend::Creator { +public: + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + const MNN::Op* op, Backend* backend) const override { + return new CPUOneHot(backend, op->main_as_OneHotParam()->axis()); + } +}; + 
+REGISTER_CPU_OP_CREATOR(CPUOneHotCreator, OpType_OneHot); + +} // namespace MNN diff --git a/source/backend/cpu/CPUOneHot.hpp b/source/backend/cpu/CPUOneHot.hpp new file mode 100644 index 000000000..ff15b73f7 --- /dev/null +++ b/source/backend/cpu/CPUOneHot.hpp @@ -0,0 +1,29 @@ +// +// CPUOneHot.hpp +// MNN +// +// Created by MNN on 2019/11/29. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef CPUOneHot_hpp +#define CPUOneHot_hpp + +#include "core/Execution.hpp" + +namespace MNN { + +class CPUOneHot : public Execution{ +public: + CPUOneHot(Backend* b, int axis):Execution(b), mAxis(axis){} + virtual ~CPUOneHot() = default; + + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; + +private: + int mAxis; +}; + +} // namespace MNN + +#endif /* CPUOneHot_hpp */ diff --git a/source/backend/cpu/CPUPack.cpp b/source/backend/cpu/CPUPack.cpp index a5603ce4b..0a51a906a 100644 --- a/source/backend/cpu/CPUPack.cpp +++ b/source/backend/cpu/CPUPack.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUPack.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPUPack.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUPack.hpp b/source/backend/cpu/CPUPack.hpp index c22765f0d..1d6737aac 100644 --- a/source/backend/cpu/CPUPack.hpp +++ b/source/backend/cpu/CPUPack.hpp @@ -9,7 +9,7 @@ #ifndef CPUPack_hpp #define CPUPack_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "Type_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUPadding.cpp b/source/backend/cpu/CPUPadding.cpp index c7aa7a0b8..54f930d83 100644 --- a/source/backend/cpu/CPUPadding.cpp +++ b/source/backend/cpu/CPUPadding.cpp @@ -6,19 +6,111 @@ // Copyright © 2018 Alibaba. All rights reserved. 
// -#include "CPUPadding.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUPadding.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #include -#include "CPUTensorConvert.hpp" +#include "backend/cpu/CPUTensorConvert.hpp" namespace MNN { -void CPUPadding::execute(const std::vector &inputs, const std::vector &outputs) { + +ErrorCode memsetHelper(const Tensor *padValueTensor, Tensor *output) { + auto dtype = output->getType(); + const int size = output->elementSize(); + if (dtype == halide_type_of()) { + const auto padValue = padValueTensor->host()[0]; + auto ptr = output->host(); + std::fill(ptr, ptr + size, padValue); + } else if (dtype == halide_type_of()) { + const auto padValue = padValueTensor->host()[0]; + auto ptr = output->host(); + std::fill(ptr, ptr + size, padValue); + } else { + MNN_ERROR("TODO, support other data type: %d\n", dtype.code); + return NOT_SUPPORT; + } + return NO_ERROR; +} + +// refer to tflite mirrorPad +struct CacheElement { + int start; + int end; +}; +int MirrorPadImpl(const Tensor *data, CacheElement *cache, Tensor *paddedData, const int *pad, int currentDim, + int flatIndex, int outputIndex, int offset) { + const int bytes = data->getType().bytes(); + if (currentDim == paddedData->dimensions()) { + if (outputIndex >= paddedData->elementSize()) { + return outputIndex; + } + memcpy(paddedData->host() + outputIndex * bytes, data->host() + flatIndex * bytes, bytes); + return outputIndex + 1; + } + const int cacheIndex = currentDim * data->elementSize() + flatIndex; + auto &cacheEntry = cache[cacheIndex]; + if (cacheEntry.start != -1) { + const int size = cacheEntry.end - cacheEntry.start; + memcpy(paddedData->host() + outputIndex * bytes, paddedData->host() + cacheEntry.start * bytes, + size * bytes); + return outputIndex + size; + } + + cacheEntry.start = outputIndex; + int leftPad = pad[2 * currentDim]; + int rightPad = pad[2 * currentDim + 1]; + const int multiplier = data->stride(currentDim); + 
+ for (int i = leftPad + offset - 1; i >= offset && leftPad > 0; --i, --leftPad) { + outputIndex = MirrorPadImpl(data, cache, paddedData, pad, currentDim + 1, flatIndex + i * multiplier, + outputIndex, offset); + } + const int curDimLength = data->length(currentDim); + for (int i = 0; i < curDimLength; ++i) { + outputIndex = MirrorPadImpl(data, cache, paddedData, pad, currentDim + 1, flatIndex + i * multiplier, + outputIndex, offset); + } + for (int i = curDimLength - (1 + offset); i >= 0 && rightPad > 0; --i, --rightPad) { + outputIndex = MirrorPadImpl(data, cache, paddedData, pad, currentDim + 1, flatIndex + i * multiplier, + outputIndex, offset); + } + + cacheEntry.end = outputIndex; + + return outputIndex; +} + +static ErrorCode resizeImpl(Backend *bn, const std::vector &inputs, const std::vector &outputs, + Tensor *cache) { + const int size = inputs[0]->elementSize() * inputs[0]->dimensions() * 2; + cache->setType(DataType_DT_INT32); + cache->buffer().dimensions = 1; + cache->setLength(0, size); + bool success = bn->onAcquireBuffer(cache, Backend::DYNAMIC); + if (!success) { + return OUT_OF_MEMORY; + } + bn->onReleaseBuffer(cache, Backend::DYNAMIC); + return NO_ERROR; +} + +ErrorCode CPUPadding::onResize(const std::vector &inputs, const std::vector &outputs) { + if (mMode != PadValueMode_CONSTANT) { + return resizeImpl(backend(), inputs, outputs, &mCache); + } + return NO_ERROR; +} + +void CPUPadding::execute(const std::vector &inputs, const std::vector &outputs, PadValueMode mode) { auto input = inputs[0]; auto output = outputs[0]; auto padding = inputs[1]->host(); - ::memset(output->host(), 0, output->size()); + if (inputs.size() == 3) { + memsetHelper(inputs[2], output); + } else { + ::memset(output->host(), 0, output->size()); + } auto outputData = output->host(); - auto inputData = input->host(); + auto inputData = input->host(); #define MAX_DIM 6 MNN_ASSERT(output->dimensions() <= MAX_DIM); int dims[MAX_DIM]; @@ -26,25 +118,25 @@ void 
CPUPadding::execute(const std::vector &inputs, const std::vector< int iStride[MAX_DIM]; int pad[MAX_DIM]; auto bytes = input->getType().bytes(); - for (int i=0; idimensions(); - for (int i=0; idimensions(); ++i) { - pad[offset+i] = padding[2*i]; - dims[offset+i] = input->length(i); - oStride[offset+i] = output->stride(i) * bytes; - iStride[offset+i] = input->stride(i) * bytes; + for (int i = 0; i < input->dimensions(); ++i) { + pad[offset + i] = padding[2 * i]; + dims[offset + i] = input->length(i); + oStride[offset + i] = output->stride(i) * bytes; + iStride[offset + i] = input->stride(i) * bytes; } for (int w = 0; w < dims[0]; ++w) { - auto ow = outputData + (w+pad[0]) * oStride[0]; + auto ow = outputData + (w + pad[0]) * oStride[0]; auto sw = inputData + w * iStride[0]; -#define PTR(x, y, i) \ -auto o##x = o##y + (x+pad[i]) * oStride[i]; \ -auto s##x = s##y + x * iStride[i]; \ +#define PTR(x, y, i) \ + auto o##x = o##y + (x + pad[i]) * oStride[i]; \ + auto s##x = s##y + x * iStride[i]; for (int v = 0; v < dims[1]; ++v) { PTR(v, w, 1); @@ -54,7 +146,7 @@ auto s##x = s##y + x * iStride[i]; \ PTR(z, u, 3); for (int y = 0; y < dims[4]; ++y) { PTR(y, z, 4); - ::memcpy(oy+pad[5]*oStride[5], sy, iStride[4]); + ::memcpy(oy + pad[5] * oStride[5], sy, iStride[4]); } } } @@ -64,10 +156,25 @@ auto s##x = s##y + x * iStride[i]; \ #undef PTR } +ErrorCode CPUPadding::onExecute(const std::vector &inputs, const std::vector &outputs) { + if (mMode == PadValueMode_CONSTANT) { + execute(inputs, outputs, mMode); + } else { + // REFLECT or SYMMETRIC + int offset = mMode == PadValueMode_SYMMETRIC ? 
0 : 1; + auto cacheData = reinterpret_cast(mCache.host()); + std::fill(cacheData, cacheData + mCache.elementSize() / 2, CacheElement{-1, -1}); + const int *pad = inputs[1]->host(); + int outputIndex = 0; + MirrorPadImpl(inputs[0], cacheData, outputs[0], pad, 0, 0, outputIndex, offset); + } + return NO_ERROR; +} + ErrorCode CPUPaddingPacked::onResize(const std::vector &inputs, const std::vector &outputs) { auto padding = inputs[1]; auto paddingPtr = padding->host(); - if (paddingPtr[2] != 0 || paddingPtr[3] != 0) { + if (paddingPtr[2] != 0 || paddingPtr[3] != 0 || mMode != PadValueMode_CONSTANT) { mNeedConvert = true; } if (!mNeedConvert) { @@ -76,12 +183,17 @@ ErrorCode CPUPaddingPacked::onResize(const std::vector &inputs, const mTempOutput.reset(Tensor::createDevice(outputs[0]->shape(), Tensor::CAFFE)); mTempInput.reset(Tensor::createDevice(inputs[0]->shape(), Tensor::CAFFE)); bool res = backend()->onAcquireBuffer(mTempOutput.get(), Backend::DYNAMIC); - res = res && backend()->onAcquireBuffer(mTempInput.get(), Backend::DYNAMIC); + res = res && backend()->onAcquireBuffer(mTempInput.get(), Backend::DYNAMIC); if (!res) { return OUT_OF_MEMORY; } - mTempInputs = {mTempInput.get(), inputs[1]}; + mTempInputs = {mTempInput.get(), inputs[1]}; mTempOutputs = {mTempOutput.get()}; + + if (mMode != PadValueMode_CONSTANT) { + resizeImpl(backend(), inputs, outputs, &mCache); + } + backend()->onReleaseBuffer(mTempOutput.get(), Backend::DYNAMIC); backend()->onReleaseBuffer(mTempInput.get(), Backend::DYNAMIC); @@ -93,20 +205,40 @@ ErrorCode CPUPaddingPacked::onExecute(const std::vector &inputs, const auto output = outputs[0]; if (mNeedConvert) { CPUTensorConverter::convert(input, mTempInput.get()); - CPUPadding::execute(mTempInputs, mTempOutputs); + // CPUPadding::execute(mTempInputs, mTempOutputs, mMode); + + if (mMode == PadValueMode_CONSTANT) { + CPUPadding::execute(mTempInputs, mTempOutputs, mMode); + } else { + // REFLECT or SYMMETRIC + int offset = mMode == 
PadValueMode_SYMMETRIC ? 0 : 1; + auto cacheData = reinterpret_cast(mCache.host()); + std::fill(cacheData, cacheData + mCache.elementSize(), CacheElement{-1, -1}); + const int *pad = inputs[1]->host(); + int outputIndex = 0; + MirrorPadImpl(mTempInput.get(), cacheData, mTempOutput.get(), pad, 0, 0, outputIndex, offset); + } + CPUTensorConverter::convert(mTempOutput.get(), output); return NO_ERROR; } - auto iw = input->width(); - auto ih = input->height(); - auto ic = input->channel(); - auto ib = input->batch(); + auto iw = input->width(); + auto ih = input->height(); + auto ic = input->channel(); + auto ib = input->batch(); auto ow = output->width(); auto oh = output->height(); auto icC4 = UP_DIV(ic, 4); auto padding = inputs[1]->host(); - ::memset(output->host(), 0, output->size()); + if (inputs.size() == 3) { + auto code = memsetHelper(inputs[2], output); + if (code != NO_ERROR) { + return code; + } + } else { + ::memset(output->host(), 0, output->size()); + } for (int n = 0; n < ib; ++n) { auto inputN = input->host() + input->stride(0) * n; auto outputN = output->host() + output->stride(0) * (padding[2 * 0] + n); @@ -129,8 +261,13 @@ class CPUPaddingCreator : public CPUBackend::Creator { public: virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const { + auto param = op->main_as_PadParam(); + auto mode = PadValueMode_CONSTANT; + if (param) { + mode = param->mode(); + } if (TensorUtils::getDescribe(inputs[0])->dimensionFormat != MNN_DATA_FORMAT_NC4HW4) { - return new CPUPadding(backend); + return new CPUPadding(backend, mode); } if (inputs[0]->dimensions() != 4) { MNN_ERROR("Currently padding only support 4 dimension for NC4HW4\n"); @@ -140,7 +277,7 @@ class CPUPaddingCreator : public CPUBackend::Creator { MNN_ERROR("Currently padding NC4HW4 only support 32 bit padding\n"); return nullptr; } - return new CPUPaddingPacked(backend); + return new CPUPaddingPacked(backend, mode); } }; diff --git 
a/source/backend/cpu/CPUPadding.hpp b/source/backend/cpu/CPUPadding.hpp index fb28da42d..67e196f7f 100644 --- a/source/backend/cpu/CPUPadding.hpp +++ b/source/backend/cpu/CPUPadding.hpp @@ -10,11 +10,11 @@ #define CPUPadding_hpp #include -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUPaddingPacked : public Execution { public: - CPUPaddingPacked(Backend *bn) : Execution(bn) { + CPUPaddingPacked(Backend *bn, PadValueMode mode) : Execution(bn), mMode(mode) { // Do nothing } virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; @@ -25,17 +25,21 @@ class CPUPaddingPacked : public Execution { std::vector mTempInputs; std::vector mTempOutputs; bool mNeedConvert = false; + PadValueMode mMode; + Tensor mCache; }; class CPUPadding : public Execution { public: - CPUPadding(Backend *bn) : Execution(bn) { + CPUPadding(Backend *bn, PadValueMode mode) : Execution(bn), mMode(mode) { // Do nothing } - static void execute(const std::vector &inputs, const std::vector &outputs); - virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override { - execute(inputs, outputs); - return NO_ERROR; - } + static void execute(const std::vector &inputs, const std::vector &outputs, PadValueMode mode = PadValueMode_CONSTANT); + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; + +private: + Tensor mCache; + PadValueMode mMode; }; }; // namespace MNN diff --git a/source/backend/cpu/CPUPermute.cpp b/source/backend/cpu/CPUPermute.cpp index 454d96097..73c89e0ee 100644 --- a/source/backend/cpu/CPUPermute.cpp +++ b/source/backend/cpu/CPUPermute.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUPermute.hpp" -#include "CPUTranspose.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "TensorUtils.hpp" 
+#include "backend/cpu/CPUPermute.hpp" +#include "backend/cpu/CPUTranspose.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { @@ -28,7 +28,7 @@ ErrorCode CPUPermute::onResize(const std::vector &inputs, const std::v ErrorCode CPUPermute::onExecute(const std::vector &inputs, const std::vector &outputs) { MNN_ASSERT(1 == inputs.size()); MNN_ASSERT(1 == outputs.size()); - + auto input = inputs[0]; auto output = outputs[0]; @@ -53,9 +53,9 @@ ErrorCode CPUPermute::onExecute(const std::vector &inputs, const std:: } } const int outputChannel = output->length(1); - + int strides[5][4]; // map from change of output index to change of input index on N, C4, H and W - + for (int i = 0; i < 5; ++i) { if (i >= input->dimensions()) { strides[i][0] = strides[i][1] = strides[i][2] = strides[i][3] = 0; diff --git a/source/backend/cpu/CPUPermute.hpp b/source/backend/cpu/CPUPermute.hpp index cd4af4ab7..e94a29b26 100644 --- a/source/backend/cpu/CPUPermute.hpp +++ b/source/backend/cpu/CPUPermute.hpp @@ -8,7 +8,7 @@ #ifndef CPUPermute_hpp #define CPUPermute_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUPermute : public Execution { diff --git a/source/backend/cpu/CPUPool.cpp b/source/backend/cpu/CPUPool.cpp index cfada16b6..ca35b1326 100644 --- a/source/backend/cpu/CPUPool.cpp +++ b/source/backend/cpu/CPUPool.cpp @@ -6,16 +6,16 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUPool.hpp" +#include "backend/cpu/CPUPool.hpp" #include #include -#include "Macro.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif -#include "Concurrency.h" -#include "Vec4.hpp" +#include "core/Concurrency.h" +#include "math/Vec4.hpp" using Vec4 = MNN::Math::Vec4; @@ -422,7 +422,7 @@ class CPUPoolCreator : public CPUBackend::Creator { }; REGISTER_CPU_OP_CREATOR(CPUPoolCreator, OpType_Pooling); - + CPUPool3D::CPUPool3D(Backend *b, const Pool3D *param) : 
MNN::Execution(b) { mType = param->type(); mPadType = param->padType(); @@ -438,19 +438,19 @@ CPUPool3D::CPUPool3D(Backend *b, const Pool3D *param) : MNN::Execution(b) { } } } - + ErrorCode CPUPool3D::onResize(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; - if (mPadType == PoolPadType_SAME) { + mPads.clear(); for (unsigned int i = 0; i < output->dimensions() - 2; ++i) { const int inputLength = input->length(i + 2), outputLength = output->length(i + 2); const int inputLengthNeed = (outputLength - 1) * mStrides[i] + mKernels[i]; mPads.push_back((inputLengthNeed - inputLength) / 2); } } - + if (mKernels[0] != 1 || mStrides[0] != 1) { const int batch = input->length(0), channel = input->length(1), inputDepth = input->length(2); const int outputHeight = output->length(3), outputWidth = output->length(4); @@ -460,12 +460,12 @@ ErrorCode CPUPool3D::onResize(const std::vector &inputs, const std::ve } return NO_ERROR; } - + ErrorCode CPUPool3D::onExecute(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; MNN_ASSERT(input->dimensions() == 5); - + const int kernelDepth = mKernels[0], kernelHeight = mKernels[1], kernelWidth = mKernels[2]; const int strideDepth = mStrides[0], strideHeight = mStrides[1], strideWidth = mStrides[2]; const int outputDepth = output->length(2), outputHeight = output->length(3), outputWidth = output->length(4); @@ -473,7 +473,7 @@ ErrorCode CPUPool3D::onExecute(const std::vector &inputs, const std::v const int channel = input->length(1), batch = input->length(0); const int padDepth = mPads[0], padHeight = mPads[1], padWidth = mPads[2]; const int threadNumber = ((CPUBackend*)backend())->threadNumber(); - + { auto planeFunction = poolingMax; if (mType == PoolType_AVEPOOL) { @@ -484,7 +484,7 @@ ErrorCode CPUPool3D::onExecute(const std::vector &inputs, const std::v auto inputPlaneStride = 4 * inputHeight * inputWidth; auto outputPlaneStride 
= 4 * outputHeight * outputWidth; auto padType = mPadType; - + auto planeFunc = [=](int tId) { for (int o = tId; o < batch * UP_DIV(channel, 4) * inputDepth; o += threadNumber) { planeFunction(srcData + o * inputPlaneStride, inputWidth, inputHeight, @@ -497,7 +497,7 @@ ErrorCode CPUPool3D::onExecute(const std::vector &inputs, const std::v } MNN_CONCURRENCY_END(); } - + if (mTempStorage.get() != nullptr) { using InnerFuncType = std::function; InnerFuncType innerFunc = [=](float* dst, const float* src, int step, int kernel) { @@ -507,7 +507,7 @@ ErrorCode CPUPool3D::onExecute(const std::vector &inputs, const std::v } Vec4::save(dst, max); }; - + if (mType == PoolType_AVEPOOL) { innerFunc = [=](float* dst, const float* src, int step, int kernel) { Vec4 sum = Vec4::load(src); @@ -517,10 +517,10 @@ ErrorCode CPUPool3D::onExecute(const std::vector &inputs, const std::v Vec4::save(dst, sum * ((float)1 / kernel)); }; } - + const float* srcData = mTempStorage->host(); float* dstData = output->host(); - + auto reduceDepthFunc = [=, &innerFunc](int tId) { const int outputPlaneStride = outputHeight * outputWidth * 4; for (int o = tId; o < batch * UP_DIV(channel, 4); o += threadNumber) { @@ -528,8 +528,9 @@ ErrorCode CPUPool3D::onExecute(const std::vector &inputs, const std::v auto dstZData = dstData + o * outputDepth * outputPlaneStride; for (int i = 0; i < outputHeight * outputWidth; ++i) { for (int d = 0; d < outputDepth; ++d) { - int dSrc = ALIMAX(d * strideDepth - padDepth, 0); - int kernel = ALIMIN(dSrc + kernelDepth, inputDepth) - dSrc; + int dRawSrc = d * strideDepth - padDepth; + int dSrc = ALIMAX(dRawSrc, 0); + int kernel = ALIMIN(dRawSrc + kernelDepth, inputDepth) - dSrc; if (kernel == 0) { Vec4::save(dstZData + d * outputPlaneStride + i * 4, Vec4((float)0)); continue; @@ -545,10 +546,10 @@ ErrorCode CPUPool3D::onExecute(const std::vector &inputs, const std::v } MNN_CONCURRENCY_END(); } - + return NO_ERROR; } - + class CPUPool3DCreator : public CPUBackend::Creator { 
public: virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, diff --git a/source/backend/cpu/CPUPool.hpp b/source/backend/cpu/CPUPool.hpp index 0101deff9..9d49f0361 100644 --- a/source/backend/cpu/CPUPool.hpp +++ b/source/backend/cpu/CPUPool.hpp @@ -9,7 +9,7 @@ #ifndef CPUPool_hpp #define CPUPool_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUPool : public Execution { diff --git a/source/backend/cpu/CPUPoolGrad.cpp b/source/backend/cpu/CPUPoolGrad.cpp index 0c27387d9..1dc4ed987 100644 --- a/source/backend/cpu/CPUPoolGrad.cpp +++ b/source/backend/cpu/CPUPoolGrad.cpp @@ -6,9 +6,9 @@ // Copyright © 2019 Alibaba. All rights reserved. // -#include "CPUPoolGrad.hpp" -#include "Macro.h" -#include "Vec4.hpp" +#include "backend/cpu/CPUPoolGrad.hpp" +#include "core/Macro.h" +#include "math/Vec4.hpp" namespace MNN { using namespace Math; class CPUMaxPoolGrad : public CPUCommonPoolGrad { @@ -20,14 +20,14 @@ class CPUMaxPoolGrad : public CPUCommonPoolGrad { auto outputOrigin = inputs[1]; auto inputDiff = inputs[2]; auto outputDiff = outputs[0]; - + auto ow = inputDiff->width(); auto oh = inputDiff->height(); auto iw = origin->width(); auto ih = origin->height(); - + //MNN_PRINT("%d, %d, %d, %d\n", origin->width(), outputOrigin->width(), inputDiff->width(), outputDiff->width()); - + auto channelC4 = UP_DIV(inputDiff->channel(), 4); auto batch = inputDiff->batch(); for (int batchIndex = 0; batchIndex < batch; ++batchIndex) { @@ -40,12 +40,13 @@ class CPUMaxPoolGrad : public CPUCommonPoolGrad { auto inputZ1 = input1Ptr + z * ow * oh * 4; auto outputOriZ = outputOriginPtr + z * ow * oh * 4; auto outputZ = outputPtr + z * iw * ih * 4; - + ::memset(outputZ, 0, sizeof(float) * iw * ih * 4); for (int y = 0; y < oh; ++y) { for (int x = 0; x < ow; ++x) { Vec4 maxValue = Vec4::load(outputOriZ + 4 * (x + y * ow)); Vec4 diffValue = Vec4::load(inputZ1 + 4 * (x + y * ow)); + bool unfinished[4] = {true, true, true, 
true}; for (int ky = 0; ky < mKernelY; ++ky) { auto sy = y * mStrideY + ky; if (sy < 0 || sy >= ih) { @@ -58,10 +59,10 @@ class CPUMaxPoolGrad : public CPUCommonPoolGrad { } Vec4 originValue = Vec4::load(inputZ0 + 4 * (sx + sy * iw)); auto dst = outputZ + 4 * (sx + sy * iw); - Vec4::save(dst, Vec4(0)); for (int j = 0; j < 4; ++j) { - if (originValue[j] >= maxValue[j]) { - dst[j] = diffValue[j]; + if (unfinished[j] && originValue[j] >= maxValue[j]) { + unfinished[j] = false; + dst[j] = dst[j] + diffValue[j]; } } } @@ -73,21 +74,21 @@ class CPUMaxPoolGrad : public CPUCommonPoolGrad { return NO_ERROR; } }; - + class CPUAvgPoolGrad : public CPUCommonPoolGrad { public: CPUAvgPoolGrad(Backend *b, const Pool *parameter) : CPUCommonPoolGrad(b, parameter) {} - + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override { auto origin = inputs[0]; auto inputDiff = inputs[2]; auto outputDiff = outputs[0]; - + auto ow = inputDiff->width(); auto oh = inputDiff->height(); auto iw = origin->width(); auto ih = origin->height(); - + auto channelC4 = UP_DIV(inputDiff->channel(), 4); auto batch = inputDiff->batch(); auto factor = Vec4(1.0f/((float)mKernelY*mKernelX)); @@ -97,7 +98,7 @@ class CPUAvgPoolGrad : public CPUCommonPoolGrad { for (int z = 0; z < channelC4; ++z) { auto inputZ1 = input1Ptr + z * ow * oh * 4; auto outputZ = outputPtr + z * iw * ih * 4; - + ::memset(outputZ, 0, sizeof(float) * iw * ih * 4); for (int y = 0; y < oh; ++y) { for (int x = 0; x < ow; ++x) { @@ -113,7 +114,7 @@ class CPUAvgPoolGrad : public CPUCommonPoolGrad { continue; } auto dst = outputZ + 4 * (sx + sy * iw); - Vec4::save(dst, diffValue); + Vec4::save(dst, Vec4::load(dst) + diffValue); } } } @@ -123,7 +124,7 @@ class CPUAvgPoolGrad : public CPUCommonPoolGrad { return NO_ERROR; } }; - + class CPUPoolGradCreator : public CPUBackend::Creator { public: virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, diff --git 
a/source/backend/cpu/CPUPoolGrad.hpp b/source/backend/cpu/CPUPoolGrad.hpp index 0a5dc7ba7..2f01f8775 100644 --- a/source/backend/cpu/CPUPoolGrad.hpp +++ b/source/backend/cpu/CPUPoolGrad.hpp @@ -9,7 +9,7 @@ #ifndef CPUPoolGrad_hpp #define CPUPoolGrad_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUCommonPoolGrad : public Execution { diff --git a/source/backend/cpu/CPUPoolInt8.cpp b/source/backend/cpu/CPUPoolInt8.cpp index fefd8d66c..f5d67f7dd 100644 --- a/source/backend/cpu/CPUPoolInt8.cpp +++ b/source/backend/cpu/CPUPoolInt8.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUPoolInt8.hpp" -#include "Macro.h" +#include "backend/cpu/CPUPoolInt8.hpp" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif -#include "Concurrency.h" +#include "core/Concurrency.h" #define UNIT 4 diff --git a/source/backend/cpu/CPUPoolInt8.hpp b/source/backend/cpu/CPUPoolInt8.hpp index 3b6a2c70e..656cc4127 100644 --- a/source/backend/cpu/CPUPoolInt8.hpp +++ b/source/backend/cpu/CPUPoolInt8.hpp @@ -9,7 +9,7 @@ #ifndef CPUPoolInt8_hpp #define CPUPoolInt8_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUPriorbox.cpp b/source/backend/cpu/CPUPriorbox.cpp index 6d42c8320..bf2bdddc1 100644 --- a/source/backend/cpu/CPUPriorbox.cpp +++ b/source/backend/cpu/CPUPriorbox.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUPriorbox.hpp" +#include "backend/cpu/CPUPriorbox.hpp" #include -#include "AutoStorage.h" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "TensorUtils.hpp" +#include "core/AutoStorage.h" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUPriorbox.hpp b/source/backend/cpu/CPUPriorbox.hpp index 76fa360ea..a6af14a20 100644 --- 
a/source/backend/cpu/CPUPriorbox.hpp +++ b/source/backend/cpu/CPUPriorbox.hpp @@ -9,7 +9,7 @@ #ifndef CPUPriorbox_hpp #define CPUPriorbox_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUProposal.cpp b/source/backend/cpu/CPUProposal.cpp index 0f2b098ed..b038545d2 100644 --- a/source/backend/cpu/CPUProposal.cpp +++ b/source/backend/cpu/CPUProposal.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUProposal.hpp" +#include "backend/cpu/CPUProposal.hpp" #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include namespace MNN { CPUProposal::CPUProposal(Backend *backend, const Proposal *proposal) : Execution(backend), mProposal(proposal) { diff --git a/source/backend/cpu/CPUProposal.hpp b/source/backend/cpu/CPUProposal.hpp index aa8731d95..a1e1b67f5 100644 --- a/source/backend/cpu/CPUProposal.hpp +++ b/source/backend/cpu/CPUProposal.hpp @@ -10,8 +10,8 @@ #define CPUProposal_hpp #include -#include "AutoStorage.h" -#include "Execution.hpp" +#include "core/AutoStorage.h" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUQuanConvolutionDepthwise.cpp b/source/backend/cpu/CPUQuanConvolutionDepthwise.cpp index 3fde9f4df..a92fa9653 100644 --- a/source/backend/cpu/CPUQuanConvolutionDepthwise.cpp +++ b/source/backend/cpu/CPUQuanConvolutionDepthwise.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "CPUQuanConvolutionDepthwise.hpp" -#include "CPUBackend.hpp" -#include "CPUFixedPoint.hpp" -#include "CPUQuantizationUtils.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "Macro.h" -#include 
"TensorUtils.hpp" +#include "backend/cpu/CPUQuanConvolutionDepthwise.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUFixedPoint.hpp" +#include "backend/cpu/CPUQuantizationUtils.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #define UNIT 4 extern "C" { @@ -133,7 +133,7 @@ inline int ComputePadding(int stride, int dilationRate, int inSize, int filterSi int padding = ((outSize - 1) * stride + effectiveFilterSize - inSize) / 2; return padding > 0 ? padding : 0; } - + ErrorCode CPUQuanConvolutionDepthwise::onResize(const std::vector& inputs, const std::vector& outputs) { auto input = inputs[0]; @@ -189,16 +189,16 @@ ErrorCode CPUQuanConvolutionDepthwise::onResize(const std::vector& inpu mDilateX = mLayerParam->common()->dilateX(); mDilateY = mLayerParam->common()->dilateY(); mZeroPoint = mLayerParam->inputQuantizedParam()->zeroPoint(); - + const int outputWidth = outputs[0]->width(); const int outputHeight = outputs[0]->height(); - + int filterHeight = (int)mConstParameter->kh; int filterWidth = (int)mConstParameter->kw; - + mPaddingHeight = ComputePadding(mStrideH, 1, inputHeight, filterHeight, outputHeight); mPaddingWidth = ComputePadding(mStrideW, 1, inputWidth, filterWidth, outputWidth); - + // Compute Mid Rect ml = 0; mt = 0; mr = outputWidth; mb = outputHeight; for (; ml * mStrideW - mPaddingWidth < 0; ml++) { @@ -213,11 +213,11 @@ ErrorCode CPUQuanConvolutionDepthwise::onResize(const std::vector& inpu for (; (mb - 1) * mStrideH - mPaddingHeight + filterHeight * mDilateY > inputHeight && mb > mt; mb--) { // do nothing } - + mDstYStep = outputWidth * UNIT; mSrcYStep = inputWidth * UNIT; mWeightZStep = filterHeight * filterWidth * UNIT; - + return NO_ERROR; } diff --git a/source/backend/cpu/CPUQuanConvolutionDepthwise.hpp b/source/backend/cpu/CPUQuanConvolutionDepthwise.hpp index b5651701b..a8eadd3aa 100644 --- 
a/source/backend/cpu/CPUQuanConvolutionDepthwise.hpp +++ b/source/backend/cpu/CPUQuanConvolutionDepthwise.hpp @@ -9,8 +9,8 @@ #ifndef CPUDepthwise_hpp #define CPUDepthwise_hpp -#include "AutoStorage.h" -#include "Execution.hpp" +#include "core/AutoStorage.h" +#include "core/Execution.hpp" #include "TFQuantizeOp_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedAdd.cpp b/source/backend/cpu/CPUQuantizedAdd.cpp index cdca1e14a..25369db65 100644 --- a/source/backend/cpu/CPUQuantizedAdd.cpp +++ b/source/backend/cpu/CPUQuantizedAdd.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "CPUQuantizedAdd.hpp" -#include "CPUBackend.hpp" -#include "CPUQuantizationUtils.hpp" -#include "Concurrency.h" -#include "Macro.h" +#include "backend/cpu/CPUQuantizedAdd.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUQuantizationUtils.hpp" +#include "core/Concurrency.h" +#include "core/Macro.h" namespace MNN { @@ -37,28 +37,28 @@ ErrorCode CPUQuantizedAdd::onResize(const std::vector &inputs, const s CalculateActivationRangeUint8( mQuantizedAddParam->activationType(), mQuantizedAddParam->outputQuantizedParam()->zeroPoint(), mQuantizedAddParam->outputQuantizedParam()->scale(), &mOutputActivationMin, &mOutputActivationMax); - + int kReverseShiftResult1 = -mInput1Shift; int kReverseShiftResult2 = -mInput2Shift; - + int leftShift1 = kReverseShiftResult1 > 0 ? kReverseShiftResult1 : 0; mRightShift1 = kReverseShiftResult1 > 0 ? 0 : -kReverseShiftResult1; - + int leftShift2 = kReverseShiftResult2 > 0 ? kReverseShiftResult2 : 0; mRightShift2 = kReverseShiftResult2 > 0 ? 0 : -kReverseShiftResult2; - + mLeftShiftOut = -mOutputShift > 0 ? -mOutputShift : 0; mRightShiftOut = -mOutputShift > 0 ? 
0 : mOutputShift; - + mLeftShiftResult1 = (1 << leftShift) * ((1 << leftShift1)); mLeftShiftResult2 = (1 << leftShift) * ((1 << leftShift2)); - + const int left1 = leftShift + leftShift1; const int left2 = leftShift + leftShift2; - + MNN_ASSERT(left1 == leftShift); MNN_ASSERT(left2 == leftShift); - + #ifdef MNN_USE_NEON input1OffsetVec = vdupq_n_s16(mInput1Offset); input2OffsetVec = vdupq_n_s16(mInput2Offset); @@ -74,7 +74,7 @@ ErrorCode CPUQuantizedAdd::onResize(const std::vector &inputs, const s rightShift1Vec = vdupq_n_s32(-mRightShift1); rightShift2Vec = vdupq_n_s32(-mRightShift2); #endif - + return NO_ERROR; } @@ -89,7 +89,7 @@ ErrorCode CPUQuantizedAdd::onExecute(const std::vector &inputs, int size = inputs[0]->batch()*inputs[0]->height()*inputs[0]->width()*ROUND_UP(outputChannels, 4); int threadNumber = std::max(((CPUBackend *)backend())->threadNumber(), 1); int countUnit = UP_DIV(size, threadNumber); - + MNN_CONCURRENCY_BEGIN(tId, threadNumber) { int realDstCount = (int)ALIMIN(size - tId * countUnit, countUnit); uint8_t *curInput1Data = input1Data + tId * countUnit; diff --git a/source/backend/cpu/CPUQuantizedAdd.hpp b/source/backend/cpu/CPUQuantizedAdd.hpp index 68c254b6e..b77d46f09 100644 --- a/source/backend/cpu/CPUQuantizedAdd.hpp +++ b/source/backend/cpu/CPUQuantizedAdd.hpp @@ -9,7 +9,7 @@ #ifndef CPUQuantizedAdd_hpp #define CPUQuantizedAdd_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "TFQuantizeOp_generated.h" // have to include after Marco.h diff --git a/source/backend/cpu/CPUQuantizedAvgPool.cpp b/source/backend/cpu/CPUQuantizedAvgPool.cpp index 5cc6795f6..5a189763c 100644 --- a/source/backend/cpu/CPUQuantizedAvgPool.cpp +++ b/source/backend/cpu/CPUQuantizedAvgPool.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "CPUQuantizedAvgPool.hpp" -#include "CPUBackend.hpp" -#include "CPUQuantizationUtils.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" 
-#include "OptimizedComputer.hpp" +#include "backend/cpu/CPUQuantizedAvgPool.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUQuantizationUtils.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "backend/cpu/compute/OptimizedComputer.hpp" namespace MNN { @@ -29,26 +29,26 @@ CPUQuantizedAvgPool::CPUQuantizedAvgPool(Backend *backend, const Op *CPUQuantize mOutputActivationMax = CPUQuantizedAvgPool->outputActivationMax(); } - + ErrorCode CPUQuantizedAvgPool::onResize(const std::vector& inputs, const std::vector& outputs) { - + auto input = inputs[0]; auto output = outputs[0]; - + MNN_ASSERT(input->buffer().dimensions == 4); - + int32_t inBatch = input->buffer().dim[0].extent; int32_t inRows = input->buffer().dim[2].extent; int32_t inCols = input->buffer().dim[3].extent; int32_t inChannel = input->buffer().dim[1].extent; - + const int32_t windowRows = mKernelHeight; const int32_t windowCols = mKernelWidth; const int32_t rowStride = mStrideHeight; const int32_t colStride = mStrideWidth; int32_t outHeight = output->buffer().dim[2].extent; int32_t outWidth = output->buffer().dim[3].extent; - + switch (mPadMode) { case PoolPadType_CAFFE: MNN_ASSERT(false); @@ -63,20 +63,20 @@ ErrorCode CPUQuantizedAvgPool::onResize(const std::vector& inputs, cons mPadHeight = heightNeeded > 0 ? 
heightNeeded / 2 : 0; break; } - + mInputDims = {inBatch, inRows, inCols, inChannel}; mOutputDims = {output->batch(), output->height(), output->width(), output->channel()}; - + return NO_ERROR; } - + ErrorCode CPUQuantizedAvgPool::onExecute(const std::vector &inputs, const std::vector &outputs) { - - + + uint8_t *inputPtr = inputs[0]->host(); uint8_t *outputPtr = outputs[0]->host(); - + Optimized::AveragePool(inputPtr, mInputDims, mStrideWidth, mStrideHeight, mPadWidth, mPadHeight, mKernelWidth, mKernelHeight, mOutputActivationMin, mOutputActivationMax, outputPtr, mOutputDims); diff --git a/source/backend/cpu/CPUQuantizedAvgPool.hpp b/source/backend/cpu/CPUQuantizedAvgPool.hpp index c7588aa98..36a19b3a1 100644 --- a/source/backend/cpu/CPUQuantizedAvgPool.hpp +++ b/source/backend/cpu/CPUQuantizedAvgPool.hpp @@ -9,7 +9,7 @@ #ifndef CPUQuantizedAvgPool_HPP #define CPUQuantizedAvgPool_HPP -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedConcat.cpp b/source/backend/cpu/CPUQuantizedConcat.cpp index 95832d800..7e10e40f9 100644 --- a/source/backend/cpu/CPUQuantizedConcat.cpp +++ b/source/backend/cpu/CPUQuantizedConcat.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "CPUQuantizedConcat.hpp" -#include "CPUBackend.hpp" -#include "CPUFixedPoint.hpp" -#include "CPUQuantizationUtils.hpp" -#include "Macro.h" -#include "OptimizedComputer.hpp" +#include "backend/cpu/CPUQuantizedConcat.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUFixedPoint.hpp" +#include "backend/cpu/CPUQuantizationUtils.hpp" +#include "core/Macro.h" +#include "backend/cpu/compute/OptimizedComputer.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedConcat.hpp b/source/backend/cpu/CPUQuantizedConcat.hpp index 38db5b06e..74979f82c 100644 --- a/source/backend/cpu/CPUQuantizedConcat.hpp +++ 
b/source/backend/cpu/CPUQuantizedConcat.hpp @@ -9,7 +9,7 @@ #ifndef CPUQuantizedConcat_hpp #define CPUQuantizedConcat_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedLogistic.cpp b/source/backend/cpu/CPUQuantizedLogistic.cpp index bc8fdbfb7..ac03c5559 100644 --- a/source/backend/cpu/CPUQuantizedLogistic.cpp +++ b/source/backend/cpu/CPUQuantizedLogistic.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "CPUQuantizedLogistic.hpp" -#include "CPUBackend.hpp" -#include "CPUFixedPoint.hpp" -#include "CPUQuantizationUtils.hpp" -#include "Macro.h" -#include "OptimizedComputer.hpp" +#include "backend/cpu/CPUQuantizedLogistic.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUFixedPoint.hpp" +#include "backend/cpu/CPUQuantizationUtils.hpp" +#include "core/Macro.h" +#include "backend/cpu/compute/OptimizedComputer.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedLogistic.hpp b/source/backend/cpu/CPUQuantizedLogistic.hpp index 0cf3d6946..3b5fd52a6 100644 --- a/source/backend/cpu/CPUQuantizedLogistic.hpp +++ b/source/backend/cpu/CPUQuantizedLogistic.hpp @@ -9,7 +9,7 @@ #ifndef CPUQuantizedLogistic_hpp #define CPUQuantizedLogistic_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "TFQuantizeOp_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedMaxPool.cpp b/source/backend/cpu/CPUQuantizedMaxPool.cpp index a8b4e04eb..d1b9679f0 100644 --- a/source/backend/cpu/CPUQuantizedMaxPool.cpp +++ b/source/backend/cpu/CPUQuantizedMaxPool.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "CPUQuantizedMaxPool.hpp" -#include "CPUBackend.hpp" -#include "CPUQuantizationUtils.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUQuantizedMaxPool.hpp" +#include "backend/cpu/CPUBackend.hpp" 
+#include "backend/cpu/CPUQuantizationUtils.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedMaxPool.hpp b/source/backend/cpu/CPUQuantizedMaxPool.hpp index aa3db5788..b96c74c32 100644 --- a/source/backend/cpu/CPUQuantizedMaxPool.hpp +++ b/source/backend/cpu/CPUQuantizedMaxPool.hpp @@ -9,7 +9,7 @@ #ifndef CPUQUANTIZEDMAXPOOL_HPP #define CPUQUANTIZEDMAXPOOL_HPP -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedReshape.cpp b/source/backend/cpu/CPUQuantizedReshape.cpp index a23f4c922..90d2ae179 100644 --- a/source/backend/cpu/CPUQuantizedReshape.cpp +++ b/source/backend/cpu/CPUQuantizedReshape.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "CPUQuantizedReshape.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUQuantizedReshape.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUQuantizedReshape.hpp b/source/backend/cpu/CPUQuantizedReshape.hpp index 9bfcdebba..208006bdd 100644 --- a/source/backend/cpu/CPUQuantizedReshape.hpp +++ b/source/backend/cpu/CPUQuantizedReshape.hpp @@ -9,7 +9,7 @@ #ifndef CPUQuantizedReshape_hpp #define CPUQuantizedReshape_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUQuantizedReshape : public Execution { diff --git a/source/backend/cpu/CPUQuantizedSoftmax.cpp b/source/backend/cpu/CPUQuantizedSoftmax.cpp index 1b9657b02..a2d41e641 100644 --- a/source/backend/cpu/CPUQuantizedSoftmax.cpp +++ b/source/backend/cpu/CPUQuantizedSoftmax.cpp @@ -9,11 +9,11 @@ #if defined(_MSC_VER) #include #endif -#include "CPUQuantizedSoftmax.hpp" -#include "CPUBackend.hpp" -#include "CPUFixedPoint.hpp" -#include "CPUQuantizationUtils.hpp" -#include "Macro.h" 
+#include "backend/cpu/CPUQuantizedSoftmax.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUFixedPoint.hpp" +#include "backend/cpu/CPUQuantizationUtils.hpp" +#include "core/Macro.h" namespace MNN { @@ -33,12 +33,12 @@ ErrorCode CPUQuantizedSoftmax::onResize(const std::vector& inputs, c float scale = mInputScale; PreprocessSoftmaxScaling(beta, scale, kScaledDiffIntegerBits, &mInputMultiplier, &mInputLeftShift); mDiffMin = -1.0 * CalculateInputRadius(kScaledDiffIntegerBits, mInputLeftShift); - + Tensor* input = inputs[0]; Tensor* output = outputs[0]; - + MNN_ASSERT(2 == input->buffer().dimensions || 4 == input->buffer().dimensions); - + mInputDims.clear(); mOutputDims.clear(); if (4 == input->buffer().dimensions) { @@ -53,13 +53,13 @@ ErrorCode CPUQuantizedSoftmax::onResize(const std::vector& inputs, c mInputDims.push_back(1); mInputDims.push_back(1); mInputDims.push_back(input->buffer().dim[1].extent); - + mOutputDims.push_back(input->buffer().dim[0].extent); mOutputDims.push_back(1); mOutputDims.push_back(1); mOutputDims.push_back(input->buffer().dim[1].extent); } - + return NO_ERROR; } diff --git a/source/backend/cpu/CPUQuantizedSoftmax.hpp b/source/backend/cpu/CPUQuantizedSoftmax.hpp index ab7bc5db7..82b63f9c4 100644 --- a/source/backend/cpu/CPUQuantizedSoftmax.hpp +++ b/source/backend/cpu/CPUQuantizedSoftmax.hpp @@ -9,7 +9,7 @@ #ifndef CPUQuantizedSoftmax_hpp #define CPUQuantizedSoftmax_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPURNNSequenceGRU.cpp b/source/backend/cpu/CPURNNSequenceGRU.cpp index 97e6afa75..339eb4ed6 100644 --- a/source/backend/cpu/CPURNNSequenceGRU.cpp +++ b/source/backend/cpu/CPURNNSequenceGRU.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPURNNSequenceGRU.hpp" +#include "backend/cpu/CPURNNSequenceGRU.hpp" #include -#include "CPUBackend.hpp" -#include "ConvOpt.h" -#include "Matrix.hpp" +#include 
"backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/ConvOpt.h" +#include "math/Matrix.hpp" namespace MNN { diff --git a/source/backend/cpu/CPURNNSequenceGRU.hpp b/source/backend/cpu/CPURNNSequenceGRU.hpp index d9d20826a..425ac4995 100644 --- a/source/backend/cpu/CPURNNSequenceGRU.hpp +++ b/source/backend/cpu/CPURNNSequenceGRU.hpp @@ -9,7 +9,7 @@ #ifndef CPURNNSequenceGRU_hpp #define CPURNNSequenceGRU_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUROIPooling.cpp b/source/backend/cpu/CPUROIPooling.cpp index 753040503..28db6f1c5 100644 --- a/source/backend/cpu/CPUROIPooling.cpp +++ b/source/backend/cpu/CPUROIPooling.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUROIPooling.hpp" +#include "backend/cpu/CPUROIPooling.hpp" #include #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #ifdef MNN_USE_NEON #include diff --git a/source/backend/cpu/CPUROIPooling.hpp b/source/backend/cpu/CPUROIPooling.hpp index c4e22b2b0..9696cbcd2 100644 --- a/source/backend/cpu/CPUROIPooling.hpp +++ b/source/backend/cpu/CPUROIPooling.hpp @@ -9,7 +9,7 @@ #ifndef CPUROIPooling_hpp #define CPUROIPooling_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPURange.cpp b/source/backend/cpu/CPURange.cpp index 049cff749..cf888552c 100644 --- a/source/backend/cpu/CPURange.cpp +++ b/source/backend/cpu/CPURange.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPURange.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPURange.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPURange.hpp 
b/source/backend/cpu/CPURange.hpp index 27a5106f7..66063809c 100644 --- a/source/backend/cpu/CPURange.hpp +++ b/source/backend/cpu/CPURange.hpp @@ -9,7 +9,7 @@ #ifndef CPURange_hpp #define CPURange_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { template diff --git a/source/backend/cpu/CPURank.cpp b/source/backend/cpu/CPURank.cpp index 758bbfd46..9f28bcd5c 100644 --- a/source/backend/cpu/CPURank.cpp +++ b/source/backend/cpu/CPURank.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPURank.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPURank.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPURank.hpp b/source/backend/cpu/CPURank.hpp index 51c8eb56c..6289ac74d 100644 --- a/source/backend/cpu/CPURank.hpp +++ b/source/backend/cpu/CPURank.hpp @@ -9,7 +9,7 @@ #ifndef CPURank_hpp #define CPURank_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPURank : public Execution { diff --git a/source/backend/cpu/CPUReduceJoin.cpp b/source/backend/cpu/CPUReduceJoin.cpp index 79f194103..a9777b287 100644 --- a/source/backend/cpu/CPUReduceJoin.cpp +++ b/source/backend/cpu/CPUReduceJoin.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUReduceJoin.hpp" -#include "AutoStorage.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUReduceJoin.hpp" +#include "core/AutoStorage.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUReduceJoin.hpp b/source/backend/cpu/CPUReduceJoin.hpp index e8a352d71..f220f5b0b 100644 --- a/source/backend/cpu/CPUReduceJoin.hpp +++ b/source/backend/cpu/CPUReduceJoin.hpp @@ -9,7 +9,7 @@ #ifndef CPUReduceJoin_hpp #define CPUReduceJoin_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUReduceJoinCreator : public CPUBackend::Creator { diff --git 
a/source/backend/cpu/CPUReduction.cpp b/source/backend/cpu/CPUReduction.cpp index 1e2f37b04..6e5a51741 100644 --- a/source/backend/cpu/CPUReduction.cpp +++ b/source/backend/cpu/CPUReduction.cpp @@ -6,9 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUReduction.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUReduction.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include #define UNIT 4 #define UNIT_DUP(value) \ @@ -377,7 +378,7 @@ class AnyReduce : public Reduction { virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override { MNN_ASSERT(false); } - + virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override { for (int oi = 0; oi < outside; ++oi) { auto srcOutSide = src + oi * axisSize * inside; @@ -408,7 +409,7 @@ class AllReduce : public Reduction { virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override { MNN_ASSERT(false); } - + virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override { for (int oi = 0; oi < outside; ++oi) { auto srcOutSide = src + oi * axisSize * inside; diff --git a/source/backend/cpu/CPUReduction.hpp b/source/backend/cpu/CPUReduction.hpp index fe9dacb4c..c45b937f3 100644 --- a/source/backend/cpu/CPUReduction.hpp +++ b/source/backend/cpu/CPUReduction.hpp @@ -9,7 +9,7 @@ #ifndef CPUReduction_hpp #define CPUReduction_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUReductionCreator : public CPUBackend::Creator { diff --git a/source/backend/cpu/CPURelu.cpp b/source/backend/cpu/CPURelu.cpp index 1cc2def36..82932496d 100644 --- a/source/backend/cpu/CPURelu.cpp +++ b/source/backend/cpu/CPURelu.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPURelu.hpp" -#include 
"CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPURelu.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { ErrorCode CPURelu::onExecute(const std::vector& inputs, const std::vector& outputs) { diff --git a/source/backend/cpu/CPURelu.hpp b/source/backend/cpu/CPURelu.hpp index a6f22ac78..1b1f51ec2 100644 --- a/source/backend/cpu/CPURelu.hpp +++ b/source/backend/cpu/CPURelu.hpp @@ -9,8 +9,8 @@ #ifndef CPURelu_hpp #define CPURelu_hpp -#include "AutoStorage.h" -#include "Execution.hpp" +#include "core/AutoStorage.h" +#include "core/Execution.hpp" namespace MNN { class CPURelu : public Execution { diff --git a/source/backend/cpu/CPUReluGrad.cpp b/source/backend/cpu/CPUReluGrad.cpp index 3dd6d776e..d4d88bafb 100644 --- a/source/backend/cpu/CPUReluGrad.cpp +++ b/source/backend/cpu/CPUReluGrad.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUReluGrad.hpp" +#include "backend/cpu/CPUReluGrad.hpp" namespace MNN { ErrorCode CPUReluGrad::onExecute(const std::vector& inputs, const std::vector& outputs) { MNN_ASSERT(0 == mSlope); @@ -39,11 +39,11 @@ class CPURelu6Grad : public Execution { auto reluDiff = inputs[1]; auto outputDiff = outputs[0]; auto size = outputDiff->elementSize(); - + auto reluOriginPtr = reluOrigin->host(); auto reluDiffPtr = reluDiff->host(); auto outputDiffPtr = outputDiff->host(); - + for (int n = 0; n < size; ++n) { if (reluOriginPtr[n] > 0.0f && reluOriginPtr[n] <= 6.0f) { outputDiffPtr[n] = reluDiffPtr[n]; diff --git a/source/backend/cpu/CPUReluGrad.hpp b/source/backend/cpu/CPUReluGrad.hpp index e78654a46..003ad6d6a 100644 --- a/source/backend/cpu/CPUReluGrad.hpp +++ b/source/backend/cpu/CPUReluGrad.hpp @@ -9,7 +9,7 @@ #ifndef CPUReluGrad_hpp #define CPUReluGrad_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUReluGrad : public Execution { 
public: diff --git a/source/backend/cpu/CPUReshape.cpp b/source/backend/cpu/CPUReshape.cpp index 1cdc2ffbe..5e8eb0fa6 100644 --- a/source/backend/cpu/CPUReshape.cpp +++ b/source/backend/cpu/CPUReshape.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUReshape.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUReshape.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { @@ -24,11 +24,11 @@ ErrorCode CPUReshape::onResize(const std::vector &inputs, const std::v auto input = inputs[0]; auto output = outputs[0]; - + if (TensorUtils::getDescribe(input)->dimensionFormat != MNN_DATA_FORMAT_NC4HW4) { return NO_ERROR; } - + int totalSize = 1; for (int i = 0; i < input->buffer().dimensions; ++i) { totalSize *= input->buffer().dim[i].extent; @@ -40,7 +40,7 @@ ErrorCode CPUReshape::onResize(const std::vector &inputs, const std::v mStorage.buffer().type = input->getType(); backend()->onAcquireBuffer(&mStorage, Backend::DYNAMIC); backend()->onReleaseBuffer(&mStorage, Backend::DYNAMIC); - + auto convertTensorMeta = [&](const Tensor* tensor, Tensor* wrapTensor) { wrapTensor->buffer().host = mStorage.buffer().host; wrapTensor->buffer().dimensions = tensor->dimensions(); @@ -71,7 +71,7 @@ ErrorCode CPUReshape::onResize(const std::vector &inputs, const std::v } TensorUtils::setLinearLayout(wrapTensor); }; - + convertTensorMeta(input, &mWrapTensorForInput); convertTensorMeta(output, &mWrapTensorForOutput); diff --git a/source/backend/cpu/CPUReshape.hpp b/source/backend/cpu/CPUReshape.hpp index 3a4a6c0e2..47d486248 100644 --- a/source/backend/cpu/CPUReshape.hpp +++ b/source/backend/cpu/CPUReshape.hpp @@ -9,7 +9,7 @@ #ifndef CPUReshape_hpp #define CPUReshape_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "Tensor_generated.h" 
namespace MNN { diff --git a/source/backend/cpu/CPUResize.cpp b/source/backend/cpu/CPUResize.cpp index 070674e54..5125049ac 100644 --- a/source/backend/cpu/CPUResize.cpp +++ b/source/backend/cpu/CPUResize.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUResize.hpp" +#include "backend/cpu/CPUResize.hpp" #include -#include "AutoStorage.h" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Macro.h" +#include "core/AutoStorage.h" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include @@ -236,7 +236,7 @@ void CPUResizeCommon::CPUResizeBilinearC4(halide_buffer_t& input, halide_buffer_ } } -void CPUResizeCommon::CPUReiseNearstneighborC4(halide_buffer_t& input, halide_buffer_t& output, float wScale, +void CPUResizeCommon::CPUResizeNearestneighborC4(halide_buffer_t& input, halide_buffer_t& output, float wScale, float hScale) { const int batches = input.dim[0].extent; const int inputBatchSize = input.dim[0].stride; diff --git a/source/backend/cpu/CPUResize.hpp b/source/backend/cpu/CPUResize.hpp index a6ed53061..ed86a3d11 100644 --- a/source/backend/cpu/CPUResize.hpp +++ b/source/backend/cpu/CPUResize.hpp @@ -9,8 +9,8 @@ #ifndef CPUResize_hpp #define CPUResize_hpp -#include "AutoStorage.h" -#include "Execution.hpp" +#include "core/AutoStorage.h" +#include "core/Execution.hpp" namespace MNN { @@ -26,7 +26,7 @@ class CPUResizeCommon : public Execution { void CPUResizeBilinearC4(halide_buffer_t &input, halide_buffer_t &output, const int *widthPosition, const float *widthFactor, const int *heightPosition, const float *heightFactor, float *lineBuffer, int threadNumber); - void CPUReiseNearstneighborC4(halide_buffer_t &input, halide_buffer_t &output, float wScale, float hScale); + void CPUResizeNearestneighborC4(halide_buffer_t &input, halide_buffer_t &output, float wScale, float hScale); }; class CPUResize : public CPUResizeCommon { diff --git 
a/source/backend/cpu/CPUReverseSequence.cpp b/source/backend/cpu/CPUReverseSequence.cpp index 520b5e8cd..381c26f9e 100644 --- a/source/backend/cpu/CPUReverseSequence.cpp +++ b/source/backend/cpu/CPUReverseSequence.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUReverseSequence.hpp" +#include "backend/cpu/CPUReverseSequence.hpp" namespace MNN { ErrorCode CPUReverseSequence::onResize(const std::vector &inputs, const std::vector &outputs) { if (inputs[1]->length(0) != inputs[0]->length(mBatchDim)) { diff --git a/source/backend/cpu/CPUReverseSequence.hpp b/source/backend/cpu/CPUReverseSequence.hpp index 056a9480b..92303882e 100644 --- a/source/backend/cpu/CPUReverseSequence.hpp +++ b/source/backend/cpu/CPUReverseSequence.hpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPURuntime.cpp b/source/backend/cpu/CPURuntime.cpp index 33e04ebb7..d8522dc5f 100644 --- a/source/backend/cpu/CPURuntime.cpp +++ b/source/backend/cpu/CPURuntime.cpp @@ -28,8 +28,8 @@ #include #include #include -#include "CPURuntime.hpp" -#include "MNNDefine.h" +#include "backend/cpu/CPURuntime.hpp" +#include #ifdef __ANDROID__ static int getNumberOfCPU() { diff --git a/source/backend/cpu/CPUScale.cpp b/source/backend/cpu/CPUScale.cpp index 8bcb79585..5b0b737d9 100644 --- a/source/backend/cpu/CPUScale.cpp +++ b/source/backend/cpu/CPUScale.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUScale.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUScale.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { CPUScale::CPUScale(const Op* op, Backend* bn) : MNN::Execution(bn) { diff --git 
a/source/backend/cpu/CPUScale.hpp b/source/backend/cpu/CPUScale.hpp index 0c7e3f958..3c0eb908b 100644 --- a/source/backend/cpu/CPUScale.hpp +++ b/source/backend/cpu/CPUScale.hpp @@ -9,8 +9,8 @@ #ifndef CPUScale_hpp #define CPUScale_hpp -#include "AutoStorage.h" -#include "Execution.hpp" +#include "core/AutoStorage.h" +#include "core/Execution.hpp" namespace MNN { class CPUScale : public Execution { diff --git a/source/backend/cpu/CPUScatterNd.cpp b/source/backend/cpu/CPUScatterNd.cpp new file mode 100644 index 000000000..608fc0324 --- /dev/null +++ b/source/backend/cpu/CPUScatterNd.cpp @@ -0,0 +1,81 @@ +// +// CPUScatterNd.cpp +// MNN +// +// Created by MNN on 2019/11/28. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/cpu/CPUScatterNd.hpp" +#include "backend/cpu/CPUBackend.hpp" + +namespace MNN { + +template +void ScatterNdImpl(const Tensor* indices, const Tensor* updates, const Tensor* shape, Tensor* output) { + const auto indicesPtr = indices->host(); + const auto updatesPtr = updates->host(); + auto outputPtr = output->host(); + const int indicesDimension = indices->dimensions(); + const int indicesLastDim = indices->length(indicesDimension - 1); + const int indexes = indices->elementSize() / indicesLastDim; + int accNumber = 1; + for (int i = indicesDimension - 1; i < updates->dimensions(); ++i) { + accNumber *= updates->length(i); + } + + const int outputElementSize = output->elementSize(); + int remainSize = outputElementSize; + std::vector dimsToCount(indicesLastDim, 0); + for (int i = 0; i < indicesLastDim; ++i) { + dimsToCount[i] = remainSize / output->length(i); + remainSize = dimsToCount[i]; + } + + for (int i = 0; i < indexes; ++i) { + int pos = 0; + for (int j = 0; j < indicesLastDim; ++j) { + auto curIndex = indicesPtr[i * indicesLastDim + j]; + MNN_ASSERT(curIndex >= 0 && curIndex < output->length(j)); + pos += curIndex * dimsToCount[j]; + } + for (int k = 0; k < accNumber; ++k) { + outputPtr[pos + k] += updatesPtr[i * 
accNumber + k]; + } + } +} + +ErrorCode CPUScatterNd::onExecute(const std::vector& inputs, const std::vector& outputs) { + auto indices = inputs[0]; + auto updates = inputs[1]; + auto shape = inputs[2]; + auto output = outputs[0]; + const int outputSize = output->size(); + + auto outputRawPtr = output->host(); + memset(outputRawPtr, 0, outputSize); + + auto updatesDataType = updates->getType(); + if (updatesDataType == halide_type_of()) { + ScatterNdImpl(indices, updates, shape, output); + } else if (updatesDataType == halide_type_of()) { + ScatterNdImpl(indices, updates, shape, output); + } else { + MNN_ERROR("TODO, ScatterNd support data type: %d\n", updatesDataType.code); + return NOT_SUPPORT; + } + + return NO_ERROR; +} + +class CPUScatterNdCreator : public CPUBackend::Creator { +public: + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + const MNN::Op* op, Backend* backend) const override { + return new CPUScatterNd(backend); + } +}; + +REGISTER_CPU_OP_CREATOR(CPUScatterNdCreator, OpType_ScatterNd); + +} // namespace MNN diff --git a/source/backend/cpu/CPUScatterNd.hpp b/source/backend/cpu/CPUScatterNd.hpp new file mode 100644 index 000000000..2904e0ac7 --- /dev/null +++ b/source/backend/cpu/CPUScatterNd.hpp @@ -0,0 +1,25 @@ +// +// CPUScatterNd.hpp +// MNN +// +// Created by MNN on 2019/11/28. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef CPUScatterNd_hpp +#define CPUScatterNd_hpp + +#include "core/Execution.hpp" + +namespace MNN { +class CPUScatterNd : public Execution { +public: + CPUScatterNd(Backend *bn):Execution(bn){ + } + virtual ~CPUScatterNd() = default; + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; +}; + +} // namespace MNN + +#endif /* CPUScatterNd_hpp */ diff --git a/source/backend/cpu/CPUSelect.cpp b/source/backend/cpu/CPUSelect.cpp index 6445173dd..164f58416 100644 --- a/source/backend/cpu/CPUSelect.cpp +++ b/source/backend/cpu/CPUSelect.cpp @@ -6,7 +6,7 @@ // Copyright © 2018 Alibaba. All rights reserved. // -#include "CPUSelect.hpp" +#include "backend/cpu/CPUSelect.hpp" namespace MNN { ErrorCode CPUSelect::onExecute(const std::vector &inputs, const std::vector &outputs) { auto select = inputs[0]; diff --git a/source/backend/cpu/CPUSelect.hpp b/source/backend/cpu/CPUSelect.hpp index dbaf2344b..426bd909a 100644 --- a/source/backend/cpu/CPUSelect.hpp +++ b/source/backend/cpu/CPUSelect.hpp @@ -9,7 +9,7 @@ #ifndef CPUSelect_hpp #define CPUSelect_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUSelect : public Execution { public: diff --git a/source/backend/cpu/CPUSelu.cpp b/source/backend/cpu/CPUSelu.cpp index 8e8596ca0..b03b85848 100644 --- a/source/backend/cpu/CPUSelu.cpp +++ b/source/backend/cpu/CPUSelu.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSelu.hpp" +#include "backend/cpu/CPUSelu.hpp" #include -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUSelu.hpp b/source/backend/cpu/CPUSelu.hpp index 1cd7e00f6..5b746c564 100644 --- a/source/backend/cpu/CPUSelu.hpp +++ b/source/backend/cpu/CPUSelu.hpp @@ -9,7 +9,7 @@ #ifndef CPUSelu_hpp #define CPUSelu_hpp -#include 
"Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUSelu : public Execution { diff --git a/source/backend/cpu/CPUSetDiff1D.cpp b/source/backend/cpu/CPUSetDiff1D.cpp index b57e2a2fc..7cd7e6257 100644 --- a/source/backend/cpu/CPUSetDiff1D.cpp +++ b/source/backend/cpu/CPUSetDiff1D.cpp @@ -6,7 +6,7 @@ // Copyright © 2018 Alibaba. All rights reserved. // -#include "CPUSetDiff1D.hpp" +#include "backend/cpu/CPUSetDiff1D.hpp" namespace MNN { ErrorCode CPUSetDiff1D::onExecute(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; diff --git a/source/backend/cpu/CPUSetDiff1D.hpp b/source/backend/cpu/CPUSetDiff1D.hpp index 9c0c19940..851fc956d 100644 --- a/source/backend/cpu/CPUSetDiff1D.hpp +++ b/source/backend/cpu/CPUSetDiff1D.hpp @@ -9,7 +9,7 @@ #ifndef CPUSetDiff1D_hpp #define CPUSetDiff1D_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUSetDiff1D : public Execution { public: diff --git a/source/backend/cpu/CPUShape.cpp b/source/backend/cpu/CPUShape.cpp index b191cdd08..c8a00660d 100644 --- a/source/backend/cpu/CPUShape.cpp +++ b/source/backend/cpu/CPUShape.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUShape.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUShape.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { ErrorCode CPUShape::onExecute(const std::vector& inputs, const std::vector& outputs) { diff --git a/source/backend/cpu/CPUShape.hpp b/source/backend/cpu/CPUShape.hpp index 2392b61d2..8ef8be944 100644 --- a/source/backend/cpu/CPUShape.hpp +++ b/source/backend/cpu/CPUShape.hpp @@ -9,7 +9,7 @@ #ifndef CPUShape_hpp #define CPUShape_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUShape : public Execution { diff --git a/source/backend/cpu/CPUSigmoid.cpp 
b/source/backend/cpu/CPUSigmoid.cpp index b9ed2b86c..7a85f176f 100644 --- a/source/backend/cpu/CPUSigmoid.cpp +++ b/source/backend/cpu/CPUSigmoid.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSigmoid.hpp" +#include "backend/cpu/CPUSigmoid.hpp" #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { ErrorCode CPUSigmoid::onExecute(const std::vector& inputs, const std::vector& outputs) { diff --git a/source/backend/cpu/CPUSigmoid.hpp b/source/backend/cpu/CPUSigmoid.hpp index c3a77da95..f5924ceaa 100644 --- a/source/backend/cpu/CPUSigmoid.hpp +++ b/source/backend/cpu/CPUSigmoid.hpp @@ -9,7 +9,7 @@ #ifndef CPUSigmoid_hpp #define CPUSigmoid_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUSigmoid : public Execution { diff --git a/source/backend/cpu/CPUSize.cpp b/source/backend/cpu/CPUSize.cpp index 655a90e4e..c88addec2 100644 --- a/source/backend/cpu/CPUSize.cpp +++ b/source/backend/cpu/CPUSize.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSize.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPUSize.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUSize.hpp b/source/backend/cpu/CPUSize.hpp index d3db3d542..4a42b5cd5 100644 --- a/source/backend/cpu/CPUSize.hpp +++ b/source/backend/cpu/CPUSize.hpp @@ -9,7 +9,7 @@ #ifndef CPUSize_hpp #define CPUSize_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { template diff --git a/source/backend/cpu/CPUSlice.cpp b/source/backend/cpu/CPUSlice.cpp index 8742e5246..f63b1d030 100644 --- a/source/backend/cpu/CPUSlice.cpp +++ b/source/backend/cpu/CPUSlice.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSlice.hpp" -#include "CPUBackend.hpp" 
-#include "CommonOptFunction.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUSlice.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" using namespace std; diff --git a/source/backend/cpu/CPUSlice.hpp b/source/backend/cpu/CPUSlice.hpp index 1a8c3c8aa..5eb34017b 100644 --- a/source/backend/cpu/CPUSlice.hpp +++ b/source/backend/cpu/CPUSlice.hpp @@ -9,7 +9,7 @@ #ifndef CPUSlice_hpp #define CPUSlice_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUSlice : public Execution { diff --git a/source/backend/cpu/CPUSliceTf.cpp b/source/backend/cpu/CPUSliceTf.cpp index ddcdb7ba9..4377d58d5 100644 --- a/source/backend/cpu/CPUSliceTf.cpp +++ b/source/backend/cpu/CPUSliceTf.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSliceTf.hpp" +#include "backend/cpu/CPUSliceTf.hpp" #include -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUSliceTf.hpp b/source/backend/cpu/CPUSliceTf.hpp index cfb76826a..99ce95b70 100644 --- a/source/backend/cpu/CPUSliceTf.hpp +++ b/source/backend/cpu/CPUSliceTf.hpp @@ -9,7 +9,7 @@ #ifndef CPUSliceTf_hpp #define CPUSliceTf_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUSliceTf : public Execution { diff --git a/source/backend/cpu/CPUSoftmax.cpp b/source/backend/cpu/CPUSoftmax.cpp index b5de60e16..6af6a6e90 100644 --- a/source/backend/cpu/CPUSoftmax.cpp +++ b/source/backend/cpu/CPUSoftmax.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSoftmax.hpp" +#include "backend/cpu/CPUSoftmax.hpp" #include -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUBackend.hpp" 
+#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #ifdef MNN_USE_NEON #include #endif diff --git a/source/backend/cpu/CPUSoftmax.hpp b/source/backend/cpu/CPUSoftmax.hpp index 6db8eca5c..e5fd5f71c 100644 --- a/source/backend/cpu/CPUSoftmax.hpp +++ b/source/backend/cpu/CPUSoftmax.hpp @@ -9,7 +9,7 @@ #ifndef CPUSoftmax_hpp #define CPUSoftmax_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUSoftmax : public Execution { diff --git a/source/backend/cpu/CPUSoftmaxGrad.cpp b/source/backend/cpu/CPUSoftmaxGrad.cpp index de677ad6d..cba4a660e 100644 --- a/source/backend/cpu/CPUSoftmaxGrad.cpp +++ b/source/backend/cpu/CPUSoftmaxGrad.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSoftmaxGrad.hpp" -#include "CommonOptFunction.h" -#include "ConvOpt.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "Vec4.hpp" +#include "backend/cpu/CPUSoftmaxGrad.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "math/Vec4.hpp" using namespace MNN::Math; namespace MNN { ErrorCode CPUSoftmaxGrad::onExecute(const std::vector& inputs, const std::vector& outputs) { diff --git a/source/backend/cpu/CPUSoftmaxGrad.hpp b/source/backend/cpu/CPUSoftmaxGrad.hpp index 2fa26828e..e3f9ea4b7 100644 --- a/source/backend/cpu/CPUSoftmaxGrad.hpp +++ b/source/backend/cpu/CPUSoftmaxGrad.hpp @@ -9,7 +9,7 @@ #ifndef CPUSoftmaxGrad_hpp #define CPUSoftmaxGrad_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUSoftmaxGrad : public Execution { diff --git a/source/backend/cpu/CPUSpaceToBatchND.cpp b/source/backend/cpu/CPUSpaceToBatchND.cpp index ec90167ca..5822d059d 100644 --- a/source/backend/cpu/CPUSpaceToBatchND.cpp +++ b/source/backend/cpu/CPUSpaceToBatchND.cpp @@ -6,10 +6,10 @@ // 
Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSpaceToBatchND.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" +#include "backend/cpu/CPUSpaceToBatchND.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUSpaceToBatchND.hpp b/source/backend/cpu/CPUSpaceToBatchND.hpp index 1560111c7..001a94d37 100644 --- a/source/backend/cpu/CPUSpaceToBatchND.hpp +++ b/source/backend/cpu/CPUSpaceToBatchND.hpp @@ -9,7 +9,7 @@ #ifndef CPUSpaceToBatchND_hpp #define CPUSpaceToBatchND_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUSpaceToDepth.cpp b/source/backend/cpu/CPUSpaceToDepth.cpp index 5a562a08a..2277f494e 100644 --- a/source/backend/cpu/CPUSpaceToDepth.cpp +++ b/source/backend/cpu/CPUSpaceToDepth.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSpaceToDepth.hpp" -#include "Backend.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUSpaceToDepth.hpp" +#include "core/Backend.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { @@ -56,7 +56,7 @@ ErrorCode CPUSpaceToDepth::onExecute(const std::vector &inputs, cons const int offsetC = (offsetH * blockSize + offsetW) * inputChannels; for (int c = 0; c < inputChannels; c++) { const int oc = c + offsetC; - const int offsetO = b * outputHeight * outputWidth * outputChannels + const int offsetO = b * outputHeight * outputWidth * outputChannels + oh * outputWidth * outputChannels + ow * outputChannels + oc; const int offsetI = b * inputHeight * inputWidth * inputChannels + h * inputWidth * inputChannels + w * inputChannels + c; @@ -65,13 +65,13 @@ ErrorCode CPUSpaceToDepth::onExecute(const std::vector &inputs, cons } } } - + return NO_ERROR; } class SpaceToDepthCreator : public CPUBackend::Creator { public: - 
virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, const MNN::Op* op, Backend* backend) const override { auto dataType = inputs[0]->getType(); if (dataType.bits == 32) { @@ -89,4 +89,4 @@ class SpaceToDepthCreator : public CPUBackend::Creator { REGISTER_CPU_OP_CREATOR(SpaceToDepthCreator, OpType_SpaceToDepth); -} // namespace MNN \ No newline at end of file +} // namespace MNN diff --git a/source/backend/cpu/CPUSpaceToDepth.hpp b/source/backend/cpu/CPUSpaceToDepth.hpp index 5f8b39666..580d081a1 100644 --- a/source/backend/cpu/CPUSpaceToDepth.hpp +++ b/source/backend/cpu/CPUSpaceToDepth.hpp @@ -9,7 +9,7 @@ #ifndef CPUSpaceToDepth_hpp #define CPUSpaceToDepth_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUSpatialProduct.cpp b/source/backend/cpu/CPUSpatialProduct.cpp index cadb17a97..b5e476fe4 100644 --- a/source/backend/cpu/CPUSpatialProduct.cpp +++ b/source/backend/cpu/CPUSpatialProduct.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSpatialProduct.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUSpatialProduct.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include diff --git a/source/backend/cpu/CPUSpatialProduct.hpp b/source/backend/cpu/CPUSpatialProduct.hpp index 374970dea..c2ac95cd0 100644 --- a/source/backend/cpu/CPUSpatialProduct.hpp +++ b/source/backend/cpu/CPUSpatialProduct.hpp @@ -9,7 +9,7 @@ #ifndef CPUSpatialProduct_hpp #define CPUSpatialProduct_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUSpatialProduct : public Execution { diff --git a/source/backend/cpu/CPUSqueeze.cpp b/source/backend/cpu/CPUSqueeze.cpp index 7ee759922..bd7711dda 100644 --- a/source/backend/cpu/CPUSqueeze.cpp +++ b/source/backend/cpu/CPUSqueeze.cpp @@ -6,8 
+6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUSqueeze.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPUSqueeze.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUSqueeze.hpp b/source/backend/cpu/CPUSqueeze.hpp index 7b1bee633..857efe19f 100644 --- a/source/backend/cpu/CPUSqueeze.hpp +++ b/source/backend/cpu/CPUSqueeze.hpp @@ -9,7 +9,7 @@ #ifndef CPUSqueeze_hpp #define CPUSqueeze_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUStridedSlice.cpp b/source/backend/cpu/CPUStridedSlice.cpp index ce24b2c10..9e0127b4e 100644 --- a/source/backend/cpu/CPUStridedSlice.cpp +++ b/source/backend/cpu/CPUStridedSlice.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUStridedSlice.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPUStridedSlice.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUStridedSlice.hpp b/source/backend/cpu/CPUStridedSlice.hpp index 4498d7362..170e87915 100644 --- a/source/backend/cpu/CPUStridedSlice.hpp +++ b/source/backend/cpu/CPUStridedSlice.hpp @@ -9,9 +9,8 @@ #ifndef CPUStridedSlice_hpp #define CPUStridedSlice_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" - namespace MNN { class CPUStridedSlice : public Execution { public: diff --git a/source/backend/cpu/CPUTFQuantizedConv2D.cpp b/source/backend/cpu/CPUTFQuantizedConv2D.cpp index 5c91445a9..bb30a9cb3 100644 --- a/source/backend/cpu/CPUTFQuantizedConv2D.cpp +++ b/source/backend/cpu/CPUTFQuantizedConv2D.cpp @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "CPUTFQuantizedConv2D.hpp" +#include "backend/cpu/CPUTFQuantizedConv2D.hpp" #include -#include "CPUBackend.hpp" -#include "CPUFixedPoint.hpp" -#include "CPUQuantizationUtils.hpp" -#include 
"CommonOptFunction.h" -#include "Concurrency.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/CPUFixedPoint.hpp" +#include "backend/cpu/CPUQuantizationUtils.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #ifdef MNN_USE_NEON #include diff --git a/source/backend/cpu/CPUTFQuantizedConv2D.hpp b/source/backend/cpu/CPUTFQuantizedConv2D.hpp index 1a25fceef..9868d1e4a 100644 --- a/source/backend/cpu/CPUTFQuantizedConv2D.hpp +++ b/source/backend/cpu/CPUTFQuantizedConv2D.hpp @@ -9,9 +9,9 @@ #ifndef CPUTFQuantizedConv2D_hpp #define CPUTFQuantizedConv2D_hpp -#include "AutoStorage.h" -#include "CPUConvolution.hpp" -#include "Execution.hpp" +#include "core/AutoStorage.h" +#include "backend/cpu/CPUConvolution.hpp" +#include "core/Execution.hpp" #include "TFQuantizeOp_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUTanh.cpp b/source/backend/cpu/CPUTanh.cpp index 0e5303532..78d61e6b9 100644 --- a/source/backend/cpu/CPUTanh.cpp +++ b/source/backend/cpu/CPUTanh.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUTanh.hpp" +#include "backend/cpu/CPUTanh.hpp" #include -#include "CommonOptFunction.h" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUTanh.hpp b/source/backend/cpu/CPUTanh.hpp index 2d1241ab9..b57bcd293 100644 --- a/source/backend/cpu/CPUTanh.hpp +++ b/source/backend/cpu/CPUTanh.hpp @@ -9,7 +9,7 @@ #ifndef CPUTanh_hpp #define CPUTanh_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUTanh : public Execution { diff --git a/source/backend/cpu/CPUTensorConvert.cpp b/source/backend/cpu/CPUTensorConvert.cpp index 9283fab27..9f94a22ed 100644 --- 
a/source/backend/cpu/CPUTensorConvert.cpp +++ b/source/backend/cpu/CPUTensorConvert.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUTensorConvert.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "compute/CommonOptFunction.h" +#include "backend/cpu/CPUTensorConvert.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" namespace MNN { @@ -112,7 +112,7 @@ ErrorCode CPUTensorConverter::convert(const Tensor* input, const Tensor* output) } } const int bitLength = ib.type.bytes(); - + if (MNN_DATA_FORMAT_NC4HW4 == source && MNN_DATA_FORMAT_NCHW == dest) { if (bitLength == 1) { for (int i = 0; i < ib.dim[0].extent; ++i) { @@ -141,7 +141,7 @@ ErrorCode CPUTensorConverter::convert(const Tensor* input, const Tensor* output) } return NO_ERROR; } - + if (MNN_DATA_FORMAT_NHWC == source && MNN_DATA_FORMAT_NC4HW4 == dest) { if (bitLength == 1) { _NHWC2NC4HW4Uint8((uint8_t*)ib.host, (uint8_t*)ob.host, batch, channel, area); @@ -155,16 +155,19 @@ ErrorCode CPUTensorConverter::convert(const Tensor* input, const Tensor* output) NC4HW42NHWC((float*)ib.host, (float*)ob.host, batch, channel, area); } } else if (MNN_DATA_FORMAT_NHWC == source && MNN_DATA_FORMAT_NCHW == dest) { - MNN_ASSERT(bitLength == 4); + if (bitLength != 4) { + return NOT_SUPPORT; + } NHWC2NCHW((float*)ib.host, (float*)ob.host, batch, channel, area); } else if (MNN_DATA_FORMAT_NCHW == source && MNN_DATA_FORMAT_NHWC == dest) { - MNN_ASSERT(bitLength == 4); + if (bitLength != 4) { + return NOT_SUPPORT; + } NCHW2NHWC((float*)ib.host, (float*)ob.host, batch, channel, area); } else { - MNN_ASSERT(false); return NOT_SUPPORT; } - + return NO_ERROR; } @@ -176,8 +179,7 @@ class CPUTensorConvertFactory : public CPUBackend::Creator { public: virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, const MNN::Op* op, 
Backend* backend) const { - return new CPUTensorConverter(backend, op->main_as_TensorConvertInfo()->source(), - op->main_as_TensorConvertInfo()->dest()); + return new CPUTensorConverter(backend); } }; diff --git a/source/backend/cpu/CPUTensorConvert.hpp b/source/backend/cpu/CPUTensorConvert.hpp index 0d0fec780..0cc303d5b 100644 --- a/source/backend/cpu/CPUTensorConvert.hpp +++ b/source/backend/cpu/CPUTensorConvert.hpp @@ -9,16 +9,15 @@ #ifndef CPUTensorConvert_hpp #define CPUTensorConvert_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "Tensor_generated.h" namespace MNN { class CPUTensorConverter : public Execution { public: - CPUTensorConverter(Backend* b, MNN_DATA_FORMAT source, MNN_DATA_FORMAT dest) : Execution(b) { - mSource = source; - mDest = dest; + CPUTensorConverter(Backend* b) : Execution(b) { + // Do nothing } virtual ~CPUTensorConverter() = default; @@ -29,10 +28,6 @@ class CPUTensorConverter : public Execution { static ErrorCode convert(const Tensor* input, const Tensor* output); virtual ErrorCode onExecute(const std::vector& inputs, const std::vector& outputs) override; - -private: - MNN_DATA_FORMAT mSource; - MNN_DATA_FORMAT mDest; }; } // namespace MNN diff --git a/source/backend/cpu/CPUThreshold.cpp b/source/backend/cpu/CPUThreshold.cpp new file mode 100644 index 000000000..a4a4b9742 --- /dev/null +++ b/source/backend/cpu/CPUThreshold.cpp @@ -0,0 +1,41 @@ +// +// CPUThreshold.cpp +// MNN +// +// Created by MNN on 2019/12/06. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/cpu/CPUThreshold.hpp" +#include "backend/cpu/CPUBackend.hpp" + +namespace MNN { +ErrorCode CPUThreshold::onExecute(const std::vector& inputs, const std::vector& outputs) { + auto input = inputs[0]; + auto output = outputs[0]; + + const float* srcData = input->host(); + float* dstData = output->host(); + const int size = input->elementSize(); + + for (int i = 0; i < size; ++i) { + if (srcData[i] > mThreshold) { + dstData[i] = 1.0f; + } else { + dstData[i] = 0.0f; + } + } + + return NO_ERROR; +} + +class CPUThresholdCreator : public CPUBackend::Creator { +public: + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + const MNN::Op* op, Backend* backend) const { + return new CPUThreshold(backend, op->main_as_ELU()->alpha()); + } +}; + +REGISTER_CPU_OP_CREATOR(CPUThresholdCreator, OpType_Threshold); +} // namespace MNN diff --git a/source/backend/cpu/CPUThreshold.hpp b/source/backend/cpu/CPUThreshold.hpp new file mode 100644 index 000000000..084070c14 --- /dev/null +++ b/source/backend/cpu/CPUThreshold.hpp @@ -0,0 +1,29 @@ +// +// CPUThreshold.hpp +// MNN +// +// Created by MNN on 2019/09/23. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef CPUThreshold_hpp +#define CPUThreshold_hpp + +#include "core/Execution.hpp" + +namespace MNN { +class CPUThreshold : public Execution { +public: + CPUThreshold(Backend *b, float threshold) : Execution(b), mThreshold(threshold) { + // nothing to do + } + virtual ~CPUThreshold() = default; + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; + +private: + float mThreshold; +}; + +} // namespace MNN + +#endif /* CPUThreshold_hpp */ diff --git a/source/backend/cpu/CPUTile.cpp b/source/backend/cpu/CPUTile.cpp index 0635d021c..ce8a1e01b 100644 --- a/source/backend/cpu/CPUTile.cpp +++ b/source/backend/cpu/CPUTile.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUTile.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPUTile.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUTile.hpp b/source/backend/cpu/CPUTile.hpp index 8adf9b102..000430f58 100644 --- a/source/backend/cpu/CPUTile.hpp +++ b/source/backend/cpu/CPUTile.hpp @@ -9,7 +9,7 @@ #ifndef CPUTile_hpp #define CPUTile_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUTile : public Execution { diff --git a/source/backend/cpu/CPUTopKV2.cpp b/source/backend/cpu/CPUTopKV2.cpp index 168b42465..8dba40706 100644 --- a/source/backend/cpu/CPUTopKV2.cpp +++ b/source/backend/cpu/CPUTopKV2.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUTopKV2.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUTopKV2.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUTopKV2.hpp b/source/backend/cpu/CPUTopKV2.hpp index 85b953eb9..6b4c498f8 100644 --- a/source/backend/cpu/CPUTopKV2.hpp +++ b/source/backend/cpu/CPUTopKV2.hpp @@ -9,7 +9,7 @@ #ifndef CPUTOPKV2_HPP #define CPUTOPKV2_HPP 
-#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUTranspose.cpp b/source/backend/cpu/CPUTranspose.cpp index 86c83163c..db57d385d 100644 --- a/source/backend/cpu/CPUTranspose.cpp +++ b/source/backend/cpu/CPUTranspose.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUTranspose.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUTranspose.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/cpu/CPUTranspose.hpp b/source/backend/cpu/CPUTranspose.hpp index 3af451079..75e8744c5 100644 --- a/source/backend/cpu/CPUTranspose.hpp +++ b/source/backend/cpu/CPUTranspose.hpp @@ -9,7 +9,7 @@ #ifndef CPUTranspose_hpp #define CPUTranspose_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "Type_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUUnary.cpp b/source/backend/cpu/CPUUnary.cpp index 783b1288f..e3bb9e5b1 100644 --- a/source/backend/cpu/CPUUnary.cpp +++ b/source/backend/cpu/CPUUnary.cpp @@ -6,10 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUUnary.hpp" +#include "backend/cpu/CPUUnary.hpp" #include -#include "CPUBackend.hpp" -#include "Macro.h" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Macro.h" +#include +#include namespace MNN { CPUUnary::CPUUnary(Backend *b, UnaryOpOperation type) : MNN::Execution(b), mType(type) { @@ -137,6 +139,215 @@ struct UnaryFloor : std::unary_function { } }; +template +struct UnarySign : std::unary_function { + T operator()(const T &x) const { + if (x > 0) { + return 1; + } + if (x < 0) { + return -1; + } + return 0; + } +}; + +template +struct UnaryBNLL : std::unary_function { + T operator()(const T &x) const { + float r = x > 0 ? (x + log(1. + exp(-x))) : log(1. 
+ exp(x)); + return (T)r; + } +}; + +template +struct UnaryAcosh : std::unary_function { + T operator()(const T &x) const { + return (T)acoshf((T)(x)); + } +}; + +template +struct UnarySinh : std::unary_function { + T operator()(const T &x) const { + return (T)sinhf((T)(x)); + } +}; + +template +struct UnaryAsinh : std::unary_function { + T operator()(const T &x) const { + return (T)asinhf((T)(x)); + } +}; + +template +struct UnaryAtanh : std::unary_function { + T operator()(const T &x) const { + return (T)atanhf((T)(x)); + } +}; +template +struct UnaryRound : std::unary_function { + T operator()(const T &x) const { + return (T)roundf((T)(x)); + } +}; + +template +struct UnaryCosh : std::unary_function { + T operator()(const T &x) const { + return (T)coshf((T)(x)); + } +}; + +template +T evalPoly(T x, const std::vector kErfTCoefficient) { + auto poly = 0.0f; + for (auto c : kErfTCoefficient) { + poly = poly * x + c; + } + return poly; +} + +template +T erfImpl(T x) { + // Coefficients for by erf(f32), from Cephes. tensorflow + static const std::vector kErfTCoefficient { + +7.853861353153693E-5, -8.010193625184903E-4, +5.188327685732524E-3, + -2.685381193529856E-2, +1.128358514861418E-1, -3.761262582423300E-1, + +1.128379165726710E+0, + }; + return x * evalPoly(x * x, kErfTCoefficient); +} + +template +T erfcImpl(T x) { + // Coefficients for erfc(f32), from Cephes. 
tensorflow + const double kMaxlog = 88.72283905206835; + // erfc(x) = exp(-x^2) P(1/x^2), 1 < x < 2 + static const std::vector kErfcPCoefficient{ + +2.326819970068386E-2, -1.387039388740657E-1, +3.687424674597105E-1, + -5.824733027278666E-1, +6.210004621745983E-1, -4.944515323274145E-1, + +3.404879937665872E-1, -2.741127028184656E-1, +5.638259427386472E-1, + }; + // erfc(x) = exp(-x^2) R(1/x^2), 2 <= x < kMaxlog + static const std::vector kErfcRCoefficient{ + -1.047766399936249E+1, +1.297719955372516E+1, -7.495518717768503E+0, + +2.921019019210786E+0, -1.015265279202700E+0, +4.218463358204948E-1, + -2.820767439740514E-1, +5.641895067754075E-1, + }; + float absX = fabsf(x); + float z = expf(-x * x); + float q = 1.0 / absX; + float y = q * q; + float p; + if (absX < 2.0f) { + p = evalPoly(y, kErfcPCoefficient); + } else { + p = evalPoly(y, kErfcRCoefficient); + } + y = z * q * p; + float yClamp; + if (z < -kMaxlog) { + yClamp = 0.0f; + } else { + yClamp = y; + } + if (x < 0) { + return T(2.0f - yClamp); + } else { + return T(yClamp); + } +} + +template +struct UnaryErf : std::unary_function { + T operator()(const T &x) const { + if (abs(x) < T(1.)) { + return erfImpl(x); + } else { + return T(1.) - erfcImpl(x); + } + } +}; + +template +struct UnaryErfc : std::unary_function { + T operator()(const T &x) const { + if (abs(x) > T(1.)) { + return erfcImpl(x); + } else { + return T(1.) 
- erfImpl(x); + } + } +}; + +template +struct UnaryErfinv : std::unary_function { + // referenced from tensorflow + const int kDegree = 9; + const std::vector w_less_than_5_constants = { + 2.81022636e-08f, 3.43273939e-07f, -3.5233877e-06f, + -4.39150654e-06f, 0.00021858087f, -0.00125372503f, + -0.00417768164f, 0.246640727f, 1.50140941f}; + const std::vector w_greater_than_5_constants = { + -0.000200214257f, 0.000100950558f, 0.00134934322f, + -0.00367342844f, 0.00573950773f, -0.0076224613f, + 0.00943887047f, 1.00167406f, 2.83297682f}; + + T operator()(const T &x) const { + // Compute logarithm of (1+arg) using log1p(arg) which is more precise than + // log(1+arg) when arg is close to zero. For more details, see + // https://en.cppreference.com/w/cpp/numeric/math/log1p + auto w = -log1p(-x * x); + bool lt = (w < 5.0); + auto coefficient = [&](int i) { + if (lt) { + return w_less_than_5_constants[i]; + } else { + return w_greater_than_5_constants[i]; + } + }; + if (lt) { + w = w - 2.5; + } else { + w = sqrt(w) - 3.0; + } + auto p = coefficient(0); + for (int i = 1; i < kDegree; i++) { + p = coefficient(i) + p * w; + } + auto result = p * x; + if (fabsf(fabsf(x) - 1) < 1e-8) { + return std::numeric_limits::infinity(); + } else { + return result; + } + } +}; + +template +struct UnaryExpm1 : std::unary_function { + T operator()(const T &x) const { + return (T)expm1((T)(x)); + } +}; + +template +struct UnaryAsin : std::unary_function { + T operator()(const T &x) const { + return (T)asin((T)(x)); + } +}; + +template +struct UnaryAcos : std::unary_function { + T operator()(const T &x) const { + return (T)acos((T)(x)); + } +}; + ErrorCode CPUUnary::onExecute(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; @@ -187,6 +398,34 @@ ErrorCode CPUUnary::onExecute(const std::vector &inputs, const std::ve return _unaryOp, float>(input, output); case UnaryOpOperation_FLOOR: return _unaryOp, float>(input, output); + case 
UnaryOpOperation_BNLL: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ACOSH: + return _unaryOp, float>(input, output); + case UnaryOpOperation_SINH: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ASINH: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ATANH: + return _unaryOp, float>(input, output); + case UnaryOpOperation_SIGN: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ROUND: + return _unaryOp, float>(input, output); + case UnaryOpOperation_COSH: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ERF: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ERFC: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ERFINV: + return _unaryOp, float>(input, output); + case UnaryOpOperation_EXPM1: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ASIN: + return _unaryOp, float>(input, output); + case UnaryOpOperation_ACOS: + return _unaryOp, float>(input, output); default: MNN_ASSERT(false); break; diff --git a/source/backend/cpu/CPUUnary.hpp b/source/backend/cpu/CPUUnary.hpp index 6c49a4dac..e8276ca55 100644 --- a/source/backend/cpu/CPUUnary.hpp +++ b/source/backend/cpu/CPUUnary.hpp @@ -9,7 +9,7 @@ #ifndef CPUUnary_hpp #define CPUUnary_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/CPUUnpack.cpp b/source/backend/cpu/CPUUnpack.cpp index e7bc0f1d0..4fdc4debb 100644 --- a/source/backend/cpu/CPUUnpack.cpp +++ b/source/backend/cpu/CPUUnpack.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUUnpack.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPUUnpack.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUUnpack.hpp b/source/backend/cpu/CPUUnpack.hpp index 5988da66a..98e74f269 100644 --- a/source/backend/cpu/CPUUnpack.hpp +++ b/source/backend/cpu/CPUUnpack.hpp @@ -9,7 
+9,7 @@ #ifndef CPUUnpack_hpp #define CPUUnpack_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUUnpack : public Execution { diff --git a/source/backend/cpu/CPUUnravelIndex.cpp b/source/backend/cpu/CPUUnravelIndex.cpp new file mode 100644 index 000000000..2229fc5f7 --- /dev/null +++ b/source/backend/cpu/CPUUnravelIndex.cpp @@ -0,0 +1,64 @@ +// +// CPUUnravelIndex.cpp +// MNN +// +// Created by MNN on 2019/11/26. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/cpu/CPUUnravelIndex.hpp" +#include "backend/cpu/CPUBackend.hpp" + +namespace MNN { + +static inline void UnravelIndexHelper(std::vector& coordinate, const std::vector mod, int size, + int indice) { + int value = indice; + for (int i = 0; i < size; ++i) { + coordinate[i] = value / mod[i]; + value = value % mod[i]; + } +} + +ErrorCode CPUUnravelIndex::onExecute(const std::vector& inputs, const std::vector& outputs) { + auto indices = inputs[0]; + auto dims = inputs[1]; + + const int elmentSize = indices->elementSize(); + const int dimsSize = dims->length(0); + + const auto indicesPtr = indices->host(); + const auto dimsDataPtr = dims->host(); + std::vector mod(dimsSize); + for (int i = 0; i < dimsSize; ++i) { + int value = 1; + for (int j = i + 1; j < dimsSize; ++j) { + value *= dimsDataPtr[j]; + } + mod[i] = value; + } + + auto outputDataPtr = outputs[0]->host(); + + std::vector coordinate(dimsSize); + for (int i = 0; i < elmentSize; ++i) { + UnravelIndexHelper(coordinate, mod, dimsSize, indicesPtr[i]); + // assign value + for (int k = 0; k < dimsSize; ++k) { + outputDataPtr[i + k * elmentSize] = coordinate[k]; + } + } + return NO_ERROR; +} + +class CPUUnravelIndexCreator : public CPUBackend::Creator { +public: + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + const MNN::Op* op, Backend* backend) const override { + return new CPUUnravelIndex(backend); + } +}; + +REGISTER_CPU_OP_CREATOR(CPUUnravelIndexCreator, 
OpType_UnravelIndex); + +} // namespace MNN diff --git a/source/backend/cpu/CPUUnravelIndex.hpp b/source/backend/cpu/CPUUnravelIndex.hpp new file mode 100644 index 000000000..9fc4810e1 --- /dev/null +++ b/source/backend/cpu/CPUUnravelIndex.hpp @@ -0,0 +1,26 @@ +// +// CPUUnravelIndex.hpp +// MNN +// +// Created by MNN on 2018/11/26. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef CPUUnravelIndex_hpp +#define CPUUnravelIndex_hpp + +#include "core/Execution.hpp" + +namespace MNN { + +class CPUUnravelIndex : public Execution{ +public: + CPUUnravelIndex(Backend *b):Execution(b){ + } + virtual ~CPUUnravelIndex() = default; + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; +}; + +} // namespace MNN + +#endif /* CPUUnravelIndex_hpp */ diff --git a/source/backend/cpu/CPUWhere.cpp b/source/backend/cpu/CPUWhere.cpp index 3d737566a..d2ff72de2 100644 --- a/source/backend/cpu/CPUWhere.cpp +++ b/source/backend/cpu/CPUWhere.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CPUWhere.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/CPUWhere.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { diff --git a/source/backend/cpu/CPUWhere.hpp b/source/backend/cpu/CPUWhere.hpp index 3f70f85b1..d1cb178e1 100644 --- a/source/backend/cpu/CPUWhere.hpp +++ b/source/backend/cpu/CPUWhere.hpp @@ -9,7 +9,7 @@ #ifndef CPUWhere_hpp #define CPUWhere_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" namespace MNN { class CPUWhere : public Execution { diff --git a/source/backend/cpu/CPUZeroLike.cpp b/source/backend/cpu/CPUZeroLike.cpp index 39f1ad958..cd58d0a6f 100644 --- a/source/backend/cpu/CPUZeroLike.cpp +++ b/source/backend/cpu/CPUZeroLike.cpp @@ -6,7 +6,7 @@ // Copyright © 2018 Alibaba. All rights reserved. 
// -#include "CPUZeroLike.hpp" +#include "backend/cpu/CPUZeroLike.hpp" namespace MNN { ErrorCode CPUZeroLike::onExecute(const std::vector &inputs, const std::vector &outputs) { ::memset(outputs[0]->host(), 0, outputs[0]->size()); diff --git a/source/backend/cpu/CPUZeroLike.hpp b/source/backend/cpu/CPUZeroLike.hpp index 1755d1abf..b97517e41 100644 --- a/source/backend/cpu/CPUZeroLike.hpp +++ b/source/backend/cpu/CPUZeroLike.hpp @@ -9,7 +9,7 @@ #ifndef CPUZeroLike_hpp #define CPUZeroLike_hpp -#include "CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class CPUZeroLike : public Execution { public: diff --git a/source/backend/cpu/ThreadPool.cpp b/source/backend/cpu/ThreadPool.cpp index 10957a97e..5ac2b1a7d 100644 --- a/source/backend/cpu/ThreadPool.cpp +++ b/source/backend/cpu/ThreadPool.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_USE_THREAD_POOL -#include "ThreadPool.hpp" +#include "backend/cpu/ThreadPool.hpp" #include -#include "MNNDefine.h" +#include #ifdef __ANDROID__ #include #include diff --git a/source/backend/cpu/ThreadPool.hpp b/source/backend/cpu/ThreadPool.hpp index fe2b18b03..c491338b5 100644 --- a/source/backend/cpu/ThreadPool.hpp +++ b/source/backend/cpu/ThreadPool.hpp @@ -15,9 +15,10 @@ #include #include #include +#include namespace MNN { -class ThreadPool { +class MNN_PUBLIC ThreadPool { public: typedef std::pair, int> TASK; diff --git a/source/backend/cpu/arm/CMakeLists.txt b/source/backend/cpu/arm/CMakeLists.txt new file mode 100644 index 000000000..b34e7fd74 --- /dev/null +++ b/source/backend/cpu/arm/CMakeLists.txt @@ -0,0 +1,22 @@ +IF(NOT DEFINED ARCHS) + set(ARCHS "") +ENDIF() +FILE(GLOB MNN_AArch32_SRC ${CMAKE_CURRENT_LIST_DIR}/arm32/*.s ${CMAKE_CURRENT_LIST_DIR}/arm32/*.S) +FILE(GLOB MNN_AArch64_SRC ${CMAKE_CURRENT_LIST_DIR}/arm64/*.s ${CMAKE_CURRENT_LIST_DIR}/arm64/*.S) +if(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv7" OR ARCHS MATCHES "^armv7(;armv7s)?") + message(STATUS "Enabling AArch32 
Assemblies") + add_library(MNNARM32 OBJECT ${MNN_AArch32_SRC}) + target_include_directories(MNNARM32 PRIVATE ${CMAKE_CURRENT_LIST_DIR}/) + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNARM32) +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64" OR ARCHS STREQUAL "arm64") + message(STATUS "Enabling AArch64 Assemblies") + add_library(MNNARM64 OBJECT ${MNN_AArch64_SRC}) + target_include_directories(MNNARM64 PRIVATE ${CMAKE_CURRENT_LIST_DIR}/) + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNARM64) +else() +# Building fat binary requires multiple seperate builds and lipo-by-hand under CMake's design +endif() +SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) +SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) diff --git a/source/backend/cpu/arm/arm32/MNNReluWithSlope.S b/source/backend/cpu/arm/arm32/MNNReluWithSlope.S deleted file mode 100644 index 4ae1fb6e4..000000000 --- a/source/backend/cpu/arm/arm32/MNNReluWithSlope.S +++ /dev/null @@ -1,81 +0,0 @@ -// -// MNNReluWithSlope.S -// MNN -// -// Created by MNN on 2019/02/04. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifdef __arm__ -#ifndef __aarch64__ -#include "MNNAsmGlobal.h" - -.text -.align 5 - -asm_function MNNReluWithSlope -//void MNNReluWithSlope(float* dst, const float* src, size_t sizeQuad, float slope) - -//Auto Load: -//r0:dst, r1:src, r2:sizeQuad, r3:slope - -push {lr} - -vdup.f32 q15, r3 -vmov.i32 q14, #0 - -ReluL4: -cmp r2, #3 -ble ReluL1 - -ReluL4Loop: -vld1.32 {q0, q1}, [r1]! - -vmul.f32 q8, q15, q0 -vmul.f32 q9, q15, q1 -vcle.f32 q12, q0, q14 - -vld1.f32 {q2, q3}, [r1]! - -vcle.f32 q13, q1, q14 - -vbit.32 q0, q8, q12 -vbit.32 q1, q9, q13 -vmul.f32 q10, q2, q15 - -vst1.32 {q0, q1}, [r0]! - -vcle.f32 q12, q2, q14 -vcle.f32 q13, q3, q14 -vmul.f32 q11, q3, q15 -vbit.32 q2, q10, q12 -vbit.32 q3, q11, q13 - -vst1.32 {q2, q3}, [r0]! - -sub r2, r2, #4 -cmp r2, #4 -bge ReluL4Loop - -ReluL1: -cmp r2, #0 -beq ReluEnd - -ReluL1Loop: -vld1.32 {q0}, [r1]! 
-vcle.f32 q2, q0, q14 -vmul.f32 q1, q0, q15 -vbit.32 q0, q1, q2 -vst1.32 {q0}, [r0]! -subs r2, r2, #1 -bne ReluL1Loop - - -ReluEnd: - - -pop {pc} - - -#endif -#endif diff --git a/source/backend/cpu/arm/arm64/MNNReluWithSlope.S b/source/backend/cpu/arm/arm64/MNNReluWithSlope.S deleted file mode 100644 index ab3d1d95a..000000000 --- a/source/backend/cpu/arm/arm64/MNNReluWithSlope.S +++ /dev/null @@ -1,75 +0,0 @@ -// -// MNNReluWithSlope.S -// MNN -// -// Created by MNN on 2019/02/04. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifdef __aarch64__ -#include "MNNAsmGlobal.h" - -.text -.align 5 - -asm_function MNNReluWithSlope -//void MNNReluWithSlope(float* dst, const float* src, size_t sizeQuad, float slope) - -//Auto Load: -//x0:dst, x1:src, x2:sizeQuad, s0:slope - -dup v23.4s, v0.s[0] - -ReluL4: -cmp x2, #3 -ble ReluL1 - -ReluL4Loop: -ld1 {v0.4s, v1.4s}, [x1], #32 - -fmul v16.4s, v23.4s, v0.4s -fmul v17.4s, v23.4s, v1.4s -fcmle v20.4s, v0.4s, #0 - -ld1 {v2.4s, v3.4s}, [x1], #32 - -fcmle v21.4s, v1.4s, #0 - -bit v0.16b, v16.16b, v20.16b -bit v1.16b, v17.16b, v21.16b -fmul v18.4s, v2.4s, v23.4s - -st1 {v0.4s, v1.4s}, [x0], #32 - -fcmle v20.4s, v2.4s, #0 -fcmle v21.4s, v3.4s, #0 -fmul v19.4s, v3.4s, v23.4s -bit v2.16b, v18.16b, v20.16b -bit v3.16b, v19.16b, v21.16b - -st1 {v2.4s, v3.4s}, [x0], #32 - -sub x2, x2, #4 -cmp x2, #4 -bge ReluL4Loop - -ReluL1: -cmp x2, #0 -beq ReluEnd - -ReluL1Loop: -ld1 {v0.4s}, [x1], #16 -fcmle v2.4s, v0.4s, #0 -fmul v1.4s, v0.4s, v23.4s -bit v0.16b, v1.16b, v2.16b -st1 {v0.4s}, [x0], #16 -subs x2, x2, #1 -bne ReluL1Loop - - -ReluEnd: - - -ret - -#endif diff --git a/source/backend/cpu/compute/CommonOptFunction.cpp b/source/backend/cpu/compute/CommonOptFunction.cpp index ac932c79f..ce975be73 100644 --- a/source/backend/cpu/compute/CommonOptFunction.cpp +++ b/source/backend/cpu/compute/CommonOptFunction.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CommonOptFunction.h" +#include 
"backend/cpu/compute/CommonOptFunction.h" #include #include -#include "Macro.h" +#include "core/Macro.h" #include #ifdef MNN_USE_NEON #include @@ -136,18 +136,6 @@ void MNNScaleAndAddBias(float* dst, const float* src, const float* bias, const f } } -void MNNReluWithSlope(float* dst, const float* src, size_t sizeQuad, float slope) { - int i; - size_t size = sizeQuad * 4; - for (i = 0; i < size; ++i) { - if (src[i] < 0) { - dst[i] = src[i] * slope; - } else { - dst[i] = src[i]; - } - } -} - void MNNPackC4(float* dst, const float* src, size_t area, size_t depth) { int z, x; int cur = 0; @@ -648,3 +636,11 @@ void MNNTanh(float* dst, const float* src, size_t dataSize) { dst[i] = tanhf_poly(src[i]); } } + +void MNNReluWithSlope(float* dst, const float* src, size_t sizeQuad, float slope) { + float slopeValue[4]; + for (int i=0; i<4; ++i) { + slopeValue[i] = slope; + } + MNNReluWithSlopeChannel(dst, src, slopeValue, sizeQuad, 1); +} diff --git a/source/backend/cpu/compute/ConvOpt.cpp b/source/backend/cpu/compute/ConvOpt.cpp index 823164a45..da9b70710 100644 --- a/source/backend/cpu/compute/ConvOpt.cpp +++ b/source/backend/cpu/compute/ConvOpt.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvOpt.h" +#include "backend/cpu/compute/ConvOpt.h" #include -#include "Macro.h" -#include "Vec4.hpp" +#include "core/Macro.h" +#include "math/Vec4.hpp" using namespace MNN::Math; #ifndef MNN_USE_NEON #ifndef MNN_USE_SSE @@ -296,7 +296,7 @@ void MNNDeconvRunForLineDepthwise(const float* dst, float* src, const float* wei } void MNNMatrixProdCommon(float* C, const float* A, const float* B, size_t width, size_t cStride, size_t aStride, size_t bStride, size_t height) { - int widthC4 = width / 4; + int widthC4 = (int)width / 4; if (widthC4 > 0) { MNNMatrixProd(C, A, B, widthC4, cStride, aStride, bStride, height); width = width - 4*widthC4; @@ -317,7 +317,7 @@ void MNNMatrixProdCommon(float* C, const float* A, const float* B, size_t width, } void 
MNNMatrixAddCommon(float* C, const float* A, const float* B, size_t width, size_t cStride, size_t aStride, size_t bStride, size_t height) { - int widthC4 = width / 4; + int widthC4 = (int)width / 4; if (widthC4 > 0) { MNNMatrixAdd(C, A, B, widthC4, cStride, aStride, bStride, height); width = width - 4*widthC4; @@ -338,7 +338,7 @@ void MNNMatrixAddCommon(float* C, const float* A, const float* B, size_t width, } void MNNMatrixSubCommon(float* C, const float* A, const float* B, size_t width, size_t cStride, size_t aStride, size_t bStride, size_t height) { - int widthC4 = width / 4; + int widthC4 = (int)width / 4; if (widthC4 > 0) { MNNMatrixSub(C, A, B, widthC4, cStride, aStride, bStride, height); width = width - 4*widthC4; @@ -359,7 +359,7 @@ void MNNMatrixSubCommon(float* C, const float* A, const float* B, size_t width, } void MNNMatrixMaxCommon(float* C, const float* A, const float* B, size_t width, size_t cStride, size_t aStride, size_t bStride, size_t height) { - int widthC4 = width / 4; + int widthC4 = (int)width / 4; if (widthC4 > 0) { MNNMatrixMax(C, A, B, widthC4, cStride, aStride, bStride, height); width = width - 4*widthC4; diff --git a/source/backend/cpu/compute/Convolution1x1Strassen.cpp b/source/backend/cpu/compute/Convolution1x1Strassen.cpp index d498795a3..c54aa4c8f 100644 --- a/source/backend/cpu/compute/Convolution1x1Strassen.cpp +++ b/source/backend/cpu/compute/Convolution1x1Strassen.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Convolution1x1Strassen.hpp" +#include "backend/cpu/compute/Convolution1x1Strassen.hpp" #include -#include "BufferAllocator.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "Macro.h" +#include "core/BufferAllocator.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" 
namespace MNN { Convolution1x1Strassen::Convolution1x1Strassen(const Convolution2DCommon *common, Backend *b, const float *originWeight, size_t originWeightSize, const float *bias, size_t biasSize) diff --git a/source/backend/cpu/compute/Convolution1x1Strassen.hpp b/source/backend/cpu/compute/Convolution1x1Strassen.hpp index d2f484244..c7b7cea1b 100644 --- a/source/backend/cpu/compute/Convolution1x1Strassen.hpp +++ b/source/backend/cpu/compute/Convolution1x1Strassen.hpp @@ -10,8 +10,8 @@ #define Convolution1x1Strassen_hpp #include -#include "../CPUConvolution.hpp" -#include "StrassenMatmulComputor.hpp" +#include "backend/cpu/CPUConvolution.hpp" +#include "backend/cpu/compute/StrassenMatmulComputor.hpp" namespace MNN { class Convolution1x1Strassen : public CPUConvolution { public: diff --git a/source/backend/cpu/compute/Convolution3D3x3.cpp b/source/backend/cpu/compute/Convolution3D3x3.cpp index d9372f5e0..e48cd8add 100644 --- a/source/backend/cpu/compute/Convolution3D3x3.cpp +++ b/source/backend/cpu/compute/Convolution3D3x3.cpp @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Convolution3x3.hpp" -#include "Convolution3D3x3.hpp" -#include "AutoTime.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "Vec4.hpp" +#include "backend/cpu/compute/Convolution3x3.hpp" +#include "backend/cpu/compute/Convolution3D3x3.hpp" +#include +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "math/Vec4.hpp" using namespace MNN::Math; typedef Vec4 float4; @@ -34,7 +34,7 @@ Convolution3D3x3::Convolution3D3x3(const Convolution3DCommon* convOp, Backend *b } mKernelDepth = (*(convOp->kernels()))[0]; mPostFunction = CPUConvolution3D::getPostFunction(convOp); - + int inputChannel = convOp->inputCount(); int outputChannel = 
convOp->outputCount(); @@ -45,10 +45,10 @@ Convolution3D3x3::Convolution3D3x3(const Convolution3DCommon* convOp, Backend *b if (!valid) { return; } - + memset(mBias->host(), 0, mBias->size()); memcpy(mBias->host(), bias, biasSize * sizeof(float)); - + if (inputChannel % 4 != 0 || outputChannel % 4 != 0) { memset(mWeight->host(), 0, mWeight->size()); } @@ -74,7 +74,7 @@ ErrorCode Convolution3D3x3::onResize(const std::vector& inputs, const s const int oc = output->length(1), od = output->length(2); const int ic = input->length(1), id = input->length(2); const int threadNumber = ((CPUBackend*)backend())->threadNumber(); - + if (mPadMode == PadMode_SAME) { mPads.clear(); auto kernels = std::vector({mKernelDepth, 3, 3}); @@ -83,11 +83,11 @@ ErrorCode Convolution3D3x3::onResize(const std::vector& inputs, const s mPads.push_back((inputNeeded - input->length(i + 2)) / 2); } } - + mSourceBuffer.reset(Tensor::createDevice({threadNumber, id, BLOCK_UNIT2, UP_DIV(ic, 4), CONVOLUTION_TILED_NUMBER, 4})); mDestBuffer.reset(Tensor::createDevice({threadNumber, od + 1, BLOCK_UNIT2, UP_DIV(oc, 4), CONVOLUTION_TILED_NUMBER, 4})); mTempBuffer.reset(Tensor::createDevice({threadNumber, BLOCK_UNIT2, 4})); - + bool succ = backend()->onAcquireBuffer(mSourceBuffer.get(), Backend::DYNAMIC); succ = succ && backend()->onAcquireBuffer(mDestBuffer.get(), Backend::DYNAMIC); succ = succ && backend()->onAcquireBuffer(mTempBuffer.get(), Backend::DYNAMIC); @@ -121,20 +121,20 @@ ErrorCode Convolution3D3x3::onExecute(const std::vector& inputs, const for (int xi = 0; xi < xC; ++xi) { auto index = xIndex + xi; auto dstUnit = _srcOrigin + 4 * xi; - + int wIndex = index % wUnit; int hIndex = index / wUnit; - + int srcX = wIndex * 2 - padWidth; int srcY = hIndex * 2 - padHeight; int sy = ALIMAX(0, srcY) - srcY; int ey = ALIMIN(srcY + 4, inputHeight) - srcY; int sx = ALIMAX(0, srcX) - srcX; int ex = ALIMIN(srcX + 4, inputWidth) - srcX; - + auto srcStart = srcOrigin + (srcX + srcY * inputWidth) * 4; 
memset(dstBlock, 0, SOURCE_BLOCK * sizeof(float)); - + for (int z = 0; z < ic_4; ++z) { auto dstStart = dstUnit + z * 4 * xC; auto src_z = srcStart + z * 4 * inputWidth * inputHeight * inputDepth; @@ -153,20 +153,20 @@ ErrorCode Convolution3D3x3::onExecute(const std::vector& inputs, const } } }; - + auto destTransformFunc = [=](int xIndex, int xC, const float* srcOrigin, float* dstOrigin, float* dstBlock) { for (int xi = 0; xi < xC; ++xi) { auto index = xIndex + xi; auto srcUnit = srcOrigin + 4 * xi; - + int wIndex = index % wUnit; int hIndex = index / wUnit; - + int dstX = wIndex * 2; int dstY = hIndex * 2; - + auto dstStart = dstOrigin + 4 * (dstX + dstY * outputWidth); - + for (int od = 0; od < outputDepth; ++od) { auto _srcUnit = srcUnit + od * BLOCK_UNIT2 * dc_4 * xC * 4; auto _dstStart = dstStart + od * outputHeight * outputWidth * 4; @@ -174,7 +174,7 @@ ErrorCode Convolution3D3x3::onExecute(const std::vector& inputs, const auto srcZ = _srcUnit + z * xC * 4; auto dstZ = _dstStart + z * outputDepth * outputWidth * outputHeight * 4; Convolution3x3::destTransform(srcZ, dstBlock, dc_4 * 4 * xC); - + Vec4::save(dstZ, Vec4::load(dstBlock)); if (wIndex * 2 + 1 < outputWidth) { Vec4::save(dstZ + 4, Vec4::load(dstBlock + 4)); @@ -189,7 +189,7 @@ ErrorCode Convolution3D3x3::onExecute(const std::vector& inputs, const } } }; - + auto gemmFunc = [=](int xC, int start, int end, const float* srcOrigin, const float* weight, float* dstOrigin) { float* tempDst = dstOrigin + outputDepth * BLOCK_UNIT2 * dc_4 * xC * 4; const int element = (end - start) * dc_4 * xC * 4, offset = start * dc_4 * xC * 4; @@ -218,7 +218,7 @@ ErrorCode Convolution3D3x3::onExecute(const std::vector& inputs, const } } }; - + auto gemmConcurrencyFunc = [=, &gemmFunc](int xC, const float* _srcOrigin, const float* weight, float* _dstOrigin) { MNN_CONCURRENCY_BEGIN(tId, threadNumber) { const int step = UP_DIV(BLOCK_UNIT2, threadNumber); @@ -226,7 +226,7 @@ ErrorCode Convolution3D3x3::onExecute(const 
std::vector& inputs, const } MNN_CONCURRENCY_END() }; - + auto tFunction = [&](const int tId, const int tileStart, const int tileStep, const int tileEnd, const float* srcOrigin, float* dstOrigin) { auto _srcOrigin = mSourceBuffer->host() + tId * mSourceBuffer->stride(0); auto _dstOrigin = mDestBuffer->host() + tId * mDestBuffer->stride(0); @@ -235,15 +235,15 @@ ErrorCode Convolution3D3x3::onExecute(const std::vector& inputs, const int xIndex = (int)tIndex * CONVOLUTION_TILED_NUMBER; int xReamin = totalCount - xIndex; int xC = xReamin > CONVOLUTION_TILED_NUMBER ? CONVOLUTION_TILED_NUMBER : xReamin; - + sourceTransformFunc(xIndex, xC, srcOrigin, _srcOrigin, dstBlock); - + if (threadNumber != tileStep) { gemmConcurrencyFunc(xC, _srcOrigin, mWeight->host(), _dstOrigin); } else { gemmFunc(xC, 0, BLOCK_UNIT2, _srcOrigin, mWeight->host(), _dstOrigin); } - + destTransformFunc(xIndex, xC, _dstOrigin, dstOrigin, dstBlock); } }; @@ -251,18 +251,18 @@ ErrorCode Convolution3D3x3::onExecute(const std::vector& inputs, const for (int batchIndex = 0; batchIndex < input->batch(); ++batchIndex) { auto srcOrigin = input->host() + batchIndex * input->stride(0); auto dstOrigin = output->host() + batchIndex * output->stride(0); - + if (tileCount >= threadNumber) { MNN_CONCURRENCY_BEGIN(tId, threadNumber) { tFunction((int)tId, (int)tId, threadNumber, tileCount / threadNumber * threadNumber, srcOrigin, dstOrigin); } MNN_CONCURRENCY_END(); } - + if (tileCount % threadNumber != 0) { tFunction(0, tileCount / threadNumber * threadNumber, 1, tileCount, srcOrigin, dstOrigin); } - + MNN_CONCURRENCY_BEGIN(tId, threadNumber) { int channelStep = UP_DIV(dc_4, threadNumber); int channelStart = channelStep * tId, channelNum = ALIMIN(channelStep * (tId + 1), dc_4) - channelStart; diff --git a/source/backend/cpu/compute/Convolution3D3x3.hpp b/source/backend/cpu/compute/Convolution3D3x3.hpp index 639390733..a68fb5847 100644 --- a/source/backend/cpu/compute/Convolution3D3x3.hpp +++ 
b/source/backend/cpu/compute/Convolution3D3x3.hpp @@ -9,7 +9,7 @@ #ifndef Convolution3D3x3_hpp #define Convolution3D3x3_hpp -#include "CPUConvolution3D.hpp" +#include "backend/cpu/CPUConvolution3D.hpp" namespace MNN { class Convolution3D3x3 : public Execution { diff --git a/source/backend/cpu/compute/Convolution3x3.cpp b/source/backend/cpu/compute/Convolution3x3.cpp index ef1eb34bb..4b12d48fe 100644 --- a/source/backend/cpu/compute/Convolution3x3.cpp +++ b/source/backend/cpu/compute/Convolution3x3.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Convolution3x3.hpp" -#include "AutoTime.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "Vec4.hpp" +#include "backend/cpu/compute/Convolution3x3.hpp" +#include +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "math/Vec4.hpp" using namespace MNN::Math; typedef Vec4 float4; @@ -253,29 +253,29 @@ ErrorCode Convolution3x3::onExecute(const std::vector& inputs, const st const int threadNumber = ((CPUBackend*)backend())->threadNumber(); auto postFunction = mPostFunction; - + auto sourceTransformFunc = [=](int xIndex, int xC, const float* srcOrigin, float* dstOrigin, float* dstBlock) { // Source Transform for (int xi = 0; xi < xC; ++xi) { auto index = xIndex + xi; auto dstUnit = dstOrigin + 4 * xi; - + int wIndex = index % wUnit; int hIndex = index / wUnit; - + int srcX = wIndex * 2 - padX; int srcY = hIndex * 2 - padY; int sy = ALIMAX(0, srcY) - srcY; int ey = ALIMIN(srcY + 4, ih) - srcY; int sx = ALIMAX(0, srcX) - srcX; int ex = ALIMIN(srcX + 4, iw) - srcX; - + auto srcStart = srcOrigin + (srcX + srcY * iw) * 4; - + memset(dstBlock, 0, SOURCE_BLOCK * sizeof(float)); for (int z = 0; z < ic_4; ++z) { auto _dstStart = dstUnit + z * 4 * xC; - + auto src_z = 
srcStart + z * 4 * iw * ih; if (ex > sx) { // Extract One Block @@ -290,26 +290,26 @@ ErrorCode Convolution3x3::onExecute(const std::vector& inputs, const st } } }; - + auto destTransformFunc = [=](int xIndex, int xC, const float* srcOrigin, float* dstOrigin, float* dstBlock) { // Dest Transform for (int xi = 0; xi < xC; ++xi) { auto index = xIndex + xi; auto srcUnit = srcOrigin + 4 * xi; - + int wIndex = index % wUnit; int hIndex = index / wUnit; - + int dstX = wIndex * 2; int dstY = hIndex * 2; - + auto dstStart = dstOrigin + 4 * (dstX + dstY * ow); - + for (int z = 0; z < dc_4; ++z) { auto srcZ = srcUnit + z * xC * 4; auto dstZ = dstStart + z * ow * oh * 4; destTransform(srcZ, dstBlock, dc_4 * 4 * xC); - + Vec4::save(dstZ, Vec4::load(dstBlock)); if (wIndex * 2 + 1 < ow) { Vec4::save(dstZ + 4, Vec4::load(dstBlock + 4)); @@ -323,7 +323,7 @@ ErrorCode Convolution3x3::onExecute(const std::vector& inputs, const st } } }; - + auto gemmFunc = [=](int xC, int start, int end, const float* srcOrigin, const float* weight, float* dstOrigin) { // Multi if (xC == CONVOLUTION_TILED_NUMBER) { @@ -338,7 +338,7 @@ ErrorCode Convolution3x3::onExecute(const std::vector& inputs, const st } } }; - + auto gemmConcurrencyFunc = [=, &gemmFunc](int xC, const float* srcOrigin, const float* weight, float* dstOrigin) { MNN_CONCURRENCY_BEGIN(tId, threadNumber) { const int step = UP_DIV(BLOCK_UNIT2, threadNumber); @@ -346,7 +346,7 @@ ErrorCode Convolution3x3::onExecute(const std::vector& inputs, const st } MNN_CONCURRENCY_END() }; - + auto tFunction = [&](const int tId, const int tileStart, const int tileStep, const int tileEnd, const float* srcOrigin, float* dstOrigin) { auto _srcOrigin = mTempBuffer.host() + tId * mTempBuffer.buffer().dim[0].stride; for (int tIndex = tileStart; tIndex < tileEnd; tIndex += tileStep) { @@ -355,15 +355,15 @@ ErrorCode Convolution3x3::onExecute(const std::vector& inputs, const st int xC = xReamin > CONVOLUTION_TILED_NUMBER ? 
CONVOLUTION_TILED_NUMBER : xReamin; auto _dstOrigin = _srcOrigin + xC * SOURCE_BLOCK * ic_4; auto dstBlock = _srcOrigin + xC * SOURCE_BLOCK * (ic_4 + dc_4); - + sourceTransformFunc(xIndex, xC, srcOrigin, _srcOrigin, dstBlock); - + if (threadNumber != tileStep) { gemmConcurrencyFunc(xC, _srcOrigin, mWeight->host(), _dstOrigin); } else { gemmFunc(xC, 0, BLOCK_UNIT2, _srcOrigin, mWeight->host(), _dstOrigin); } - + destTransformFunc(xIndex, xC, _dstOrigin, dstOrigin, dstBlock); } }; @@ -371,18 +371,18 @@ ErrorCode Convolution3x3::onExecute(const std::vector& inputs, const st for (int batchIndex = 0; batchIndex < input->batch(); ++batchIndex) { auto srcOrigin = input->host() + iw * ih * ic_4 * 4 * batchIndex; auto dstOrigin = output->host() + ow * oh * dc_4 * 4 * batchIndex; - + if (tileCount >= threadNumber) { MNN_CONCURRENCY_BEGIN(tId, threadNumber) { tFunction((int)tId, (int)tId, threadNumber, tileCount / threadNumber * threadNumber, srcOrigin, dstOrigin); } MNN_CONCURRENCY_END(); } - + if (tileCount % threadNumber != 0) { tFunction(0, tileCount / threadNumber * threadNumber, 1, tileCount, srcOrigin, dstOrigin); } - + MNN_CONCURRENCY_BEGIN(tId, threadNumber) { int channelStep = UP_DIV(dc_4, threadNumber); int channelStart = channelStep * tId, channelNum = ALIMIN(channelStep * (tId + 1), dc_4) - channelStart; diff --git a/source/backend/cpu/compute/Convolution3x3.hpp b/source/backend/cpu/compute/Convolution3x3.hpp index a10652c0d..bde305a59 100644 --- a/source/backend/cpu/compute/Convolution3x3.hpp +++ b/source/backend/cpu/compute/Convolution3x3.hpp @@ -9,8 +9,8 @@ #ifndef Convolution3x3_hpp #define Convolution3x3_hpp -#include "CPUConvolution.hpp" -#include "ConvolutionFloatFactory.h" +#include "backend/cpu/CPUConvolution.hpp" +#include "backend/cpu/compute/ConvolutionFloatFactory.h" namespace MNN { class Convolution3x3 : public CPUConvolution { @@ -30,8 +30,6 @@ class Convolution3x3 : public CPUConvolution { std::shared_ptr mBias; Tensor mTempBuffer; - bool 
mInsideThread = false; - bool mOutsideThread = true; }; } // namespace MNN #endif /* Convolution3x3_hpp */ diff --git a/source/backend/cpu/compute/ConvolutionDepthwise3x3.cpp b/source/backend/cpu/compute/ConvolutionDepthwise3x3.cpp index 863ec209c..90280b0c3 100644 --- a/source/backend/cpu/compute/ConvolutionDepthwise3x3.cpp +++ b/source/backend/cpu/compute/ConvolutionDepthwise3x3.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvolutionDepthwise3x3.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Macro.h" -#include "Vec4.hpp" +#include "backend/cpu/compute/ConvolutionDepthwise3x3.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "core/Macro.h" +#include "math/Vec4.hpp" using namespace MNN::Math; extern "C" { diff --git a/source/backend/cpu/compute/ConvolutionDepthwise3x3.hpp b/source/backend/cpu/compute/ConvolutionDepthwise3x3.hpp index 9fbbab161..e6630e8ec 100644 --- a/source/backend/cpu/compute/ConvolutionDepthwise3x3.hpp +++ b/source/backend/cpu/compute/ConvolutionDepthwise3x3.hpp @@ -9,7 +9,7 @@ #ifndef ConvolutionDepthwise3x3_hpp #define ConvolutionDepthwise3x3_hpp -#include "CPUConvolution.hpp" +#include "backend/cpu/CPUConvolution.hpp" namespace MNN { class ConvolutionDepthwise3x3 : public CPUConvolution { diff --git a/source/backend/cpu/compute/ConvolutionFloatFactory.cpp b/source/backend/cpu/compute/ConvolutionFloatFactory.cpp index 4bc34e895..8352cb2bc 100644 --- a/source/backend/cpu/compute/ConvolutionFloatFactory.cpp +++ b/source/backend/cpu/compute/ConvolutionFloatFactory.cpp @@ -6,16 +6,16 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvolutionFloatFactory.h" -#include "CPUConvolutionDepthwise.hpp" -#include "ConvOpt.h" -#include "Convolution1x1Strassen.hpp" -#include "Convolution3x3.hpp" -#include "ConvolutionGroup.hpp" -#include "ConvolutionIntFactory.hpp" -#include "ConvolutionTiledExecutor.hpp" -#include 
"ConvolutionWinograd.hpp" -#include "Macro.h" +#include "backend/cpu/compute/ConvolutionFloatFactory.h" +#include "backend/cpu/CPUConvolutionDepthwise.hpp" +#include "backend/cpu/compute/ConvOpt.h" +#include "backend/cpu/compute/Convolution1x1Strassen.hpp" +#include "backend/cpu/compute/Convolution3x3.hpp" +#include "backend/cpu/compute/ConvolutionGroup.hpp" +#include "backend/cpu/compute/ConvolutionIntFactory.hpp" +#include "backend/cpu/compute/ConvolutionTiledExecutor.hpp" +#include "backend/cpu/compute/ConvolutionWinograd.hpp" +#include "core/Macro.h" namespace MNN { static Execution* _createUnit(const Tensor* input, const Tensor* output, Backend* backend, @@ -37,7 +37,7 @@ static Execution* _createUnit(const Tensor* input, const Tensor* output, Backend if (unit <= 1) { return new ConvolutionTiledExecutor(common, backend, originWeight, originWeightSize, bias, biasSize); } -#if defined(MNN_BUILD_FOR_ANDROID) or defined(__APPLE__) +#if defined(MNN_BUILD_FOR_ANDROID) || defined(__APPLE__) // MNN_PRINT("ic=%d, channel=%d, kx=%d, unit=%d\n", input->channel(), output->channel(), common->kernelX(), unit); if (common->kernelY() == 3 && common->kernelX() == 3 && unit <= 4) { return new Convolution3x3(common, backend, originWeight, originWeightSize, bias, biasSize); @@ -50,7 +50,7 @@ static Execution* _createUnit(const Tensor* input, const Tensor* output, Backend Execution* ConvolutionFloatFactory::create(const std::vector& inputs, const std::vector& outputs, const MNN::Op* op, Backend* backend) { auto conv2d = op->main_as_Convolution2D(); - if (inputs.size() == 3) { + if (inputs.size() > 1) { // Use Input Weight and Bias return new ConvolutionTiledExecutorMultiInput(conv2d->common(), backend); } diff --git a/source/backend/cpu/compute/ConvolutionFloatFactory.h b/source/backend/cpu/compute/ConvolutionFloatFactory.h index 925694902..c28601e94 100644 --- a/source/backend/cpu/compute/ConvolutionFloatFactory.h +++ b/source/backend/cpu/compute/ConvolutionFloatFactory.h @@ -9,7 
+9,7 @@ #ifndef ConvolutionFloatFactory_h #define ConvolutionFloatFactory_h -#include "../CPUBackend.hpp" +#include "backend/cpu/CPUBackend.hpp" namespace MNN { class ConvolutionFloatFactory { diff --git a/source/backend/cpu/compute/ConvolutionGroup.cpp b/source/backend/cpu/compute/ConvolutionGroup.cpp index 23658923b..6bcd6961c 100644 --- a/source/backend/cpu/compute/ConvolutionGroup.cpp +++ b/source/backend/cpu/compute/ConvolutionGroup.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvolutionGroup.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/cpu/compute/ConvolutionGroup.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { ConvolutionGroup::ConvolutionGroup(Backend *b, const std::vector> &subConvolution) @@ -39,6 +39,7 @@ ErrorCode ConvolutionGroup::onResize(const std::vector &inputs, const mInputUnit->buffer().dim[1].extent = ib.dim[1].extent / mSubConvolution.size(); mInputUnit->buffer().dim[0].extent = 1; TensorUtils::getDescribe(mInputUnit.get())->dimensionFormat = MNN_DATA_FORMAT_NC4HW4; + TensorUtils::setLinearLayout(mInputUnit.get()); ::memcpy(mOutputRaw->buffer().dim, ob.dim, ob.dimensions * sizeof(halide_dimension_t)); mOutputRaw->buffer().dimensions = ob.dimensions; @@ -49,6 +50,7 @@ ErrorCode ConvolutionGroup::onResize(const std::vector &inputs, const mOutputUnit->buffer().dim[1].extent = ob.dim[1].extent / mSubConvolution.size(); mOutputUnit->buffer().dim[0].extent = 1; TensorUtils::getDescribe(mOutputUnit.get())->dimensionFormat = MNN_DATA_FORMAT_NC4HW4; + TensorUtils::setLinearLayout(mOutputUnit.get()); backend()->onAcquireBuffer(mOutputUnit.get(), Backend::DYNAMIC); backend()->onAcquireBuffer(mInputUnit.get(), Backend::DYNAMIC); diff --git a/source/backend/cpu/compute/ConvolutionGroup.hpp b/source/backend/cpu/compute/ConvolutionGroup.hpp index 32699334c..c1b49c279 100644 --- 
a/source/backend/cpu/compute/ConvolutionGroup.hpp +++ b/source/backend/cpu/compute/ConvolutionGroup.hpp @@ -9,7 +9,7 @@ #ifndef ConvolutionGroupInt8_hpp #define ConvolutionGroupInt8_hpp -#include "ConvolutionIntFactory.hpp" +#include "backend/cpu/compute/ConvolutionIntFactory.hpp" namespace MNN { class ConvolutionGroup : public Execution { diff --git a/source/backend/cpu/compute/ConvolutionInt8Executor.cpp b/source/backend/cpu/compute/ConvolutionInt8Executor.cpp index c6fe56b55..35c853167 100644 --- a/source/backend/cpu/compute/ConvolutionInt8Executor.cpp +++ b/source/backend/cpu/compute/ConvolutionInt8Executor.cpp @@ -6,16 +6,16 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvolutionInt8Executor.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "ConvolutionIntFactory.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "Int8FunctionsOpt.h" +#include "backend/cpu/compute/ConvolutionInt8Executor.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "backend/cpu/compute/ConvolutionIntFactory.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/cpu/compute/Int8FunctionsOpt.h" #define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include #ifdef MNN_USE_NEON #include diff --git a/source/backend/cpu/compute/ConvolutionInt8Executor.hpp b/source/backend/cpu/compute/ConvolutionInt8Executor.hpp index 44327b6fb..3ebbe75e9 100644 --- a/source/backend/cpu/compute/ConvolutionInt8Executor.hpp +++ b/source/backend/cpu/compute/ConvolutionInt8Executor.hpp @@ -10,10 +10,10 @@ #define ConvolutionInt8Executor_hpp #include -#include "AutoStorage.h" -#include "ConvolutionFloatFactory.h" -#include "ConvolutionIntFactory.hpp" -#include "../CPUConvolution.hpp" +#include "core/AutoStorage.h" +#include "backend/cpu/compute/ConvolutionFloatFactory.h" +#include 
"backend/cpu/compute/ConvolutionIntFactory.hpp" +#include "backend/cpu/CPUConvolution.hpp" namespace MNN { class ConvolutionInt8Executor : public CPUConvolution { diff --git a/source/backend/cpu/compute/ConvolutionIntFactory.cpp b/source/backend/cpu/compute/ConvolutionIntFactory.cpp index 70ea3e783..e44001946 100644 --- a/source/backend/cpu/compute/ConvolutionIntFactory.cpp +++ b/source/backend/cpu/compute/ConvolutionIntFactory.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvolutionIntFactory.hpp" +#include "backend/cpu/compute/ConvolutionIntFactory.hpp" #include -#include "ConvolutionGroup.hpp" -#include "ConvolutionInt8Executor.hpp" +#include "backend/cpu/compute/ConvolutionGroup.hpp" +#include "backend/cpu/compute/ConvolutionInt8Executor.hpp" #include "half.hpp" namespace MNN { diff --git a/source/backend/cpu/compute/ConvolutionIntFactory.hpp b/source/backend/cpu/compute/ConvolutionIntFactory.hpp index 92465c3b3..dce55de6d 100644 --- a/source/backend/cpu/compute/ConvolutionIntFactory.hpp +++ b/source/backend/cpu/compute/ConvolutionIntFactory.hpp @@ -11,8 +11,8 @@ #include #include -#include "../CPUBackend.hpp" -#include "AutoStorage.h" +#include "backend/cpu/CPUBackend.hpp" +#include "core/AutoStorage.h" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/cpu/compute/ConvolutionTiledExecutor.cpp b/source/backend/cpu/compute/ConvolutionTiledExecutor.cpp index f847caebd..78fd6ee2d 100644 --- a/source/backend/cpu/compute/ConvolutionTiledExecutor.cpp +++ b/source/backend/cpu/compute/ConvolutionTiledExecutor.cpp @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvolutionTiledExecutor.hpp" -#include "AutoTime.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "Vec4.hpp" +#include "backend/cpu/compute/ConvolutionTiledExecutor.hpp" +#include +#include 
"backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "math/Vec4.hpp" namespace MNN { ErrorCode ConvolutionTiledExecutorMultiInput::onExecute(const std::vector& inputs, @@ -24,7 +24,9 @@ ErrorCode ConvolutionTiledExecutorMultiInput::onExecute(const std::vectorhost(), 0, mTempWeight->size()); if (nullptr != mTempBias) { ::memset(mTempBias->host(), 0, mTempBias->size()); - ::memcpy(mTempBias->host(), inputs[2]->host(), inputs[2]->size()); + if (inputs.size() > 2) { + ::memcpy(mTempBias->host(), inputs[2]->host(), inputs[2]->size()); + } } CPUConvolution::reorderWeight(mTempWeight->host(), inputs[1]->host(), depth, outputCount, inputs[1]->width() * inputs[1]->height(), mTempWeightCache->host()); @@ -33,7 +35,7 @@ ErrorCode ConvolutionTiledExecutorMultiInput::onExecute(const std::vector& inputs, const std::vector& outputs) { int depth = inputs[1]->channel(); - int outputCount = inputs[1]->batch(); + int outputCount = outputs[0]->channel(); mTempWeight.reset(Tensor::createDevice( {UP_DIV(outputCount, 4), UP_DIV(depth, 4), inputs[1]->width() * inputs[1]->height(), 16})); mTempWeightCache.reset(Tensor::createDevice( @@ -41,12 +43,12 @@ ErrorCode ConvolutionTiledExecutorMultiInput::onResize(const std::vectoronAcquireBuffer(mTempWeight.get(), Backend::DYNAMIC); backend()->onAcquireBuffer(mTempWeightCache.get(), Backend::DYNAMIC); mTempBias.reset(); - if (inputs[2]->elementSize() % 4 != 0) { - mTempBias.reset(Tensor::createDevice({ALIGN_UP4(inputs[2]->elementSize())})); + if (inputs.size() > 2 && inputs[2]->elementSize() % 4 == 0) { + mInputs = {inputs[0], mTempWeight.get(), inputs[2]}; + } else { + mTempBias.reset(Tensor::createDevice({ALIGN_UP4(outputCount)})); backend()->onAcquireBuffer(mTempBias.get(), Backend::DYNAMIC); mInputs = {inputs[0], mTempWeight.get(), mTempBias.get()}; - } else { - mInputs = 
{inputs[0], mTempWeight.get(), inputs[2]}; } backend()->onReleaseBuffer(mTempWeightCache.get(), Backend::DYNAMIC); auto errorCode = mProxy->onResize(mInputs, outputs); diff --git a/source/backend/cpu/compute/ConvolutionTiledExecutor.hpp b/source/backend/cpu/compute/ConvolutionTiledExecutor.hpp index 23597f8aa..1f3a5cf63 100644 --- a/source/backend/cpu/compute/ConvolutionTiledExecutor.hpp +++ b/source/backend/cpu/compute/ConvolutionTiledExecutor.hpp @@ -10,7 +10,7 @@ #define ConvolutionTiledExecutor_hpp #include -#include "../CPUConvolution.hpp" +#include "backend/cpu/CPUConvolution.hpp" // Tiled Slide Window or Im2Col + GEMM namespace MNN { class ConvolutionTiledExecutorBasic : public CPUConvolution { diff --git a/source/backend/cpu/compute/ConvolutionWinograd.cpp b/source/backend/cpu/compute/ConvolutionWinograd.cpp index 86dac559c..19cb92281 100644 --- a/source/backend/cpu/compute/ConvolutionWinograd.cpp +++ b/source/backend/cpu/compute/ConvolutionWinograd.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvolutionWinograd.hpp" +#include "backend/cpu/compute/ConvolutionWinograd.hpp" #include -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "WingoradGenerater.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "math/WingoradGenerater.hpp" #ifdef MNN_USE_NEON #include #endif diff --git a/source/backend/cpu/compute/ConvolutionWinograd.hpp b/source/backend/cpu/compute/ConvolutionWinograd.hpp index 3c78a053a..5bd439da8 100644 --- a/source/backend/cpu/compute/ConvolutionWinograd.hpp +++ b/source/backend/cpu/compute/ConvolutionWinograd.hpp @@ -9,9 +9,9 @@ #ifndef ConvolutionWinograd_hpp #define ConvolutionWinograd_hpp -#include "CPUConvolution.hpp" -#include "ConvolutionFloatFactory.h" -#include 
"WinogradOptFunction.hpp" +#include "backend/cpu/CPUConvolution.hpp" +#include "backend/cpu/compute/ConvolutionFloatFactory.h" +#include "backend/cpu/compute/WinogradOptFunction.hpp" namespace MNN { class ConvolutionWinograd : public CPUConvolution { diff --git a/source/backend/cpu/compute/ConvolutionWinograd3D.cpp b/source/backend/cpu/compute/ConvolutionWinograd3D.cpp index 18bfc03f5..3b28d82be 100644 --- a/source/backend/cpu/compute/ConvolutionWinograd3D.cpp +++ b/source/backend/cpu/compute/ConvolutionWinograd3D.cpp @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvolutionWinograd3D.hpp" -#include "CPUBackend.hpp" +#include "backend/cpu/compute/ConvolutionWinograd3D.hpp" +#include "backend/cpu/CPUBackend.hpp" #include -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "WingoradGenerater.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "math/WingoradGenerater.hpp" #ifdef MNN_USE_NEON #include #endif @@ -39,14 +39,14 @@ ConvolutionWinograd3D::ConvolutionWinograd3D(const Convolution3DCommon *convOp, } } mPostFunction = CPUConvolution3D::getPostFunction(convOp); - + const int inputChannel = convOp->inputCount(), outputChannel = convOp->outputCount(); const int kernelDepth = mKernels[0], kernelSize = mKernels[1], alpha = unit + kernelSize - 1, alpha2 = alpha * alpha; mAlpha = alpha; - + mSourceTransform = WinogradFunction::chooseSourceTransform(alpha, alpha); mDestTransform = WinogradFunction::chooseDestTransform(alpha, unit); - + mWeight.reset(Tensor::createDevice({ALIGN_UP4(inputChannel) * ALIGN_UP4(outputChannel) * kernelDepth * alpha2})); mBias.reset(Tensor::createDevice({ALIGN_UP4((int)biasSize)})); bool valid = b->onAcquireBuffer(mWeight.get(), Backend::STATIC); @@ -54,12 +54,12 @@ 
ConvolutionWinograd3D::ConvolutionWinograd3D(const Convolution3DCommon *convOp, if (!valid) { return; } - + memset(mBias->host(), 0, mBias->size()); memcpy(mBias->host(), bias, biasSize * sizeof(float)); - + WinogradGenerater generator(unit, kernelSize); - + const int srcDepthStep = inputChannel * outputChannel * kernelSize * kernelSize; const int dstDepthStep = ALIGN_UP4(inputChannel) * ALIGN_UP4(outputChannel) * alpha2; std::shared_ptr srcWeight, transWeight; @@ -78,7 +78,7 @@ ConvolutionWinograd3D::~ConvolutionWinograd3D() { backend()->onReleaseBuffer(mWeight.get(), Backend::STATIC); } } - + ErrorCode ConvolutionWinograd3D::onResize(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; @@ -86,7 +86,7 @@ ErrorCode ConvolutionWinograd3D::onResize(const std::vector &inputs, c const int ic = input->length(1), id = input->length(2); const int threadNumber = ((CPUBackend*)backend())->threadNumber(); const int alpha2 = mAlpha * mAlpha; - + if (mPadMode == PadMode_SAME) { mPads.clear(); for (int i = 0; i < 3; ++i) { @@ -94,11 +94,11 @@ ErrorCode ConvolutionWinograd3D::onResize(const std::vector &inputs, c mPads.push_back((inputNeeded - input->length(i + 2)) / 2); } } - + mSourceBuffer.reset(Tensor::createDevice({threadNumber, id, alpha2, UP_DIV(ic, 4), CONVOLUTION_TILED_NUMBER, 4})); mDestBuffer.reset(Tensor::createDevice({threadNumber, od + 1, alpha2, UP_DIV(oc, 4), CONVOLUTION_TILED_NUMBER, 4})); mTempBuffer.reset(Tensor::createDevice({threadNumber, 2, alpha2, 4})); - + bool succ = backend()->onAcquireBuffer(mSourceBuffer.get(), Backend::DYNAMIC); succ = succ && backend()->onAcquireBuffer(mDestBuffer.get(), Backend::DYNAMIC); succ = succ && backend()->onAcquireBuffer(mTempBuffer.get(), Backend::DYNAMIC); @@ -110,11 +110,11 @@ ErrorCode ConvolutionWinograd3D::onResize(const std::vector &inputs, c backend()->onReleaseBuffer(mTempBuffer.get(), Backend::DYNAMIC); return NO_ERROR; } - + ErrorCode 
ConvolutionWinograd3D::onExecute(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; - + const int dstUnit = mUnit, srcUnit = mAlpha, srcUnit2 = srcUnit * srcUnit; const int outputWidth = output->length(4), outputHeight = output->length(3), outputDepth = output->length(2); const int inputWidth = input->length(4), inputHeight = input->length(3), inputDepth = input->length(2); @@ -122,10 +122,10 @@ ErrorCode ConvolutionWinograd3D::onExecute(const std::vector &inputs, const int ic_4 = UP_DIV(input->length(1), 4), dc_4 = UP_DIV(output->length(1), 4); const int padY = mPads[1], padX = mPads[2], padDepth = mPads[0], kernelDepth = mKernels[0]; const int totalCount = wUnit * hUnit, tileCount = UP_DIV(totalCount, CONVOLUTION_TILED_NUMBER); - + auto postFunction = mPostFunction; const int threadNumber = std::max(((CPUBackend *)backend())->threadNumber(), 1); - + auto sourceTransformFunc = [=](int xIndex, int xC, const float* srcOrigin, float* dstOrigin, float* midBuffer0, float* midBuffer1) { int sourceZStep = inputDepth * inputWidth * inputHeight * 4; int dstZStep = xC * 4; @@ -136,9 +136,9 @@ ErrorCode ConvolutionWinograd3D::onExecute(const std::vector &inputs, const int sx = ALIMAX(0, srcX) - srcX, ex = ALIMIN(srcX + srcUnit, inputWidth) - srcX; const int sy = ALIMAX(0, srcY) - srcY, ey = ALIMIN(srcY + srcUnit, inputHeight) - srcY; const int count = 4 * (ex - sx); - + auto dst_x = dstOrigin + 4 * xi; - + auto srcStart = srcOrigin + (srcX + srcY * inputWidth) * 4; if (ey - sy < srcUnit) { memset(midBuffer1, 0, srcUnit2 * 4 * sizeof(float)); @@ -189,7 +189,7 @@ ErrorCode ConvolutionWinograd3D::onExecute(const std::vector &inputs, } } }; - + auto destTransformFunc = [=](int xIndex, int xC, const float* srcOrigin, float* dstOrigin, float* midBuffer0, float* midBuffer1) { int dstZStep = outputDepth * outputHeight * outputWidth * 4; int srcZStep = xC * 4; @@ -197,13 +197,13 @@ ErrorCode 
ConvolutionWinograd3D::onExecute(const std::vector &inputs, for (int xi = 0; xi < xC; ++xi) { const int index = xIndex + xi, wIndex = index % wUnit, hIndex = index / wUnit; auto srcXi = srcOrigin + 4 * xi; - + const int dstX = wIndex * dstUnit, dstY = hIndex * dstUnit; auto dstStart = dstOrigin + 4 * (dstX + dstY * outputWidth); - + const int ey = ALIMIN(dstY + dstUnit, outputHeight) - dstY; const int ex = ALIMIN(dstX + dstUnit, outputWidth) - dstX; - + const int count = ex * 4; if (ex == dstUnit) { for (int z = 0; z < dc_4; ++z) { @@ -236,7 +236,7 @@ ErrorCode ConvolutionWinograd3D::onExecute(const std::vector &inputs, for (int i = 0; i < ey; ++i) { mDestTransform(midBuffer0 + i * 4, midBuffer1 + i * dstUnit * 4, 4 * dstUnit, 4); } - + for (int yy = 0; yy < ey; ++yy) { auto dstYAddr = dst_depth + yy * 4 * outputWidth; auto srcYAddr = midBuffer1 + yy * 4 * dstUnit; @@ -247,7 +247,7 @@ ErrorCode ConvolutionWinograd3D::onExecute(const std::vector &inputs, } } }; - + auto gemmFunc = [=](int xC, int start, int end, const float* srcOrigin, const float* weight, float* dstOrigin) { float* tempDst = dstOrigin + outputDepth * srcUnit2 * dc_4 * xC * 4; const int element = (end - start) * dc_4 * xC * 4, offset = start * dc_4 * xC * 4; @@ -276,7 +276,7 @@ ErrorCode ConvolutionWinograd3D::onExecute(const std::vector &inputs, } } }; - + auto gemmConcurrencyFunc = [=, &gemmFunc](int xC, const float* _srcOrigin, const float* weight, float* _dstOrigin) { MNN_CONCURRENCY_BEGIN(tId, threadNumber) { const int step = UP_DIV(srcUnit2, threadNumber); @@ -284,7 +284,7 @@ ErrorCode ConvolutionWinograd3D::onExecute(const std::vector &inputs, } MNN_CONCURRENCY_END() }; - + auto tFunction = [&](const int tId, const int tileStart, const int tileStep, const int tileEnd, const float* srcOrigin, float* dstOrigin) { auto _srcOrigin = mSourceBuffer->host() + tId * mSourceBuffer->stride(0); auto _dstOrigin = mDestBuffer->host() + tId * mDestBuffer->stride(0); @@ -294,15 +294,15 @@ ErrorCode 
ConvolutionWinograd3D::onExecute(const std::vector &inputs, int xIndex = (int)tIndex * CONVOLUTION_TILED_NUMBER; int xReamin = totalCount - xIndex; int xC = xReamin > CONVOLUTION_TILED_NUMBER ? CONVOLUTION_TILED_NUMBER : xReamin; - + sourceTransformFunc(xIndex, xC, srcOrigin, _srcOrigin, midBuffer0, midBuffer1); - + if (threadNumber != tileStep) { gemmConcurrencyFunc(xC, _srcOrigin, mWeight->host(), _dstOrigin); } else { gemmFunc(xC, 0, srcUnit2, _srcOrigin, mWeight->host(), _dstOrigin); } - + destTransformFunc(xIndex, xC, _dstOrigin, dstOrigin, midBuffer0, midBuffer1); } }; @@ -317,11 +317,11 @@ ErrorCode ConvolutionWinograd3D::onExecute(const std::vector &inputs, } MNN_CONCURRENCY_END(); } - + if (tileCount % threadNumber != 0) { tFunction(0, tileCount / threadNumber * threadNumber, 1, tileCount, srcOrigin, dstOrigin); } - + MNN_CONCURRENCY_BEGIN(tId, threadNumber) { int channelStep = UP_DIV(dc_4, threadNumber); int channelStart = channelStep * tId, channelNum = ALIMIN(channelStep * (tId + 1), dc_4) - channelStart; diff --git a/source/backend/cpu/compute/ConvolutionWinograd3D.hpp b/source/backend/cpu/compute/ConvolutionWinograd3D.hpp index 022777e54..9d20a205a 100644 --- a/source/backend/cpu/compute/ConvolutionWinograd3D.hpp +++ b/source/backend/cpu/compute/ConvolutionWinograd3D.hpp @@ -9,8 +9,8 @@ #ifndef ConvolutionWinograd3d_hpp #define ConvolutionWinograd3d_hpp -#include "CPUConvolution3D.hpp" -#include "WinogradOptFunction.hpp" +#include "backend/cpu/CPUConvolution3D.hpp" +#include "backend/cpu/compute/WinogradOptFunction.hpp" namespace MNN { class ConvolutionWinograd3D : public Execution { diff --git a/source/backend/cpu/compute/DeconvolutionWithStride.cpp b/source/backend/cpu/compute/DeconvolutionWithStride.cpp index 38740b0ec..2eb6e8250 100644 --- a/source/backend/cpu/compute/DeconvolutionWithStride.cpp +++ b/source/backend/cpu/compute/DeconvolutionWithStride.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include 
"DeconvolutionWithStride.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" -#include "Concurrency.h" -#include "ConvOpt.h" -#include "Macro.h" -#include "WingoradGenerater.hpp" -#include "WinogradOptFunction.hpp" +#include "backend/cpu/compute/DeconvolutionWithStride.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Concurrency.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" +#include "math/WingoradGenerater.hpp" +#include "backend/cpu/compute/WinogradOptFunction.hpp" #ifdef MNN_USE_NEON #include diff --git a/source/backend/cpu/compute/DeconvolutionWithStride.hpp b/source/backend/cpu/compute/DeconvolutionWithStride.hpp index e308bdb15..987f2d13e 100644 --- a/source/backend/cpu/compute/DeconvolutionWithStride.hpp +++ b/source/backend/cpu/compute/DeconvolutionWithStride.hpp @@ -9,8 +9,8 @@ #ifndef DeconvolutionWithStride_hpp #define DeconvolutionWithStride_hpp -#include "../CPUDeconvolution.hpp" -#include "Backend.hpp" +#include "backend/cpu/CPUDeconvolution.hpp" +#include "core/Backend.hpp" #include namespace MNN { class DeconvolutionWithStride : public CPUDeconvolutionCommon { diff --git a/source/backend/cpu/compute/Int8FunctionsOpt.cpp b/source/backend/cpu/compute/Int8FunctionsOpt.cpp index 09d97f59c..1c9d094ee 100644 --- a/source/backend/cpu/compute/Int8FunctionsOpt.cpp +++ b/source/backend/cpu/compute/Int8FunctionsOpt.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Int8FunctionsOpt.h" +#include "backend/cpu/compute/Int8FunctionsOpt.h" #include -#include "Macro.h" +#include "core/Macro.h" static const int gUnit = 8; diff --git a/source/backend/cpu/compute/OptimizedComputer.cpp b/source/backend/cpu/compute/OptimizedComputer.cpp index 049afb44d..aac988a2a 100644 --- a/source/backend/cpu/compute/OptimizedComputer.cpp +++ b/source/backend/cpu/compute/OptimizedComputer.cpp @@ -13,9 +13,9 @@ See the License for the specific language 
governing permissions and limitations under the License. ==============================================================================*/ -#include "OptimizedComputer.hpp" +#include "backend/cpu/compute/OptimizedComputer.hpp" #include -#include "Macro.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif diff --git a/source/backend/cpu/compute/OptimizedComputer.hpp b/source/backend/cpu/compute/OptimizedComputer.hpp index 7ac888f1c..0cb657b64 100644 --- a/source/backend/cpu/compute/OptimizedComputer.hpp +++ b/source/backend/cpu/compute/OptimizedComputer.hpp @@ -18,7 +18,7 @@ limitations under the License. #include #include -#include "CPUFixedPoint.hpp" +#include "backend/cpu/CPUFixedPoint.hpp" namespace MNN { namespace Optimized { diff --git a/source/backend/cpu/compute/ResizeFunction.cpp b/source/backend/cpu/compute/ResizeFunction.cpp index d2dc09352..1037e7ae9 100644 --- a/source/backend/cpu/compute/ResizeFunction.cpp +++ b/source/backend/cpu/compute/ResizeFunction.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ResizeFunction.h" +#include "backend/cpu/compute/ResizeFunction.h" #include -#include "AutoStorage.h" -#include "Macro.h" +#include "core/AutoStorage.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif diff --git a/source/backend/cpu/compute/StrassenMatmulComputor.cpp b/source/backend/cpu/compute/StrassenMatmulComputor.cpp index ef0c6c9d7..1dcc4d0a1 100644 --- a/source/backend/cpu/compute/StrassenMatmulComputor.cpp +++ b/source/backend/cpu/compute/StrassenMatmulComputor.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "StrassenMatmulComputor.hpp" +#include "backend/cpu/compute/StrassenMatmulComputor.hpp" #include -#include "ConvOpt.h" -#include "Macro.h" +#include "backend/cpu/compute/ConvOpt.h" +#include "core/Macro.h" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include extern "C" { void MNNStrassenMergeCFunction(float* c11, float* c12, float* 
c21, float* c22, float* xAddr, size_t cStride, size_t eSub, size_t hSub); diff --git a/source/backend/cpu/compute/StrassenMatmulComputor.hpp b/source/backend/cpu/compute/StrassenMatmulComputor.hpp index 453b26e45..3b24eeff6 100644 --- a/source/backend/cpu/compute/StrassenMatmulComputor.hpp +++ b/source/backend/cpu/compute/StrassenMatmulComputor.hpp @@ -10,7 +10,7 @@ #define StrassenMatmulComputor_hpp #include -#include "Backend.hpp" +#include "core/Backend.hpp" namespace MNN { class StrassenMatrixComputor { public: diff --git a/source/backend/cpu/compute/WinogradOptFunction.cpp b/source/backend/cpu/compute/WinogradOptFunction.cpp index 02a2de674..1c5d2aaab 100644 --- a/source/backend/cpu/compute/WinogradOptFunction.cpp +++ b/source/backend/cpu/compute/WinogradOptFunction.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "WinogradOptFunction.hpp" +#include "backend/cpu/compute/WinogradOptFunction.hpp" #include #include -#include "Macro.h" -#include "Vec4.hpp" +#include "core/Macro.h" +#include "math/Vec4.hpp" using namespace MNN::Math; #define DEFAULT_UNIT 8 diff --git a/source/backend/cpu/x86_x64/CMakeLists.txt b/source/backend/cpu/x86_x64/CMakeLists.txt new file mode 100644 index 000000000..bd6a62a20 --- /dev/null +++ b/source/backend/cpu/x86_x64/CMakeLists.txt @@ -0,0 +1,31 @@ +if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(X86_64)|(x64)|(X64)|(amd64)|(AMD64)") + if (APPLE) + add_definitions(-fno-stack-check) # Workaround a Xcode 11.X bug + endif() + add_definitions(-DMNN_USE_SSE) + FILE(GLOB MNN_X8664_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) + add_library(MNNX8664 OBJECT ${MNN_X8664_SRC}) + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNX8664) + IF(MNN_USE_AVX) + FILE(GLOB MNN_AVX_SRC ${CMAKE_CURRENT_LIST_DIR}/avx/*.cpp) + add_library(MNNAVX OBJECT ${MNN_AVX_SRC}) + if(WIN32 OR MSVC) + target_compile_options(MNNAVX PRIVATE /arch:AVX) + else() + target_compile_options(MNNAVX PRIVATE -mavx) + endif() + 
add_dependencies(MNNX8664 MNNAVX) + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNAVX) + ENDIF() + IF(MNN_USE_SSE) + FILE(GLOB MNN_SSE_SRC ${CMAKE_CURRENT_LIST_DIR}/sse/*.cpp) + add_library(MNNSSE OBJECT ${MNN_SSE_SRC}) + add_dependencies(MNNX8664 MNNSSE) + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNSSE) + ENDIF() + SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) + SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) +endif() diff --git a/source/backend/cpu/x86_x64/FunctionDispatcher.cpp b/source/backend/cpu/x86_x64/FunctionDispatcher.cpp index 6e52788c2..7734bb983 100644 --- a/source/backend/cpu/x86_x64/FunctionDispatcher.cpp +++ b/source/backend/cpu/x86_x64/FunctionDispatcher.cpp @@ -7,8 +7,8 @@ // #include "DispatchHelper.hpp" -#include "CommonOptFunction.h" -#include "ConvOpt.h" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "backend/cpu/compute/ConvOpt.h" #include "sse/FunctionSummary.hpp" #include "avx/FunctionSummary.hpp" diff --git a/source/backend/metal/CMakeLists.txt b/source/backend/metal/CMakeLists.txt new file mode 100644 index 000000000..7a3558eaa --- /dev/null +++ b/source/backend/metal/CMakeLists.txt @@ -0,0 +1,30 @@ +if(MNN_METAL AND APPLE) + FILE(GLOB MNN_Metal_SRC ${CMAKE_CURRENT_LIST_DIR}/*.mm) + FILE(GLOB MNN_Metal_KERNELS_SRC ${CMAKE_CURRENT_LIST_DIR}/*.metal) + message(STATUS "Generating mnn.metallib at ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib") + IF(DEFINED SDK_VERSION) + SET(METAL_SDK_PLAT "iphoneos") + ELSE() + SET(METAL_SDK_PLAT "macosx") + ENDIF() + message(STATUS "Compiling Metal Kernels with ${METAL_SDK_PLAT} SDK") + add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMAND xcrun -sdk ${METAL_SDK_PLAT} metal "${MNN_Metal_KERNELS_SRC}" -o ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMAND_EXPAND_LISTS) + add_custom_target (MNNMetalLIB DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMENT "Generating mnn.metallib") + file(REMOVE 
"${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm") + add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm" + COMMAND ${PYTHON_EXECUTABLE} + "${CMAKE_CURRENT_LIST_DIR}/MetalCodeGen.py" + "${CMAKE_CURRENT_LIST_DIR}/" + "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm" + COMMENT "Metal Code Generation" + ) + add_library(MNNMetal OBJECT ${MNN_Metal_SRC} "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm") + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNMetal) + add_dependencies(MNNMetal MNNMetalLIB) + SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) + SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) + + # This is just work around some CMake limitations and is really ugly + #list(APPEND MNN_OBJECTS_TO_LINK ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib) +endif() diff --git a/source/backend/metal/MNNMetalContext.h b/source/backend/metal/MNNMetalContext.h index 3aed8853b..e32520830 100644 --- a/source/backend/metal/MNNMetalContext.h +++ b/source/backend/metal/MNNMetalContext.h @@ -10,7 +10,7 @@ #define MNNMETALCONTEXT_H #import "MetalDefine.h" -#import "Tensor.hpp" +#import #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MNNMetalContext.mm b/source/backend/metal/MNNMetalContext.mm index 829aea551..c35d0c8ce 100644 --- a/source/backend/metal/MNNMetalContext.mm +++ b/source/backend/metal/MNNMetalContext.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalBackend.hpp b/source/backend/metal/MetalBackend.hpp index ced76b114..7465181ef 100644 --- a/source/backend/metal/MetalBackend.hpp +++ b/source/backend/metal/MetalBackend.hpp @@ -9,7 +9,7 @@ #ifndef MetalBackend_hpp #define MetalBackend_hpp -#include "Backend.hpp" +#include "core/Backend.hpp" #include "MNN_generated.h" #include "MetalDefine.h" diff 
--git a/source/backend/metal/MetalBackend.mm b/source/backend/metal/MetalBackend.mm index c5771bedb..103eacf43 100644 --- a/source/backend/metal/MetalBackend.mm +++ b/source/backend/metal/MetalBackend.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalBackend.hpp" +#import "backend/metal/MetalBackend.hpp" #import -#import "MNNMetalContext.h" -#import "Macro.h" -#import "TensorUtils.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/TensorUtils.hpp" #if MNN_METAL_ENABLED @@ -164,7 +164,7 @@ } auto exe = iter->second->onCreate(inputs, op, this); if (NULL == exe) { - MNN_PRINT("The Creator Don't support type %d, %s\n", op->type(), op->name()->c_str()); + MNN_PRINT("The Creator Don't support type %d, %s\n", op->type(), op->name() ? op->name()->c_str() : ""); return NULL; } return exe; diff --git a/source/backend/metal/MetalBatchToSpaceND.hpp b/source/backend/metal/MetalBatchToSpaceND.hpp index 0d66cae87..3238f510b 100644 --- a/source/backend/metal/MetalBatchToSpaceND.hpp +++ b/source/backend/metal/MetalBatchToSpaceND.hpp @@ -9,7 +9,7 @@ #ifndef MetalBatchToSpaceND_hpp #define MetalBatchToSpaceND_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalBatchToSpaceND.mm b/source/backend/metal/MetalBatchToSpaceND.mm index 7746d4f92..2f0f0a0f9 100755 --- a/source/backend/metal/MetalBatchToSpaceND.mm +++ b/source/backend/metal/MetalBatchToSpaceND.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalBatchToSpaceND.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalBatchToSpaceND.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalBinary.hpp b/source/backend/metal/MetalBinary.hpp index 
514aa7e4e..c41fac4a9 100644 --- a/source/backend/metal/MetalBinary.hpp +++ b/source/backend/metal/MetalBinary.hpp @@ -9,7 +9,7 @@ #ifndef MetalBinary_hpp #define MetalBinary_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalBinary.mm b/source/backend/metal/MetalBinary.mm index c6c469cd7..dc4c8168a 100755 --- a/source/backend/metal/MetalBinary.mm +++ b/source/backend/metal/MetalBinary.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalBinary.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalBinary.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalCast.hpp b/source/backend/metal/MetalCast.hpp index d1fb1fe0f..9a14ab5de 100644 --- a/source/backend/metal/MetalCast.hpp +++ b/source/backend/metal/MetalCast.hpp @@ -9,7 +9,7 @@ #ifndef MetalCast_hpp #define MetalCast_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #import "Type_generated.h" diff --git a/source/backend/metal/MetalCast.mm b/source/backend/metal/MetalCast.mm index b11dd061d..49129ddaf 100755 --- a/source/backend/metal/MetalCast.mm +++ b/source/backend/metal/MetalCast.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalCast.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalCast.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalCodeGen.py b/source/backend/metal/MetalCodeGen.py new file mode 100644 index 000000000..01dcc6735 --- /dev/null +++ b/source/backend/metal/MetalCodeGen.py @@ -0,0 +1,36 @@ +import os +import 
sys +from os import listdir +from os.path import isfile, join +shaderPath=sys.argv[1] +cppPath=sys.argv[2] +def main(): + shaders=[] + for root, dirs, files in os.walk(shaderPath): + for file in files: + if file.endswith('.mm'): + shaders.append(os.path.join(root,file)) + with open(cppPath,"w") as f: + f.write("// This file is generated by Shell for ops register\n") + f.write("#import \"backend/metal/MetalDefine.h\"\n") + f.write(" namespace MNN {\n") + f.write("#if MNN_METAL_ENABLED\n") + funcs=[] + for shapath in shaders: + with open(shapath,"r") as sha: + lines=sha.readlines() + for l in lines: + if l.startswith("REGISTER_METAL_OP_CREATOR("): + x=l.replace("REGISTER_METAL_OP_CREATOR(","").replace(")","").replace(" ","").replace(";","").replace("\n","").split(",") + funcname="___"+x[0]+"__"+x[1]+"__();" + funcs.append(funcname) + f.write(" extern void "+funcname+"\n") + break + pass + f.write("void registerMetalOps() {\n") + for func in funcs: + f.write(" "+func+"\n") + f.write("}\n#endif\n}") + +if __name__ == '__main__': + main() diff --git a/source/backend/metal/MetalConcat.hpp b/source/backend/metal/MetalConcat.hpp index e64952fa4..d79564c9a 100644 --- a/source/backend/metal/MetalConcat.hpp +++ b/source/backend/metal/MetalConcat.hpp @@ -10,7 +10,7 @@ #define MetalConcat_hpp #import -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalConcat.mm b/source/backend/metal/MetalConcat.mm index 44ca3cddd..6b5a544df 100755 --- a/source/backend/metal/MetalConcat.mm +++ b/source/backend/metal/MetalConcat.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalConcat.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "TensorUtils.hpp" +#import "backend/metal/MetalConcat.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import 
"core/TensorUtils.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalConvolution.mm b/source/backend/metal/MetalConvolution.mm index e036a833e..89c4e6475 100755 --- a/source/backend/metal/MetalConvolution.mm +++ b/source/backend/metal/MetalConvolution.mm @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalConvolution.hpp" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "MetalConvolution1x1.hpp" -#import "MetalConvolutionGEMM.hpp" -#import "MetalConvolutionWinograd.hpp" +#import "backend/metal/MetalConvolution.hpp" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import "backend/metal/MetalConvolution1x1.hpp" +#import "backend/metal/MetalConvolutionGEMM.hpp" +#import "backend/metal/MetalConvolutionWinograd.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalConvolution1x1.mm b/source/backend/metal/MetalConvolution1x1.mm index 33ec42539..70ebddc3d 100644 --- a/source/backend/metal/MetalConvolution1x1.mm +++ b/source/backend/metal/MetalConvolution1x1.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalConvolution1x1.hpp" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalConvolution1x1.hpp" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED @@ -46,7 +46,7 @@ auto backend = static_cast(this->backend()); auto context = (__bridge MNNMetalContext *)backend->context(); auto w = output->width(), h = output->height(), z = UP_DIV(output->channel(), 4), b = output->batch(); - + auto encoder = [context encoder]; auto bandwidth = (MetalBandwidth){}; MTLSize threads = {}; @@ -74,7 +74,7 @@ auto backend = static_cast(this->backend()); auto context = (__bridge MNNMetalContext *)backend->context(); auto w = output->width(), h = output->height(), z = UP_DIV(output->channel(), 4), b = output->batch();; - + auto encoder = [context encoder]; auto bandwidth = 
(MetalBandwidth){}; MTLSize threads = {}; diff --git a/source/backend/metal/MetalConvolutionCommon.hpp b/source/backend/metal/MetalConvolutionCommon.hpp index 61f4e60b0..4ebaa1364 100644 --- a/source/backend/metal/MetalConvolutionCommon.hpp +++ b/source/backend/metal/MetalConvolutionCommon.hpp @@ -9,8 +9,8 @@ #ifndef MetalConvolutionCommon_hpp #define MetalConvolutionCommon_hpp -#import "ConvolutionIntFactory.hpp" -#import "Execution.hpp" +#import "backend/cpu/compute/ConvolutionIntFactory.hpp" +#import "core/Execution.hpp" #import "MNNMetalContext.h" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalConvolutionCommon.mm b/source/backend/metal/MetalConvolutionCommon.mm index a69d8c1cc..29f916646 100644 --- a/source/backend/metal/MetalConvolutionCommon.mm +++ b/source/backend/metal/MetalConvolutionCommon.mm @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalConvolutionCommon.hpp" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "MetalConvolution1x1.hpp" -#import "MetalConvolutionWinograd.hpp" -#import "TensorUtils.hpp" +#import "backend/metal/MetalConvolutionCommon.hpp" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import "backend/metal/MetalConvolution1x1.hpp" +#import "backend/metal/MetalConvolutionWinograd.hpp" +#import "core/TensorUtils.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalConvolutionDepthwise.mm b/source/backend/metal/MetalConvolutionDepthwise.mm index e00f1064e..17406f79e 100755 --- a/source/backend/metal/MetalConvolutionDepthwise.mm +++ b/source/backend/metal/MetalConvolutionDepthwise.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalConvolutionDepthwise.hpp" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalConvolutionDepthwise.hpp" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED @@ -68,7 +68,7 @@ auto backend 
= static_cast(this->backend()); auto context = (__bridge MNNMetalContext *)backend->context(); auto w = output->width(), h = output->height(), z = UP_DIV(output->channel(), 4), b = output->batch(); - + auto encoder = [context encoder]; auto bandwidth = [context load:@"qntconv_depthwise" encoder:encoder]; [encoder setBuffer:(__bridge id)(void *)input->deviceId() offset:0 atIndex:0]; @@ -87,7 +87,7 @@ auto backend = static_cast(this->backend()); auto context = (__bridge MNNMetalContext *)backend->context(); auto w = output->width(), h = output->height(), z = UP_DIV(output->channel(), 4), b = output->batch(); - + auto encoder = [context encoder]; auto bandwidth = [context load:@"conv_depthwise" encoder:encoder]; [encoder setBuffer:(__bridge id)(void *)input->deviceId() offset:0 atIndex:0]; diff --git a/source/backend/metal/MetalConvolutionGEMM.mm b/source/backend/metal/MetalConvolutionGEMM.mm index abfc68f8b..c5a20f099 100644 --- a/source/backend/metal/MetalConvolutionGEMM.mm +++ b/source/backend/metal/MetalConvolutionGEMM.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalConvolutionGEMM.hpp" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "MetalConvolution.hpp" +#import "backend/metal/MetalConvolutionGEMM.hpp" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import "backend/metal/MetalConvolution.hpp" #if MNN_METAL_ENABLED namespace MNN { @@ -35,7 +35,7 @@ if ((iw * ih * ic) / (ow * oh * oc) > 4) { return false; } - + auto unit = conv->quanParameter() != nullptr ? 
sizeof(float) : sizeof(metal_float); auto iz = UP_DIV(ic, 4), oz = UP_DIV(oc, 4), batch = input->batch(); return UP_DIV(ow * oh * batch, 4) * kx * ky * iz * 16 * sizeof(metal_float) < (2 << 20) && // tmp input @@ -107,7 +107,7 @@ backend->onReleaseBuffer(mTempOutput.get(), Backend::DYNAMIC); return NO_ERROR; } - + ErrorCode MetalConvolutionGEMM::onExecute(const std::vector &inputs, const std::vector &outputs) { if (mQnt) { return onQuantized(inputs[0], outputs[0]); // handle quantize in GEMM @@ -120,11 +120,11 @@ auto backend = static_cast(this->backend()); auto context = (__bridge MNNMetalContext *)backend->context(); auto encoder = [context encoder]; - + { // im2col NSUInteger iz = UP_DIV(input->channel(), 4), ib = input->batch(); NSUInteger ow = output->width(), oh = output->height(); - + auto bandwidth = [context load:@"qntconv_im2col" encoder:encoder]; [encoder setBuffer:(__bridge id)(void *)input->deviceId() offset:0 atIndex:0]; [encoder setBuffer:(__bridge id)(void *)mTempInput->deviceId() offset:0 atIndex:1]; @@ -140,7 +140,7 @@ { // gemm NSUInteger gw = UP_DIV(output->width() * output->height() * output->batch(), 4); NSUInteger gh = UP_DIV(output->channel(), 4); - + auto bandwidth = [context load:@"qntmatmul4x4" encoder:encoder]; [encoder setBuffer:(__bridge id)(void *)mTempInput->deviceId() offset:0 atIndex:0]; [encoder setBuffer:(__bridge id)(void *)mTempOutput->deviceId() offset:0 atIndex:1]; @@ -150,7 +150,7 @@ } { // col2im NSUInteger ow = output->width(), oh = output->height(), oz = UP_DIV(output->channel(), 4), ob = output->batch(); - + auto bandwidth = [context load:@"qntconv_col2im" encoder:encoder]; [encoder setBuffer:(__bridge id)(void *)mTempOutput->deviceId() offset:0 atIndex:0]; [encoder setBuffer:(__bridge id)(void *)output->deviceId() offset:0 atIndex:1]; diff --git a/source/backend/metal/MetalConvolutionWinograd.mm b/source/backend/metal/MetalConvolutionWinograd.mm index 4c596db71..56c8a6c4d 100644 --- 
a/source/backend/metal/MetalConvolutionWinograd.mm +++ b/source/backend/metal/MetalConvolutionWinograd.mm @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalConvolutionWinograd.hpp" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "MetalConvolution.hpp" -#import "WingoradGenerater.hpp" +#import "backend/metal/MetalConvolutionWinograd.hpp" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import "backend/metal/MetalConvolution.hpp" +#import "math/WingoradGenerater.hpp" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalCrop.hpp b/source/backend/metal/MetalCrop.hpp index 1e46b3bdc..a00e65113 100644 --- a/source/backend/metal/MetalCrop.hpp +++ b/source/backend/metal/MetalCrop.hpp @@ -9,7 +9,7 @@ #ifndef MetalCrop_hpp #define MetalCrop_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalCrop.mm b/source/backend/metal/MetalCrop.mm index 2ca5131bb..ede0798de 100755 --- a/source/backend/metal/MetalCrop.mm +++ b/source/backend/metal/MetalCrop.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalCrop.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalCrop.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalCropAndResize.hpp b/source/backend/metal/MetalCropAndResize.hpp index 9d2fb3b8b..d5d305e19 100644 --- a/source/backend/metal/MetalCropAndResize.hpp +++ b/source/backend/metal/MetalCropAndResize.hpp @@ -9,7 +9,7 @@ #ifndef MetalCropAndResize_hpp #define MetalCropAndResize_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git 
a/source/backend/metal/MetalCropAndResize.mm b/source/backend/metal/MetalCropAndResize.mm index 72319a301..0f672d073 100755 --- a/source/backend/metal/MetalCropAndResize.mm +++ b/source/backend/metal/MetalCropAndResize.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalCropAndResize.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalCropAndResize.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalDeconvolution.hpp b/source/backend/metal/MetalDeconvolution.hpp index ed88a0761..61bfa64e1 100644 --- a/source/backend/metal/MetalDeconvolution.hpp +++ b/source/backend/metal/MetalDeconvolution.hpp @@ -9,7 +9,7 @@ #ifndef MetalDeconvolution_hpp #define MetalDeconvolution_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalDeconvolution.mm b/source/backend/metal/MetalDeconvolution.mm index ada8e74c5..30eff686f 100755 --- a/source/backend/metal/MetalDeconvolution.mm +++ b/source/backend/metal/MetalDeconvolution.mm @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalDeconvolution.hpp" -#import "ConvolutionIntFactory.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalDeconvolution.hpp" +#import "backend/cpu/compute/ConvolutionIntFactory.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { - + static int leastCommonMultiple(int m, int n) { int a = m, b = n; while(a != b){ @@ -161,7 +161,7 @@ static int leastCommonMultiple(int m, int n) { // const buffer auto deltaKy = leastCommonMultiple(mDilateY, mStrideY) / mDilateY; auto deltaKx = 
leastCommonMultiple(mDilateX, mStrideX) / mDilateX; - + int consts[] = { iw, ih, diff --git a/source/backend/metal/MetalDequantize.hpp b/source/backend/metal/MetalDequantize.hpp index 280a3bbc9..1aa8ae8f1 100644 --- a/source/backend/metal/MetalDequantize.hpp +++ b/source/backend/metal/MetalDequantize.hpp @@ -9,7 +9,7 @@ #ifndef MetalDequantize_hpp #define MetalDequantize_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalDequantize.mm b/source/backend/metal/MetalDequantize.mm index ace7b6f25..566e96dbb 100755 --- a/source/backend/metal/MetalDequantize.mm +++ b/source/backend/metal/MetalDequantize.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalDequantize.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalDequantize.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalEltwise.hpp b/source/backend/metal/MetalEltwise.hpp index bd24a4887..2a2d04917 100644 --- a/source/backend/metal/MetalEltwise.hpp +++ b/source/backend/metal/MetalEltwise.hpp @@ -9,7 +9,7 @@ #ifndef MetalEltwise_hpp #define MetalEltwise_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalEltwise.mm b/source/backend/metal/MetalEltwise.mm index d8ceaf2c4..907e83315 100755 --- a/source/backend/metal/MetalEltwise.mm +++ b/source/backend/metal/MetalEltwise.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalEltwise.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalEltwise.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if 
MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalFill.hpp b/source/backend/metal/MetalFill.hpp index 9dbe93eca..c4ed5204a 100644 --- a/source/backend/metal/MetalFill.hpp +++ b/source/backend/metal/MetalFill.hpp @@ -9,7 +9,7 @@ #ifndef MetalFill_hpp #define MetalFill_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalFill.mm b/source/backend/metal/MetalFill.mm index 547ce6d26..f04fa2a76 100755 --- a/source/backend/metal/MetalFill.mm +++ b/source/backend/metal/MetalFill.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalFill.hpp" -#import "MNNMetalContext.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalFill.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalGather.hpp b/source/backend/metal/MetalGather.hpp index dcfa885ea..d8799c6df 100644 --- a/source/backend/metal/MetalGather.hpp +++ b/source/backend/metal/MetalGather.hpp @@ -9,7 +9,7 @@ #ifndef MetalGather_hpp #define MetalGather_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalGather.mm b/source/backend/metal/MetalGather.mm index 80811bf06..b186fed5f 100755 --- a/source/backend/metal/MetalGather.mm +++ b/source/backend/metal/MetalGather.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalGather.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalGather.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalGatherV2.hpp b/source/backend/metal/MetalGatherV2.hpp index 7756cf7dd..5998fe9f3 100644 --- 
a/source/backend/metal/MetalGatherV2.hpp +++ b/source/backend/metal/MetalGatherV2.hpp @@ -9,7 +9,7 @@ #ifndef MetalGatherV2_hpp #define MetalGatherV2_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #import "Type_generated.h" diff --git a/source/backend/metal/MetalGatherV2.mm b/source/backend/metal/MetalGatherV2.mm index 3cddc9ab6..dfa917745 100755 --- a/source/backend/metal/MetalGatherV2.mm +++ b/source/backend/metal/MetalGatherV2.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalGatherV2.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalGatherV2.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalInterp.hpp b/source/backend/metal/MetalInterp.hpp index 52774ef29..8cc1bf5a5 100644 --- a/source/backend/metal/MetalInterp.hpp +++ b/source/backend/metal/MetalInterp.hpp @@ -9,7 +9,7 @@ #ifndef MetalInterp_hpp #define MetalInterp_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalInterp.mm b/source/backend/metal/MetalInterp.mm index bb83e42ef..58957facf 100755 --- a/source/backend/metal/MetalInterp.mm +++ b/source/backend/metal/MetalInterp.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalInterp.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalInterp.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalLRN.hpp b/source/backend/metal/MetalLRN.hpp index 5e481bb99..144284026 100644 --- a/source/backend/metal/MetalLRN.hpp +++ 
b/source/backend/metal/MetalLRN.hpp @@ -9,7 +9,7 @@ #ifndef MetalLRN_hpp #define MetalLRN_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalLRN.mm b/source/backend/metal/MetalLRN.mm index 73ae8ef16..5f99f9a6f 100755 --- a/source/backend/metal/MetalLRN.mm +++ b/source/backend/metal/MetalLRN.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalLRN.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalLRN.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalLSTM.hpp b/source/backend/metal/MetalLSTM.hpp index fc056a551..ebfb4a1dc 100644 --- a/source/backend/metal/MetalLSTM.hpp +++ b/source/backend/metal/MetalLSTM.hpp @@ -9,7 +9,7 @@ #ifndef MetalLSTM_hpp #define MetalLSTM_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalLSTM.mm b/source/backend/metal/MetalLSTM.mm index 362427706..86386911c 100755 --- a/source/backend/metal/MetalLSTM.mm +++ b/source/backend/metal/MetalLSTM.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalLSTM.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalLSTM.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { @@ -127,7 +127,7 @@ auto context = (__bridge MNNMetalContext *)backend->context(); auto input = inputs[0], output = outputs[0]; int iw = input->width(), ow = output->width(), c = input->channel(), z = UP_DIV(c, 4); - + auto constBuffer = [context newDeviceBuffer:4 * sizeof(int) 
access:CPUWriteOnly]; ((int *)constBuffer.contents)[0] = ow; ((int *)constBuffer.contents)[1] = iw; diff --git a/source/backend/metal/MetalMatMul.hpp b/source/backend/metal/MetalMatMul.hpp index 8e18fbdae..9e7e17f3a 100644 --- a/source/backend/metal/MetalMatMul.hpp +++ b/source/backend/metal/MetalMatMul.hpp @@ -9,7 +9,7 @@ #ifndef MetalMatMul_hpp #define MetalMatMul_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalMatMul.mm b/source/backend/metal/MetalMatMul.mm index 1663fa340..4254b71b5 100755 --- a/source/backend/metal/MetalMatMul.mm +++ b/source/backend/metal/MetalMatMul.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalMatMul.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalMatMul.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalNormalize.hpp b/source/backend/metal/MetalNormalize.hpp index 973a0818d..2359f5b00 100644 --- a/source/backend/metal/MetalNormalize.hpp +++ b/source/backend/metal/MetalNormalize.hpp @@ -9,7 +9,7 @@ #ifndef MetalNormalize_hpp #define MetalNormalize_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalNormalize.mm b/source/backend/metal/MetalNormalize.mm index db49d390b..8cfb2f2cf 100755 --- a/source/backend/metal/MetalNormalize.mm +++ b/source/backend/metal/MetalNormalize.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalNormalize.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalNormalize.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import 
"backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalOPRegister.mm b/source/backend/metal/MetalOPRegister.mm index 524d8a2d2..60714f606 100644 --- a/source/backend/metal/MetalOPRegister.mm +++ b/source/backend/metal/MetalOPRegister.mm @@ -1,119 +1,114 @@ // This file is generated by Shell for ops register -#import "MetalDefine.h" -namespace MNN { +#import "backend/metal/MetalDefine.h" + namespace MNN { #if MNN_METAL_ENABLED -extern void ___MetalBatchToSpaceNDCreator__OpType_BatchToSpaceND__(); -extern void ___MetalBinaryCreator__OpType_BinaryOp__(); -extern void ___MetalCastCreator__OpType_Cast__(); -extern void ___MetalConcatCreator__OpType_Concat__(); -extern void ___MetalConvolutionCreator__OpType_Convolution__(); -extern void ___MetalConvolutionDepthwiseCreator__OpType_ConvolutionDepthwise__(); -extern void ___MetalCropCreator__OpType_Crop__(); -extern void ___MetalCropAndResizeCreator__OpType_CropAndResize__(); -extern void ___MetalDeconvolutionCreator__OpType_Deconvolution__(); -extern void ___MetalDeconvolutionCreator__OpType_DeconvolutionDepthwise__(); -extern void ___MetalDequantizeCreator__OpType_Dequantize__(); -extern void ___MetalEltwiseCreator__OpType_Eltwise__(); -extern void ___MetalFillCreator__OpType_Fill__(); -extern void ___MetalGatherCreator__OpType_Gather__(); -extern void ___MetalGatherV2Creator__OpType_GatherV2__(); -extern void ___MetalInterpCreator__OpType_Interp__(); -extern void ___MetalLRNCreator__OpType_LRN__(); -extern void ___MetalLSTMCreator__OpType_LSTM__(); -extern void ___MetalMatMulCreator__OpType_MatMul__(); -extern void ___MetalNormalizeCreator__OpType_Normalize__(); -extern void ___MetalPReLUCreator__OpType_PReLU__(); -extern void ___MetalPackCreator__OpType_Pack__(); -extern void ___MetalPermuteCreator__OpType_Permute__(); -extern void ___MetalPoolingCreator__OpType_Pooling__(); -extern void ___MetalQuantizedAddCreator__OpType_QuantizedAdd__(); -extern void 
___MetalQuantizedAvgPoolCreator__OpType_QuantizedAvgPool__(); -extern void ___MetalQuantizedMaxPoolCreator__OpType_QuantizedMaxPool__(); -extern void ___MetalQuantizedReshapeCreator__OpType_QuantizedReshape__(); -extern void ___MetalQuantizedSoftmaxCreator__OpType_QuantizedSoftmax__(); -extern void ___MetalROIPoolingCreator__OpType_ROIPooling__(); -extern void ___MetalRangeCreator__OpType_Range__(); -extern void ___MetalRankCreator__OpType_Rank__(); -extern void ___MetalReLUCreator__OpType_ReLU__(); -extern void ___MetalReLU6Creator__OpType_ReLU6__(); -extern void ___MetalReductionCreator__OpType_Reduction__(); -extern void ___MetalReshapeCreator__OpType_Reshape__(); -extern void ___MetalResizeCreator__OpType_Resize__(); -extern void ___MetalScaleCreator__OpType_Scale__(); -extern void ___MetalSeLUCreator__OpType_Selu__(); -extern void ___MetalSigmoidCreator__OpType_Sigmoid__(); -extern void ___MetalSizeCreator__OpType_Size__(); -extern void ___MetalSliceCreator__OpType_Slice__(); -extern void ___MetalSliceTFCreator__OpType_SliceTf__(); -extern void ___MetalSoftmaxCreator__OpType_Softmax__(); -extern void ___MetalSpaceToBatchNDCreator__OpType_SpaceToBatchND__(); -extern void ___MetalSpatialProductCreator__OpType_SpatialProduct__(); -extern void ___MetalSqueezeCreator__OpType_Squeeze__(); -extern void ___MetalStridedSliceCreator__OpType_StridedSlice__(); -extern void ___MetalTFQuantizedConv2DCreator__OpType_TfQuantizedConv2D__(); -extern void ___MetalTFQuantizedConv2DCreator__OpType_QuantizedDepthwiseConv2D__(); -extern void ___MetalTanHCreator__OpType_TanH__(); -extern void ___MetalTensorConverterCreator__OpType_ConvertTensor__(); -extern void ___MetalTileCreator__OpType_Tile__(); -extern void ___MetalTransposeCreator__OpType_Transpose__(); -extern void ___MetalUnaryCreator__OpType_UnaryOp__(); - + extern void ___MetalSpatialProductCreator__OpType_SpatialProduct__(); + extern void ___MetalFillCreator__OpType_Fill__(); + extern void 
___MetalTanHCreator__OpType_TanH__(); + extern void ___MetalSoftmaxCreator__OpType_Softmax__(); + extern void ___MetalGatherV2Creator__OpType_GatherV2__(); + extern void ___MetalRangeCreator__OpType_Range__(); + extern void ___MetalQuantizedSoftmaxCreator__OpType_QuantizedSoftmax__(); + extern void ___MetalCastCreator__OpType_Cast__(); + extern void ___MetalSqueezeCreator__OpType_Squeeze__(); + extern void ___MetalLRNCreator__OpType_LRN__(); + extern void ___MetalNormalizeCreator__OpType_Normalize__(); + extern void ___MetalSigmoidCreator__OpType_Sigmoid__(); + extern void ___MetalPackCreator__OpType_Pack__(); + extern void ___MetalTileCreator__OpType_Tile__(); + extern void ___MetalSizeCreator__OpType_Size__(); + extern void ___MetalReductionCreator__OpType_Reduction__(); + extern void ___MetalSliceCreator__OpType_Slice__(); + extern void ___MetalSliceTFCreator__OpType_SliceTf__(); + extern void ___MetalEltwiseCreator__OpType_Eltwise__(); + extern void ___MetalQuantizedMaxPoolCreator__OpType_QuantizedMaxPool__(); + extern void ___MetalConvolutionCreator__OpType_Convolution__(); + extern void ___MetalCropAndResizeCreator__OpType_CropAndResize__(); + extern void ___MetalConcatCreator__OpType_Concat__(); + extern void ___MetalSpaceToBatchNDCreator__OpType_SpaceToBatchND__(); + extern void ___MetalMatMulCreator__OpType_MatMul__(); + extern void ___MetalBinaryCreator__OpType_BinaryOp__(); + extern void ___MetalBatchToSpaceNDCreator__OpType_BatchToSpaceND__(); + extern void ___MetalGatherCreator__OpType_Gather__(); + extern void ___MetalConvolutionDepthwiseCreator__OpType_ConvolutionDepthwise__(); + extern void ___MetalQuantizedAvgPoolCreator__OpType_QuantizedAvgPool__(); + extern void ___MetalStridedSliceCreator__OpType_StridedSlice__(); + extern void ___MetalTFQuantizedConv2DCreator__OpType_TfQuantizedConv2D__(); + extern void ___MetalDeconvolutionCreator__OpType_Deconvolution__(); + extern void ___MetalReLUCreator__OpType_ReLU__(); + extern void 
___MetalPoolingCreator__OpType_Pooling__(); + extern void ___MetalQuantizedReshapeCreator__OpType_QuantizedReshape__(); + extern void ___MetalScaleCreator__OpType_Scale__(); + extern void ___MetalDequantizeCreator__OpType_Dequantize__(); + extern void ___MetalReshapeCreator__OpType_Reshape__(); + extern void ___MetalQuantizedAddCreator__OpType_QuantizedAdd__(); + extern void ___MetalInterpCreator__OpType_Interp__(); + extern void ___MetalUnaryCreator__OpType_UnaryOp__(); + extern void ___MetalCropCreator__OpType_Crop__(); + extern void ___MetalROIPoolingCreator__OpType_ROIPooling__(); + extern void ___MetalSeLUCreator__OpType_Selu__(); + extern void ___MetalTensorConverterCreator__OpType_ConvertTensor__(); + extern void ___MetalRankCreator__OpType_Rank__(); + extern void ___MetalTransposeCreator__OpType_Transpose__(); + extern void ___MetalPermuteCreator__OpType_Permute__(); + extern void ___MetalPReLUCreator__OpType_PReLU__(); + extern void ___MetalLSTMCreator__OpType_LSTM__(); + extern void ___MetalResizeCreator__OpType_Resize__(); + extern void ___MetalReLU6Creator__OpType_ReLU6__(); void registerMetalOps() { -___MetalBatchToSpaceNDCreator__OpType_BatchToSpaceND__(); -___MetalBinaryCreator__OpType_BinaryOp__(); -___MetalCastCreator__OpType_Cast__(); -___MetalConcatCreator__OpType_Concat__(); -___MetalConvolutionCreator__OpType_Convolution__(); -___MetalConvolutionDepthwiseCreator__OpType_ConvolutionDepthwise__(); -___MetalCropCreator__OpType_Crop__(); -___MetalCropAndResizeCreator__OpType_CropAndResize__(); -___MetalDeconvolutionCreator__OpType_Deconvolution__(); -___MetalDeconvolutionCreator__OpType_DeconvolutionDepthwise__(); -___MetalDequantizeCreator__OpType_Dequantize__(); -___MetalEltwiseCreator__OpType_Eltwise__(); -___MetalFillCreator__OpType_Fill__(); -___MetalGatherCreator__OpType_Gather__(); -___MetalGatherV2Creator__OpType_GatherV2__(); -___MetalInterpCreator__OpType_Interp__(); -___MetalLRNCreator__OpType_LRN__(); 
-___MetalLSTMCreator__OpType_LSTM__(); -___MetalMatMulCreator__OpType_MatMul__(); -___MetalNormalizeCreator__OpType_Normalize__(); -___MetalPReLUCreator__OpType_PReLU__(); -___MetalPackCreator__OpType_Pack__(); -___MetalPermuteCreator__OpType_Permute__(); -___MetalPoolingCreator__OpType_Pooling__(); -___MetalQuantizedAddCreator__OpType_QuantizedAdd__(); -___MetalQuantizedAvgPoolCreator__OpType_QuantizedAvgPool__(); -___MetalQuantizedMaxPoolCreator__OpType_QuantizedMaxPool__(); -___MetalQuantizedReshapeCreator__OpType_QuantizedReshape__(); -___MetalQuantizedSoftmaxCreator__OpType_QuantizedSoftmax__(); -___MetalROIPoolingCreator__OpType_ROIPooling__(); -___MetalRangeCreator__OpType_Range__(); -___MetalRankCreator__OpType_Rank__(); -___MetalReLUCreator__OpType_ReLU__(); -___MetalReLU6Creator__OpType_ReLU6__(); -___MetalReductionCreator__OpType_Reduction__(); -___MetalReshapeCreator__OpType_Reshape__(); -___MetalResizeCreator__OpType_Resize__(); -___MetalScaleCreator__OpType_Scale__(); -___MetalSeLUCreator__OpType_Selu__(); -___MetalSigmoidCreator__OpType_Sigmoid__(); -___MetalSizeCreator__OpType_Size__(); -___MetalSliceCreator__OpType_Slice__(); -___MetalSliceTFCreator__OpType_SliceTf__(); -___MetalSoftmaxCreator__OpType_Softmax__(); -___MetalSpaceToBatchNDCreator__OpType_SpaceToBatchND__(); -___MetalSpatialProductCreator__OpType_SpatialProduct__(); -___MetalSqueezeCreator__OpType_Squeeze__(); -___MetalStridedSliceCreator__OpType_StridedSlice__(); -___MetalTFQuantizedConv2DCreator__OpType_TfQuantizedConv2D__(); -___MetalTFQuantizedConv2DCreator__OpType_QuantizedDepthwiseConv2D__(); -___MetalTanHCreator__OpType_TanH__(); -___MetalTensorConverterCreator__OpType_ConvertTensor__(); -___MetalTileCreator__OpType_Tile__(); -___MetalTransposeCreator__OpType_Transpose__(); -___MetalUnaryCreator__OpType_UnaryOp__(); + ___MetalSpatialProductCreator__OpType_SpatialProduct__(); + ___MetalFillCreator__OpType_Fill__(); + ___MetalTanHCreator__OpType_TanH__(); + 
___MetalSoftmaxCreator__OpType_Softmax__(); + ___MetalGatherV2Creator__OpType_GatherV2__(); + ___MetalRangeCreator__OpType_Range__(); + ___MetalQuantizedSoftmaxCreator__OpType_QuantizedSoftmax__(); + ___MetalCastCreator__OpType_Cast__(); + ___MetalSqueezeCreator__OpType_Squeeze__(); + ___MetalLRNCreator__OpType_LRN__(); + ___MetalNormalizeCreator__OpType_Normalize__(); + ___MetalSigmoidCreator__OpType_Sigmoid__(); + ___MetalPackCreator__OpType_Pack__(); + ___MetalTileCreator__OpType_Tile__(); + ___MetalSizeCreator__OpType_Size__(); + ___MetalReductionCreator__OpType_Reduction__(); + ___MetalSliceCreator__OpType_Slice__(); + ___MetalSliceTFCreator__OpType_SliceTf__(); + ___MetalEltwiseCreator__OpType_Eltwise__(); + ___MetalQuantizedMaxPoolCreator__OpType_QuantizedMaxPool__(); + ___MetalConvolutionCreator__OpType_Convolution__(); + ___MetalCropAndResizeCreator__OpType_CropAndResize__(); + ___MetalConcatCreator__OpType_Concat__(); + ___MetalSpaceToBatchNDCreator__OpType_SpaceToBatchND__(); + ___MetalMatMulCreator__OpType_MatMul__(); + ___MetalBinaryCreator__OpType_BinaryOp__(); + ___MetalBatchToSpaceNDCreator__OpType_BatchToSpaceND__(); + ___MetalGatherCreator__OpType_Gather__(); + ___MetalConvolutionDepthwiseCreator__OpType_ConvolutionDepthwise__(); + ___MetalQuantizedAvgPoolCreator__OpType_QuantizedAvgPool__(); + ___MetalStridedSliceCreator__OpType_StridedSlice__(); + ___MetalTFQuantizedConv2DCreator__OpType_TfQuantizedConv2D__(); + ___MetalDeconvolutionCreator__OpType_Deconvolution__(); + ___MetalReLUCreator__OpType_ReLU__(); + ___MetalPoolingCreator__OpType_Pooling__(); + ___MetalQuantizedReshapeCreator__OpType_QuantizedReshape__(); + ___MetalScaleCreator__OpType_Scale__(); + ___MetalDequantizeCreator__OpType_Dequantize__(); + ___MetalReshapeCreator__OpType_Reshape__(); + ___MetalQuantizedAddCreator__OpType_QuantizedAdd__(); + ___MetalInterpCreator__OpType_Interp__(); + ___MetalUnaryCreator__OpType_UnaryOp__(); + ___MetalCropCreator__OpType_Crop__(); + 
___MetalROIPoolingCreator__OpType_ROIPooling__(); + ___MetalSeLUCreator__OpType_Selu__(); + ___MetalTensorConverterCreator__OpType_ConvertTensor__(); + ___MetalRankCreator__OpType_Rank__(); + ___MetalTransposeCreator__OpType_Transpose__(); + ___MetalPermuteCreator__OpType_Permute__(); + ___MetalPReLUCreator__OpType_PReLU__(); + ___MetalLSTMCreator__OpType_LSTM__(); + ___MetalResizeCreator__OpType_Resize__(); + ___MetalReLU6Creator__OpType_ReLU6__(); } #endif -} +} \ No newline at end of file diff --git a/source/backend/metal/MetalPReLU.hpp b/source/backend/metal/MetalPReLU.hpp index cd1850d7b..38682d208 100644 --- a/source/backend/metal/MetalPReLU.hpp +++ b/source/backend/metal/MetalPReLU.hpp @@ -9,7 +9,7 @@ #ifndef MetalPReLU_hpp #define MetalPReLU_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalPReLU.mm b/source/backend/metal/MetalPReLU.mm index 3b0e73fe2..3f4d42276 100755 --- a/source/backend/metal/MetalPReLU.mm +++ b/source/backend/metal/MetalPReLU.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalPReLU.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalPReLU.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalPack.hpp b/source/backend/metal/MetalPack.hpp index c4cbea97f..9182b74e1 100644 --- a/source/backend/metal/MetalPack.hpp +++ b/source/backend/metal/MetalPack.hpp @@ -9,7 +9,7 @@ #ifndef MetalPack_hpp #define MetalPack_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalPack.mm b/source/backend/metal/MetalPack.mm index 435c60e40..55205c60a 100755 --- a/source/backend/metal/MetalPack.mm +++ b/source/backend/metal/MetalPack.mm @@ -6,11 
+6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalPack.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "TensorUtils.hpp" +#import "backend/metal/MetalPack.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import "core/TensorUtils.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalPermute.hpp b/source/backend/metal/MetalPermute.hpp index ba1f1ef37..0f422a0fa 100644 --- a/source/backend/metal/MetalPermute.hpp +++ b/source/backend/metal/MetalPermute.hpp @@ -9,7 +9,7 @@ #ifndef MetalPermute_hpp #define MetalPermute_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalPermute.mm b/source/backend/metal/MetalPermute.mm index a8e482c0c..8d98447fd 100755 --- a/source/backend/metal/MetalPermute.mm +++ b/source/backend/metal/MetalPermute.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalPermute.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalPermute.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalPooling.hpp b/source/backend/metal/MetalPooling.hpp index 787d18e50..cc1697f65 100644 --- a/source/backend/metal/MetalPooling.hpp +++ b/source/backend/metal/MetalPooling.hpp @@ -9,7 +9,7 @@ #ifndef MetalPooling_hpp #define MetalPooling_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalPooling.mm b/source/backend/metal/MetalPooling.mm index 297e5b8b7..47cba0f99 100755 --- a/source/backend/metal/MetalPooling.mm +++ 
b/source/backend/metal/MetalPooling.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalPooling.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalPooling.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalQuantizedAdd.hpp b/source/backend/metal/MetalQuantizedAdd.hpp index 40dced460..b87971762 100644 --- a/source/backend/metal/MetalQuantizedAdd.hpp +++ b/source/backend/metal/MetalQuantizedAdd.hpp @@ -9,7 +9,7 @@ #ifndef MetalQuantizedAdd_hpp #define MetalQuantizedAdd_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalQuantizedAdd.mm b/source/backend/metal/MetalQuantizedAdd.mm index 5cda51e94..687b5deab 100755 --- a/source/backend/metal/MetalQuantizedAdd.mm +++ b/source/backend/metal/MetalQuantizedAdd.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalQuantizedAdd.hpp" -#import "CPUQuantizationUtils.hpp" -#import "MNNMetalContext.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalQuantizedAdd.hpp" +#import "backend/cpu/CPUQuantizationUtils.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalQuantizedAvgPool.hpp b/source/backend/metal/MetalQuantizedAvgPool.hpp index 6bcdc30a4..99691cb9f 100644 --- a/source/backend/metal/MetalQuantizedAvgPool.hpp +++ b/source/backend/metal/MetalQuantizedAvgPool.hpp @@ -9,7 +9,7 @@ #ifndef MetalQuantizedAvgPool_hpp #define MetalQuantizedAvgPool_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalQuantizedAvgPool.mm 
b/source/backend/metal/MetalQuantizedAvgPool.mm index bfb8a7f3c..16cec25f3 100755 --- a/source/backend/metal/MetalQuantizedAvgPool.mm +++ b/source/backend/metal/MetalQuantizedAvgPool.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalQuantizedAvgPool.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalQuantizedAvgPool.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalQuantizedMaxPool.hpp b/source/backend/metal/MetalQuantizedMaxPool.hpp index 825319e75..00c3a6b45 100644 --- a/source/backend/metal/MetalQuantizedMaxPool.hpp +++ b/source/backend/metal/MetalQuantizedMaxPool.hpp @@ -9,7 +9,7 @@ #ifndef MetalQuantizedMaxPool_hpp #define MetalQuantizedMaxPool_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalQuantizedMaxPool.mm b/source/backend/metal/MetalQuantizedMaxPool.mm index f8107f768..a71018017 100755 --- a/source/backend/metal/MetalQuantizedMaxPool.mm +++ b/source/backend/metal/MetalQuantizedMaxPool.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalQuantizedMaxPool.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalQuantizedMaxPool.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalQuantizedReshape.hpp b/source/backend/metal/MetalQuantizedReshape.hpp index 74e314915..41c255315 100644 --- a/source/backend/metal/MetalQuantizedReshape.hpp +++ b/source/backend/metal/MetalQuantizedReshape.hpp @@ -9,7 +9,7 @@ #ifndef MetalQuantizedReshape_hpp #define MetalQuantizedReshape_hpp -#import "Execution.hpp" 
+#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalQuantizedReshape.mm b/source/backend/metal/MetalQuantizedReshape.mm index 0d893b898..0c5fff225 100755 --- a/source/backend/metal/MetalQuantizedReshape.mm +++ b/source/backend/metal/MetalQuantizedReshape.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalQuantizedReshape.hpp" -#import "MNNMetalContext.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalQuantizedReshape.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalQuantizedSoftmax.hpp b/source/backend/metal/MetalQuantizedSoftmax.hpp index 2a51cb58e..f49a35627 100644 --- a/source/backend/metal/MetalQuantizedSoftmax.hpp +++ b/source/backend/metal/MetalQuantizedSoftmax.hpp @@ -9,7 +9,7 @@ #ifndef MetalQuantizedSoftmax_hpp #define MetalQuantizedSoftmax_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalQuantizedSoftmax.mm b/source/backend/metal/MetalQuantizedSoftmax.mm index 89c29da25..3f9148c24 100755 --- a/source/backend/metal/MetalQuantizedSoftmax.mm +++ b/source/backend/metal/MetalQuantizedSoftmax.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalQuantizedSoftmax.hpp" -#import "CPUQuantizationUtils.hpp" -#import "MNNMetalContext.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalQuantizedSoftmax.hpp" +#import "backend/cpu/CPUQuantizationUtils.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalROIPooling.hpp b/source/backend/metal/MetalROIPooling.hpp index 25ba5a450..674bbd36b 100644 --- a/source/backend/metal/MetalROIPooling.hpp +++ b/source/backend/metal/MetalROIPooling.hpp @@ -9,7 +9,7 
@@ #ifndef MetalROIPooling_hpp #define MetalROIPooling_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalROIPooling.mm b/source/backend/metal/MetalROIPooling.mm index 65c90ce07..5f6cfbb1d 100755 --- a/source/backend/metal/MetalROIPooling.mm +++ b/source/backend/metal/MetalROIPooling.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalROIPooling.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalROIPooling.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalRange.hpp b/source/backend/metal/MetalRange.hpp index 17bfca65c..6ea99d3c3 100644 --- a/source/backend/metal/MetalRange.hpp +++ b/source/backend/metal/MetalRange.hpp @@ -9,7 +9,7 @@ #ifndef MetalRange_hpp #define MetalRange_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #import "Type_generated.h" diff --git a/source/backend/metal/MetalRange.mm b/source/backend/metal/MetalRange.mm index 79306d8d3..5742a2b8a 100755 --- a/source/backend/metal/MetalRange.mm +++ b/source/backend/metal/MetalRange.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalRange.hpp" -#import "MNNMetalContext.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalRange.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalRank.hpp b/source/backend/metal/MetalRank.hpp index f0f4ac209..9a5b9d916 100644 --- a/source/backend/metal/MetalRank.hpp +++ b/source/backend/metal/MetalRank.hpp @@ -9,7 +9,7 @@ #ifndef MetalRank_hpp #define MetalRank_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if 
MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalRank.mm b/source/backend/metal/MetalRank.mm index 85814bc61..f27021bfa 100755 --- a/source/backend/metal/MetalRank.mm +++ b/source/backend/metal/MetalRank.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalRank.hpp" -#import "MNNMetalContext.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalRank.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalReLU.hpp b/source/backend/metal/MetalReLU.hpp index 6d65118ee..ddeb77231 100644 --- a/source/backend/metal/MetalReLU.hpp +++ b/source/backend/metal/MetalReLU.hpp @@ -9,7 +9,7 @@ #ifndef MetalReLU_hpp #define MetalReLU_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalReLU.mm b/source/backend/metal/MetalReLU.mm index f7b925ba7..2024b708c 100755 --- a/source/backend/metal/MetalReLU.mm +++ b/source/backend/metal/MetalReLU.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalReLU.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalReLU.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalReLU6.hpp b/source/backend/metal/MetalReLU6.hpp index f276545d0..ce0c80f8b 100644 --- a/source/backend/metal/MetalReLU6.hpp +++ b/source/backend/metal/MetalReLU6.hpp @@ -9,7 +9,7 @@ #ifndef MetalReLU6_hpp #define MetalReLU6_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalReLU6.mm b/source/backend/metal/MetalReLU6.mm index 77814f64b..50325f909 100755 --- 
a/source/backend/metal/MetalReLU6.mm +++ b/source/backend/metal/MetalReLU6.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalReLU6.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalReLU6.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalReduction.hpp b/source/backend/metal/MetalReduction.hpp index 4c64972b4..6bfdf82bf 100644 --- a/source/backend/metal/MetalReduction.hpp +++ b/source/backend/metal/MetalReduction.hpp @@ -9,7 +9,7 @@ #ifndef MetalReduction_hpp #define MetalReduction_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalReduction.mm b/source/backend/metal/MetalReduction.mm index deb57d340..a17cfd4f7 100755 --- a/source/backend/metal/MetalReduction.mm +++ b/source/backend/metal/MetalReduction.mm @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalReduction.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "TensorUtils.hpp" +#import "backend/metal/MetalReduction.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import "core/TensorUtils.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalReshape.hpp b/source/backend/metal/MetalReshape.hpp index 7fe89f594..4e60c3e80 100644 --- a/source/backend/metal/MetalReshape.hpp +++ b/source/backend/metal/MetalReshape.hpp @@ -10,7 +10,7 @@ #define MetalReshape_hpp #include -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MetalDefine.h" #include "Tensor_generated.h" diff --git a/source/backend/metal/MetalReshape.mm 
b/source/backend/metal/MetalReshape.mm index 132b31954..a0a80c867 100755 --- a/source/backend/metal/MetalReshape.mm +++ b/source/backend/metal/MetalReshape.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalReshape.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "TensorUtils.hpp" +#import "backend/metal/MetalReshape.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import "core/TensorUtils.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalResize.hpp b/source/backend/metal/MetalResize.hpp index 2f00c05f3..2ce846916 100644 --- a/source/backend/metal/MetalResize.hpp +++ b/source/backend/metal/MetalResize.hpp @@ -9,7 +9,7 @@ #ifndef MetalResize_hpp #define MetalResize_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalResize.mm b/source/backend/metal/MetalResize.mm index c0977af9b..372fa4cc4 100755 --- a/source/backend/metal/MetalResize.mm +++ b/source/backend/metal/MetalResize.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalResize.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalResize.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalScale.hpp b/source/backend/metal/MetalScale.hpp index 554298075..4d58b22fa 100644 --- a/source/backend/metal/MetalScale.hpp +++ b/source/backend/metal/MetalScale.hpp @@ -9,7 +9,7 @@ #ifndef MetalScale_hpp #define MetalScale_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalScale.mm b/source/backend/metal/MetalScale.mm index 
0e67e4f72..5b5b04d37 100755 --- a/source/backend/metal/MetalScale.mm +++ b/source/backend/metal/MetalScale.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalScale.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalScale.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { @@ -38,7 +38,7 @@ auto shape = [context newDeviceBuffer:4 * sizeof(int) access:CPUWriteOnly]; ((int *)shape.contents)[0] = w * h; ((int *)shape.contents)[2] = output->batch(); - + auto encoder = [context encoder]; [encoder setBuffer:(__bridge id)(void *)input->deviceId() offset:0 atIndex:0]; [encoder setBuffer:(__bridge id)(void *)output->deviceId() offset:0 atIndex:1]; diff --git a/source/backend/metal/MetalSeLU.hpp b/source/backend/metal/MetalSeLU.hpp index 0ce8cc87b..08bd83362 100644 --- a/source/backend/metal/MetalSeLU.hpp +++ b/source/backend/metal/MetalSeLU.hpp @@ -9,7 +9,7 @@ #ifndef MetalSeLU_hpp #define MetalSeLU_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalSeLU.mm b/source/backend/metal/MetalSeLU.mm index 7b9aa6efc..886687e8b 100755 --- a/source/backend/metal/MetalSeLU.mm +++ b/source/backend/metal/MetalSeLU.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalSeLU.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalSeLU.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalSigmoid.hpp b/source/backend/metal/MetalSigmoid.hpp index 0a9737a1a..edbfadea7 100644 --- a/source/backend/metal/MetalSigmoid.hpp +++ b/source/backend/metal/MetalSigmoid.hpp @@ -9,7 +9,7 @@ #ifndef 
MetalSigmoid_hpp #define MetalSigmoid_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalSigmoid.mm b/source/backend/metal/MetalSigmoid.mm index 4a3ad08fa..37c39ebf4 100755 --- a/source/backend/metal/MetalSigmoid.mm +++ b/source/backend/metal/MetalSigmoid.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalSigmoid.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalSigmoid.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalSize.hpp b/source/backend/metal/MetalSize.hpp index 91d348ef5..7419b60ef 100644 --- a/source/backend/metal/MetalSize.hpp +++ b/source/backend/metal/MetalSize.hpp @@ -9,7 +9,7 @@ #ifndef MetalSize_hpp #define MetalSize_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalSize.mm b/source/backend/metal/MetalSize.mm index 9b8cda710..55af5ef8c 100755 --- a/source/backend/metal/MetalSize.mm +++ b/source/backend/metal/MetalSize.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalSize.hpp" -#import "MNNMetalContext.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalSize.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalSlice.hpp b/source/backend/metal/MetalSlice.hpp index 2d5e5da36..e11737a8c 100644 --- a/source/backend/metal/MetalSlice.hpp +++ b/source/backend/metal/MetalSlice.hpp @@ -9,7 +9,7 @@ #ifndef MetalSlice_hpp #define MetalSlice_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git 
a/source/backend/metal/MetalSlice.mm b/source/backend/metal/MetalSlice.mm index e73254c16..873a9d53e 100755 --- a/source/backend/metal/MetalSlice.mm +++ b/source/backend/metal/MetalSlice.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalSlice.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalSlice.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalSliceTF.hpp b/source/backend/metal/MetalSliceTF.hpp index cf5c52e20..f1c6ec050 100644 --- a/source/backend/metal/MetalSliceTF.hpp +++ b/source/backend/metal/MetalSliceTF.hpp @@ -9,7 +9,7 @@ #ifndef MetalSliceTF_hpp #define MetalSliceTF_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalSliceTF.mm b/source/backend/metal/MetalSliceTF.mm index 850d2a6fe..250a13a66 100755 --- a/source/backend/metal/MetalSliceTF.mm +++ b/source/backend/metal/MetalSliceTF.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalSliceTF.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalSliceTF.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalSoftmax.hpp b/source/backend/metal/MetalSoftmax.hpp index aa871d92d..1df211175 100644 --- a/source/backend/metal/MetalSoftmax.hpp +++ b/source/backend/metal/MetalSoftmax.hpp @@ -9,7 +9,7 @@ #ifndef MetalSoftmax_hpp #define MetalSoftmax_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalSoftmax.mm 
b/source/backend/metal/MetalSoftmax.mm index 6560e3e30..7cc321c66 100644 --- a/source/backend/metal/MetalSoftmax.mm +++ b/source/backend/metal/MetalSoftmax.mm @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MNNMetalContext.h" +#import "backend/metal/MNNMetalContext.h" #if MNN_METAL_ENABLED -#import "MetalSoftmax.hpp" -#import "Macro.h" -#import "MetalBackend.hpp" -#import "TensorUtils.hpp" +#import "backend/metal/MetalSoftmax.hpp" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" +#import "core/TensorUtils.hpp" namespace MNN { MetalSoftmax::MetalSoftmax(Backend *backend, int32_t axis) : Execution(backend), mAxis(axis) { diff --git a/source/backend/metal/MetalSpaceToBatchND.hpp b/source/backend/metal/MetalSpaceToBatchND.hpp index 671b7e2bf..18e351d1f 100644 --- a/source/backend/metal/MetalSpaceToBatchND.hpp +++ b/source/backend/metal/MetalSpaceToBatchND.hpp @@ -9,7 +9,7 @@ #ifndef MetalSpaceToBatchND_hpp #define MetalSpaceToBatchND_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalSpaceToBatchND.mm b/source/backend/metal/MetalSpaceToBatchND.mm index bddf564dd..ea0660d56 100755 --- a/source/backend/metal/MetalSpaceToBatchND.mm +++ b/source/backend/metal/MetalSpaceToBatchND.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalSpaceToBatchND.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalSpaceToBatchND.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalSpatialProduct.hpp b/source/backend/metal/MetalSpatialProduct.hpp index 214e296d4..c357b4293 100644 --- a/source/backend/metal/MetalSpatialProduct.hpp +++ b/source/backend/metal/MetalSpatialProduct.hpp @@ -9,7 +9,7 @@ #ifndef 
MetalSpatialProduct_hpp #define MetalSpatialProduct_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalSpatialProduct.mm b/source/backend/metal/MetalSpatialProduct.mm index 110afd785..8cf387c12 100755 --- a/source/backend/metal/MetalSpatialProduct.mm +++ b/source/backend/metal/MetalSpatialProduct.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalSpatialProduct.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalSpatialProduct.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { @@ -27,7 +27,7 @@ auto shape = [context newDeviceBuffer:2 * sizeof(int) access:CPUWriteOnly]; ((int *)shape.contents)[0] = w * h; ((int *)shape.contents)[1] = z * b; - + auto encoder = [context encoder]; auto bandwidth = [context load:@"spartial_product" encoder:encoder]; [encoder setBuffer:(__bridge id)(void *)input->deviceId() offset:0 atIndex:0]; diff --git a/source/backend/metal/MetalSqueeze.hpp b/source/backend/metal/MetalSqueeze.hpp index 6b824e94c..9e1181758 100644 --- a/source/backend/metal/MetalSqueeze.hpp +++ b/source/backend/metal/MetalSqueeze.hpp @@ -9,7 +9,7 @@ #ifndef MetalSqueeze_hpp #define MetalSqueeze_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalSqueeze.mm b/source/backend/metal/MetalSqueeze.mm index 2c738cd29..86a12a0c0 100755 --- a/source/backend/metal/MetalSqueeze.mm +++ b/source/backend/metal/MetalSqueeze.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalSqueeze.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalSqueeze.hpp" +#import "backend/metal/MNNMetalContext.h" +#import 
"core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalStridedSlice.hpp b/source/backend/metal/MetalStridedSlice.hpp index ad33e1d7a..460c1c21f 100644 --- a/source/backend/metal/MetalStridedSlice.hpp +++ b/source/backend/metal/MetalStridedSlice.hpp @@ -9,7 +9,7 @@ #ifndef MetalStridedSlice_hpp #define MetalStridedSlice_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalStridedSlice.mm b/source/backend/metal/MetalStridedSlice.mm index b4d1a59ee..682da3227 100755 --- a/source/backend/metal/MetalStridedSlice.mm +++ b/source/backend/metal/MetalStridedSlice.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalStridedSlice.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalStridedSlice.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalTFQuantizedConv2D.hpp b/source/backend/metal/MetalTFQuantizedConv2D.hpp index 78a267d79..5d5d07e50 100644 --- a/source/backend/metal/MetalTFQuantizedConv2D.hpp +++ b/source/backend/metal/MetalTFQuantizedConv2D.hpp @@ -9,7 +9,7 @@ #ifndef MetalTFQuantizedConv2D_hpp #define MetalTFQuantizedConv2D_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalTFQuantizedConv2D.mm b/source/backend/metal/MetalTFQuantizedConv2D.mm index 26340e5e3..00b4ced4e 100755 --- a/source/backend/metal/MetalTFQuantizedConv2D.mm +++ b/source/backend/metal/MetalTFQuantizedConv2D.mm @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalTFQuantizedConv2D.hpp" -#import "CPUQuantizationUtils.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" 
-#import "MetalBackend.hpp" +#import "backend/metal/MetalTFQuantizedConv2D.hpp" +#import "backend/cpu/CPUQuantizationUtils.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalTanH.hpp b/source/backend/metal/MetalTanH.hpp index d8bfbaa82..ea99c0a00 100644 --- a/source/backend/metal/MetalTanH.hpp +++ b/source/backend/metal/MetalTanH.hpp @@ -9,7 +9,7 @@ #ifndef MetalTanH_hpp #define MetalTanH_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalTanH.mm b/source/backend/metal/MetalTanH.mm index 87436a91a..a28ef8521 100755 --- a/source/backend/metal/MetalTanH.mm +++ b/source/backend/metal/MetalTanH.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalTanH.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalTanH.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalTensorConverter.hpp b/source/backend/metal/MetalTensorConverter.hpp index f1b28bf43..80fff9888 100644 --- a/source/backend/metal/MetalTensorConverter.hpp +++ b/source/backend/metal/MetalTensorConverter.hpp @@ -9,7 +9,7 @@ #ifndef MetalTensorConverter_hpp #define MetalTensorConverter_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #if MNN_METAL_ENABLED diff --git a/source/backend/metal/MetalTensorConverter.mm b/source/backend/metal/MetalTensorConverter.mm index 4a71025c7..f2b31099c 100755 --- a/source/backend/metal/MetalTensorConverter.mm +++ b/source/backend/metal/MetalTensorConverter.mm @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalTensorConverter.hpp" -#import "MNNMetalContext.h" 
-#import "MetalBackend.hpp" +#import "backend/metal/MetalTensorConverter.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalTile.hpp b/source/backend/metal/MetalTile.hpp index 549a1bf50..ef92c2935 100644 --- a/source/backend/metal/MetalTile.hpp +++ b/source/backend/metal/MetalTile.hpp @@ -9,7 +9,7 @@ #ifndef MetalTile_hpp #define MetalTile_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalTile.mm b/source/backend/metal/MetalTile.mm index 0a378f043..3bb0f0607 100755 --- a/source/backend/metal/MetalTile.mm +++ b/source/backend/metal/MetalTile.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalTile.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalTile.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalTranspose.hpp b/source/backend/metal/MetalTranspose.hpp index e8a45922f..a5759a439 100644 --- a/source/backend/metal/MetalTranspose.hpp +++ b/source/backend/metal/MetalTranspose.hpp @@ -9,7 +9,7 @@ #ifndef MetalTranspose_hpp #define MetalTranspose_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MetalDefine.h" #import "Type_generated.h" diff --git a/source/backend/metal/MetalTranspose.mm b/source/backend/metal/MetalTranspose.mm index 2eb0231a2..7f267d31a 100755 --- a/source/backend/metal/MetalTranspose.mm +++ b/source/backend/metal/MetalTranspose.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalTranspose.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalTranspose.hpp" +#import 
"backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/metal/MetalUnary.hpp b/source/backend/metal/MetalUnary.hpp index b481033ee..6feeda6ce 100644 --- a/source/backend/metal/MetalUnary.hpp +++ b/source/backend/metal/MetalUnary.hpp @@ -9,7 +9,7 @@ #ifndef MetalUnary_hpp #define MetalUnary_hpp -#import "Execution.hpp" +#import "core/Execution.hpp" #import "MNN_generated.h" #import "MetalDefine.h" diff --git a/source/backend/metal/MetalUnary.mm b/source/backend/metal/MetalUnary.mm index 78fd76f2b..13b99fbe8 100755 --- a/source/backend/metal/MetalUnary.mm +++ b/source/backend/metal/MetalUnary.mm @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#import "MetalUnary.hpp" -#import "MNNMetalContext.h" -#import "Macro.h" -#import "MetalBackend.hpp" +#import "backend/metal/MetalUnary.hpp" +#import "backend/metal/MNNMetalContext.h" +#import "core/Macro.h" +#import "backend/metal/MetalBackend.hpp" #if MNN_METAL_ENABLED namespace MNN { diff --git a/source/backend/opencl/CMakeLists.txt b/source/backend/opencl/CMakeLists.txt index 48630b8dd..acd18b9a2 100644 --- a/source/backend/opencl/CMakeLists.txt +++ b/source/backend/opencl/CMakeLists.txt @@ -1,35 +1,24 @@ -if(SYSTEM.Android AND NOT MNN_BUILD_FOR_ANDROID_COMMAND) - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${NATIVE_LIBRARY_OUTPUT}/${ANDROID_ABI}) -endif() - -file(GLOB_RECURSE SRCS *.cc *.hpp *.h *.cpp) - -if (MNN_BUILD_SHARED_LIBS AND (NOT MSVC) AND (NOT WIN32)) - add_library(MNN_CL SHARED ${SRCS}) - target_link_libraries(MNN_CL PRIVATE MNN) -else() - add_library(MNN_CL STATIC ${SRCS}) -endif() - -if((NOT MSVC) AND (NOT WIN32)) - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections 
-ffast-math -fno-rtti -fno-exceptions") - target_compile_options(MNN_CL PRIVATE -Wno-deprecated-declarations -Wno-ignored-attributes) -else() - target_compile_options(MNN_CL PRIVATE "/wd4267" "/wd4018" "/wd4251" "/wd4996" "/wd4244") -endif() - -target_include_directories(MNN_CL PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/3rdParty - ${PROJECT_SOURCE_DIR}/3rd_party/half -) - -if (${CMAKE_SYSTEM_NAME} MATCHES "Android") - add_definitions(-DMNN_USE_OPENCL_WRAPPER) - #add_definitions(-DENABLE_OPENCL_TURNING_PROFILER) - #add_definitions(-DLOG_VERBOSE) -else() - find_package(OpenCL) - target_link_libraries(MNN_CL PRIVATE ${OpenCL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) +if(MNN_OPENCL) + add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc" + COMMAND ${PYTHON_EXECUTABLE} + "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_codegen.py" + "${CMAKE_CURRENT_LIST_DIR}/execution/cl/" + "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc" + COMMENT "OpenCL Code Generation" + ) + add_custom_target (MNNOpenCLCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc") + file(GLOB_RECURSE MNN_OpenCL_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cc ${CMAKE_CURRENT_LIST_DIR}/*.cpp) + add_library(MNNOpenCL OBJECT ${MNN_OpenCL_SRC} "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc") + add_dependencies(MNNOpenCL MNNOpenCLCodeGen) + target_include_directories(MNNOpenCL PRIVATE + ${CMAKE_SOURCE_DIR}/include/ + ${CMAKE_SOURCE_DIR}/3rd_party/half + ) + if (${CMAKE_SYSTEM_NAME} MATCHES "Android") + add_definitions(-DMNN_USE_OPENCL_WRAPPER) + endif() + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNOpenCL) + SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) + SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) endif() diff --git a/source/backend/opencl/core/BufferPool.cpp b/source/backend/opencl/core/BufferPool.cpp index cc5c39433..96a3ad3c1 100644 --- a/source/backend/opencl/core/BufferPool.cpp +++ 
b/source/backend/opencl/core/BufferPool.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "BufferPool.hpp" +#include "backend/opencl/core/BufferPool.hpp" namespace MNN { namespace OpenCL { cl::Buffer* BufferPool::alloc(int size, bool seperate) { diff --git a/source/backend/opencl/core/BufferPool.hpp b/source/backend/opencl/core/BufferPool.hpp index 1d5fe014a..f66f9e97b 100644 --- a/source/backend/opencl/core/BufferPool.hpp +++ b/source/backend/opencl/core/BufferPool.hpp @@ -12,8 +12,8 @@ #include #include #include -#include "NonCopyable.hpp" -#include "core/runtime/OpenCLWrapper.hpp" +#include "core/NonCopyable.hpp" +#include "backend/opencl/core/runtime/OpenCLWrapper.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/core/ImageBufferConvertor.cpp b/source/backend/opencl/core/ImageBufferConvertor.cpp index f3f4bdd41..82c6eb374 100644 --- a/source/backend/opencl/core/ImageBufferConvertor.cpp +++ b/source/backend/opencl/core/ImageBufferConvertor.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ImageBufferConvertor.hpp" +#include "backend/opencl/core/ImageBufferConvertor.hpp" namespace MNN { namespace OpenCL { @@ -124,22 +124,20 @@ bool convertImageToNCHWBuffer(const Tensor *input, Tensor *output, cl::Kernel &i bool convertNC4HW4BufferToImage(const Tensor *input, Tensor *output, cl::Kernel &bufferToImageKernel, OpenCLRuntime *runtime, bool needWait) { - std::vector outputShape = tensorShapeFormat(input); - uint32_t outputGlobalWorkSize[2] = {static_cast(UP_DIV(outputShape[3], 4) * outputShape[2]), - static_cast(outputShape[0] * outputShape[1])}; + uint32_t outputGlobalWorkSize[2] = {static_cast(UP_DIV(input->channel(), 4) * input->width()), + static_cast(input->batch() * input->height())}; if (bufferToImageKernel.get() == nullptr) { std::set buildOptions; bufferToImageKernel = runtime->buildKernel("buffer_to_image", "nc4hw4_buffer_to_image", buildOptions); } - int channelUp4 
= ROUND_UP(outputShape[3], 4); uint32_t idx = 0; - int outputImageShape[2] = {outputShape[1], outputShape[2]}; + int outputImageShape[2] = {input->height(), input->width()}; bufferToImageKernel.setArg(idx++, outputGlobalWorkSize[0]); bufferToImageKernel.setArg(idx++, outputGlobalWorkSize[1]); bufferToImageKernel.setArg(idx++, openCLBuffer(input)); bufferToImageKernel.setArg(idx++, sizeof(outputImageShape), outputImageShape); - bufferToImageKernel.setArg(idx++, channelUp4); + bufferToImageKernel.setArg(idx++, UP_DIV(input->channel(), 4)); bufferToImageKernel.setArg(idx++, openCLImage(output)); const uint32_t maxWorkGroupSize = static_cast(runtime->getMaxWorkGroupSize(bufferToImageKernel)); @@ -171,23 +169,22 @@ bool convertNC4HW4BufferToImage(const Tensor *input, Tensor *output, cl::Kernel */ bool convertImageToNC4HW4Buffer(const Tensor *input, Tensor *output, cl::Kernel &imageToBufferKernel, OpenCLRuntime *runtime, bool needWait) { - std::vector inputShape = tensorShapeFormat(input); - uint32_t in_gws[2] = {static_cast(UP_DIV(inputShape[3], 4) * inputShape[2]), - static_cast(inputShape[0] * inputShape[1])}; + auto inputShape = tensorShapeFormat(input); + uint32_t in_gws[2] = {static_cast(UP_DIV(inputShape.at(3), 4) * inputShape.at(2)), + static_cast(inputShape.at(0) * inputShape.at(1))}; if (imageToBufferKernel.get() == nullptr) { std::set buildOptions; imageToBufferKernel = runtime->buildKernel("buffer_to_image", "image_to_nc4hw4_buffer", buildOptions); } - int channelUp4 = ((inputShape[3] + 3) / 4) * 4; uint32_t idx = 0; - int outputImageShape[2] = {inputShape[1], inputShape[2]}; + int outputImageShape[2] = {inputShape.at(1), inputShape.at(2)}; imageToBufferKernel.setArg(idx++, in_gws[0]); imageToBufferKernel.setArg(idx++, in_gws[1]); imageToBufferKernel.setArg(idx++, openCLBuffer(output)); imageToBufferKernel.setArg(idx++, sizeof(outputImageShape), outputImageShape); - imageToBufferKernel.setArg(idx++, static_cast(channelUp4)); + 
imageToBufferKernel.setArg(idx++, static_cast(UP_DIV(inputShape.at(3), 4))); imageToBufferKernel.setArg(idx++, openCLImage(input)); const uint32_t maxWorkGroupSize = static_cast(runtime->getMaxWorkGroupSize(imageToBufferKernel)); const std::vector lws = {16, std::max((uint32_t)1, maxWorkGroupSize / 16)}; @@ -378,14 +375,14 @@ bool ImageBufferConvertor::convertBufferToImage(const Tensor *buffer, const Open const int channelHeightWidthSumSize = buffer->buffer().dim[1].extent * buffer->buffer().dim[2].extent * buffer->buffer().dim[3].extent; const int heightWidthSumSize = buffer->buffer().dim[2].extent * buffer->buffer().dim[3].extent; - int kernelShape[2] = {buffer->buffer().dim[2].extent, buffer->buffer().dim[3].extent}; + int kernelShape[2] = {buffer->buffer().dim[2].extent, buffer->buffer().dim[3].extent}; mBufferToImageKernel.setArg(idx++, static_cast(buffer->buffer().dim[0].extent)); mBufferToImageKernel.setArg(idx++, sizeof(kernelShape),kernelShape); mBufferToImageKernel.setArg(idx++, static_cast(channelHeightWidthSumSize)); mBufferToImageKernel.setArg(idx++, static_cast(heightWidthSumSize)); } else if (type == DW_CONV2D_FILTER) { const int heightWidthSumSize = buffer->buffer().dim[2].extent * buffer->buffer().dim[3].extent; - int kernelShape[4] = {buffer->buffer().dim[0].extent, buffer->buffer().dim[1].extent, buffer->buffer().dim[2].extent, buffer->buffer().dim[3].extent}; + int kernelShape[4] = {buffer->buffer().dim[0].extent, buffer->buffer().dim[1].extent, buffer->buffer().dim[2].extent, buffer->buffer().dim[3].extent}; mBufferToImageKernel.setArg(idx++, sizeof(kernelShape),kernelShape); mBufferToImageKernel.setArg(idx++, static_cast(heightWidthSumSize)); } else if (type == ARGUMENT) { @@ -394,7 +391,7 @@ bool ImageBufferConvertor::convertBufferToImage(const Tensor *buffer, const Open const int channelHeightWidthSumSize = buffer->buffer().dim[1].extent * buffer->buffer().dim[2].extent * buffer->buffer().dim[3].extent; const int heightWidthSumSize = 
buffer->buffer().dim[2].extent * buffer->buffer().dim[3].extent; - int kernelShape[2] = {buffer->buffer().dim[2].extent, buffer->buffer().dim[3].extent}; + int kernelShape[2] = {buffer->buffer().dim[2].extent, buffer->buffer().dim[3].extent}; mBufferToImageKernel.setArg(idx++, static_cast(buffer->buffer().dim[1].extent)); mBufferToImageKernel.setArg(idx++, sizeof(kernelShape),kernelShape); mBufferToImageKernel.setArg(idx++, static_cast(channelHeightWidthSumSize)); diff --git a/source/backend/opencl/core/ImageBufferConvertor.hpp b/source/backend/opencl/core/ImageBufferConvertor.hpp index f9850b184..47de6a6da 100644 --- a/source/backend/opencl/core/ImageBufferConvertor.hpp +++ b/source/backend/opencl/core/ImageBufferConvertor.hpp @@ -9,9 +9,9 @@ #ifndef ImageBufferConvertor_hpp #define ImageBufferConvertor_hpp -#include "Macro.h" -#include "Tensor.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "core/Macro.h" +#include +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/core/ImagePool.cpp b/source/backend/opencl/core/ImagePool.cpp index 8dfd3bfe9..8a06027ac 100644 --- a/source/backend/opencl/core/ImagePool.cpp +++ b/source/backend/opencl/core/ImagePool.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ImagePool.hpp" +#include "backend/opencl/core/ImagePool.hpp" namespace MNN { namespace OpenCL { cl::Image* ImagePool::alloc(int w, int h, bool seperate) { @@ -15,7 +15,7 @@ cl::Image* ImagePool::alloc(int w, int h, bool seperate) { auto findIter = mFreeList.end(); for (auto iterP = mFreeList.begin(); iterP != mFreeList.end(); iterP++) { auto& iter = *iterP; - if (iter->w > w && iter->h > h) { + if (iter->w >= w && iter->h >= h) { int waste = iter->w * iter->h - w * h; if (minWaste == 0 || waste < minWaste) { findIter = iterP; @@ -66,7 +66,7 @@ cl::Image* ImagePoolInt8::alloc(int w, int h, bool seperate) { auto findIter = mFreeList.end(); for (auto iterP = 
mFreeList.begin(); iterP != mFreeList.end(); iterP++) { auto& iter = *iterP; - if (iter->w > w && iter->h > h) { + if (iter->w >= w && iter->h >= h) { int waste = iter->w * iter->h - w * h; if (minWaste == 0 || waste < minWaste) { findIter = iterP; diff --git a/source/backend/opencl/core/ImagePool.hpp b/source/backend/opencl/core/ImagePool.hpp index 67b3a057d..4439d020d 100644 --- a/source/backend/opencl/core/ImagePool.hpp +++ b/source/backend/opencl/core/ImagePool.hpp @@ -11,8 +11,8 @@ #include #include -#include "NonCopyable.hpp" -#include "core/runtime/OpenCLWrapper.hpp" +#include "core/NonCopyable.hpp" +#include "backend/opencl/core/runtime/OpenCLWrapper.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/core/OpenCLBackend.cpp b/source/backend/opencl/core/OpenCLBackend.cpp index 72ef354ab..9cc0c1388 100644 --- a/source/backend/opencl/core/OpenCLBackend.cpp +++ b/source/backend/opencl/core/OpenCLBackend.cpp @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" #include "MNN_generated.h" -#include "TensorUtils.hpp" -#include "SizeComputer.hpp" +#include "core/TensorUtils.hpp" +#include "core/SizeComputer.hpp" #include #include #include -#include "Macro.h" +#include "core/Macro.h" namespace MNN { namespace OpenCL { @@ -48,6 +48,13 @@ OpenCLBackend::OpenCLBackend(BackendConfig::PrecisionMode precision, BackendConf mStaticImagePool.reset(new ImagePool(mOpenCLRuntime->context(), dataType)); mBufferPool.reset(new BufferPool(mOpenCLRuntime->context(), CL_MEM_READ_WRITE)); mBufferPoolInt8.reset(new BufferPoolInt8(mOpenCLRuntime->context(), CL_MEM_READ_WRITE)); + std::set buildOptions; + mNC4HW4BufferToImageFloat = mOpenCLRuntime->buildKernel("buffer_to_image", "nc4hw4_buffer_to_image", buildOptions); + mNCHWBufferToImageFloat = mOpenCLRuntime->buildKernel("buffer_to_image", "nchw_buffer_to_image", buildOptions); + mNHWCBufferToImageFloat = 
mOpenCLRuntime->buildKernel("buffer_to_image", "nhwc_buffer_to_image", buildOptions); + mImageToNC4HW4BufferFloat = mOpenCLRuntime->buildKernel("buffer_to_image", "image_to_nc4hw4_buffer", buildOptions); + mImageToNHWCBufferFloat = mOpenCLRuntime->buildKernel("buffer_to_image", "image_to_nhwc_buffer", buildOptions); + mImageToNCHWBufferFloat = mOpenCLRuntime->buildKernel("buffer_to_image", "image_to_nchw_buffer", buildOptions); } } @@ -119,7 +126,7 @@ bool OpenCLBackend::onAcquireBuffer(const Tensor* nativeTensor, StorageType stor bool OpenCLBackend::onReleaseBuffer(const Tensor* nativeTensor, StorageType storageType) { if(nativeTensor->getType().code == halide_type_int && nativeTensor->getType().bits == 8){ - + return true; } if (storageType == DYNAMIC_SEPERATE) { @@ -174,8 +181,7 @@ Execution* OpenCLBackend::onCreate(const std::vector& inputs, const std for (auto t : inputs) { int imageHeight = t->batch() * t->height(); int imageWidth = t->width() * UP_DIV(t->channel(), 4); - if (TensorUtils::getDescribe(t)->dimensionFormat == MNN_DATA_FORMAT_NC4HW4 && - (imageHeight > maxImageSize.at(0) || imageWidth > maxImageSize.at(1))) { + if (imageHeight > maxImageSize.at(0) || imageWidth > maxImageSize.at(1)) { valid = false; break; } @@ -183,8 +189,7 @@ Execution* OpenCLBackend::onCreate(const std::vector& inputs, const std for (auto t : outputs) { int imageHeight = t->batch() * t->height(); int imageWidth = t->width() * UP_DIV(t->channel(), 4); - if (TensorUtils::getDescribe(t)->dimensionFormat == MNN_DATA_FORMAT_NC4HW4 && - (imageHeight > maxImageSize.at(0) || imageWidth > maxImageSize.at(1))) { + if (imageHeight > maxImageSize.at(0) || imageWidth > maxImageSize.at(1)) { valid = false; break; } @@ -221,7 +226,7 @@ bool OpenCLBackend::isCreateError() const { void OpenCLBackend::_allocHostBuffer(int length) const { MNN_ASSERT(length > 0); - if (nullptr != mHostBuffer.second && length < mHostBuffer.first) { + if (nullptr != mHostBuffer.second && length <= 
mHostBuffer.first) { return; } mHostBuffer.first = length; @@ -229,113 +234,29 @@ void OpenCLBackend::_allocHostBuffer(int length) const { new cl::Buffer(mOpenCLRuntime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, length)); } -void OpenCLBackend::onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const { -#ifdef LOG_VERBOSE - MNN_PRINT("Start onCopyBuffer !\n"); -#endif +void OpenCLBackend::copyFromDeviceInt8(const Tensor* srcTensor, const Tensor* dstTensor) const{ + auto needSize = dstTensor->size(); + auto hostPtr = dstTensor->host(); + cl_int error = CL_SUCCESS; + auto DeviceBuffer = (cl::Buffer*)srcTensor->deviceId(); + mOpenCLRuntime->commandQueue().enqueueReadBuffer(*DeviceBuffer, CL_TRUE, 0, needSize, hostPtr); +} - //int8 - if(srcTensor->getType().code == halide_type_int && srcTensor->getType().bits == 8){ - if (!srcTensor->deviceId()) { -#ifdef LOG_VERBOSE - MNN_PRINT("Host -> OpenCL !\n"); -#endif +void OpenCLBackend::copyToDeviceInt8(const Tensor* srcTensor, const Tensor* dstTensor) const{ auto needSize = srcTensor->size(); auto hostPtr = srcTensor->host(); cl_int error = CL_SUCCESS; auto DeviceBuffer = (cl::Buffer*)dstTensor->deviceId(); - auto bufferPtr = mOpenCLRuntime->commandQueue().enqueueMapBuffer(*DeviceBuffer, CL_TRUE, CL_MAP_WRITE, 0, - needSize, nullptr, nullptr, &error); - if (error != CL_SUCCESS) { - MNN_ERROR("Error to map buffer in copy buffer, error=%d\n", error); - return; - } - if(bufferPtr != nullptr){ - ::memcpy(bufferPtr, hostPtr, needSize); - } - mOpenCLRuntime->commandQueue().enqueueUnmapMemObject(*DeviceBuffer, bufferPtr); - return; - } -#ifdef LOG_VERBOSE - MNN_PRINT("OpenCL -> Host !\n"); -#endif - // OpenCL -> Host - - auto needSize = dstTensor->size(); - - auto hostPtr = dstTensor->host(); - cl_int error = CL_SUCCESS; - auto DeviceBuffer = (cl::Buffer*)srcTensor->deviceId(); - auto bufferPtr = - mOpenCLRuntime->commandQueue().enqueueMapBuffer(*DeviceBuffer, true, CL_MAP_READ, 0, needSize, nullptr, 
nullptr, &error); - if (error != CL_SUCCESS) { - MNN_ERROR("Error to map buffer in copy buffer, error=%d\n", error); - return; - } - if(bufferPtr != nullptr && hostPtr != nullptr){ - ::memcpy(hostPtr, bufferPtr, needSize); - } - mOpenCLRuntime->commandQueue().enqueueUnmapMemObject(*DeviceBuffer, bufferPtr); - return; - } + mOpenCLRuntime->commandQueue().enqueueWriteBuffer(*DeviceBuffer, CL_TRUE, 0, needSize, hostPtr); +} +void OpenCLBackend::copyFromDevice(const Tensor* srcTensor, const Tensor* dstTensor) const{ std::vector bufferShape = MNN::OpenCL::tensorShapeFormat(srcTensor); - -#ifdef LOG_VERBOSE - MNN_PRINT("buffer shape : %d, %d, %d, %d \n", bufferShape.at(0), bufferShape.at(1), bufferShape.at(2), - bufferShape.at(3)); -#endif MNN::Tensor interBuffer(0, Tensor::TENSORFLOW); interBuffer.buffer().dimensions = bufferShape.size(); for (int i = 0; i < bufferShape.size(); i++) { interBuffer.buffer().dim[i].extent = bufferShape.at(i); } - - if (!srcTensor->deviceId()) { -#ifdef LOG_VERBOSE - MNN_PRINT("Host -> OpenCL !\n"); -#endif - auto needSize = srcTensor->size(); - _allocHostBuffer(needSize); - interBuffer.buffer().device = (uint64_t)mHostBuffer.second.get(); - auto hostPtr = srcTensor->host(); - cl_int error = CL_SUCCESS; - auto bufferPtr = mOpenCLRuntime->commandQueue().enqueueMapBuffer(*mHostBuffer.second, CL_TRUE, CL_MAP_WRITE, 0, - needSize, nullptr, nullptr, &error); - if (error != CL_SUCCESS) { - MNN_ERROR("Error to map buffer in copy buffer, error=%d\n", error); - return; - } - if(bufferPtr != nullptr){ - ::memcpy(bufferPtr, hostPtr, needSize); - } - mOpenCLRuntime->commandQueue().enqueueUnmapMemObject(*mHostBuffer.second, bufferPtr); - // Host -> OpenCL - MNN_DATA_FORMAT data_format = TensorUtils::getDescribe(srcTensor)->dimensionFormat; - if (MNN_DATA_FORMAT_NHWC == data_format) { - OpenCL::convertNHWCBufferToImage(&interBuffer, const_cast(dstTensor), - *const_cast(&mNHWCBufferToImageFloat), mOpenCLRuntime.get()); - return; - } - if 
(MNN_DATA_FORMAT_NCHW == data_format) { - OpenCL::convertNCHWBufferToImage(&interBuffer, const_cast(dstTensor), - *const_cast(&mNCHWBufferToImageFloat), mOpenCLRuntime.get()); - return; - } - if (MNN_DATA_FORMAT_NC4HW4 == data_format) { - OpenCL::convertNC4HW4BufferToImage(&interBuffer, const_cast(dstTensor), - *const_cast(&mNC4HW4BufferToImageFloat), - mOpenCLRuntime.get()); - return; - } - MNN_ASSERT(false); - return; - } -#ifdef LOG_VERBOSE - MNN_PRINT("OpenCL -> Host !\n"); -#endif - // OpenCL -> Host - auto needSize = dstTensor->size(); _allocHostBuffer(needSize); interBuffer.buffer().device = (uint64_t)mHostBuffer.second.get(); @@ -359,22 +280,73 @@ void OpenCLBackend::onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTenso } auto hostPtr = dstTensor->host(); cl_int error = CL_SUCCESS; - auto bufferPtr = - mOpenCLRuntime->commandQueue().enqueueMapBuffer(*mHostBuffer.second, true, CL_MAP_READ, 0, needSize, nullptr, nullptr, &error); - if (error != CL_SUCCESS) { - MNN_ERROR("Error to map buffer in copy buffer, error=%d\n", error); + + mOpenCLRuntime->commandQueue().enqueueReadBuffer(*mHostBuffer.second, CL_TRUE, 0, needSize, hostPtr); +} +void OpenCLBackend::copyToDevice(const Tensor* srcTensor, const Tensor* dstTensor) const{ + std::vector bufferShape = MNN::OpenCL::tensorShapeFormat(srcTensor); + MNN::Tensor interBuffer(0, Tensor::TENSORFLOW); + interBuffer.buffer().dimensions = bufferShape.size(); + for (int i = 0; i < bufferShape.size(); i++) { + interBuffer.buffer().dim[i].extent = bufferShape.at(i); + } + auto needSize = srcTensor->size(); + _allocHostBuffer(needSize); + interBuffer.buffer().device = (uint64_t)mHostBuffer.second.get(); + auto hostPtr = srcTensor->host(); + cl_int error = CL_SUCCESS; + mOpenCLRuntime->commandQueue().enqueueWriteBuffer(*mHostBuffer.second, CL_TRUE, 0, needSize, hostPtr); + // Host -> OpenCL + MNN_DATA_FORMAT data_format = TensorUtils::getDescribe(srcTensor)->dimensionFormat; + if (MNN_DATA_FORMAT_NHWC == data_format) { 
+ OpenCL::convertNHWCBufferToImage(&interBuffer, const_cast(dstTensor), + *const_cast(&mNHWCBufferToImageFloat), mOpenCLRuntime.get()); + return; + } + if (MNN_DATA_FORMAT_NCHW == data_format) { + OpenCL::convertNCHWBufferToImage(&interBuffer, const_cast(dstTensor), + *const_cast(&mNCHWBufferToImageFloat), mOpenCLRuntime.get()); return; } - if(bufferPtr != nullptr && hostPtr != nullptr){ - ::memcpy(hostPtr, bufferPtr, needSize); + if (MNN_DATA_FORMAT_NC4HW4 == data_format) { + OpenCL::convertNC4HW4BufferToImage(&interBuffer, const_cast(dstTensor), + *const_cast(&mNC4HW4BufferToImageFloat), + mOpenCLRuntime.get()); + return; + } + MNN_ASSERT(false); + return; +} + +void OpenCLBackend::onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const { +#ifdef LOG_VERBOSE + MNN_PRINT("Start onCopyBuffer !\n"); +#endif + //int8 + if(srcTensor->getType().code == halide_type_int && srcTensor->getType().bits == 8){ + if (srcTensor->deviceId() == 0 && dstTensor->deviceId() != 0) { + copyToDeviceInt8(srcTensor, dstTensor); + }else if(srcTensor->deviceId() != 0 && dstTensor->deviceId() == 0){ + copyFromDeviceInt8(srcTensor, dstTensor); + }else{ + MNN_PRINT("onCopyBuffer int8 error !!! \n"); + } + }else{ + if (srcTensor->deviceId() == 0 && dstTensor->deviceId() != 0) { + copyToDevice(srcTensor, dstTensor); + }else if(srcTensor->deviceId() != 0 && dstTensor->deviceId() == 0){ + copyFromDevice(srcTensor, dstTensor); + }else{ + MNN_PRINT("onCopyBuffer float error !!! 
\n"); + } } - mOpenCLRuntime->commandQueue().enqueueUnmapMemObject(*mHostBuffer.second, bufferPtr); #ifdef LOG_VERBOSE MNN_PRINT("end onCopyBuffer !\n"); #endif } + bool OpenCLBackend::addCreator(OpType t, Creator* c) { auto map = gCreator(); if (map->find(t) != map->end()) { @@ -413,7 +385,7 @@ class CLBackendCreator : public BackendCreator { delete backend; } } - return nullptr; + return nullptr; } }; diff --git a/source/backend/opencl/core/OpenCLBackend.hpp b/source/backend/opencl/core/OpenCLBackend.hpp index 33d67ad29..044acd6fd 100644 --- a/source/backend/opencl/core/OpenCLBackend.hpp +++ b/source/backend/opencl/core/OpenCLBackend.hpp @@ -9,17 +9,17 @@ #ifndef OpenCLBackend_hpp #define OpenCLBackend_hpp -#include "Backend.hpp" +#include "core/Backend.hpp" #include "MNN_generated.h" #include #include -#include "BufferPool.hpp" -#include "ImageBufferConvertor.hpp" -#include "ImagePool.hpp" -#include "Macro.h" -#include "core/ImageBufferConvertor.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/core/BufferPool.hpp" +#include "backend/opencl/core/ImageBufferConvertor.hpp" +#include "backend/opencl/core/ImagePool.hpp" +#include "core/Macro.h" +#include "backend/opencl/core/ImageBufferConvertor.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { @@ -102,6 +102,11 @@ class OpenCLBackend final : public Backend { bool isCreateError() const; private: + void copyFromDevice(const Tensor* srcTensor, const Tensor* dstTensor) const; + void copyToDevice(const Tensor* srcTensor, const Tensor* dstTensor) const; + void copyFromDeviceInt8(const Tensor* srcTensor, const Tensor* dstTensor) const; + void copyToDeviceInt8(const Tensor* srcTensor, const Tensor* dstTensor) const; + void _allocHostBuffer(int length) const; cl::Kernel mImageToNCHWBufferFloat; cl::Kernel mImageToNC4HW4BufferFloat; diff --git a/source/backend/opencl/core/OpenCLRunningUtils.cpp b/source/backend/opencl/core/OpenCLRunningUtils.cpp index 
d1ace1005..5fb6ffcf0 100644 --- a/source/backend/opencl/core/OpenCLRunningUtils.cpp +++ b/source/backend/opencl/core/OpenCLRunningUtils.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" #include #include #include -#include "Macro.h" +#include "core/Macro.h" namespace MNN { namespace OpenCL { @@ -178,7 +178,7 @@ void runTurnKernelLWS2D(const ::cl::Kernel &kernel, const std::vector } void run3DKernelDefault(const ::cl::Kernel &kernel, const std::vector &gws, const std::vector &lws, - OpenCLRuntime *runtime) { + OpenCLRuntime *runtime, cl::Event* eventPtr) { #ifdef LOG_VERBOSE MNN_PRINT("start run3DKernelDefault !\n"); #endif @@ -190,10 +190,16 @@ void run3DKernelDefault(const ::cl::Kernel &kernel, const std::vector } cl_int error = CL_SUCCESS; - error = runtime->commandQueue().enqueueNDRangeKernel( - kernel, cl::NullRange, cl::NDRange(internalGlobalWS[0], internalGlobalWS[1], internalGlobalWS[2]), - cl::NDRange(lws[0], lws[1], lws[2])); - + if(eventPtr == nullptr){ + error = runtime->commandQueue().enqueueNDRangeKernel( + kernel, cl::NullRange, cl::NDRange(internalGlobalWS[0], internalGlobalWS[1], internalGlobalWS[2]), + cl::NDRange(lws[0], lws[1], lws[2])); + + }else{ + error = runtime->commandQueue().enqueueNDRangeKernel( + kernel, cl::NullRange, cl::NDRange(internalGlobalWS[0], internalGlobalWS[1], internalGlobalWS[2]), + cl::NDRange(lws[0], lws[1], lws[2]), nullptr, eventPtr); + } MNN_CHECK_CL_SUCCESS(error); #ifdef LOG_VERBOSE @@ -202,7 +208,7 @@ void run3DKernelDefault(const ::cl::Kernel &kernel, const std::vector } void runKernel2D(const ::cl::Kernel &kernel, const std::vector &gws, const std::vector &lws, - OpenCLRuntime *runtime) { + OpenCLRuntime *runtime, cl::Event* eventPtr) { #ifdef LOG_VERBOSE MNN_PRINT("start run3DKernelDefault !\n"); #endif @@ -213,9 +219,14 @@ void runKernel2D(const ::cl::Kernel &kernel, const std::vector &gws, c } 
cl_int error = CL_SUCCESS; - error = runtime->commandQueue().enqueueNDRangeKernel( - kernel, cl::NullRange, cl::NDRange(internalGlobalWS[0], internalGlobalWS[1]), cl::NDRange(lws[0], lws[1])); + if(eventPtr == nullptr){ + error = runtime->commandQueue().enqueueNDRangeKernel( + kernel, cl::NullRange, cl::NDRange(internalGlobalWS[0], internalGlobalWS[1]), cl::NDRange(lws[0], lws[1])); + }else{ + error = runtime->commandQueue().enqueueNDRangeKernel( + kernel, cl::NullRange, cl::NDRange(internalGlobalWS[0], internalGlobalWS[1]), cl::NDRange(lws[0], lws[1]), nullptr, eventPtr); + } MNN_CHECK_CL_SUCCESS(error); #ifdef LOG_VERBOSE diff --git a/source/backend/opencl/core/OpenCLRunningUtils.hpp b/source/backend/opencl/core/OpenCLRunningUtils.hpp index 500886626..dfb984ef1 100644 --- a/source/backend/opencl/core/OpenCLRunningUtils.hpp +++ b/source/backend/opencl/core/OpenCLRunningUtils.hpp @@ -11,11 +11,12 @@ #include #include +#include -#include "Macro.h" -#include "TensorUtils.hpp" -#include "core/runtime/OpenCLRuntime.hpp" -#include "core/runtime/OpenCLWrapper.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/runtime/OpenCLRuntime.hpp" +#include "backend/opencl/core/runtime/OpenCLWrapper.hpp" namespace MNN { namespace OpenCL { @@ -87,13 +88,14 @@ void getImageShape(const std::vector &shape, /* NHWC */ std::vector turnLocalSize(cl::Kernel *kernel, std::vector &gws, OpenCLRuntime *runtime); void run3DKernelDefault(const ::cl::Kernel &kernel, const std::vector &gws, const std::vector &lws, - OpenCLRuntime *runtime); + OpenCLRuntime *runtime, cl::Event* eventPtr = nullptr); void run2DKernelDefault(const ::cl::Kernel &kernel, const uint32_t *gws, const std::vector &lws, OpenCLRuntime *runtime); void runKernel2D(const ::cl::Kernel &kernel, const std::vector &gws, const std::vector &lws, - OpenCLRuntime *runtime); + OpenCLRuntime *runtime, cl::Event* eventPtr = nullptr); + void runTurnKernelLWS2D(const ::cl::Kernel &kernel, const 
std::vector &gws, const std::vector &lws, OpenCLRuntime *runtime); diff --git a/source/backend/opencl/core/runtime/OpenCLRuntime.cpp b/source/backend/opencl/core/runtime/OpenCLRuntime.cpp index 168d4abcb..2d575cf35 100644 --- a/source/backend/opencl/core/runtime/OpenCLRuntime.cpp +++ b/source/backend/opencl/core/runtime/OpenCLRuntime.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "core/runtime/OpenCLRuntime.hpp" +#include "backend/opencl/core/runtime/OpenCLRuntime.hpp" #include #include #include @@ -14,9 +14,9 @@ #include #include #include -#include "Macro.h" +#include "core/Macro.h" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include namespace MNN { extern const std::map> OpenCLProgramMap; @@ -46,7 +46,7 @@ OpenCLRuntime::OpenCLRuntime(bool permitFloat16) { cl::Platform::setDefault(platforms[0]); std::vector gpuDevices; platforms[0].getDevices(CL_DEVICE_TYPE_GPU, &gpuDevices); - + if(1 <= gpuDevices.size()){ mFirstGPUDevicePtr = std::make_shared(gpuDevices[0]); const std::string deviceName = mFirstGPUDevicePtr->getInfo(); @@ -76,7 +76,7 @@ OpenCLRuntime::OpenCLRuntime(bool permitFloat16) { const std::string deviceVendor = mFirstGPUDevicePtr->getInfo(); cl_command_queue_properties properties = 0; - #ifdef ENABLE_OPENCL_TURNING_PROFILER + #ifdef ENABLE_OPENCL_TIME_PROFILER properties |= CL_QUEUE_PROFILING_ENABLE; #endif cl_int err; @@ -91,8 +91,8 @@ OpenCLRuntime::OpenCLRuntime(bool permitFloat16) { } else { mGpuType = OTHER; } - - if(mGpuType == ADRENO){ + const std::string extensions = platforms[0].getInfo(); + if(mGpuType == ADRENO && " " != extensions){ std::vector context_properties; context_properties.reserve(5); context_properties.push_back(CL_CONTEXT_PERF_HINT_QCOM); @@ -103,8 +103,8 @@ OpenCLRuntime::OpenCLRuntime(bool permitFloat16) { mContext = std::shared_ptr(new cl::Context({*mFirstGPUDevicePtr}, context_properties.data(), nullptr, nullptr, &err)); }else{ mContext = std::shared_ptr(new 
cl::Context({*mFirstGPUDevicePtr}, nullptr, nullptr, nullptr, &err)); - } - + } + MNN_CHECK_CL_SUCCESS(err); mCommandQueuePtr = std::make_shared(*mContext, *mFirstGPUDevicePtr, properties, &err); @@ -260,4 +260,27 @@ uint64_t OpenCLRuntime::getMaxWorkGroupSize(const cl::Kernel &kernel) { return maxWorkGroupSize; } +uint64_t OpenCLRuntime::GetKernelWaveSize(const cl::Kernel &kernel) { + uint64_t kernelWaveSize = 0; + MNN_ASSERT(0 == kernel.getWorkGroupInfo(*mFirstGPUDevicePtr, CL_KERNEL_WAVE_SIZE_QCOM, &kernelWaveSize)); + return kernelWaveSize; +} + +double OpenCLRuntime::getCostTime(const cl::Event *event){ + mCommandQueuePtr->finish(); + mStartNanos = event->getProfilingInfo(); + mStopNanos = event->getProfilingInfo(); + return (mStopNanos - mStartNanos) / 1000000.0; +} + +double OpenCLRuntime::getQueuedTime(const cl::Event *event){ + mCommandQueuePtr->finish(); + return (event->getProfilingInfo() - event->getProfilingInfo()) / 1000000.0; +} + +double OpenCLRuntime::getSubmitTime(const cl::Event *event){ + mCommandQueuePtr->finish(); + return (event->getProfilingInfo() - event->getProfilingInfo()) / 1000000.0; +} + } // namespace MNN diff --git a/source/backend/opencl/core/runtime/OpenCLRuntime.hpp b/source/backend/opencl/core/runtime/OpenCLRuntime.hpp index bbee42a91..d425a9135 100644 --- a/source/backend/opencl/core/runtime/OpenCLRuntime.hpp +++ b/source/backend/opencl/core/runtime/OpenCLRuntime.hpp @@ -20,9 +20,9 @@ #include #include #include -#include "Macro.h" +#include "core/Macro.h" #include "Type_generated.h" -#include "core/runtime/OpenCLWrapper.hpp" +#include "backend/opencl/core/runtime/OpenCLWrapper.hpp" namespace MNN { @@ -55,6 +55,7 @@ class OpenCLRuntime { uint32_t deviceComputeUnits() const; uint32_t maxFreq() const; uint64_t getMaxWorkGroupSize(const ::cl::Kernel &kernel); + uint64_t GetKernelWaveSize(const cl::Kernel &kernel); uint64_t getMaxLocalMem() const; GpuType getGpuType(); uint64_t maxAllocSize() const; @@ -68,6 +69,11 @@ class 
OpenCLRuntime { float flops() const { return mFlops; } + + double getCostTime(const cl::Event *event); + double getQueuedTime(const cl::Event *event); + double getSubmitTime(const cl::Event *event); + private: bool loadProgram(const std::string &programName, cl::Program *program); bool buildProgram(const std::string &buildOptionsStr, cl::Program *program); @@ -89,7 +95,11 @@ class OpenCLRuntime { GpuType mGpuType; std::string mDefaultBuildParams; float mFlops = 4.0f; - bool mIsCreateError{false}; + bool mIsCreateError{false}; + + double mStartNanos; + double mStopNanos; + }; } // namespace MNN diff --git a/source/backend/opencl/core/runtime/OpenCLWrapper.cpp b/source/backend/opencl/core/runtime/OpenCLWrapper.cpp index 7d42f8700..9646069e2 100644 --- a/source/backend/opencl/core/runtime/OpenCLWrapper.cpp +++ b/source/backend/opencl/core/runtime/OpenCLWrapper.cpp @@ -7,7 +7,7 @@ // #ifdef MNN_USE_OPENCL_WRAPPER -#include "core/runtime/OpenCLWrapper.hpp" +#include "backend/opencl/core/runtime/OpenCLWrapper.hpp" #include #include #include @@ -130,7 +130,8 @@ bool OpenCLSymbols::LoadLibraryFromPath(const std::string &library_path) { MNN_LOAD_FUNCTION_PTR(clGetEventProfilingInfo); MNN_LOAD_FUNCTION_PTR(clGetImageInfo); MNN_LOAD_FUNCTION_PTR(clEnqueueCopyImage); - + MNN_LOAD_FUNCTION_PTR(clEnqueueReadImage); + MNN_LOAD_FUNCTION_PTR(clEnqueueWriteImage); #undef MNN_LOAD_FUNCTION_PTR return true; @@ -377,6 +378,22 @@ cl_int clEnqueueWriteBuffer(cl_command_queue command_queue, cl_mem buffer, cl_bo event); } +cl_int clEnqueueReadImage(cl_command_queue command_queue, cl_mem cl_image, cl_bool is_block, const size_t * origin, const size_t * region, size_t row_pitch, + size_t slice_pitch, void * ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event) CL_API_SUFFIX__VERSION_1_0 { + auto func = MNN::OpenCLSymbolsOperator::getOpenclSymbolsPtr()->clEnqueueReadImage; + MNN_CHECK_NOTNULL(func); + return func(command_queue, cl_image, is_block, origin, 
region, row_pitch, slice_pitch, ptr, num_events_in_wait_list, event_wait_list, + event); +} + +cl_int clEnqueueWriteImage(cl_command_queue command_queue, cl_mem cl_image, cl_bool is_block, const size_t * origin, const size_t * region, size_t row_pitch, + size_t slice_pitch, const void * ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event) { + auto func = MNN::OpenCLSymbolsOperator::getOpenclSymbolsPtr()->clEnqueueWriteImage; + MNN_CHECK_NOTNULL(func); + return func(command_queue, cl_image, is_block, origin, region, row_pitch, slice_pitch, ptr, num_events_in_wait_list, event_wait_list, + event); +} + void *clEnqueueMapBuffer(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_map, cl_map_flags map_flags, size_t offset, size_t size, cl_uint num_events_in_wait_list, const cl_event *event_wait_list, cl_event *event, cl_int *errcode_ret) { diff --git a/source/backend/opencl/core/runtime/OpenCLWrapper.hpp b/source/backend/opencl/core/runtime/OpenCLWrapper.hpp index 63b97db00..af371ee73 100644 --- a/source/backend/opencl/core/runtime/OpenCLWrapper.hpp +++ b/source/backend/opencl/core/runtime/OpenCLWrapper.hpp @@ -11,7 +11,7 @@ #include -#include "Macro.h" +#include "core/Macro.h" #define CL_TARGET_OPENCL_VERSION 200 #define CL_HPP_TARGET_OPENCL_VERSION 110 #define CL_HPP_MINIMUM_OPENCL_VERSION 110 @@ -76,7 +76,11 @@ class OpenCLSymbols { using clEnqueueWriteBufferFunc = cl_int (*)(cl_command_queue, cl_mem, cl_bool, size_t, size_t, const void *, cl_uint, const cl_event *, cl_event *); using clEnqueueReadBufferFunc = cl_int (*)(cl_command_queue, cl_mem, cl_bool, size_t, size_t, void *, cl_uint, - const cl_event *, cl_event *); + const cl_event *, cl_event *); + using clEnqueueReadImageFunc = cl_int (*)(cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t *, size_t, size_t, void *, cl_uint, const cl_event *, cl_event *); + using clEnqueueWriteImageFunc = cl_int (*)(cl_command_queue, cl_mem, cl_bool, const size_t *, 
const size_t *, size_t, size_t, const void *, + cl_uint, const cl_event *, cl_event * ); + using clGetProgramBuildInfoFunc = cl_int (*)(cl_program, cl_device_id, cl_program_build_info, size_t, void *, size_t *); using clRetainProgramFunc = cl_int (*)(cl_program program); @@ -168,6 +172,8 @@ class OpenCLSymbols { MNN_CL_DEFINE_FUNC_PTR(clGetEventInfo); MNN_CL_DEFINE_FUNC_PTR(clGetEventProfilingInfo); MNN_CL_DEFINE_FUNC_PTR(clGetImageInfo); + MNN_CL_DEFINE_FUNC_PTR(clEnqueueReadImage); + MNN_CL_DEFINE_FUNC_PTR(clEnqueueWriteImage); #undef MNN_CL_DEFINE_FUNC_PTR diff --git a/source/backend/opencl/execution/BatchToSpaceExecution.cpp b/source/backend/opencl/execution/BatchToSpaceExecution.cpp index 0f00bdab2..923b6de11 100644 --- a/source/backend/opencl/execution/BatchToSpaceExecution.cpp +++ b/source/backend/opencl/execution/BatchToSpaceExecution.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/BatchToSpaceExecution.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opencl/execution/BatchToSpaceExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { @@ -21,7 +21,7 @@ BatchToSpaceExecution::BatchToSpaceExecution(const std::vector &inputs mOpenCLBackend = static_cast(backend); auto param = op->main_as_SpaceBatch(); mPaddings[1] = param->padding()->int32s()->data()[0]; - mPaddings[0] = param->padding()->int32s()->data()[1]; + mPaddings[0] = param->padding()->int32s()->data()[2]; mBlockShape[1] = param->blockShape()->int32s()->data()[0]; mBlockShape[0] = param->blockShape()->int32s()->data()[1]; std::set buildOptions; @@ -38,7 +38,21 @@ ErrorCode BatchToSpaceExecution::onResize(const std::vector &inputs, c #ifdef LOG_VERBOSE MNN_PRINT("Start BatchToSpaceExecution onResize !\n"); #endif + auto input = inputs[0]; + auto output = outputs[0]; + int inputSize[4] = {input->width(), input->height(), UP_DIV(input->channel(), 4), input->batch()}; + int 
outputSize[4] = {output->width(), output->height(), UP_DIV(output->channel(), 4), output->batch()}; + uint32_t idx = 0; + mKernel.setArg(idx++, inputSize[2]); + mKernel.setArg(idx++, inputSize[0]); + mKernel.setArg(idx++, inputSize[1]*inputSize[3]); + mKernel.setArg(idx++, openCLImage(input)); + mKernel.setArg(idx++, openCLImage(output)); + mKernel.setArg(idx++, sizeof(inputSize), inputSize); + mKernel.setArg(idx++, sizeof(outputSize), outputSize); + mKernel.setArg(idx++, sizeof(mPaddings), mPaddings); + mKernel.setArg(idx++, sizeof(mBlockShape), mBlockShape); #ifdef LOG_VERBOSE MNN_PRINT("end BatchToSpaceExecution onResize !\n"); #endif @@ -49,24 +63,17 @@ ErrorCode BatchToSpaceExecution::onExecute(const std::vector &inputs, #ifdef LOG_VERBOSE MNN_PRINT("Start BatchToSpaceExecution onExecute !\n"); #endif - auto input = outputs[0]; - auto output = inputs[0]; + auto input = inputs[0]; + auto output = outputs[0]; int inputSize[4] = {input->width(), input->height(), UP_DIV(input->channel(), 4), input->batch()}; int outputSize[4] = {output->width(), output->height(), UP_DIV(output->channel(), 4), output->batch()}; - mKernel.setArg(0, openCLImage(input)); - mKernel.setArg(1, openCLImage(output)); - mKernel.setArg(2, sizeof(inputSize), inputSize); - mKernel.setArg(3, sizeof(outputSize), outputSize); - mKernel.setArg(4, sizeof(mPaddings), mPaddings); - mKernel.setArg(5, sizeof(mBlockShape), mBlockShape); - auto runtime = mOpenCLBackend->getOpenCLRuntime(); runtime->commandQueue().enqueueNDRangeKernel( mKernel, cl::NullRange, - cl::NDRange(UP_DIV(outputSize[0], 16) * 16, UP_DIV(outputSize[1], 16) * 16, outputSize[2] * outputSize[3]), + cl::NDRange(UP_DIV(inputSize[2], 16) * 16, UP_DIV(inputSize[0], 16) * 16, inputSize[1] * inputSize[3]), cl::NDRange(16, 16, 1)); #ifdef LOG_VERBOSE diff --git a/source/backend/opencl/execution/BatchToSpaceExecution.hpp b/source/backend/opencl/execution/BatchToSpaceExecution.hpp index 0b5fbef5f..b31aed682 100644 --- 
a/source/backend/opencl/execution/BatchToSpaceExecution.hpp +++ b/source/backend/opencl/execution/BatchToSpaceExecution.hpp @@ -12,8 +12,8 @@ #include #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/CommonExecution.cpp b/source/backend/opencl/execution/CommonExecution.cpp index a9a3ecdd6..83c35cf1f 100644 --- a/source/backend/opencl/execution/CommonExecution.cpp +++ b/source/backend/opencl/execution/CommonExecution.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "CommonExecution.hpp" +#include "backend/opencl/execution/CommonExecution.hpp" namespace MNN { namespace OpenCL { @@ -15,8 +15,9 @@ CommonExecution::CommonExecution(Backend *backend) : Execution(backend) { ErrorCode CommonExecution::onExecute(const std::vector &inputs, const std::vector &outputs) { auto runtime = ((OpenCLBackend *)backend())->getOpenCLRuntime(); for (auto &unit : mUnits) { - runtime->commandQueue().enqueueNDRangeKernel(unit.kernel, cl::NullRange, unit.globalWorkSize, + auto errorCode = runtime->commandQueue().enqueueNDRangeKernel(unit.kernel, cl::NullRange, unit.globalWorkSize, unit.localWorkSize); + MNN_CHECK_CL_SUCCESS(errorCode); } return NO_ERROR; } diff --git a/source/backend/opencl/execution/CommonExecution.hpp b/source/backend/opencl/execution/CommonExecution.hpp index 659b07e22..8d54ef949 100644 --- a/source/backend/opencl/execution/CommonExecution.hpp +++ b/source/backend/opencl/execution/CommonExecution.hpp @@ -8,8 +8,8 @@ #ifndef CommonExecution_hpp #define CommonExecution_hpp -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/ConcatExecution.cpp 
b/source/backend/opencl/execution/ConcatExecution.cpp index c8dfcd6a6..ccbc5c706 100644 --- a/source/backend/opencl/execution/ConcatExecution.cpp +++ b/source/backend/opencl/execution/ConcatExecution.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/ConcatExecution.hpp" +#include "backend/opencl/execution/ConcatExecution.hpp" namespace MNN { namespace OpenCL { @@ -140,22 +140,22 @@ class ConcatCreator : public OpenCLBackend::Creator { public: virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const override { + if(inputs[0]->dimensions() == 3 || outputs[0]->dimensions() == 3){ + MNN_PRINT("opencl concat not support dim == 3 , callback to cpu !!! \n"); + return nullptr; + } auto axis = op->main_as_Axis()->axis(); if (-1 == axis) { axis = inputs[0]->dimensions() - 1; } if (outputs[0]->getDimensionType() == Tensor::TENSORFLOW) { - if(outputs[0]->dimensions() == 3){ - int index[] = {2, 3, 1}; - return new ConcatBufferExecution(inputs, index[axis], backend); - } if(outputs[0]->dimensions() == 4){ int index[] = {0, 2, 3, 1}; return new ConcatBufferExecution(inputs, index[axis], backend); } return nullptr; } - + if (1 == axis) { for (int i = 0; i < inputs.size() - 1; ++i) { if (inputs[i]->channel() % 4 != 0) { diff --git a/source/backend/opencl/execution/Conv2DBackPropFilter.cpp b/source/backend/opencl/execution/Conv2DBackPropFilter.cpp new file mode 100644 index 000000000..21daa851e --- /dev/null +++ b/source/backend/opencl/execution/Conv2DBackPropFilter.cpp @@ -0,0 +1,148 @@ +// +// Conv2DBackPropFilter.cpp +// MNN +// +// Created by MNN on 2019/10/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/execution/Conv2DBackPropFilter.hpp" + +namespace MNN { +namespace OpenCL { + +Conv2DBackPropFilter::Conv2DBackPropFilter(const MNN::Op *op, Backend *backend) : CommonExecution(backend) { + auto common = op->main_as_Convolution2D()->common(); + mStrides = {common->strideY(), common->strideX()}; + mDilations = {common->dilateY(), common->dilateX()}; + mKernels = {common->kernelY(), common->kernelX()}; + + mPaddings = {common->padY(), common->padX()}; + if (common->padMode() == PadMode_VALID) { + mPaddings[0] = mPaddings[1] = 0; + } +} + +Conv2DBackPropFilter::~Conv2DBackPropFilter() { + // do nothing +} + +ErrorCode Conv2DBackPropFilter::onResize(const std::vector &inputs, const std::vector &outputs) { + mUnits.clear(); + mUnits.resize(2); + + auto originLayout = TensorUtils::getDescribe(inputs[0])->dimensionFormat; + auto openclBackend = static_cast(backend()); + auto runtime = openclBackend->getOpenCLRuntime(); + + const int weightSize = inputs[0]->elementSize(); + auto bufferPool = openclBackend->getBufferPool(); + auto bufferPtr = bufferPool->alloc(weightSize * sizeof(float), false); + if (bufferPtr == nullptr) { + return OUT_OF_MEMORY; + } + bufferPool->recycle(bufferPtr, false); + + { + auto inputShape_ = tensorShapeFormat(inputs[1]); + auto shape_ = tensorShapeFormat(inputs[2]); + const int kernelHeight = mKernels[0], kernelWidth = mKernels[1]; + const int outputChannel = inputs[0]->length(0), inputChannel = inputs[0]->length(1); + const int batch = inputs[1]->length(0), kernelSize = kernelWidth * kernelHeight; + + int inputShape[] = {inputShape_.at(2), inputShape_.at(1)}; + int shape[] = {shape_.at(2), shape_.at(1)}; + int kernelShape[] = {kernelWidth, kernelHeight}; + int strides[] = {mStrides[1], mStrides[0]}; + int pads[] = {mPaddings[1], mPaddings[0]}; + int dilates[] = 
{mDilations[1], mDilations[0]}; + + cl::Kernel kernel = runtime->buildKernel("conv2d_backprop", "conv2d_backprop_filter", {}); + kernel.setArg(0, openCLImage(inputs[1])); + kernel.setArg(1, openCLImage(inputs[2])); + kernel.setArg(2, *bufferPtr); + kernel.setArg(3, batch); + kernel.setArg(4, outputChannel); + kernel.setArg(5, inputChannel); + kernel.setArg(6, sizeof(inputShape), inputShape); + kernel.setArg(7, sizeof(shape), shape); + kernel.setArg(8, sizeof(kernelShape), kernelShape); + kernel.setArg(9, sizeof(strides), strides); + kernel.setArg(10, sizeof(pads), pads); + kernel.setArg(11, sizeof(dilates), dilates); + + const uint32_t maxWorkGroupSize = runtime->getMaxWorkGroupSize(kernel); + std::vector gws = { + static_cast(UP_DIV(outputChannel, 4)), + static_cast(UP_DIV(inputChannel, 4)), + static_cast(kernelSize) + }; + std::vector lws = { + static_cast(ALIMIN(maxWorkGroupSize / kernelSize, 32)), 1, + static_cast(kernelSize) + }; + if (kernelSize == 1) { + lws[1] = ALIMIN(maxWorkGroupSize / lws[0], 4); + } + for (size_t i = 0; i < lws.size(); ++i) { + gws[i] = ROUND_UP(gws[i], lws[i]); + } + + mUnits[0].kernel = kernel; + mUnits[1].localWorkSize = {lws[0], lws[1], lws[2]}; + mUnits[0].globalWorkSize = {gws[0], gws[1], gws[2]}; + } + // transform kernel from normal format (oc,ic,kh,kw) to image2d (NHCW) + { + std::string kernelName = ""; + if (originLayout == MNN_DATA_FORMAT_NCHW) { + kernelName = "nchw_buffer_to_image"; + } else if (originLayout == MNN_DATA_FORMAT_NHWC) { + kernelName = "nhwc_buffer_to_image"; + } + auto shape = tensorShapeFormat(inputs[0]); + std::vector gws = { + static_cast(shape[2] * UP_DIV(shape[3], 4)), + static_cast(shape[0] * shape[1]) + }; + + cl::Kernel kernel = runtime->buildKernel("buffer_to_image", kernelName, {}); + kernel.setArg(0, gws[0]); + kernel.setArg(1, gws[1]); + kernel.setArg(2, *bufferPtr); + kernel.setArg(3, shape[1]); + kernel.setArg(4, shape[2]); + kernel.setArg(5, shape[3]); + kernel.setArg(6, 
openCLImage(outputs[0])); + + const uint32_t maxWorkGroupSize = runtime->getMaxWorkGroupSize(kernel); + std::vector lws = {16, std::max((uint32_t)1, maxWorkGroupSize / 16)}; + for (size_t i = 0; i < lws.size(); ++i) { + gws[i] = ROUND_UP(gws[i], lws[i]); + } + + mUnits[1].kernel = kernel; + mUnits[1].localWorkSize = {lws[0], lws[1]}; + mUnits[1].globalWorkSize = {gws[0], gws[1]}; + } + //MNN_PRINT("flag\n"); + + return NO_ERROR; +} + +class Conv2DBackPropFilterCreator : public OpenCLBackend::Creator { +public: + virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, + const MNN::Op *op, Backend *backend) const override { + return new Conv2DBackPropFilter(op, backend); + } +}; + +OpenCLCreatorRegister __conv_backprop_filter_grad_op(OpType_Conv2DBackPropFilter); + +} +} diff --git a/source/backend/opencl/execution/Conv2DBackPropFilter.hpp b/source/backend/opencl/execution/Conv2DBackPropFilter.hpp new file mode 100644 index 000000000..ebdb3b49f --- /dev/null +++ b/source/backend/opencl/execution/Conv2DBackPropFilter.hpp @@ -0,0 +1,33 @@ +// +// Conv2DBackPropFilter.hpp +// MNN +// +// Created by MNN on 2019/10/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Conv2DBackPropFilter_hpp +#define Conv2DBackPropFilter_hpp + +#include "backend/opencl/execution/CommonExecution.hpp" + +namespace MNN { +namespace OpenCL { + +class Conv2DBackPropFilter : public CommonExecution { +public: + Conv2DBackPropFilter(const MNN::Op *op, Backend *backend); + virtual ~Conv2DBackPropFilter(); + + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + +private: + std::vector mStrides; + std::vector mPaddings; + std::vector mDilations; + std::vector mKernels; +}; +} +} + +#endif /* Conv2DBackPropFilter_hpp */ diff --git a/source/backend/opencl/execution/ConvExecution.cpp b/source/backend/opencl/execution/ConvExecution.cpp index 5a0ebdac3..9d7891429 100644 --- a/source/backend/opencl/execution/ConvExecution.cpp +++ b/source/backend/opencl/execution/ConvExecution.cpp @@ -6,13 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/ConvExecution.hpp" -#include "ConvWinograd.hpp" -#include "ConvolutionIntFactory.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "core/OpenCLBackend.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/execution/ConvExecution.hpp" +#include "backend/opencl/execution/MultiInputConvExecution.hpp" +#include "backend/opencl/execution/ConvWinograd.hpp" +#include "backend/cpu/compute/ConvolutionIntFactory.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" + #include "half.hpp" #define UNIT 4 @@ -231,7 +233,7 @@ ConvExecution::ConvExecution(const std::vector &inputs, const MNN::Op uint64_t useLocalSize = UNIT*UNIT*4*sizeof(float)*4; if(useLocalSize >= mOpenCLBackend->getOpenCLRuntime()->getMaxLocalMem()){ mUseLocalMem = false; - }else{ + }else{ kernelName = "conv_2d_1x1_local"; mUseLocalMem=true; } @@ -269,7 +271,7 @@ 
ConvExecution::ConvExecution(const std::vector &inputs, const MNN::Op MNN_ERROR("Map error ptrCL == nullptr \n"); } mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueUnmapMemObject(*(mKernelBuffer.get()), kernelBufferPtr); - + //bias int biasSize = conv2dParams->bias()->size(); const float *biasDataPtr = conv2dParams->bias()->data(); @@ -371,9 +373,12 @@ ErrorCode ConvExecution::onResize(const std::vector &inputs, const std int kernelHeight = mConv2dCommonParams->kernelY(); int kernelWidth = mConv2dCommonParams->kernelX(); + mPaddings[0] = std::max(mPaddings[0], 0); + mPaddings[1] = std::max(mPaddings[1], 0); + if (kernelHeight == kernelWidth && kernelHeight == 1 && mPaddings[0] == 0 && mPaddings[1] == 0) { if(mConv1x1Opt){ - + auto kernel = &mKernel; uint32_t idx = 0; @@ -399,7 +404,7 @@ ErrorCode ConvExecution::onResize(const std::vector &inputs, const std kernel->setArg(idx++, *mKernelBuffer.get()); kernel->setArg(idx++, *mBiasBuffer.get()); } - + kernel->setArg(idx++, openCLImage(output)); kernel->setArg(idx++, static_cast(inputChannelBlocks)); kernel->setArg(idx++, height); @@ -479,11 +484,15 @@ class ConvolutionCreator : public OpenCLBackend::Creator { virtual ~ConvolutionCreator() = default; virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const override { + if (inputs.size() == 3) { + return new MultiInputConvExecution(op, backend); + } + auto conv2D = op->main_as_Convolution2D(); if (ConvWinograd::valid(conv2D->common(), inputs[0])) { return new ConvWinograd(conv2D, backend); } - + return new ConvExecution(inputs, op, backend); } }; diff --git a/source/backend/opencl/execution/ConvExecution.hpp b/source/backend/opencl/execution/ConvExecution.hpp index 214f9e4f7..44bcf45ca 100644 --- a/source/backend/opencl/execution/ConvExecution.hpp +++ b/source/backend/opencl/execution/ConvExecution.hpp @@ -9,14 +9,14 @@ #ifndef ConvExecution_hpp #define ConvExecution_hpp -#include 
"Execution.hpp" +#include "core/Execution.hpp" #include #include #include #include -#include "core/OpenCLBackend.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/ConvInt8Execution.cpp b/source/backend/opencl/execution/ConvInt8Execution.cpp index cff02b21c..e27ce0251 100644 --- a/source/backend/opencl/execution/ConvInt8Execution.cpp +++ b/source/backend/opencl/execution/ConvInt8Execution.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvInt8Execution.hpp" -#include "execution/InterpExecution.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Int8FunctionsOpt.h" -#include "Macro.h" -#include "core/OpenCLBackend.hpp" +#include "backend/opencl/execution/ConvInt8Execution.hpp" +#include "backend/opencl/execution/InterpExecution.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "backend/cpu/compute/Int8FunctionsOpt.h" +#include "core/Macro.h" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { @@ -109,7 +109,7 @@ ConvInt8Execution::ConvInt8Execution(Backend* backend, const MNN::Op* op) : Exec int inputChannel = weightSize / (kernelWidth * kernelHeight * outputChannel); const int8_t* weightSrc = conv2dParams->symmetricQuan()->weight()->data(); -//weight +//weight int needFilterSize = ALIGN_UP4(inputChannel) * kernelHeight * kernelWidth * ALIGN_UP4(outputChannel) * sizeof(int8_t); mFilterBuffer.reset(new cl::Buffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, needFilterSize)); @@ -126,18 +126,18 @@ ConvInt8Execution::ConvInt8Execution(Backend* backend, const MNN::Op* op) : Exec int outputChannel4 = ALIGN_UP4(outputChannel); int inputChannel4 = ALIGN_UP4(inputChannel); for(int ks = 0; ks < kernelHeight*kernelWidth; ks++){ - for (int ic = 0; ic < 
inputChannel; ic++){ + for (int ic = 0; ic < inputChannel; ic++){ for(int oc = 0; oc < outputChannel; oc++){ filterBufferPtr[ks*inputChannel4*outputChannel4 + (ic/4)*outputChannel4*4 + (oc/4)*16 + (ic%4)*4 + oc%4] = weightSrc[oc*kernelHeight*kernelWidth*inputChannel + ic*kernelHeight*kernelWidth + ks]; } } } - } - + } + runtime->commandQueue().enqueueUnmapMemObject(*filterDeviceBuffer, filterBufferPtr); - -//Bias + +//Bias int needBiasSize = ALIGN_UP4(outputChannel) * sizeof(int32_t); mBiasBuffer.reset(new cl::Buffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, needBiasSize)); @@ -155,7 +155,7 @@ ConvInt8Execution::ConvInt8Execution(Backend* backend, const MNN::Op* op) : Exec runtime->commandQueue().enqueueUnmapMemObject(*BiasDeviceBuffer, BiasbufferPtr); -//scale +//scale int needScaleSize = ALIGN_UP4(outputChannel) * sizeof(float); mScaleBuffer.reset(new cl::Buffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, needScaleSize)); auto scaleDeviceBuffer = (cl::Buffer*)mScaleBuffer.get(); @@ -187,7 +187,7 @@ ConvInt8Execution::ConvInt8Execution(Backend* backend, const MNN::Op* op) : Exec }else{ kernelName = "conv_2d"; } - + mKernel = mOpenCLBackend->getOpenCLRuntime()->buildKernel("conv_2d_int8", kernelName, buildOptions); mMaxWorkGroupSize = static_cast(mOpenCLBackend->getOpenCLRuntime()->getMaxWorkGroupSize(mKernel)); @@ -220,7 +220,7 @@ ErrorCode ConvInt8Execution::onResize(const std::vector& inputs, const mGlobalWorkSize = {static_cast(UP_DIV(output->channel(), 4)), static_cast(UP_DIV(output->width(), 4)), static_cast(output->batch() * output->height())}; - + mLocalWorkSize = conv2dGeneralLocalWS(mGlobalWorkSize, kernelHeight * kernelWidth, mMaxWorkGroupSize); int inputImageShape[2] = {input->height(), input->width()}; @@ -266,7 +266,7 @@ ErrorCode ConvInt8Execution::onResize(const std::vector& inputs, const kernel->setArg(idx++, UP_DIV(output->width(), 4)); kernel->setArg(idx++, UP_DIV(output->channel(), 4)); } - + return 
NO_ERROR; } diff --git a/source/backend/opencl/execution/ConvInt8Execution.hpp b/source/backend/opencl/execution/ConvInt8Execution.hpp index a35ca1246..c4b2fd4c0 100644 --- a/source/backend/opencl/execution/ConvInt8Execution.hpp +++ b/source/backend/opencl/execution/ConvInt8Execution.hpp @@ -11,8 +11,8 @@ #include "CommonExecution.hpp" #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/ConvWinograd.cpp b/source/backend/opencl/execution/ConvWinograd.cpp index e737bf053..3cfef7a5b 100644 --- a/source/backend/opencl/execution/ConvWinograd.cpp +++ b/source/backend/opencl/execution/ConvWinograd.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ConvWinograd.hpp" +#include "backend/opencl/execution/ConvWinograd.hpp" #include -#include "Backend.hpp" -#include "ConvolutionIntFactory.hpp" -#include "WingoradGenerater.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "core/Backend.hpp" +#include "backend/cpu/compute/ConvolutionIntFactory.hpp" +#include "math/WingoradGenerater.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" #define UNIT 2 #define INTERP 1 namespace MNN { @@ -102,7 +102,7 @@ ConvWinograd::ConvWinograd(const MNN::Convolution2D* op, Backend* backend) : Exe auto biasSize = UP_DIV(co, 4) * 4 * sizeof(float); std::shared_ptr biasBuffer( new cl::Buffer(runTime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, biasSize)); - + cl_int error; auto biasC = queue.enqueueMapBuffer(*biasBuffer, CL_TRUE, CL_MAP_WRITE, 0, biasSize, nullptr, nullptr, &error); if(biasC != nullptr && error == CL_SUCCESS){ diff --git a/source/backend/opencl/execution/ConvWinograd.hpp b/source/backend/opencl/execution/ConvWinograd.hpp index ae0ddc93c..03f6a0fea 100644 --- a/source/backend/opencl/execution/ConvWinograd.hpp +++ 
b/source/backend/opencl/execution/ConvWinograd.hpp @@ -9,12 +9,12 @@ #ifndef conv_winograd_hpp #define conv_winograd_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include #include #include -#include "execution/ConvExecution.hpp" +#include "backend/opencl/execution/ConvExecution.hpp" namespace MNN { namespace OpenCL { class ConvWinograd : public Execution { diff --git a/source/backend/opencl/execution/ConvertExecution.cpp b/source/backend/opencl/execution/ConvertExecution.cpp index 12283c46a..fdbc20f69 100644 --- a/source/backend/opencl/execution/ConvertExecution.cpp +++ b/source/backend/opencl/execution/ConvertExecution.cpp @@ -6,83 +6,71 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/ConvertExecution.hpp" -#include -#include "CPUTensorConvert.hpp" -#include "TensorUtils.hpp" +#include "backend/opencl/execution/ConvertExecution.hpp" +#include "core/Macro.h" +#include "backend/cpu/CPUTensorConvert.hpp" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { - + ConvertExecution::ConvertExecution(const std::vector& inputs, const MNN::Op* op, Backend* backend) : Execution(backend) { mOpenCLBackend = static_cast(backend); std::string kernelName; std::set buildOptions; - + kernelName = "convert"; mKernel = mOpenCLBackend->getOpenCLRuntime()->buildKernel(kernelName, kernelName, buildOptions); mMaxWorkGroupSize = static_cast(mOpenCLBackend->getOpenCLRuntime()->getMaxWorkGroupSize(mKernel)); } - + ErrorCode ConvertExecution::onResize(const std::vector& inputs, const std::vector& outputs) { Tensor* input = inputs[0]; Tensor* output = outputs[0]; - + std::vector inputShape = tensorShapeFormat(input); std::vector outputShape = tensorShapeFormat(output); - + const int batch = inputShape.at(0); const int height = inputShape.at(1); const int width = inputShape.at(2); const int channels = inputShape.at(3); - + const int channelBlocks = UP_DIV(channels, 4); - + const std::vector gws = {static_cast(channelBlocks), 
static_cast(width), static_cast(height * batch)}; - + int idx = 0; mKernel.setArg(idx++, gws[0]); mKernel.setArg(idx++, gws[1]); mKernel.setArg(idx++, gws[2]); - + mKernel.setArg(idx++, openCLImage(input)); mKernel.setArg(idx++, openCLImage(output)); - + + auto runtime = mOpenCLBackend->getOpenCLRuntime(); + mGlobalWorkSize = {static_cast(channelBlocks), static_cast(width), + static_cast(height * batch)}; + + mLocalWorkSize = localWS3DDefault(gws, mMaxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime()); return NO_ERROR; } - + ErrorCode ConvertExecution::onExecute(const std::vector& inputs, const std::vector& outputs) { #ifdef LOG_VERBOSE MNN_PRINT("Start ConvertExecution onExecute... \n"); #endif - Tensor* input = inputs[0]; - Tensor* output = outputs[0]; - - std::vector inputShape = tensorShapeFormat(input); - std::vector outputShape = tensorShapeFormat(output); - - const int batch = inputShape.at(0); - const int height = inputShape.at(1); - const int width = inputShape.at(2); - const int channels = inputShape.at(3); - - const int channelBlocks = UP_DIV(channels, 4); - - auto runtime = mOpenCLBackend->getOpenCLRuntime(); - const std::vector gws = {static_cast(channelBlocks), static_cast(width), - static_cast(height * batch)}; - - const std::vector lws = localWS3DDefault(gws, mMaxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime()); - run3DKernelDefault(mKernel, gws, lws, mOpenCLBackend->getOpenCLRuntime()); - + + run3DKernelDefault(mKernel, mGlobalWorkSize, mLocalWorkSize, mOpenCLBackend->getOpenCLRuntime()); + #ifdef LOG_VERBOSE MNN_PRINT("End ConvertExecution onExecute... 
\n"); #endif return NO_ERROR; } - + OpenCLCreatorRegister> __ConvertExecution(OpType_ConvertTensor); OpenCLCreatorRegister> __SqueezeExecution(OpType_Squeeze); diff --git a/source/backend/opencl/execution/ConvertExecution.hpp b/source/backend/opencl/execution/ConvertExecution.hpp index e671ee3da..448a71b00 100644 --- a/source/backend/opencl/execution/ConvertExecution.hpp +++ b/source/backend/opencl/execution/ConvertExecution.hpp @@ -10,8 +10,8 @@ #define ConvertExecution_hpp #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { @@ -35,6 +35,8 @@ namespace MNN { cl::Kernel mKernel; uint32_t mMaxWorkGroupSize; OpenCLBackend *mOpenCLBackend; + std::vector mGlobalWorkSize{1, 1, 1}; + std::vector mLocalWorkSize{1, 1, 1, 1}; }; } // namespace OpenCL diff --git a/source/backend/opencl/execution/CropExecution.cpp b/source/backend/opencl/execution/CropExecution.cpp index a896676a8..9b3984702 100644 --- a/source/backend/opencl/execution/CropExecution.cpp +++ b/source/backend/opencl/execution/CropExecution.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/CropExecution.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "core/OpenCLBackend.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/execution/CropExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/CropExecution.hpp b/source/backend/opencl/execution/CropExecution.hpp index 05cca08f9..47cee631d 100644 --- a/source/backend/opencl/execution/CropExecution.hpp +++ b/source/backend/opencl/execution/CropExecution.hpp @@ -9,8 +9,8 @@ #ifndef CropExecution_hpp #define CropExecution_hpp -#include "Execution.hpp" -#include 
"core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" #include #include diff --git a/source/backend/opencl/execution/DeconvExecution.cpp b/source/backend/opencl/execution/DeconvExecution.cpp index 10478f5d8..f0d71ba6f 100644 --- a/source/backend/opencl/execution/DeconvExecution.cpp +++ b/source/backend/opencl/execution/DeconvExecution.cpp @@ -6,11 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/DeconvExecution.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "core/OpenCLBackend.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/execution/MultiInputDeconvExecution.hpp" +#include "backend/opencl/execution/DeconvExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { @@ -218,7 +219,19 @@ ErrorCode DeconvExecution::onExecute(const std::vector &inputs, const return NO_ERROR; } -OpenCLCreatorRegister> __deconv_op(OpType_Deconvolution); +class DeconvolutionCreator : public OpenCLBackend::Creator { +public: + virtual ~DeconvolutionCreator() = default; + virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, + const MNN::Op *op, Backend *backend) const override { + if (inputs.size() == 3) { + return new MultiInputDeconvExecution(op, backend); + } + return new DeconvExecution(inputs, op, backend); + } +}; + +OpenCLCreatorRegister __deconv_op(OpType_Deconvolution); } // namespace OpenCL } // namespace MNN diff --git a/source/backend/opencl/execution/DepthwiseConvExecution.cpp b/source/backend/opencl/execution/DepthwiseConvExecution.cpp index 4f8ac3eb4..2e1af338b 100644 --- a/source/backend/opencl/execution/DepthwiseConvExecution.cpp +++ b/source/backend/opencl/execution/DepthwiseConvExecution.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // 
-#include "DepthwiseConvExecution.hpp" -#include +#include "backend/opencl/execution/DepthwiseConvExecution.hpp" +#include "core/Macro.h" #include -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/DepthwiseConvInt8Execution.cpp b/source/backend/opencl/execution/DepthwiseConvInt8Execution.cpp index c67dc0c91..d2b29b98c 100644 --- a/source/backend/opencl/execution/DepthwiseConvInt8Execution.cpp +++ b/source/backend/opencl/execution/DepthwiseConvInt8Execution.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "DepthwiseConvInt8Execution.hpp" -#include "execution/InterpExecution.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Int8FunctionsOpt.h" -#include "Macro.h" -#include "core/OpenCLBackend.hpp" +#include "backend/opencl/execution/DepthwiseConvInt8Execution.hpp" +#include "backend/opencl/execution/InterpExecution.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "backend/cpu/compute/Int8FunctionsOpt.h" +#include "core/Macro.h" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { @@ -41,7 +41,7 @@ DepthwiseConvInt8Execution::DepthwiseConvInt8Execution(Backend* backend, const M int weightSize = conv2dParams->symmetricQuan()->weight()->size(); const int8_t* weightSrc = conv2dParams->symmetricQuan()->weight()->data(); -//weight +//weight int needFilterSize = kernelHeight * kernelWidth * ALIGN_UP4(outputChannel) * sizeof(int8_t); mFilterBuffer.reset(new cl::Buffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, needFilterSize)); @@ -61,12 +61,12 @@ DepthwiseConvInt8Execution::DepthwiseConvInt8Execution(Backend* backend, const M filterBufferPtr[ks*outputChannel4 + (oc/4)*4 + oc%4] = weightSrc[oc*kernelHeight*kernelWidth + ks]; } } - } - + } + runtime->commandQueue().enqueueUnmapMemObject(*filterDeviceBuffer, filterBufferPtr); - -//Bias + +//Bias 
int needBiasSize = ALIGN_UP4(outputChannel) * sizeof(int32_t); mBiasBuffer.reset(new cl::Buffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, needBiasSize)); @@ -84,7 +84,7 @@ DepthwiseConvInt8Execution::DepthwiseConvInt8Execution(Backend* backend, const M runtime->commandQueue().enqueueUnmapMemObject(*BiasDeviceBuffer, BiasbufferPtr); -//scale +//scale int needScaleSize = ALIGN_UP4(outputChannel) * sizeof(float); mScaleBuffer.reset(new cl::Buffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, needScaleSize)); auto scaleDeviceBuffer = (cl::Buffer*)mScaleBuffer.get(); diff --git a/source/backend/opencl/execution/DepthwiseConvInt8Execution.hpp b/source/backend/opencl/execution/DepthwiseConvInt8Execution.hpp index f72077d39..55ab3f591 100644 --- a/source/backend/opencl/execution/DepthwiseConvInt8Execution.hpp +++ b/source/backend/opencl/execution/DepthwiseConvInt8Execution.hpp @@ -11,8 +11,8 @@ #include "CommonExecution.hpp" #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/DepthwiseDeconvExecution.cpp b/source/backend/opencl/execution/DepthwiseDeconvExecution.cpp index 465e86a84..8f8e510b8 100644 --- a/source/backend/opencl/execution/DepthwiseDeconvExecution.cpp +++ b/source/backend/opencl/execution/DepthwiseDeconvExecution.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "DepthwiseDeconvExecution.hpp" -#include -#include "TensorUtils.hpp" +#include "backend/opencl/execution/DepthwiseDeconvExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { @@ -50,7 +50,7 @@ DepthwiseDeconvExecution::DepthwiseDeconvExecution(const std::vector & ::memcpy(ptrCL, filterDataPtr, filterBuffer->size()); }else{ MNN_ERROR("Map error ptrCL == nullptr \n"); - } + } 
mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueUnmapMemObject(filterBufferCL, ptrCL); mOpenCLBackend->onAcquireBuffer(mFilter.get(), Backend::STATIC); diff --git a/source/backend/opencl/execution/EltwiseExecution.cpp b/source/backend/opencl/execution/EltwiseExecution.cpp index 9f40f8f6c..c67739a22 100644 --- a/source/backend/opencl/execution/EltwiseExecution.cpp +++ b/source/backend/opencl/execution/EltwiseExecution.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "EltwiseExecution.hpp" +#include "backend/opencl/execution/EltwiseExecution.hpp" -#include +#include "core/Macro.h" #include -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { @@ -25,6 +25,19 @@ EltwiseExecution::EltwiseExecution(const std::vector &inputs, const st ErrorCode EltwiseExecution::onResize(const std::vector &inputs, const std::vector &outputs) { MNN_ASSERT(inputs.size() >= 2); mUnits.resize(inputs.size() - 1); + + auto output = outputs[0]; + auto openCLBackend = static_cast(backend()); + std::shared_ptr myTensor; + if (inputs[0] == output) { + myTensor.reset(new Tensor(output, output->getDimensionType(), false)); + auto success = openCLBackend->onAcquireBuffer(myTensor.get(), Backend::DYNAMIC); + if (!success) { + return OUT_OF_MEMORY; + } + openCLBackend->onReleaseBuffer(myTensor.get(), Backend::DYNAMIC); + output = myTensor.get(); + } auto nhwc0 = tensorShapeFormat(inputs[0]); auto nhwc = tensorShapeFormat(outputs[0]); @@ -49,12 +62,11 @@ ErrorCode EltwiseExecution::onResize(const std::vector &inputs, const int dimension = (i >= 2) ? inputs[i]->dimensions() : inputs[i + 1]->dimensions(); const Tensor* input0 = (i >= 2) ? outputs[0] : inputs[0]; if(dimension == 0) { - mOperatorData = (i >= 2) ? - inputs[i]->host()[0] : inputs[i + 1]->host()[0]; + auto input = (i >= 2) ? 
inputs[i] : inputs[i + 1]; unit.kernel = runTime->buildKernel("binary", "binary_value", mBuildOptions); unit.kernel.setArg(0, openCLImage(input0)); - unit.kernel.setArg(1, mOperatorData); - unit.kernel.setArg(2, openCLImage(outputs[0])); + unit.kernel.setArg(1, openCLImage(input)); + unit.kernel.setArg(2, openCLImage(output)); unit.kernel.setArg(3, nhwcArray); unit.kernel.setArg(4, wh); unit.kernel.setArg(5, input1Stride); @@ -95,7 +107,7 @@ ErrorCode EltwiseExecution::onResize(const std::vector &inputs, const unit.kernel.setArg(4, wh1); unit.kernel.setArg(5, wh_0); } - unit.kernel.setArg(2, openCLImage(outputs[0])); + unit.kernel.setArg(2, openCLImage(output)); unit.kernel.setArg(3, nhwcArray); unit.kernel.setArg(6, wh); } else { @@ -113,7 +125,7 @@ ErrorCode EltwiseExecution::onResize(const std::vector &inputs, const unit.kernel.setArg(4, wh1); unit.kernel.setArg(5, wh_0); } - unit.kernel.setArg(2, openCLImage(outputs[0])); + unit.kernel.setArg(2, openCLImage(output)); unit.kernel.setArg(3, nhwcArray); unit.kernel.setArg(6, wh); } @@ -121,7 +133,7 @@ ErrorCode EltwiseExecution::onResize(const std::vector &inputs, const unit.kernel = runTime->buildKernel("binary", "binary", mBuildOptions); unit.kernel.setArg(0, openCLImage(input0)); unit.kernel.setArg(1, openCLImage(input)); - unit.kernel.setArg(2, openCLImage(outputs[0])); + unit.kernel.setArg(2, openCLImage(output)); unit.kernel.setArg(3, nhwcArray); unit.kernel.setArg(4, wh); unit.kernel.setArg(5, input1Stride); @@ -130,6 +142,15 @@ ErrorCode EltwiseExecution::onResize(const std::vector &inputs, const unit.globalWorkSize = globalSize; unit.localWorkSize = localSize; } + if (output != outputs[0]) { + Unit unit; + unit.kernel = runTime->buildKernel("binary", "imageCopy", mBuildOptions); + unit.kernel.setArg(0, openCLImage(output)); + unit.kernel.setArg(1, openCLImage(outputs[0])); + unit.localWorkSize = cl::NullRange; + unit.globalWorkSize = {static_cast(imageWidth), static_cast(imageHeight)}; + 
mUnits.push_back(unit); + } return NO_ERROR; } diff --git a/source/backend/opencl/execution/FloatToInt8Execution.cpp b/source/backend/opencl/execution/FloatToInt8Execution.cpp index c8fc489a8..2b4eef2eb 100644 --- a/source/backend/opencl/execution/FloatToInt8Execution.cpp +++ b/source/backend/opencl/execution/FloatToInt8Execution.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "FloatToInt8Execution.hpp" -#include "execution/InterpExecution.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Int8FunctionsOpt.h" -#include "Macro.h" -#include "core/OpenCLBackend.hpp" +#include "backend/opencl/execution/FloatToInt8Execution.hpp" +#include "backend/opencl/execution/InterpExecution.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "backend/cpu/compute/Int8FunctionsOpt.h" +#include "core/Macro.h" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { @@ -37,7 +37,7 @@ FloatToInt8Execution::FloatToInt8Execution(Backend* backend, const MNN::Op* para memset(bufferPtr, 0, ALIGN_UP4(scaleLen) * sizeof(float)); memcpy(bufferPtr, scale->tensorScale()->data(), scaleLen * sizeof(float)); } - + runtime->commandQueue().enqueueUnmapMemObject(*DeviceBuffer, bufferPtr); std::set buildOptions; @@ -84,11 +84,11 @@ ErrorCode FloatToInt8Execution::onExecute(const std::vector& inputs, co const int batchStride = input->stride(0); const int width = input->width(); const int height = input->height(); - + auto runtime = mOpenCLBackend->getOpenCLRuntime(); const std::vector gws = {static_cast(icDiv4), static_cast(width), static_cast(height * batch)}; - + const std::vector lws = localWS3DDefault(gws, mMaxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime()); run3DKernelDefault(mKernel, gws, lws, mOpenCLBackend->getOpenCLRuntime()); diff --git a/source/backend/opencl/execution/FloatToInt8Execution.hpp b/source/backend/opencl/execution/FloatToInt8Execution.hpp index 
1b65c5cb0..dc999f3bf 100644 --- a/source/backend/opencl/execution/FloatToInt8Execution.hpp +++ b/source/backend/opencl/execution/FloatToInt8Execution.hpp @@ -11,8 +11,8 @@ #include "CommonExecution.hpp" #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/Int8ToFloatExecution.cpp b/source/backend/opencl/execution/Int8ToFloatExecution.cpp index 9a821bb79..fa110dc5b 100644 --- a/source/backend/opencl/execution/Int8ToFloatExecution.cpp +++ b/source/backend/opencl/execution/Int8ToFloatExecution.cpp @@ -6,14 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Int8ToFloatExecution.hpp" -#include "execution/InterpExecution.hpp" -#include "CPUBackend.hpp" -#include "Concurrency.h" -#include "Int8FunctionsOpt.h" -#include "Macro.h" -#include "core/OpenCLBackend.hpp" - +#include "backend/opencl/execution/Int8ToFloatExecution.hpp" +#include "backend/opencl/execution/InterpExecution.hpp" +#include "backend/cpu/CPUBackend.hpp" +#include "core/Concurrency.h" +#include "backend/cpu/compute/Int8FunctionsOpt.h" +#include "core/Macro.h" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { @@ -37,7 +36,7 @@ Int8ToFloatExecution::Int8ToFloatExecution(Backend* backend, const MNN::Op* para memset(bufferPtr, 0, ALIGN_UP4(scaleLen) * sizeof(float)); memcpy(bufferPtr, scale->tensorScale()->data(), scaleLen * sizeof(float)); } - + runtime->commandQueue().enqueueUnmapMemObject(*DeviceBuffer, bufferPtr); std::set buildOptions; @@ -84,11 +83,11 @@ ErrorCode Int8ToFloatExecution::onExecute(const std::vector& inputs, co const int batchStride = input->stride(0); const int width = input->width(); const int height = input->height(); - + auto runtime = mOpenCLBackend->getOpenCLRuntime(); const std::vector gws = {static_cast(icDiv4), static_cast(width), static_cast(height 
* batch)}; - + const std::vector lws = localWS3DDefault(gws, mMaxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime()); run3DKernelDefault(mKernel, gws, lws, mOpenCLBackend->getOpenCLRuntime()); diff --git a/source/backend/opencl/execution/Int8ToFloatExecution.hpp b/source/backend/opencl/execution/Int8ToFloatExecution.hpp index 7e10a382d..231c95706 100644 --- a/source/backend/opencl/execution/Int8ToFloatExecution.hpp +++ b/source/backend/opencl/execution/Int8ToFloatExecution.hpp @@ -11,8 +11,8 @@ #include "CommonExecution.hpp" #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/InterpExecution.cpp b/source/backend/opencl/execution/InterpExecution.cpp index cfa660d09..cda421e09 100644 --- a/source/backend/opencl/execution/InterpExecution.cpp +++ b/source/backend/opencl/execution/InterpExecution.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/InterpExecution.hpp" -#include "TensorUtils.hpp" +#include "backend/opencl/execution/InterpExecution.hpp" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/InterpExecution.hpp b/source/backend/opencl/execution/InterpExecution.hpp index 13b2dbdc6..cbaec5a7e 100644 --- a/source/backend/opencl/execution/InterpExecution.hpp +++ b/source/backend/opencl/execution/InterpExecution.hpp @@ -12,8 +12,8 @@ #include #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/LrnExecution.cpp b/source/backend/opencl/execution/LrnExecution.cpp index 373d3a4cd..955023aad 100644 --- a/source/backend/opencl/execution/LrnExecution.cpp +++ b/source/backend/opencl/execution/LrnExecution.cpp 
@@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "LrnExecution.hpp" -#include -#include "TensorUtils.hpp" +#include "backend/opencl/execution/LrnExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/LrnExecution.hpp b/source/backend/opencl/execution/LrnExecution.hpp index 5c85d87ec..a947d9c9a 100644 --- a/source/backend/opencl/execution/LrnExecution.hpp +++ b/source/backend/opencl/execution/LrnExecution.hpp @@ -10,8 +10,8 @@ #define LrnExecution_hpp #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" #include diff --git a/source/backend/opencl/execution/MatmulExecution.cpp b/source/backend/opencl/execution/MatmulExecution.cpp new file mode 100644 index 000000000..418151619 --- /dev/null +++ b/source/backend/opencl/execution/MatmulExecution.cpp @@ -0,0 +1,80 @@ +// +// MatMulExecution.cpp +// MNN +// +// Created by MNN on 2019/02/28. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + + +#include "backend/opencl/execution/MatmulExecution.hpp" + +namespace MNN { +namespace OpenCL { + +MatMulExecution::MatMulExecution(const std::vector &inputs, const MNN::Op *op, Backend *backend) : Execution(backend) { + mOpenCLBackend = static_cast(backend); + mAreadySetArg = false; +} + +ErrorCode MatMulExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + auto runtime = mOpenCLBackend->getOpenCLRuntime(); + + if (mKernel.get() == nullptr) { + + std::set buildOptions; + std::string kernelName = "matmul"; + + mKernel = runtime->buildKernel("matmul", kernelName, buildOptions); + mMaxWorkGroupSize = static_cast(runtime->getMaxWorkGroupSize(mKernel)); + } + + Tensor *input0 = inputs[0]; + Tensor *input1 = inputs[1]; + Tensor *output = outputs[0]; + + std::vector input0Shape = tensorShapeFormat(input0); + std::vector input1Shape = tensorShapeFormat(input1); + std::vector outputShape = tensorShapeFormat(output); + //处理二维矩阵相乘,N C相当于H W + //二维矩阵相乘 + const int height = input0Shape.at(0); + const int outputChannel = input0Shape.at(3); + const int width = input1Shape.at(3); + const int outputChannelBlocks = UP_DIV(outputChannel, 4); + const int widthblocks = UP_DIV(width, 4); + mGlobalWorkSize[0] = static_cast(widthblocks); + mGlobalWorkSize[1] = static_cast(height); + int idx = 0; + mKernel.setArg(idx++, mGlobalWorkSize[0]); + mKernel.setArg(idx++, mGlobalWorkSize[1]); + mKernel.setArg(idx++, openCLImage(input0)); + mKernel.setArg(idx++, openCLImage(input1)); + mKernel.setArg(idx++, openCLImage(output)); + mKernel.setArg(idx++, static_cast(outputChannel)); + mKernel.setArg(idx++, static_cast(outputChannelBlocks)); + mLocalWorkSize = {mMaxWorkGroupSize / 64, 64, 0}; + + return NO_ERROR; +} + +ErrorCode MatMulExecution::onExecute(const std::vector &inputs, const std::vector &outputs) { + +#ifdef LOG_VERBOSE + MNN_PRINT("Start MatMulExecution onExecute... 
\n"); +#endif + + auto runtime = mOpenCLBackend->getOpenCLRuntime(); + + run2DKernelDefault(mKernel, mGlobalWorkSize, mLocalWorkSize, runtime); + +#ifdef LOG_VERBOSE + MNN_PRINT("End MatMulExecution onExecute... \n"); +#endif + return NO_ERROR; +} + +OpenCLCreatorRegister> __matmul_op(OpType_MatMul); + +} // namespace OpenCL +} // namespace MNN diff --git a/source/backend/opencl/execution/MatmulExecution.hpp b/source/backend/opencl/execution/MatmulExecution.hpp new file mode 100644 index 000000000..f5dbb76f5 --- /dev/null +++ b/source/backend/opencl/execution/MatmulExecution.hpp @@ -0,0 +1,41 @@ +// +// MatMulExecution.hpp +// MNN +// +// Created by MNN on 2019/01/31. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MatMulExecution_hpp +#define MatMulExecution_hpp + +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" + +namespace MNN { +namespace OpenCL { + +class MatMulExecution : public Execution { +public: + MatMulExecution(const std::vector &inputs, const MNN::Op *op, Backend *backend); + virtual ~MatMulExecution() = default; + + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + +private: + cl::Kernel mKernel; + uint32_t mMaxWorkGroupSize; + std::vector mInput0Shape; + std::vector mInput1Shape; + bool mAreadySetArg; + OpenCLBackend *mOpenCLBackend; + uint32_t mGlobalWorkSize[2] = {1, 1}; + std::vector mLocalWorkSize{1, 1, 1, 1}; +}; + +} // namespace OpenCL +} // namespace MNN + +#endif diff --git a/source/backend/opencl/execution/MultiInputConvExecution.cpp b/source/backend/opencl/execution/MultiInputConvExecution.cpp new file mode 100644 index 000000000..4b07d7dbc --- /dev/null +++ b/source/backend/opencl/execution/MultiInputConvExecution.cpp @@ -0,0 +1,164 @@ +// +// MultiInputConvExecution.cpp +// MNN +// +// Created by 
MNN on 2019/10/22. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/execution/MultiInputConvExecution.hpp" + +namespace MNN { +namespace OpenCL { + +MultiInputConvExecution::MultiInputConvExecution(const MNN::Op *op, Backend *backend) : CommonExecution(backend) { + auto common = op->main_as_Convolution2D()->common(); + mPadMode = common->padMode(); + mStrides = {common->strideY(), common->strideX()}; + mDilations = {common->dilateY(), common->dilateX()}; + if (mPadMode != PadMode_SAME) { + mPaddings = {common->padY() * 2, common->padX() * 2}; + } +} + +MultiInputConvExecution::~MultiInputConvExecution() { + // do nothing +} + +ErrorCode MultiInputConvExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + mUnits.clear(); + mUnits.resize(3); + + auto originLayout = TensorUtils::getDescribe(inputs[1])->dimensionFormat; + auto openclBackend = static_cast(backend()); + auto runtime = openclBackend->getOpenCLRuntime(); + + auto inputShape = tensorShapeFormat(inputs[0]); + auto outputShape = tensorShapeFormat(outputs[0]); + const int batch = outputShape.at(0); + const int outputChannel = outputShape.at(3), inputChannel = inputShape.at(3); + const int inputHeight = inputShape.at(1), inputWidth = inputShape.at(2); + const int height = outputShape.at(1), width = outputShape.at(2); + const int kernelY = inputs[1]->length(2), kernelX = inputs[1]->length(3); + int kernelShape[2] = {kernelY, kernelX}; + + if (mPadMode == PadMode_SAME) { + int padNeededHeight = (height - 1) * mStrides[0] + (kernelY - 1) * mDilations[0] + 1 - inputHeight; + int padNeededWidth = (width - 1) * mStrides[1] + (kernelX - 1) * mDilations[1] + 1 - inputWidth; + mPaddings[0] = padNeededHeight; + mPaddings[1] = padNeededWidth; + } + + const int weightSize = inputs[1]->elementSize(); + auto bufferPool = openclBackend->getBufferPool(); + auto 
bufferPtr = bufferPool->alloc(weightSize * sizeof(float), false); + if (bufferPtr == nullptr) { + return OUT_OF_MEMORY; + } + mFilter.reset(Tensor::createDevice({1, UP_DIV(outputChannel, 4) * kernelY * kernelX, 1, inputChannel * 4})); + bool succ = openclBackend->onAcquireBuffer(mFilter.get(), Backend::DYNAMIC); + bufferPool->recycle(bufferPtr, false); + if (!succ) { + return OUT_OF_MEMORY; + } + openclBackend->onReleaseBuffer(mFilter.get(), Backend::DYNAMIC); + + // transform kernel from image2d (NHCW) to original form (maybe NCHW or NHWC) + { + std::string kernelName = ""; + if (originLayout == MNN_DATA_FORMAT_NCHW) { + kernelName = "image_to_nchw_buffer"; + } else if (originLayout == MNN_DATA_FORMAT_NHWC) { + kernelName = "image_to_nhwc_buffer"; + } + auto shape = tensorShapeFormat(inputs[1]); + std::vector gws = {static_cast(shape[2] * UP_DIV(shape[3], 4)), static_cast(shape[0] * shape[1])}; + + cl::Kernel kernel = runtime->buildKernel("buffer_to_image", kernelName, {}); + kernel.setArg(0, gws[0]); + kernel.setArg(1, gws[1]); + kernel.setArg(2, *bufferPtr); + kernel.setArg(3, shape[1]); + kernel.setArg(4, shape[2]); + kernel.setArg(5, shape[3]); + kernel.setArg(6, openCLImage(inputs[1])); + + const uint32_t maxWorkGroupSize = runtime->getMaxWorkGroupSize(kernel); + std::vector lws = {16, std::max((uint32_t)1, maxWorkGroupSize / 16)}; + for (size_t i = 0; i < lws.size(); ++i) { + gws[i] = ROUND_UP(gws[i], lws[i]); + } + + mUnits[0].kernel = kernel; + mUnits[0].localWorkSize = {lws[0], lws[1]}; + mUnits[0].globalWorkSize = {gws[0], gws[1]}; + } + + // transform kernel from original form (maybe NCHW or NHWC) to filter format + { + std::vector gws = {static_cast(inputChannel), static_cast(UP_DIV(outputChannel, 4) * kernelY * kernelX)}; + + cl::Kernel kernel = runtime->buildKernel("buffer_to_image", "conv2d_filter_buffer_to_image", {}); + kernel.setArg(0, gws[0]); + kernel.setArg(1, gws[1]); + kernel.setArg(2, *bufferPtr); + kernel.setArg(3, outputChannel); + 
kernel.setArg(4, sizeof(kernelShape), kernelShape); + kernel.setArg(5, inputChannel * kernelY * kernelX); + kernel.setArg(6, kernelY * kernelX); + kernel.setArg(7, openCLImage(mFilter.get())); + + const uint32_t maxWorkGroupSize = runtime->getMaxWorkGroupSize(kernel); + std::vector lws = {16, std::max((uint32_t)1, maxWorkGroupSize / 16)}; + for (size_t i = 0; i < lws.size(); ++i) { + gws[i] = ROUND_UP(gws[i], lws[i]); + } + + mUnits[1].kernel = kernel; + mUnits[1].localWorkSize = {lws[0], lws[1]}; + mUnits[1].globalWorkSize = {gws[0], gws[1]}; + } + + { + std::vector gws = {static_cast(UP_DIV(outputChannel, 4) * UP_DIV(width, 4)), static_cast(height * batch)}; + int inputImageShape[2] = {inputHeight, inputWidth}; + int outputImageShape[2] = {height, width}; + int strideShape[2] = {mStrides[0], mStrides[1]}; + int paddingShape[2] = {mPaddings[0] / 2, mPaddings[1] / 2}; + int dilationShape[2] = {mDilations[0], mDilations[1]}; + + cl::Kernel kernel = runtime->buildKernel("conv_2d", "conv_2d", {}); + kernel.setArg(0, gws[0]); + kernel.setArg(1, gws[1]); + kernel.setArg(2, openCLImage(inputs[0])); + kernel.setArg(3, openCLImage(mFilter.get())); + kernel.setArg(4, openCLImage(inputs[2])); + kernel.setArg(5, openCLImage(outputs[0])); + kernel.setArg(6, sizeof(inputImageShape), inputImageShape); + kernel.setArg(7, UP_DIV(inputChannel, 4)); + kernel.setArg(8, sizeof(outputImageShape), outputImageShape); + kernel.setArg(9, sizeof(kernelShape), kernelShape); + kernel.setArg(10, sizeof(strideShape), strideShape); + kernel.setArg(11, sizeof(paddingShape), paddingShape); + kernel.setArg(12, sizeof(dilationShape), dilationShape); + kernel.setArg(13, UP_DIV(width, 4)); + + std::vector lws = {runtime->deviceComputeUnits() * 2, 4, 1}; + for (int i = 0; i < 2; ++i) { + gws[i] = ROUND_UP(gws[i], std::max((uint32_t)1, lws[i])); + } + + mUnits[2].kernel = kernel; + mUnits[2].localWorkSize = {lws[0], lws[1]}; + mUnits[2].globalWorkSize = {gws[0], gws[1]}; + } + + return NO_ERROR; +} + +} 
+} diff --git a/source/backend/opencl/execution/MultiInputConvExecution.hpp b/source/backend/opencl/execution/MultiInputConvExecution.hpp new file mode 100644 index 000000000..28732f069 --- /dev/null +++ b/source/backend/opencl/execution/MultiInputConvExecution.hpp @@ -0,0 +1,35 @@ +// +// MultiInputConvExecution.hpp +// MNN +// +// Created by MNN on 2019/10/22. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MultiInputConvExecution_hpp +#define MultiInputConvExecution_hpp + +#include "backend/opencl/execution/CommonExecution.hpp" + +namespace MNN { +namespace OpenCL { + +class MultiInputConvExecution : public CommonExecution { +public: + MultiInputConvExecution(const MNN::Op *op, Backend *backend); + virtual ~MultiInputConvExecution(); + + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + +private: + PadMode mPadMode; + std::vector mStrides{1, 1}; + std::vector mPaddings{0, 0}; + std::vector mDilations{1, 1}; + std::shared_ptr mFilter; + +}; +} +} + +#endif /* MultiInputConvExecution_hpp */ diff --git a/source/backend/opencl/execution/MultiInputDeconvExecution.cpp b/source/backend/opencl/execution/MultiInputDeconvExecution.cpp new file mode 100644 index 000000000..c1fe0fd62 --- /dev/null +++ b/source/backend/opencl/execution/MultiInputDeconvExecution.cpp @@ -0,0 +1,191 @@ +// +// MultiInputDeconvExecution.cpp +// MNN +// +// Created by MNN on 2019/10/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/execution/MultiInputDeconvExecution.hpp" + +namespace MNN { +namespace OpenCL { + +MultiInputDeconvExecution::MultiInputDeconvExecution(const MNN::Op *op, Backend *backend) : CommonExecution(backend) { + auto common = op->main_as_Convolution2D()->common(); + + mStrides = {common->strideY(), common->strideX()}; + MNN_ASSERT(mStrides[0] > 0 && mStrides[1] > 0); + + mDilations = {common->dilateY(), common->dilateX()}; + mPaddings = { + (common->kernelY() - 1 - common->padY()) * 2, + (common->kernelX() - 1 - common->padX()) * 2 + }; + if (common->padMode() == PadMode_VALID) { + mPaddings[0] = mPaddings[1] = 0; + } +} + +MultiInputDeconvExecution::~MultiInputDeconvExecution() { + // do nothing +} + +ErrorCode MultiInputDeconvExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + mUnits.clear(); + mUnits.resize(4); + + auto originLayout = TensorUtils::getDescribe(inputs[1])->dimensionFormat; + auto openclBackend = static_cast(backend()); + auto runtime = openclBackend->getOpenCLRuntime(); + + auto inputShape = tensorShapeFormat(inputs[0]); + auto outputShape = tensorShapeFormat(outputs[0]); + const int batch = outputShape.at(0); + const int outputChannel = outputShape.at(3), inputChannel = inputShape.at(3); + const int inputHeight = inputShape.at(1), inputWidth = inputShape.at(2); + const int height = outputShape.at(1), width = outputShape.at(2); + const int kernelY = inputs[1]->length(2), kernelX = inputs[1]->length(3); + int kernelShape[2] = {kernelY, kernelX}; + + const int weightSize = inputs[1]->elementSize(); + auto bufferPool = openclBackend->getBufferPool(); + auto rawBufferPtr = bufferPool->alloc(weightSize * sizeof(float), false); + if (rawBufferPtr == nullptr) { + return OUT_OF_MEMORY; + } + auto bufferPtr = bufferPool->alloc(weightSize * sizeof(float), false); + if 
(bufferPtr == nullptr) { + bufferPool->recycle(rawBufferPtr, false); + return OUT_OF_MEMORY; + } + mFilter.reset(Tensor::createDevice({1, UP_DIV(outputChannel, 4) * kernelY * kernelX, 1, inputChannel * 4})); + bool succ = openclBackend->onAcquireBuffer(mFilter.get(), Backend::DYNAMIC); + bufferPool->recycle(rawBufferPtr, false); + bufferPool->recycle(bufferPtr, false); + if (!succ) { + return OUT_OF_MEMORY; + } + openclBackend->onReleaseBuffer(mFilter.get(), Backend::DYNAMIC); + + // transform kernel from image2d (NHCW) to original form (maybe NCHW or NHWC) + { + std::string kernelName = ""; + if (originLayout == MNN_DATA_FORMAT_NCHW) { + kernelName = "image_to_nchw_buffer"; + } else if (originLayout == MNN_DATA_FORMAT_NHWC) { + kernelName = "image_to_nhwc_buffer"; + } + auto shape = tensorShapeFormat(inputs[1]); + std::vector gws = {static_cast(shape[2] * UP_DIV(shape[3], 4)), static_cast(shape[0] * shape[1])}; + + cl::Kernel kernel = runtime->buildKernel("buffer_to_image", kernelName, {}); + kernel.setArg(0, gws[0]); + kernel.setArg(1, gws[1]); + kernel.setArg(2, *rawBufferPtr); + kernel.setArg(3, shape[1]); + kernel.setArg(4, shape[2]); + kernel.setArg(5, shape[3]); + kernel.setArg(6, openCLImage(inputs[1])); + + const uint32_t maxWorkGroupSize = runtime->getMaxWorkGroupSize(kernel); + std::vector lws = {16, std::max((uint32_t)1, maxWorkGroupSize / 16)}; + for (size_t i = 0; i < lws.size(); ++i) { + gws[i] = ROUND_UP(gws[i], lws[i]); + } + + mUnits[0].kernel = kernel; + mUnits[0].localWorkSize = {lws[0], lws[1]}; + mUnits[0].globalWorkSize = {gws[0], gws[1]}; + } + + // convert kernel from IOHW to OIHW, similar to DeconvExecution.cpp + { + cl::Kernel kernel = runtime->buildKernel("deconv_2d", "iohw2oihw", {}); + kernel.setArg(0, *rawBufferPtr); + kernel.setArg(1, *bufferPtr); + kernel.setArg(2, kernelY * kernelX); + kernel.setArg(3, inputChannel); + kernel.setArg(4, outputChannel); + + mUnits[1].kernel = kernel; + mUnits[1].localWorkSize = cl::NullRange; + 
mUnits[1].globalWorkSize = { + static_cast(inputChannel), + static_cast(outputChannel) + }; + } + + // transform kernel from original form (maybe NCHW or NHWC) to filter format + { + std::vector gws = {static_cast(inputChannel), static_cast(UP_DIV(outputChannel, 4) * kernelY * kernelX)}; + + cl::Kernel kernel = runtime->buildKernel("buffer_to_image", "conv2d_filter_buffer_to_image", {}); + kernel.setArg(0, gws[0]); + kernel.setArg(1, gws[1]); + kernel.setArg(2, *bufferPtr); + kernel.setArg(3, outputChannel); + kernel.setArg(4, sizeof(kernelShape), kernelShape); + kernel.setArg(5, inputChannel * kernelY * kernelX); + kernel.setArg(6, kernelY * kernelX); + kernel.setArg(7, openCLImage(mFilter.get())); + + const uint32_t maxWorkGroupSize = runtime->getMaxWorkGroupSize(kernel); + std::vector lws = {16, std::max((uint32_t)1, maxWorkGroupSize / 16)}; + for (size_t i = 0; i < lws.size(); ++i) { + gws[i] = ROUND_UP(gws[i], lws[i]); + } + + mUnits[2].kernel = kernel; + mUnits[2].localWorkSize = {lws[0], lws[1]}; + mUnits[2].globalWorkSize = {gws[0], gws[1]}; + } + + { + std::vector gws = { + static_cast(UP_DIV(outputChannel, 4)), + static_cast(width), + static_cast(height * batch) + }; + int inputImageShape[] = {inputHeight, inputWidth}; + int outputImageShape[] = {height, width}; + int strideShape[] = {mStrides[0], mStrides[1]}; + int paddingShape[] = {UP_DIV(mPaddings[0], 2), UP_DIV(mPaddings[1], 2)}; + int alignShape[] = {mStrides[0] - 1 - paddingShape[0], mStrides[1] - 1 - paddingShape[1]}; + + auto kernel = runtime->buildKernel("deconv_2d", "deconv_2d", {}); + kernel.setArg(0, gws[0]); + kernel.setArg(1, gws[1]); + kernel.setArg(2, gws[2]); + kernel.setArg(3, openCLImage(inputs[0])); + kernel.setArg(4, openCLImage(mFilter.get())); + kernel.setArg(5, openCLImage(inputs[2])); + kernel.setArg(6, openCLImage(outputs[0])); + kernel.setArg(7, sizeof(inputImageShape), inputImageShape); + kernel.setArg(8, sizeof(outputImageShape), outputImageShape); + kernel.setArg(9, 
sizeof(strideShape), strideShape); + kernel.setArg(10, sizeof(alignShape), alignShape); + kernel.setArg(11, sizeof(paddingShape), paddingShape); + kernel.setArg(12, sizeof(kernelShape), kernelShape); + kernel.setArg(13, static_cast(kernelX * kernelY)); + kernel.setArg(14, static_cast(UP_DIV(inputChannel, 4))); + kernel.setArg(15, static_cast(UP_DIV(outputChannel, 4))); + + const uint32_t maxWorkGroupSize = runtime->getMaxWorkGroupSize(kernel); + auto lws = localWS3DDefault(gws, maxWorkGroupSize, runtime); + for (size_t i = 0; i < lws.size(); ++i) { + gws[i] = ROUND_UP(gws[i], std::max((uint32_t)1, lws[i])); + } + + mUnits[3].kernel = kernel; + mUnits[3].localWorkSize = {lws[0], lws[1], lws[2]}; + mUnits[3].globalWorkSize = {gws[0], gws[1], gws[2]}; + } + + return NO_ERROR; +} + +} +} diff --git a/source/backend/opencl/execution/MultiInputDeconvExecution.hpp b/source/backend/opencl/execution/MultiInputDeconvExecution.hpp new file mode 100644 index 000000000..86fbe70b3 --- /dev/null +++ b/source/backend/opencl/execution/MultiInputDeconvExecution.hpp @@ -0,0 +1,32 @@ +// +// MultiInputDeconvExecution.hpp +// MNN +// +// Created by MNN on 2019/10/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MultiInputDeconvExecution_hpp +#define MultiInputDeconvExecution_hpp + +#include "backend/opencl/execution/CommonExecution.hpp" +namespace MNN { +namespace OpenCL { + +class MultiInputDeconvExecution : public CommonExecution { +public: + MultiInputDeconvExecution(const MNN::Op *op, Backend *backend); + virtual ~MultiInputDeconvExecution(); + + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + +private: + std::vector mStrides; + std::vector mPaddings; + std::vector mDilations; + std::shared_ptr mFilter; +}; + +} // namespace OpenCL +} // namespace MNN +#endif /* MultiInputDeconvExecution_hpp */ diff --git a/source/backend/opencl/execution/NormalizeExecution.cpp b/source/backend/opencl/execution/NormalizeExecution.cpp index 85a9258ef..77656544e 100644 --- a/source/backend/opencl/execution/NormalizeExecution.cpp +++ b/source/backend/opencl/execution/NormalizeExecution.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/NormalizeExecution.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "core/OpenCLBackend.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/execution/NormalizeExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/NormalizeExecution.hpp b/source/backend/opencl/execution/NormalizeExecution.hpp index fb8f0f34b..8e9dcb4d6 100644 --- a/source/backend/opencl/execution/NormalizeExecution.hpp +++ b/source/backend/opencl/execution/NormalizeExecution.hpp @@ -12,9 +12,9 @@ #include #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" 
+#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/PermuteExecution.cpp b/source/backend/opencl/execution/PermuteExecution.cpp index 2e73d2d2a..d365370c3 100644 --- a/source/backend/opencl/execution/PermuteExecution.cpp +++ b/source/backend/opencl/execution/PermuteExecution.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "PermuteExecution.hpp" -#include -#include "TensorUtils.hpp" -#include "core/OpenCLBackend.hpp" +#include "backend/opencl/execution/PermuteExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/PoolExecution.cpp b/source/backend/opencl/execution/PoolExecution.cpp index eebad202a..80439cbe4 100644 --- a/source/backend/opencl/execution/PoolExecution.cpp +++ b/source/backend/opencl/execution/PoolExecution.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/PoolExecution.hpp" -#include -#include "TensorUtils.hpp" -#include "core/OpenCLBackend.hpp" +#include "backend/opencl/execution/PoolExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/PoolExecution.hpp b/source/backend/opencl/execution/PoolExecution.hpp index 32194e6e0..dbf230130 100644 --- a/source/backend/opencl/execution/PoolExecution.hpp +++ b/source/backend/opencl/execution/PoolExecution.hpp @@ -12,9 +12,9 @@ #include #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { diff --git 
a/source/backend/opencl/execution/PoolGradExecution.cpp b/source/backend/opencl/execution/PoolGradExecution.cpp new file mode 100644 index 000000000..c92cc06de --- /dev/null +++ b/source/backend/opencl/execution/PoolGradExecution.cpp @@ -0,0 +1,80 @@ +// +// PoolGradExecution.cpp +// MNN +// +// Created by MNN on 2019/10/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "backend/opencl/execution/PoolGradExecution.hpp" +#include "core/TensorUtils.hpp" + +namespace MNN { +namespace OpenCL { + +PoolGradExecution::PoolGradExecution(const MNN::Op *op, Backend *backend) +: CommonExecution(backend) { + auto pool = op->main_as_Pool(); + mType = pool->type(); + mKernels = std::vector({pool->kernelY(), pool->kernelX()}); + mStrides = std::vector({pool->strideY(), pool->strideX()}); +} + +PoolGradExecution::~PoolGradExecution() { + // do nothing +} + +ErrorCode PoolGradExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + MNN_ASSERT(mType == PoolType_MAXPOOL || mType == PoolType_AVEPOOL); + mUnits.clear(); + mUnits.resize(1); + + auto shape = tensorShapeFormat(inputs[0]); + auto poolShape = tensorShapeFormat(inputs[1]); + uint32_t imageHeight = shape[0] * shape[1]; + uint32_t imageWidth = shape[2] * UP_DIV(shape[3], 4); + + auto runTime = ((OpenCLBackend *)backend())->getOpenCLRuntime(); + cl::Kernel kernel; + int idx = 0; + if (mType == PoolType_MAXPOOL) { + kernel = runTime->buildKernel("pool_grad", "maxpool_grad", {}); + kernel.setArg(idx++, openCLImage(inputs[0])); + kernel.setArg(idx++, openCLImage(inputs[1])); + kernel.setArg(idx++, openCLImage(inputs[2])); + kernel.setArg(idx++, openCLImage(outputs[0])); + } else { + kernel = runTime->buildKernel("pool_grad", "avepool_grad", {}); + kernel.setArg(idx++, openCLImage(inputs[2])); + kernel.setArg(idx++, openCLImage(outputs[0])); + } + { + int _shape[] = {shape[1], shape[2]}; + int _poolShape[] = {poolShape[1], poolShape[2]}; + int kernelSize[] = {mKernels[0], 
mKernels[1]}; + int stride[] = {mStrides[0], mStrides[1]}; + kernel.setArg(idx++, _shape); + kernel.setArg(idx++, _poolShape); + kernel.setArg(idx++, kernelSize); + kernel.setArg(idx++, stride); + } + mUnits[0].kernel = kernel; + mUnits[0].localWorkSize = cl::NullRange; + mUnits[0].globalWorkSize = {imageHeight, imageWidth}; + + return NO_ERROR; +} + +class PoolGradCreator : public OpenCLBackend::Creator { +public: + virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, + const MNN::Op *op, Backend *backend) const override { + return new PoolGradExecution(op, backend); + } +}; + +OpenCLCreatorRegister __Pool_grad_op(OpType_PoolGrad); + +} +} diff --git a/source/backend/opencl/execution/PoolGradExecution.hpp b/source/backend/opencl/execution/PoolGradExecution.hpp new file mode 100644 index 000000000..c7809d87f --- /dev/null +++ b/source/backend/opencl/execution/PoolGradExecution.hpp @@ -0,0 +1,32 @@ +// +// PoolGradExecution.hpp +// MNN +// +// Created by MNN on 2019/10/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef PoolGradExecution_hpp +#define PoolGradExecution_hpp + +#include +#include "backend/opencl/execution/CommonExecution.hpp" +namespace MNN { +namespace OpenCL { + +class PoolGradExecution : public CommonExecution { +public: + PoolGradExecution(const MNN::Op *op, Backend *backend); + virtual ~PoolGradExecution(); + + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + +private: + std::vector mKernels; + std::vector mStrides; + PoolType mType; +}; +} +} + +#endif /* PoolGradExecution_hpp */ diff --git a/source/backend/opencl/execution/ReductionExecution.cpp b/source/backend/opencl/execution/ReductionExecution.cpp new file mode 100644 index 000000000..4954baeaa --- /dev/null +++ b/source/backend/opencl/execution/ReductionExecution.cpp @@ -0,0 +1,364 @@ +// +// ReductionExecution.cpp +// MNN +// +// Created by MNN on 2019/10/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/opencl/execution/ReductionExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" + +namespace MNN { +namespace OpenCL { + +ReductionExecution::ReductionExecution(const MNN::Op* op, Backend* backend) : CommonExecution(backend) { +#ifdef LOG_VERBOSE + MNN_PRINT("start ReductionExecution init !\n"); +#endif + mOpenCLBackend = static_cast(backend); + auto reduct = op->main_as_ReductionParam(); + if (nullptr != reduct->dim()) { + for (int i = 0; i < reduct->dim()->size(); ++i) { + mAxis.insert(reduct->dim()->data()[i]); + } + } + switch (op->main_as_ReductionParam()->operation()) { + case ReductionType_MEAN: + mReductType = "0"; + break; + case ReductionType_MAXIMUM: + mReductType = "1"; + break; + case ReductionType_MINIMUM: + mReductType = "2"; + break; + case ReductionType_PROD: + mReductType = "3"; + break; + case ReductionType_SUM: + mReductType = "4"; + break; + default: + MNN_ASSERT(false); + break; + } +#ifdef LOG_VERBOSE + MNN_PRINT("end ReductionExecution init !\n"); +#endif +} + +ErrorCode ReductionExecution::generateReductionGWSLWS(const std::vector ¶mArray) { + if (paramArray.empty()) { + return INVALID_VALUE; + } + auto runtime = mOpenCLBackend->getOpenCLRuntime(); + std::vector kernelArray(3); + if (runtime->getGpuType() == ADRENO) { + uint32_t waveSize; + if (mAxis.size() == 1) { + waveSize = static_cast(runtime->GetKernelWaveSize(mReduct1DKernel)); + } else { + waveSize = static_cast(runtime->GetKernelWaveSize(mReduct2DKernel)); + } + mGlobalWorkSize = {4, (waveSize / 4), paramArray[0] * paramArray[2]}; + } else { + mGlobalWorkSize = {4, paramArray[1] / 16, paramArray[0] * paramArray[2]}; + if (mGlobalWorkSize[1] == 0) { + mGlobalWorkSize[1] = 1; + } else if (mGlobalWorkSize[1] > 16) { + mGlobalWorkSize[1] = 16; + } + } + mLocalWorkSize = {mGlobalWorkSize[0], mGlobalWorkSize[1], 1}; + + return NO_ERROR; +} + +ErrorCode 
ReductionExecution::prepareReduction1Dkernel(const std::vector &inputArray, + const Tensor *input, const Tensor *output) { + if (inputArray.empty()) { + return INVALID_VALUE; + } + const int batch = inputArray[0]; + const int reductSize = inputArray[1]; + const int workNum = inputArray[2]; + const int groupNum = inputArray[3]; + const int channels = inputArray[4]; + std::vector paramArray = {static_cast(batch), static_cast(workNum), static_cast(groupNum)}; + auto code = generateReductionGWSLWS(paramArray); + if (NO_ERROR != code) { + return INVALID_VALUE; + } + const int groupWorkSize = mLocalWorkSize[0] * mLocalWorkSize[1] * mLocalWorkSize[2]; + // Each kernel intends to compute computeNum elements. + const int computeNum = (workNum + groupWorkSize - 1) / groupWorkSize; + const int lastNum = workNum % groupWorkSize; + uint32_t idx = 0; + mReduct1DKernel.setArg(idx++, mGlobalWorkSize[0]); + mReduct1DKernel.setArg(idx++, mGlobalWorkSize[1]); + mReduct1DKernel.setArg(idx++, mGlobalWorkSize[2]); + mReduct1DKernel.setArg(idx++, openCLImage(input)); + mReduct1DKernel.setArg(idx++, openCLImage(output)); + mReduct1DKernel.setArg(idx++, static_cast(groupWorkSize)); + mReduct1DKernel.setArg(idx++, static_cast(computeNum)); + mReduct1DKernel.setArg(idx++, static_cast(lastNum)); + mReduct1DKernel.setArg(idx++, static_cast(reductSize)); + mReduct1DKernel.setArg(idx++, static_cast(workNum)); + mReduct1DKernel.setArg(idx++, static_cast(groupNum)); + mReduct1DKernel.setArg(idx++, channels); + + return NO_ERROR; +} + +ErrorCode ReductionExecution::prepareReduction2Dkernel(const std::vector &inputArray, + const Tensor *input, const Tensor *output) { + if (inputArray.empty()) { + return INVALID_VALUE; + } + auto runtime = mOpenCLBackend->getOpenCLRuntime(); + auto bufferPool = mOpenCLBackend->getBufferPool(); + const int batch = inputArray[0]; + const int inputHeight = inputArray[1]; + const int inputWidth = inputArray[2]; + const int leftSize = inputArray[3]; + const int channels = 
inputArray[4]; + cl::Buffer* leftBuffer = bufferPool->alloc(leftSize); + const uint32_t imageSize = static_cast(inputHeight * inputWidth); + std::vector paramArray = {static_cast(batch), imageSize, static_cast(leftSize)}; + auto code = generateReductionGWSLWS(paramArray); + if (NO_ERROR != code) { + return INVALID_VALUE; + } + const int groupWorkSize = mLocalWorkSize[0] * mLocalWorkSize[1] * mLocalWorkSize[2]; + // Each kernel intends to compute computeNum elements. + const int computeNum = (imageSize + groupWorkSize - 1) / groupWorkSize; + const int lastNum = imageSize % groupWorkSize; + uint32_t idx = 0; + mReduct2DKernel.setArg(idx++, mGlobalWorkSize[0]); + mReduct2DKernel.setArg(idx++, mGlobalWorkSize[1]); + mReduct2DKernel.setArg(idx++, mGlobalWorkSize[2]); + mReduct2DKernel.setArg(idx++, openCLImage(input)); + mReduct2DKernel.setArg(idx++, openCLImage(output)); + mReduct2DKernel.setArg(idx++, (groupWorkSize * 4 * sizeof(float)), nullptr); + mReduct2DKernel.setArg(idx++, *leftBuffer); + mReduct2DKernel.setArg(idx++, static_cast(groupWorkSize)); + mReduct2DKernel.setArg(idx++, static_cast(computeNum)); + mReduct2DKernel.setArg(idx++, static_cast(lastNum)); + mReduct2DKernel.setArg(idx++, static_cast(inputHeight)); + mReduct2DKernel.setArg(idx++, static_cast(inputWidth)); + mReduct2DKernel.setArg(idx++, static_cast(leftSize)); + mReduct2DKernel.setArg(idx++, channels); + + return NO_ERROR; +} + +ErrorCode ReductionExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + auto runtime = mOpenCLBackend->getOpenCLRuntime(); + auto input = inputs[0]; + auto output = outputs[0]; + std::vector inputShape = tensorShapeFormat(input); + + // For fast test, when there is inputs[1] represent axis to reduce, assume it reduce all dim + // TODO: remove the assumption, support general dims + if (inputs.size() >= 2) { + mAxis.clear(); + for (int i = 0; i < input->dimensions(); ++i) { + mAxis.insert(i); + } + } + + mUnits.resize(1); + if (mAxis.size() == 
input->dimensions() && mReductType == "4") { + auto kernel = runtime->buildKernel("reduction", "reduce_sum_all", {}); + kernel.setArg(0, openCLImage(input)); + kernel.setArg(1, openCLImage(output)); + kernel.setArg(2, inputShape[2]); + kernel.setArg(3, inputShape[3]); + mUnits[0].kernel = kernel; + mUnits[0].localWorkSize = cl::NullRange; + mUnits[0].globalWorkSize = {static_cast(1)}; + return NO_ERROR; + } + + // only support channel + if (mAxis.size() == 3 && mAxis.find(0) != mAxis.end() && mReductType == "4") { + auto layout = TensorUtils::getDescribe(input)->dimensionFormat; + bool support = true; + if (layout == MNN_DATA_FORMAT_NCHW || layout == MNN_DATA_FORMAT_NC4HW4) { + if (mAxis.find(1) != mAxis.end()) { + support = false; + } + } else { + if (mAxis.find(3) != mAxis.end()) { + support = false; + } + } + if (!support) { + return NOT_SUPPORT; + } + + bool useLocal = false; + /* on Mac Intel Iris Pro 1536 MB, 16x32x128x128 (NCHW) input + * reduce_use_local_along_channel: 140.30ms + * reduce_along_channel: 221.34ms + * NOTE: time cost above include data transfer between CPU and GPU + */ + cl::Kernel kernel; + if (useLocal) { + kernel = runtime->buildKernel("reduction", "reduce_use_local_along_channel", {}); + } else { + kernel = runtime->buildKernel("reduction", "reduce_along_channel", {}); + } + + kernel.setArg(0, openCLImage(input)); + kernel.setArg(1, openCLImage(output)); + kernel.setArg(2, inputShape[2]); + kernel.setArg(3, (mReductType == "0" ? 
1 : 0)); + + if (useLocal) { + const int N_H = inputShape[0] * inputShape[1]; + const int tile = ALIMIN(N_H, 16), step = UP_DIV(N_H, tile); + kernel.setArg(4, step); + kernel.setArg(5, cl::Local(tile * 4 * sizeof(float))); + kernel.setArg(6, tile * 4); + mUnits[0].localWorkSize = {static_cast(tile), static_cast(1)}; + mUnits[0].globalWorkSize = { + static_cast(tile), + static_cast(UP_DIV(inputShape[3], 4)) + }; + } else { + mUnits[0].localWorkSize = cl::NullRange; + mUnits[0].globalWorkSize = {static_cast(UP_DIV(inputShape[3], 4))}; + } + + mUnits[0].kernel = kernel; + + return NO_ERROR; + } + + int batch = inputShape.at(0); + int inputHeight = inputShape.at(1); + int inputWidth = inputShape.at(2); + int channels = inputShape.at(3); + int channelBlocks = UP_DIV(channels, 4); + std::vector inputArray(5); + + std::set buildOptions; + buildOptions.emplace("-DREDUCE_TYPE=" + mReductType); + if (runtime->getGpuType() == ADRENO) { + buildOptions.emplace("-DNON_QUALCOMM_ADRENO"); + } + if (mAxis.size() == 0) { + return NOT_SUPPORT; + } + + if (mAxis.size() == 1) { + auto iter = mAxis.find(0); + if (iter != mAxis.end()) { + return NOT_SUPPORT; + } + // reduct H axis + iter = mAxis.find(1); + if (iter != mAxis.end()) { + buildOptions.emplace("-DREDUCTION_H"); + inputArray = {batch, inputHeight, inputWidth, channelBlocks, channels}; + } + // reduct W axis + iter = mAxis.find(2); + if (iter != mAxis.end()) { + buildOptions.emplace("-DREDUCTION_W"); + inputArray = {batch, inputWidth, inputHeight, channelBlocks, channels}; + } + // reduct C axis + iter = mAxis.find(3); + if (iter != mAxis.end()) { + buildOptions.emplace("-DREDUCTION_C"); + inputArray = {batch, channelBlocks, inputWidth, inputHeight, channels}; + } + if (mReduct1DKernel.get() == nullptr) { + mReduct1DKernel = runtime->buildKernel("reduction", "reduct_1d", buildOptions); + } + prepareReduction1Dkernel(inputArray, inputs[0], outputs[0]); + return NO_ERROR; + } + + if (mAxis.size() == 2) { + auto iter = 
mAxis.find(0); + if (iter != mAxis.end()) { + return NOT_SUPPORT; + } + iter = mAxis.find(1); + if (iter == mAxis.end()) { + buildOptions.emplace("-DREDUCTION_WC"); + inputArray = {batch, channelBlocks, inputWidth, inputHeight, channels}; + } + iter = mAxis.find(2); + if (iter == mAxis.end()) { + buildOptions.emplace("-DREDUCTION_HC"); + inputArray = {batch, inputHeight, channelBlocks, inputWidth, channels}; + } + iter = mAxis.find(3); + if (iter == mAxis.end()) { + buildOptions.emplace("-DREDUCTION_HW"); + inputArray = {batch, inputHeight, inputWidth, channelBlocks, channels}; + } + if (mReduct2DKernel.get() == nullptr) { + mReduct2DKernel = runtime->buildKernel("reduction", "reduct_2d", buildOptions); + } + prepareReduction2Dkernel(inputArray, inputs[0], outputs[0]); + return NO_ERROR; + } + + if (mAxis.size() == 3) { + auto iter = mAxis.find(0); + if (iter != mAxis.end()) { + return NOT_SUPPORT; + } + buildOptions.emplace("-DREDUCTION_HC"); + buildOptions.emplace("-DREDUCE_W=1"); + inputArray = {batch, inputHeight, channelBlocks, inputWidth, channels}; + if (mReduct2DKernel.get() == nullptr) { + mReduct2DKernel = runtime->buildKernel("reduction", "reduct_2d", buildOptions); + } + prepareReduction2Dkernel(inputArray, inputs[0], outputs[0]); + return NO_ERROR; + } + return NOT_SUPPORT; +} + +ErrorCode ReductionExecution::onExecute(const std::vector &inputs, const std::vector &outputs) { +#ifdef LOG_VERBOSE + MNN_PRINT("start ReductionExecution onExecute !\n"); +#endif + if (mReductType == "4" && (mAxis.size() == inputs[0]->dimensions() || (mAxis.size() == 3 && mAxis.find(0) != mAxis.end()))) { + return CommonExecution::onExecute(inputs, outputs); + } + + if (mAxis.size() == 1) { + run3DKernelDefault(mReduct1DKernel, mGlobalWorkSize, mLocalWorkSize, mOpenCLBackend->getOpenCLRuntime()); + } else { + run3DKernelDefault(mReduct2DKernel, mGlobalWorkSize, mLocalWorkSize, mOpenCLBackend->getOpenCLRuntime()); + } +#ifdef LOG_VERBOSE + MNN_PRINT("end ReductionExecution 
onExecute !\n"); +#endif + return NO_ERROR; +} + +class ReductionCreator : public OpenCLBackend::Creator { +public: + virtual ~ReductionCreator() = default; + virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, + const MNN::Op *op, Backend *backend) const override { + if (inputs[0]->getDimensionType() == Tensor::TENSORFLOW) { + return new ReductionExecution(op, backend); + } + return NULL; + } +}; + +OpenCLCreatorRegister __reduction_op(OpType_Reduction); +} // namespace OpenCL +} // namespace MNN diff --git a/source/backend/opencl/execution/ReductionExecution.hpp b/source/backend/opencl/execution/ReductionExecution.hpp new file mode 100644 index 000000000..0d59b7253 --- /dev/null +++ b/source/backend/opencl/execution/ReductionExecution.hpp @@ -0,0 +1,46 @@ +// +// ReductionExecution.hpp +// MNN +// +// Created by MNN on 2019/10/25. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef ReductionExecution_hpp +#define ReductionExecution_hpp + +#include "core/Execution.hpp" +#include +#include +#include +#include +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/execution/CommonExecution.hpp" + +namespace MNN { +namespace OpenCL { + +class ReductionExecution : public CommonExecution { +public: + ReductionExecution(const MNN::Op* op, Backend* backend); + virtual ~ReductionExecution() = default; + + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; + ErrorCode prepareReduction2Dkernel(const std::vector &inputArray, const Tensor *input, const Tensor *output); + ErrorCode prepareReduction1Dkernel(const std::vector &inputArray, const Tensor *input, const Tensor *output); + ErrorCode generateReductionGWSLWS(const std::vector ¶mArray); +private: + cl::Kernel mReduct2DKernel; + cl::Kernel mReduct1DKernel; + OpenCLBackend *mOpenCLBackend; + MNN::DataType mdataType; + 
std::string mReductType; + std::unordered_set mAxis; + std::vector mGlobalWorkSize = {1, 1, 1}; + std::vector mLocalWorkSize{1, 1, 1}; +}; + +} // namespace OpenCL +} // namespace MNN +#endif /* ReductionExecution_hpp */ diff --git a/source/backend/opencl/execution/ReluExecution.cpp b/source/backend/opencl/execution/ReluExecution.cpp index a5c32122f..12a026810 100644 --- a/source/backend/opencl/execution/ReluExecution.cpp +++ b/source/backend/opencl/execution/ReluExecution.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ReluExecution.hpp" -#include "TensorUtils.hpp" -#include "UnaryExecution.hpp" +#include "backend/opencl/execution/ReluExecution.hpp" +#include "core/TensorUtils.hpp" +#include "backend/opencl/execution/UnaryExecution.hpp" namespace MNN { namespace OpenCL { @@ -69,11 +69,11 @@ class ReluCreator : public OpenCLBackend::Creator { public: virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const override { - // There seems to be a bug on OpenCL compiler of AMD Radeon HD 7000 series. - // When use build option -Dname=definition, definition will be truncated by - // a comma, which violate opencl specification (quote, 'In particular, the definition will + // There seems to be a bug on OpenCL compiler of AMD Radeon HD 7000 series. + // When use build option -Dname=definition, definition will be truncated by + // a comma, which violate opencl specification (quote, 'In particular, the definition will // be truncated by embedded newline characters'.) - // So we use ternary operation (A ? B: C) instead of function call with comma + // So we use ternary operation (A ? B: C) instead of function call with comma // (e.g, fmax(in,(float4)(0))), when there is a Radeon GPU. 
bool isRadeonGpu = (static_cast(backend)->getOpenCLRuntime()->getGpuType() == RADEON); diff --git a/source/backend/opencl/execution/ReluGradExecution.cpp b/source/backend/opencl/execution/ReluGradExecution.cpp new file mode 100644 index 000000000..ced69f457 --- /dev/null +++ b/source/backend/opencl/execution/ReluGradExecution.cpp @@ -0,0 +1,63 @@ +// +// ReluGradExecution.cpp +// MNN +// +// Created by MNN on 2019/10/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/opencl/execution/ReluGradExecution.hpp" +#include "core/TensorUtils.hpp" + +namespace MNN { +namespace OpenCL { + +ReluGradExecution::ReluGradExecution(const MNN::Op *op, Backend *backend) + : CommonExecution(backend) { + if (op->type() == OpType_ReluGrad) { + mKernelName = "relu_grad"; + } else if (op->type() == OpType_Relu6Grad) { + mKernelName = "relu6_grad"; + } else { + MNN_ERROR("unknown relu type\n"); + return; + } +} + +ReluGradExecution::~ReluGradExecution() { + // do nothing +} + +ErrorCode ReluGradExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + mUnits.clear(); + mUnits.resize(1); + + auto nhwc = tensorShapeFormat(outputs[0]); + uint32_t imageHeight = nhwc[0] * nhwc[1]; + uint32_t imageWidth = nhwc[2] * UP_DIV(nhwc[3], 4); + + auto runTime = ((OpenCLBackend *)backend())->getOpenCLRuntime(); + cl::Kernel kernel = runTime->buildKernel("binary_grad", mKernelName, {}); + kernel.setArg(0, openCLImage(inputs[0])); // original input + kernel.setArg(1, openCLImage(inputs[1])); // grad for output + kernel.setArg(2, openCLImage(outputs[0])); // grad for input + mUnits[0].kernel = kernel; + mUnits[0].localWorkSize = cl::NullRange; + mUnits[0].globalWorkSize = {imageWidth, imageHeight}; + + return NO_ERROR; +} + +class ReluGradCreator : public OpenCLBackend::Creator { +public: + virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, + const MNN::Op *op, Backend *backend) const override { + return new 
ReluGradExecution(op, backend); + } +}; + +OpenCLCreatorRegister __Relu_grad_op(OpType_ReluGrad); +OpenCLCreatorRegister __Relu6_grad_op(OpType_Relu6Grad); + +} +} diff --git a/source/backend/opencl/execution/ReluGradExecution.hpp b/source/backend/opencl/execution/ReluGradExecution.hpp new file mode 100644 index 000000000..4dd83576f --- /dev/null +++ b/source/backend/opencl/execution/ReluGradExecution.hpp @@ -0,0 +1,31 @@ +// +// ReluGradExecution.hpp +// MNN +// +// Created by MNN on 2019/10/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef ReluGradExecution_hpp +#define ReluGradExecution_hpp + +#include +#include "backend/opencl/execution/CommonExecution.hpp" +namespace MNN { +namespace OpenCL { + +class ReluGradExecution : public CommonExecution { +public: + ReluGradExecution(const MNN::Op *op, Backend *backend); + virtual ~ReluGradExecution(); + + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + +private: + std::string mKernelName; + +}; +} +} + +#endif /* ReluGradExecution_hpp */ diff --git a/source/backend/opencl/execution/ReshapeExecution.cpp b/source/backend/opencl/execution/ReshapeExecution.cpp index 5265fea4d..1fe647658 100644 --- a/source/backend/opencl/execution/ReshapeExecution.cpp +++ b/source/backend/opencl/execution/ReshapeExecution.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ReshapeExecution.hpp" -#include -#include "TensorUtils.hpp" +#include "backend/opencl/execution/ReshapeExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { @@ -30,19 +30,36 @@ ErrorCode ReshapeExecution::onResize(const std::vector &inputs, const auto output = outputs[0]; #ifdef LOG_VERBOSE MNN_PRINT("mDimType = %d , %d\n", mDimType, TensorUtils::getDescribe(input)->dimensionFormat); - MNN_PRINT("%d, %d, %d -> %d, %d, %d\n", input->width(), input->height(), input->channel(), output->width(), + MNN_PRINT("%d, %d, %d, %d 
-> %d, %d, %d, %d\n", input->batch(), input->width(), input->height(), input->channel(), output->batch(), output->width(), output->height(), output->channel()); #endif auto runtime = mOpenCLBackend->getOpenCLRuntime(); std::string mImageToBufferKernelname; std::string mBufferToImageKernelname; + { + auto inputFormat = TensorUtils::getDescribe(input)->dimensionFormat; + std::map formatMap = { + {MNN_DATA_FORMAT_NCHW, "image_to_nchw_buffer"}, + {MNN_DATA_FORMAT_NHWC, "image_to_nhwc_buffer"}, + }; + if(inputFormat == MNN_DATA_FORMAT_NC4HW4){ + mImageToBufferKernelname = formatMap[mDimType]; + }else{ + mImageToBufferKernelname = formatMap[inputFormat]; + } + } - if (mDimType == MNN_DATA_FORMAT_NCHW) { - mImageToBufferKernelname = "image_to_nchw_buffer"; - mBufferToImageKernelname = "nchw_buffer_to_image"; - } else { - mImageToBufferKernelname = "image_to_nhwc_buffer"; - mBufferToImageKernelname = "nhwc_buffer_to_image"; + { + auto outputFormat = TensorUtils::getDescribe(output)->dimensionFormat; + std::map formatMap = { + {MNN_DATA_FORMAT_NCHW, "nchw_buffer_to_image"}, + {MNN_DATA_FORMAT_NHWC, "nhwc_buffer_to_image"}, + }; + if(outputFormat == MNN_DATA_FORMAT_NC4HW4){ + mBufferToImageKernelname = formatMap[mDimType]; + }else{ + mBufferToImageKernelname = formatMap[outputFormat]; + } } if (mImageToBufferKernel.get() == nullptr) { @@ -126,6 +143,7 @@ ErrorCode ReshapeExecution::onExecute(const std::vector &inputs, const return NO_ERROR; } + class ReshapeCreator : public OpenCLBackend::Creator { public: virtual ~ReshapeCreator() = default; diff --git a/source/backend/opencl/execution/ReshapeExecution.hpp b/source/backend/opencl/execution/ReshapeExecution.hpp index 2d993e6a8..382ed4d1e 100644 --- a/source/backend/opencl/execution/ReshapeExecution.hpp +++ b/source/backend/opencl/execution/ReshapeExecution.hpp @@ -9,11 +9,11 @@ #ifndef ReshapeExecution_hpp #define ReshapeExecution_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include #include -#include 
"core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/ResizeExecution.cpp b/source/backend/opencl/execution/ResizeExecution.cpp index 4536b36b6..57665d9b3 100644 --- a/source/backend/opencl/execution/ResizeExecution.cpp +++ b/source/backend/opencl/execution/ResizeExecution.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/ResizeExecution.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/execution/ResizeExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/ResizeExecution.hpp b/source/backend/opencl/execution/ResizeExecution.hpp index 2c492f3fb..82603c3cb 100644 --- a/source/backend/opencl/execution/ResizeExecution.hpp +++ b/source/backend/opencl/execution/ResizeExecution.hpp @@ -12,8 +12,8 @@ #include #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/RoiPoolingExecution.cpp b/source/backend/opencl/execution/RoiPoolingExecution.cpp index a8065b7b3..8e6d4b3f2 100644 --- a/source/backend/opencl/execution/RoiPoolingExecution.cpp +++ b/source/backend/opencl/execution/RoiPoolingExecution.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "RoiPoolingExecution.hpp" -#include +#include "backend/opencl/execution/RoiPoolingExecution.hpp" +#include "core/Macro.h" #include -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/RoiPoolingExecution.hpp b/source/backend/opencl/execution/RoiPoolingExecution.hpp 
index 7037abffd..b4649127e 100644 --- a/source/backend/opencl/execution/RoiPoolingExecution.hpp +++ b/source/backend/opencl/execution/RoiPoolingExecution.hpp @@ -11,8 +11,8 @@ #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/ScaleExecution.cpp b/source/backend/opencl/execution/ScaleExecution.cpp index 2b6c6c9bd..7a8d23bd1 100644 --- a/source/backend/opencl/execution/ScaleExecution.cpp +++ b/source/backend/opencl/execution/ScaleExecution.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/ScaleExecution.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/execution/ScaleExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/ScaleExecution.hpp b/source/backend/opencl/execution/ScaleExecution.hpp index bb8a38901..75327c2c4 100644 --- a/source/backend/opencl/execution/ScaleExecution.hpp +++ b/source/backend/opencl/execution/ScaleExecution.hpp @@ -12,8 +12,8 @@ #include #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/SliceExecution.cpp b/source/backend/opencl/execution/SliceExecution.cpp index 29d12fcea..7d1d3c59c 100644 --- a/source/backend/opencl/execution/SliceExecution.cpp +++ b/source/backend/opencl/execution/SliceExecution.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SliceExecution.hpp" -#include -#include "TensorUtils.hpp" +#include "backend/opencl/execution/SliceExecution.hpp" +#include 
"core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/SoftmaxExecution.cpp b/source/backend/opencl/execution/SoftmaxExecution.cpp index dd36b3e7a..6d39aea19 100644 --- a/source/backend/opencl/execution/SoftmaxExecution.cpp +++ b/source/backend/opencl/execution/SoftmaxExecution.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/SoftmaxExecution.hpp" -#include +#include "backend/opencl/execution/SoftmaxExecution.hpp" +#include "core/Macro.h" namespace MNN { namespace OpenCL { @@ -101,15 +101,15 @@ bool SoftmaxExecution::buildSoftmaxKernel() { ErrorCode SoftmaxExecution::onResize(const std::vector &inputs, const std::vector &outputs) { Tensor *input = inputs[0]; Tensor *output = outputs[0]; - + std::vector inputShape = tensorShapeFormat(input); std::vector outputShape = tensorShapeFormat(output); - + const int outputBatch = outputShape.at(0); const int outputHeight = outputShape.at(1); const int outputWidth = outputShape.at(2); const int outputChannels = outputShape.at(3); - + const int channelBlocks = UP_DIV(outputChannels, 4); const int remainChannels = channelBlocks * 4 - outputChannels; if (1 == mAxis) { @@ -119,7 +119,7 @@ ErrorCode SoftmaxExecution::onResize(const std::vector &inputs, const mKernel.setArg(idx++, mGlobalWorkSize[0]); mKernel.setArg(idx++, mGlobalWorkSize[1]); mKernel.setArg(idx++, mGlobalWorkSize[2]); - + mKernel.setArg(idx++, openCLImage(input)); mKernel.setArg(idx++, openCLImage(output)); mKernel.setArg(idx++, static_cast(outputChannels)); @@ -139,7 +139,7 @@ ErrorCode SoftmaxExecution::onResize(const std::vector &inputs, const mKernel.setArg(1, openCLImage(output)); mKernel.setArg(2, shape); } - + return NO_ERROR; } @@ -164,15 +164,31 @@ class SoftmaxCreator : public OpenCLBackend::Creator { MNN_PRINT("softmax not support dimensions == 3 \n"); return nullptr; } - auto axis = op->main_as_Axis()->axis(); - if (-1 == axis) { - 
axis = inputs[0]->dimensions() - 1; - } - - if (1 == axis || 2 == axis) { - return new SoftmaxExecution(inputs, axis, backend); + auto dimType = inputs[0]->getDimensionType(); + if (dimType == Tensor::TENSORFLOW && inputs[0]->dimensions() == 4) { + int index[4] = {0, 2, 3, 1}; + auto axis = op->main_as_Axis()->axis(); + if (axis < 0) { + axis = inputs[0]->dimensions() + axis; + } + + axis = index[axis]; + //1 : channel //2 : height + if (1 == axis || 2 == axis) { + return new SoftmaxExecution(inputs, axis, backend); + } + return nullptr; + } else { + auto axis = op->main_as_Axis()->axis(); + if (axis < 0) { + axis = inputs[0]->dimensions() + axis; + } + + if (1 == axis || 2 == axis) { + return new SoftmaxExecution(inputs, axis, backend); + } + return nullptr; } - return nullptr; } }; OpenCLCreatorRegister __Softmax_op(OpType_Softmax); diff --git a/source/backend/opencl/execution/SoftmaxExecution.hpp b/source/backend/opencl/execution/SoftmaxExecution.hpp index 1d4c92432..cfb7b4394 100644 --- a/source/backend/opencl/execution/SoftmaxExecution.hpp +++ b/source/backend/opencl/execution/SoftmaxExecution.hpp @@ -10,8 +10,8 @@ #define SoftmaxExecution_hpp #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/SoftmaxGradExecution.cpp b/source/backend/opencl/execution/SoftmaxGradExecution.cpp new file mode 100644 index 000000000..f897a5a00 --- /dev/null +++ b/source/backend/opencl/execution/SoftmaxGradExecution.cpp @@ -0,0 +1,130 @@ +// +// SoftmaxGradExecution.cpp +// MNN +// +// Created by MNN on 2019/10/16. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "backend/opencl/execution/SoftmaxGradExecution.hpp" +#include "core/TensorUtils.hpp" + +namespace MNN { +namespace OpenCL { + +// current opencl image2d_t underly layout, support N-D data, (N,[spatial_shape depth (if have), height], C,W) +static std::vector openclTensorShape(const Tensor* input) { + int dim = input->dimensions(); + MNN_ASSERT(dim > 0); + if (dim == 1) { + return std::vector({input->length(0), 1, 1, 1}); + } + std::vector res; // NCHW or NHWC + for (int i = 0; i < dim; ++i) { + res.push_back(input->length(i)); + } + auto layout = TensorUtils::getDescribe(input)->dimensionFormat; + // convert NCHW to NHWC + if (layout == MNN_DATA_FORMAT_NCHW || layout == MNN_DATA_FORMAT_NC4HW4) { + int channel = res[1]; + for (int i = 2; i < dim; ++i) { + res[i - 1] = res[i]; + } + res[dim - 1] = channel; + } + if (dim <= 3) { + res.insert(res.begin() + 1, 1); + } + if (dim <= 2) { + res.insert(res.begin() + 1, 1); + } + dim = res.size(); + std::swap(res[dim - 1], res[dim - 2]); // swap (W,C) to (C,W) + return res; +} + +SoftmaxGradExecution::SoftmaxGradExecution(Backend *backend, int axis) + : CommonExecution(backend), mAxis(axis) { + // do nothing +} + +SoftmaxGradExecution::~SoftmaxGradExecution() { + // do nothing +} + +ErrorCode SoftmaxGradExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + mUnits.clear(); + mUnits.resize(1); + + auto output = outputs[0]; + auto shape = openclTensorShape(output); + int axis = mAxis; + if (mAxis != 0) { + const int dim = output->dimensions(); + auto layout = TensorUtils::getDescribe(output)->dimensionFormat; + // convert axis from NCHW to NHWC + if (layout == MNN_DATA_FORMAT_NCHW || layout == MNN_DATA_FORMAT_NC4HW4) { + if (axis == 1) { + axis = dim - 1; + } else if (axis > 1) { + axis = axis - 1; + } + } + // convert axis from NHWC to NHCW (current opencl image2d_t underly layout, N..H,CW) + if (dim > 2) { + if (axis == dim - 1) { + 
axis = axis - 1; + } else if (axis == dim - 2) { + axis = axis + 1; + } + } else { + axis = 2; + } + } + const int channelAxis = shape.size() - 2; + int number = shape[axis], step = 1, remain = 1; + int axisOnC4 = (axis == channelAxis ? 1 : 0); // softmax axis is channel dim (NH,CW) + for (int i = 0; i < shape.size(); ++i) { + int temp = shape[i]; + if (i == channelAxis) { // align up channel dim (NH,CW) + temp = UP_DIV(temp, 4); + } + if (i > axis) { + step *= temp; + } else if (i < axis) { + remain *= temp; + } + } + auto runTime = ((OpenCLBackend *)backend())->getOpenCLRuntime(); + cl::Kernel kernel = runTime->buildKernel("softmax_grad", "softmax_grad", {}); + kernel.setArg(0, openCLImage(inputs[0])); // original input + kernel.setArg(1, openCLImage(inputs[1])); // grad for output + kernel.setArg(2, openCLImage(outputs[0])); // grad for input + kernel.setArg(3, step); + kernel.setArg(4, number); + kernel.setArg(5, axisOnC4); + mUnits[0].kernel = kernel; + mUnits[0].localWorkSize = cl::NullRange; + mUnits[0].globalWorkSize = { + static_cast(remain), + static_cast(step) + }; + + return NO_ERROR; +} + +class SoftmaxGradCreator : public OpenCLBackend::Creator { +public: + virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, + const MNN::Op *op, Backend *backend) const override { + int axis = op->main_as_Axis()->axis(); + if (axis < 0) { + axis = inputs[0]->dimensions() + axis; + } + return new SoftmaxGradExecution(backend, axis); + } +}; + +OpenCLCreatorRegister __Softmax_grad_op(OpType_SoftmaxGrad); +} +} diff --git a/source/backend/opencl/execution/SoftmaxGradExecution.hpp b/source/backend/opencl/execution/SoftmaxGradExecution.hpp new file mode 100644 index 000000000..50e638154 --- /dev/null +++ b/source/backend/opencl/execution/SoftmaxGradExecution.hpp @@ -0,0 +1,29 @@ +// +// SoftmaxGradExecution.hpp +// MNN +// +// Created by MNN on 2019/10/16. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef SoftmaxGradExecution_hpp +#define SoftmaxGradExecution_hpp + +#include "backend/opencl/execution/CommonExecution.hpp" +namespace MNN { +namespace OpenCL { + +class SoftmaxGradExecution : public CommonExecution { +public: + SoftmaxGradExecution(Backend *backend, int axis); + virtual ~SoftmaxGradExecution(); + + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + +private: + int mAxis; +}; +} +} + +#endif /* SoftmaxGradExecution_hpp */ diff --git a/source/backend/opencl/execution/SpaceToBatchExecution.cpp b/source/backend/opencl/execution/SpaceToBatchExecution.cpp index 15f6313ef..1816a4a50 100644 --- a/source/backend/opencl/execution/SpaceToBatchExecution.cpp +++ b/source/backend/opencl/execution/SpaceToBatchExecution.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/SpaceToBatchExecution.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opencl/execution/SpaceToBatchExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { @@ -20,10 +20,10 @@ SpaceToBatchExecution::SpaceToBatchExecution(const std::vector &inputs #endif mOpenCLBackend = static_cast(backend); auto param = op->main_as_SpaceBatch(); - mPaddings[1] = param->padding()->int32s()->data()[0]; - mPaddings[0] = param->padding()->int32s()->data()[1]; - mBlockShape[1] = param->blockShape()->int32s()->data()[0]; - mBlockShape[0] = param->blockShape()->int32s()->data()[1]; + mPaddings[0] = param->padding()->int32s()->data()[0]; + mPaddings[1] = param->padding()->int32s()->data()[2]; + mBlockShape[0] = param->blockShape()->int32s()->data()[0]; + mBlockShape[1] = param->blockShape()->int32s()->data()[1]; std::set buildOptions; std::string kernelName = "space_to_batch"; mKernel = mOpenCLBackend->getOpenCLRuntime()->buildKernel("space_to_batch", kernelName, buildOptions); @@ -36,7 +36,20 @@ 
ErrorCode SpaceToBatchExecution::onResize(const std::vector &inputs, c #ifdef LOG_VERBOSE MNN_PRINT("Start SpaceToBatchExecution onResize !\n"); #endif - + auto input = inputs[0]; + auto output = outputs[0]; + int inputSize[4] = {input->width(), input->height(), UP_DIV(input->channel(), 4), input->batch()}; + int outputSize[4] = {output->width(), output->height(), UP_DIV(output->channel(), 4), output->batch()}; + uint32_t idx = 0; + mKernel.setArg(idx++, outputSize[2]); + mKernel.setArg(idx++, outputSize[0]); + mKernel.setArg(idx++, outputSize[1]*outputSize[3]); + mKernel.setArg(idx++, openCLImage(input)); + mKernel.setArg(idx++, openCLImage(output)); + mKernel.setArg(idx++, sizeof(inputSize), inputSize); + mKernel.setArg(idx++, sizeof(outputSize), outputSize); + mKernel.setArg(idx++, sizeof(mPaddings), mPaddings); + mKernel.setArg(idx++, sizeof(mBlockShape), mBlockShape); #ifdef LOG_VERBOSE MNN_PRINT("end SpaceToBatchExecution onResize !\n"); #endif @@ -53,18 +66,12 @@ ErrorCode SpaceToBatchExecution::onExecute(const std::vector &inputs, int inputSize[4] = {input->width(), input->height(), UP_DIV(input->channel(), 4), input->batch()}; int outputSize[4] = {output->width(), output->height(), UP_DIV(output->channel(), 4), output->batch()}; - mKernel.setArg(0, openCLImage(input)); - mKernel.setArg(1, openCLImage(output)); - mKernel.setArg(2, sizeof(inputSize), inputSize); - mKernel.setArg(3, sizeof(outputSize), outputSize); - mKernel.setArg(4, sizeof(mPaddings), mPaddings); - mKernel.setArg(5, sizeof(mBlockShape), mBlockShape); - auto runtime = mOpenCLBackend->getOpenCLRuntime(); + runtime->commandQueue().enqueueNDRangeKernel( mKernel, cl::NullRange, - cl::NDRange(UP_DIV(outputSize[0], 16) * 16, UP_DIV(outputSize[1], 16) * 16, outputSize[2] * outputSize[3]), + cl::NDRange(UP_DIV(outputSize[2], 16) * 16, UP_DIV(outputSize[0], 16) * 16, outputSize[1] * outputSize[3]), cl::NDRange(16, 16, 1)); #ifdef LOG_VERBOSE diff --git 
a/source/backend/opencl/execution/SpaceToBatchExecution.hpp b/source/backend/opencl/execution/SpaceToBatchExecution.hpp index 9f3a1fa0b..87347479f 100644 --- a/source/backend/opencl/execution/SpaceToBatchExecution.hpp +++ b/source/backend/opencl/execution/SpaceToBatchExecution.hpp @@ -12,8 +12,8 @@ #include #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/SpatialProductExecution.cpp b/source/backend/opencl/execution/SpatialProductExecution.cpp index 9e18165e9..9857a0093 100644 --- a/source/backend/opencl/execution/SpatialProductExecution.cpp +++ b/source/backend/opencl/execution/SpatialProductExecution.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SpatialProductExecution.hpp" -#include -#include "TensorUtils.hpp" +#include "backend/opencl/execution/SpatialProductExecution.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/SpatialProductExecution.hpp b/source/backend/opencl/execution/SpatialProductExecution.hpp index a3e4fcbc6..c5a5430d6 100644 --- a/source/backend/opencl/execution/SpatialProductExecution.hpp +++ b/source/backend/opencl/execution/SpatialProductExecution.hpp @@ -11,8 +11,8 @@ #include #include -#include "Execution.hpp" -#include "core/OpenCLBackend.hpp" +#include "core/Execution.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { diff --git a/source/backend/opencl/execution/TrainableParamExecution.cpp b/source/backend/opencl/execution/TrainableParamExecution.cpp new file mode 100644 index 000000000..9e7ffa455 --- /dev/null +++ b/source/backend/opencl/execution/TrainableParamExecution.cpp @@ -0,0 +1,71 @@ +// +// TrainableParamExecution.cpp +// MNN +// +// Created by MNN on 2019/10/24. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "backend/opencl/execution/TrainableParamExecution.hpp" +#include "core/TensorUtils.hpp" + +namespace MNN { +namespace OpenCL { + +TrainableParamExecution::TrainableParamExecution(const std::vector &inputs, const MNN::Op *op, Backend *backend) : CommonExecution(backend), mOp(op), mInitialized(false) { + // do nothing +} + +TrainableParamExecution::~TrainableParamExecution() { + // do nothing +} + +ErrorCode TrainableParamExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + MNN_ASSERT(1 == outputs.size()); + if (mInitialized) { + return NO_ERROR; + } + mInitialized = true; + + auto output = outputs[0]; + const int blobSize = output->elementSize(); + const float* blobData = mOp->main_as_Blob()->float32s()->data(); + + auto openclBackend = static_cast(backend()); + auto runtime = openclBackend->getOpenCLRuntime(); + cl::Buffer buffer(runtime->context(), CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, blobSize * sizeof(float)); + cl_int error; + auto bufferPtr = runtime->commandQueue().enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, blobSize * sizeof(float), nullptr, nullptr, &error); + if (bufferPtr != nullptr && error == CL_SUCCESS) { + ::memcpy(bufferPtr, blobData, blobSize * sizeof(float)); + } else { + MNN_ERROR("Map error bufferPtr == nullptr \n"); + return OUT_OF_MEMORY; + } + runtime->commandQueue().enqueueUnmapMemObject(buffer, bufferPtr); + + auto format = TensorUtils::getDescribe(output)->dimensionFormat; + if (format != MNN_DATA_FORMAT_NCHW && format != MNN_DATA_FORMAT_NHWC) { + MNN_ERROR("Variable's blob dataFormat should be MNN_DATA_FORMAT_NCHW or MNN_DATA_FORMAT_NHWC\n"); + return NOT_SUPPORT; + } + std::shared_ptr bufferTensor; + MNN::OpenCL::ImageBufferConvertor convertor(runtime); + if (format == MNN_DATA_FORMAT_NCHW) { + bufferTensor.reset(new Tensor(output, Tensor::CAFFE, false)); + bufferTensor->buffer().device = (uint64_t)(&buffer); + 
convertor.convertBufferToImage(bufferTensor.get(), MNN::OpenCL::NCHW_BUFFER, output, true); + } else { + bufferTensor.reset(new Tensor(output, Tensor::TENSORFLOW, false)); + bufferTensor->buffer().device = (uint64_t)(&buffer); + convertor.convertBufferToImage(bufferTensor.get(), MNN::OpenCL::NHWC_BUFFER, output, true); + } + + return NO_ERROR; +} + +OpenCLCreatorRegister> __TrainParam_op(OpType_TrainableParam); + +} +} diff --git a/source/backend/opencl/execution/TrainableParamExecution.hpp b/source/backend/opencl/execution/TrainableParamExecution.hpp new file mode 100644 index 000000000..c46988456 --- /dev/null +++ b/source/backend/opencl/execution/TrainableParamExecution.hpp @@ -0,0 +1,30 @@ +// +// TrainableParamExecution.hpp +// MNN +// +// Created by MNN on 2019/10/24. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef TrainableParamExecution_hpp +#define TrainableParamExecution_hpp + +#include "backend/opencl/execution/CommonExecution.hpp" +namespace MNN { +namespace OpenCL { + +class TrainableParamExecution : public CommonExecution { +public: + TrainableParamExecution(const std::vector &inputs, const MNN::Op *op, Backend *backend); + virtual ~TrainableParamExecution(); + + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; + +private: + const MNN::Op *mOp; + bool mInitialized; +}; + +} // namespace OpenCL +} // namespace MNN +#endif /* TrainableParamExecution_hpp */ diff --git a/source/backend/opencl/execution/UnaryExecution.cpp b/source/backend/opencl/execution/UnaryExecution.cpp index 23c334368..949a5c56c 100644 --- a/source/backend/opencl/execution/UnaryExecution.cpp +++ b/source/backend/opencl/execution/UnaryExecution.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "execution/UnaryExecution.hpp" -#include -#include "TensorUtils.hpp" -#include "core/OpenCLBackend.hpp" +#include "backend/opencl/execution/UnaryExecution.hpp" +#include "core/Macro.h" +#include 
"core/TensorUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" namespace MNN { namespace OpenCL { @@ -22,14 +22,8 @@ UnaryExecution::UnaryExecution(const std::string& compute, Backend* backend) : E auto runtime = openCLBackend->getOpenCLRuntime(); mKernel = runtime->buildKernel("unary", "unary", buildOptions); mMaxWorkGroupSize = static_cast(runtime->getMaxWorkGroupSize(mKernel)); - - mAreadySetArg = false; } - -ErrorCode UnaryExecution::onExecute(const std::vector& inputs, const std::vector& outputs) { -#ifdef LOG_VERBOSE - MNN_PRINT("start UnaryExecution onExecute..."); -#endif +ErrorCode UnaryExecution::onResize(const std::vector& inputs, const std::vector& outputs) { Tensor* input = inputs[0]; Tensor* output = outputs[0]; auto openCLBackend = static_cast(backend()); @@ -37,39 +31,45 @@ ErrorCode UnaryExecution::onExecute(const std::vector& inputs, const st std::vector inputShape = tensorShapeFormat(input); std::vector outputShape = tensorShapeFormat(output); - if (!mAreadySetArg) { - int batch = outputShape.at(0); - int outputHeight = outputShape.at(1); - int outputWidth = outputShape.at(2); - int channels = outputShape.at(3); - - int channelBlocks = (channels + 3) / 4; + int batch = outputShape.at(0); + int outputHeight = outputShape.at(1); + int outputWidth = outputShape.at(2); + int channels = outputShape.at(3); - mGlobalWorkSize = { - static_cast(channelBlocks), - static_cast(outputWidth), - static_cast(batch * outputHeight), - }; + int channelBlocks = (channels + 3) / 4; - uint32_t idx = 0; - mKernel.setArg(idx++, mGlobalWorkSize[0]); - mKernel.setArg(idx++, mGlobalWorkSize[1]); - mKernel.setArg(idx++, mGlobalWorkSize[2]); - mKernel.setArg(idx++, openCLImage(input)); - mKernel.setArg(idx++, openCLImage(output)); + mGlobalWorkSize = { + static_cast(channelBlocks), + static_cast(outputWidth), + static_cast(batch * outputHeight), + }; - mAreadySetArg = true; - } + uint32_t idx = 0; + mKernel.setArg(idx++, mGlobalWorkSize[0]); + mKernel.setArg(idx++, 
mGlobalWorkSize[1]); + mKernel.setArg(idx++, mGlobalWorkSize[2]); + mKernel.setArg(idx++, openCLImage(input)); + mKernel.setArg(idx++, openCLImage(output)); const std::vector lws = localWS3DDefault(mGlobalWorkSize, mMaxWorkGroupSize, openCLBackend->getOpenCLRuntime()); - run3DKernelDefault(mKernel, mGlobalWorkSize, lws, openCLBackend->getOpenCLRuntime()); + mLocalSize = lws; + return NO_ERROR; +} + +ErrorCode UnaryExecution::onExecute(const std::vector& inputs, const std::vector& outputs) { +#ifdef LOG_VERBOSE + MNN_PRINT("start UnaryExecution onExecute..."); +#endif + auto openCLBackend = static_cast(backend()); + run3DKernelDefault(mKernel, mGlobalWorkSize, mLocalSize, openCLBackend->getOpenCLRuntime()); #ifdef LOG_VERBOSE MNN_PRINT("end UnaryExecution onExecute..."); #endif return NO_ERROR; } + class UnaryCreator : public OpenCLBackend::Creator { public: virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, diff --git a/source/backend/opencl/execution/UnaryExecution.hpp b/source/backend/opencl/execution/UnaryExecution.hpp index 8583e8700..33d5fcd83 100644 --- a/source/backend/opencl/execution/UnaryExecution.hpp +++ b/source/backend/opencl/execution/UnaryExecution.hpp @@ -9,12 +9,12 @@ #ifndef UnaryExecution_hpp #define UnaryExecution_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include #include "MNN_generated.h" -#include "core/OpenCLBackend.hpp" -#include "core/OpenCLRunningUtils.hpp" +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" namespace MNN { namespace OpenCL { @@ -24,14 +24,14 @@ class UnaryExecution : public Execution { UnaryExecution(const std::string &compute, Backend *backend); virtual ~UnaryExecution() = default; + virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; private: - const MNN::Op *mOp; cl::Kernel mKernel; 
uint32_t mMaxWorkGroupSize; - bool mAreadySetArg; std::vector mGlobalWorkSize = {1, 1, 1}; + std::vector mLocalSize = {1, 1, 1}; }; } // namespace OpenCL diff --git a/source/backend/opencl/execution/ZerosLikeExecution.cpp b/source/backend/opencl/execution/ZerosLikeExecution.cpp new file mode 100644 index 000000000..58bb78ecc --- /dev/null +++ b/source/backend/opencl/execution/ZerosLikeExecution.cpp @@ -0,0 +1,40 @@ +// +// ZerosLikeExecution.cpp +// MNN +// +// Created by MNN on 2019/02/28. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "backend/opencl/execution/ZerosLikeExecution.hpp" +#include "MNN_generated.h" +namespace MNN { +namespace OpenCL { + +ZerosLikeExecution::ZerosLikeExecution(const std::vector &inputs, const MNN::Op *op, Backend *backend) : Execution(backend) { + // do nothing +} + +ErrorCode ZerosLikeExecution::onExecute(const std::vector &inputs, const std::vector &outputs) { + auto& output = openCLImage(outputs[0]); + const size_t imageWidth = output.getImageInfo(); + const size_t imageHeight = output.getImageInfo(); + std::array origin = {0, 0, 0}, region = {imageWidth, imageHeight, 1}; + size_t row_pitch; + cl_int error; + auto commandQueue = ((OpenCLBackend*)backend())->getOpenCLRuntime()->commandQueue(); + auto dataMapped = commandQueue.enqueueMapImage(output, true, CL_MAP_WRITE, origin, region, &row_pitch, nullptr, nullptr, nullptr, &error); + if (dataMapped == nullptr || error != CL_SUCCESS) { + MNN_ERROR("ZerosLike data map failed\n"); + return OUT_OF_MEMORY; + } + ::memset(dataMapped, 0, imageHeight * row_pitch); + commandQueue.enqueueUnmapMemObject(output, dataMapped); + return NO_ERROR; +} + +OpenCLCreatorRegister> __ZerosLikeExecution(OpType_ZerosLike); + +} // namespace OpenCL +} // namespace MNN diff --git a/source/backend/opencl/execution/ZerosLikeExecution.hpp b/source/backend/opencl/execution/ZerosLikeExecution.hpp new file mode 100644 index 000000000..f145c8380 --- /dev/null +++ 
b/source/backend/opencl/execution/ZerosLikeExecution.hpp @@ -0,0 +1,30 @@ +// +// ZerosLikeExecution.hpp +// MNN +// +// Created by MNN on 2019/01/31. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef ZerosLikeExecution_hpp +#define ZerosLikeExecution_hpp + +#include "core/Execution.hpp" +#include +#include "backend/opencl/core/OpenCLBackend.hpp" +#include "backend/opencl/core/OpenCLRunningUtils.hpp" + +namespace MNN { +namespace OpenCL { + +class ZerosLikeExecution : public Execution { +public: + ZerosLikeExecution(const std::vector &inputs, const MNN::Op *op, Backend *backend); + virtual ~ZerosLikeExecution() = default; + + virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; +}; + +} // namespace OpenCL +} // namespace MNN +#endif /* ZerosLikeExecution_hpp */ diff --git a/source/backend/opencl/execution/cl/batch_to_space.cl b/source/backend/opencl/execution/cl/batch_to_space.cl index 7ec19b582..a6903be53 100644 --- a/source/backend/opencl/execution/cl/batch_to_space.cl +++ b/source/backend/opencl/execution/cl/batch_to_space.cl @@ -2,33 +2,39 @@ #pragma OPENCL EXTENSION cl_khr_fp16 : enable #endif __constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; -// Revert space_to_batch -__kernel void batch_to_space(__write_only image2d_t uInput, __read_only image2d_t uOutput, + +#define DEAL_NON_UNIFORM_DIM3(input1, input2, input3) \ + if (input1 >= global_size_dim0 || input2 >= global_size_dim1 || input3 >= global_size_dim2) { \ + return; \ + } + +#define GLOBAL_SIZE_3_DIMS \ + __private const int global_size_dim0, __private const int global_size_dim1, __private const int global_size_dim2, + +__kernel void batch_to_space(GLOBAL_SIZE_3_DIMS __read_only image2d_t uInput, __write_only image2d_t uOutput, __private const int4 inImageSize, __private const int4 outImgSize, __private const int2 padding, __private const int2 blockShape) { - int3 pos = (int3)(get_global_id(0), 
get_global_id(1), get_global_id(2)); - if (pos.x < outImgSize.x && pos.y < outImgSize.y) { - // pos.x -> w, pos.y -> h, pos.z -> c4 * b; - int outBatchIndex = pos.z / outImgSize.z; - int outChannelIndex = pos.z % outImgSize.z; - int inBatchIndex = outBatchIndex % inImageSize.w; - int sw = (outBatchIndex / inImageSize.w) % blockShape.y; - int sh = (outBatchIndex / inImageSize.w) / blockShape.y; - int validHeightStart = max(0, ((padding.x - sh + blockShape.x - 1) / blockShape.x)); - int validHeightEnd = min(outImgSize.y, ((inImageSize.y + padding.x - sh + blockShape.x - 1) / blockShape.x)); - int validWidthStart = max(0, ((padding.y - sw + blockShape.y - 1) / blockShape.y)); - int validWidthEnd = min(outImgSize.x, ((inImageSize.x + padding.y - sw + blockShape.y - 1) / blockShape.y)); - - int inPosX = pos.x * blockShape.y + sw - padding.y; - int inPosY = pos.y * blockShape.x + sh - padding.x; - int inPosZ = inBatchIndex * inImageSize.z + outChannelIndex; - - int inputX = select(inPosX + inPosZ * inImageSize.x, -1, pos.x < validWidthStart || pos.x >= validWidthEnd); - int inputY = - select(inPosY + inBatchIndex * inImageSize.y, -1, pos.y < validHeightStart || pos.y >= validHeightEnd); - - FLOAT4 res = RI_F( - uOutput, SAMPLER, (int2)(pos.x + outChannelIndex * outImgSize.x, pos.y + outBatchIndex * outImgSize.y)); - WI_F(uInput, (int2)(inputX, inputY), res); + + const int in_c_idx = get_global_id(0); + const int in_w_idx = get_global_id(1); + const int in_hb_idx = get_global_id(2); + + DEAL_NON_UNIFORM_DIM3(in_c_idx, in_w_idx, in_hb_idx); + + const int in_b_idx = in_hb_idx / inImageSize.s1; + const int in_h_idx = in_hb_idx - mul24(in_b_idx, inImageSize.s1); + + const int r_b_idx = in_b_idx / outImgSize.s3; + const int out_b_idx = in_b_idx - mul24(r_b_idx, outImgSize.s3); + + const int n_h = r_b_idx / blockShape.s1; + const int mod_h = r_b_idx - mul24(n_h, blockShape.s1); + + const int out_h_idx = mad24(in_h_idx, blockShape.s0, n_h - padding.s0); + const int out_w_idx = 
mad24(in_w_idx, blockShape.s1, mod_h - padding.s1); + + if (0 <= out_w_idx && out_w_idx < outImgSize.s0 && 0 <= out_h_idx && out_h_idx < outImgSize.s1) { + FLOAT4 value = RI_F(uInput, SAMPLER, (int2)(mad24(in_c_idx, inImageSize.s0, in_w_idx), in_hb_idx)); + WI_F(uOutput, (int2)(mad24(in_c_idx, outImgSize.s0, out_w_idx), mad24(out_b_idx, outImgSize.s1, out_h_idx)), value); } } diff --git a/source/backend/opencl/execution/cl/binary.cl b/source/backend/opencl/execution/cl/binary.cl index cc55dfccf..610aa14dc 100644 --- a/source/backend/opencl/execution/cl/binary.cl +++ b/source/backend/opencl/execution/cl/binary.cl @@ -28,7 +28,7 @@ __kernel void binary_same_channel_broadcast(__read_only image2d_t input0, __read pos1 = (whInput1.x != 1) ? (int2)(nhwc.w*whOutput.x+nhwc.z, nhwc.x*whOutput.y+nhwc.y) : (int2)(nhwc.w*whInput1.x, nhwc.x*whOutput.y+nhwc.y); - } + } in1 = RI_F(input1, SAMPLER, pos1); WI_F(output, pos, OPERATOR); } @@ -108,15 +108,25 @@ __kernel void binary(__read_only image2d_t input0, __read_only image2d_t input1, } } -__kernel void binary_value(__read_only image2d_t input0, float input1, __write_only image2d_t output, +__kernel void binary_value(__read_only image2d_t input0, __read_only image2d_t input1, __write_only image2d_t output, int4 shape, int2 whInput1, int4 input1NHWCStep) { int2 pos = (int2)(get_global_id(0), get_global_id(1)); int4 nhwc = (int4)(pos.y/shape.y, pos.y%shape.y, pos.x%shape.z, pos.x/shape.z); if (nhwc.x < shape.x && nhwc.w < shape.w) { int4 nhwc1 = nhwc * input1NHWCStep; int2 pos1 = (int2)(nhwc1.w*whInput1.x+nhwc1.z, nhwc1.x*whInput1.y+nhwc1.y); + const FLOAT input1Data = RI_F(input1, SAMPLER, (int2)(0, 0)).x; FLOAT4 in0 = RI_F(input0, SAMPLER, pos); - FLOAT4 in1 = (FLOAT4)(input1); + FLOAT4 in1 = (FLOAT4)(input1Data); WI_F(output, pos, OPERATOR); } } + +__kernel void imageCopy(__read_only image2d_t input, __write_only image2d_t output) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + const int2 dim = 
get_image_dim(input); + if (pos.x >= dim.x && pos.y >= dim.y) { + return; + } + WI_F(output, pos, RI_F(input, SAMPLER, pos)); +} diff --git a/source/backend/opencl/execution/cl/binary_grad.cl b/source/backend/opencl/execution/cl/binary_grad.cl new file mode 100644 index 000000000..a9632828e --- /dev/null +++ b/source/backend/opencl/execution/cl/binary_grad.cl @@ -0,0 +1,30 @@ +#ifdef MNN_SUPPORT_FP16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif + +__constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; + +__kernel void relu_grad(__read_only image2d_t input0, __read_only image2d_t input1, __write_only image2d_t output) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + const int2 imageDim = get_image_dim(output); + if (pos.x >= imageDim.x || pos.y >= imageDim.y) { + return; + } + FLOAT4 in0 = RI_F(input0, SAMPLER, pos); + FLOAT4 in1 = RI_F(input1, SAMPLER, pos); + FLOAT4 out0 = select(in1, (FLOAT4)0, in0 < (FLOAT4)0); + WI_F(output, pos, out0); +} + +__kernel void relu6_grad(__read_only image2d_t input0, __read_only image2d_t input1, __write_only image2d_t output) { + const int2 pos = (int2)(get_global_id(0), get_global_id(1)); + const int2 imageDim = get_image_dim(output); + if (pos.x >= imageDim.x || pos.y >= imageDim.y) { + return; + } + FLOAT4 in0 = RI_F(input0, SAMPLER, pos); + FLOAT4 in1 = RI_F(input1, SAMPLER, pos); + FLOAT4 out0 = select(in1, (FLOAT4)0, in0 <= (FLOAT4)0 || in0 >= (FLOAT4)6); + WI_F(output, pos, out0); +} + diff --git a/source/backend/opencl/execution/cl/buffer_to_image.cl b/source/backend/opencl/execution/cl/buffer_to_image.cl index b6945f2cb..c84f1aee2 100644 --- a/source/backend/opencl/execution/cl/buffer_to_image.cl +++ b/source/backend/opencl/execution/cl/buffer_to_image.cl @@ -7,7 +7,7 @@ __constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | __kernel void nc4hw4_buffer_to_image(GLOBAL_SIZE_2_DIMS __global const float *input_ptr, __private 
const int2 output_shape, - __private const int channel_up_4, __write_only image2d_t output) { + __private const int channel_4, __write_only image2d_t output) { int image_width_idx = get_global_id(0); int image_height_idx = get_global_id(1); @@ -19,7 +19,7 @@ __kernel void nc4hw4_buffer_to_image(GLOBAL_SIZE_2_DIMS __global const float *in const int width_idx = image_width_idx % output_shape.y; const int channel_block_idx = image_width_idx / output_shape.y; int buffer_offset = - (((batch_idx * channel_up_4 + channel_block_idx) * output_shape.x + height_idx) * output_shape.y + width_idx) * 4; + (((batch_idx * channel_4 + channel_block_idx) * output_shape.x + height_idx) * output_shape.y + width_idx) * 4; float4 values = vload4(0, input_ptr + buffer_offset); @@ -29,7 +29,7 @@ __kernel void nc4hw4_buffer_to_image(GLOBAL_SIZE_2_DIMS __global const float *in __kernel void image_to_nc4hw4_buffer(GLOBAL_SIZE_2_DIMS __global float *output, /* nchw */ __private const int2 output_shape, - __private const int channel_up_4, + __private const int channel_4, __read_only image2d_t input_ptr) { int image_width_idx = get_global_id(0); int image_height_idx = get_global_id(1); @@ -42,7 +42,7 @@ __kernel void image_to_nc4hw4_buffer(GLOBAL_SIZE_2_DIMS __global float *output, int channel_block_idx = image_width_idx / output_shape.y; int buffer_offset = - (((batch_idx * channel_up_4 + channel_block_idx) * output_shape.x + height_idx) * output_shape.y + width_idx) * 4; + (((batch_idx * channel_4 + channel_block_idx) * output_shape.x + height_idx) * output_shape.y + width_idx) * 4; int2 coord = (int2)(image_width_idx, image_height_idx); float4 values = read_imagef(input_ptr, SAMPLER, coord); diff --git a/source/backend/opencl/execution/cl/codegen/opencl_program.cc b/source/backend/opencl/execution/cl/codegen/opencl_program.cc deleted file mode 100644 index 0e9b0c10e..000000000 --- a/source/backend/opencl/execution/cl/codegen/opencl_program.cc +++ /dev/null @@ -1,144 +0,0 @@ -#include 
-#include -#include -namespace MNN { -extern const std::map> OpenCLProgramMap = - { -{ - "softmax_common", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x73,0x6f,0x66,0x74,0x6d,0x61,0x78,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x34,0x20,0x73,0x68,0x61,0x70,0x65,0x20,0x2f,0x2f,0x20,0x4e,0x43,0x48,0x57,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x63,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x
30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x77,0x63,0x20,0x3c,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2a,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x20,0x26,0x26,0x20,0x62,0x20,0x3c,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2f,0x2a,0x43,0x6f,0x6d,0x70,0x75,0x74,0x65,0x20,0x4d,0x61,0x78,0x20,0x2a,0x2f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x61,0x78,0x56,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x63,0x2c,0x20,0x62,0x2a,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x3d,0x31,0x3b,0x20,0x69,0x3c,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x3b,0x20,0x2b,0x2b,0x69,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6d,0x61,0x78,0x56,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6d,0x61,0x78,0x56,0x61,0x6c,0x75,0x65,0x2c,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x63,0x2c,0x20,0x62,0x2a,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2b,0x69,0x29,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2f,0x2a,0x43,0x6f,0x6d,0x70,0x75,0x74,0x65,0x20,0x45,0x78,0x70,0x20,0x53,0x75,0x6d,0x2a,0x2f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x73,0x75,0x6d,0x56,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x3d,0x30,0x3b,0x20,0x69,0x3c,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x3b,0x20,0x
2b,0x2b,0x69,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x56,0x61,0x6c,0x75,0x65,0x20,0x2b,0x3d,0x20,0x65,0x78,0x70,0x28,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x63,0x2c,0x20,0x62,0x2a,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2b,0x69,0x29,0x29,0x20,0x2d,0x20,0x6d,0x61,0x78,0x56,0x61,0x6c,0x75,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2f,0x2a,0x43,0x6f,0x6d,0x70,0x75,0x74,0x65,0x20,0x52,0x65,0x73,0x75,0x6c,0x74,0x20,0x2a,0x2f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x3d,0x30,0x3b,0x20,0x69,0x3c,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x3b,0x20,0x2b,0x2b,0x69,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x65,0x78,0x70,0x28,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x63,0x2c,0x20,0x62,0x2a,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2b,0x69,0x29,0x29,0x20,0x2d,0x20,0x6d,0x61,0x78,0x56,0x61,0x6c,0x75,0x65,0x29,0x20,0x2f,0x20,0x73,0x75,0x6d,0x56,0x61,0x6c,0x75,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x63,0x2c,0x20,0x62,0x2a,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2b,0x69,0x29,0x2c,0x20,0x76,0x61,0x6c,0x75,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x20,0x20,0x20,0xa,0x7d,0xa, } - }, -{ - "nearest", - { 
0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x69,0x6e,0x74,0x65,0x72,0x70,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x63,0x61,0x6c,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x
2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69
,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x73,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x
20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6c,0x66,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x69,0x6e,0x74,0x29,0x66,0x6c,0x6f,0x6f,0x72,0x28,0x73,0x63,0x61,0x6c,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x6c,0x66,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x69,0x6e,0x74,0x29,0x66,0x6c,0x6f,0x6f,0x72,0x28,0x73,0x63,0x61,0x6c,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x6c,0x66,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x68
,0x65,0x69,0x67,0x68,0x74,0x5f,0x6c,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x29,0x2c,0x20,0x6f,0x75,0x74,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "interp", - { 
0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x69,0x6e,0x74,0x65,0x72,0x70,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x63,0x61,0x6c,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x
2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69
,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x73,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x
20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6c,0x66,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x69,0x6e,0x74,0x29,0x66,0x6c,0x6f,0x6f,0x72,0x28,0x73,0x63,0x61,0x6c,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x75,0x66,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2d,0x20,0x31,0x2c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6c,0x66,0x20,0x2b,0x20,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x6c,0x66,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x69,0x6e,0x74,0x29,0x66,0x6c,0x6f,0x6f,0x72,0x28,0x73,0x63,0x61,0x6c,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x75,0x66,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x2d,0x20,0x31,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x6c,0x66,0x20,0x2b,0x20,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x67,0x61,0x70,0x20,0x3d,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6c,0x66,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x67,0x61,0x70,0x20,0x20,0x3d,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x2d,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x6c,0x66,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x6f,
0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x74,0x6f,0x70,0x5f,0x6c,0x65,0x66,0x74,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x6c,0x66,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6c,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x74,0x6f,0x70,0x5f,0x72,0x69,0x67,0x68,0x74,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x75,0x66,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6c,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x
61,0x74,0x34,0x20,0x62,0x6f,0x74,0x74,0x6f,0x6d,0x5f,0x6c,0x65,0x66,0x74,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x6c,0x66,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x75,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x62,0x6f,0x74,0x74,0x6f,0x6d,0x5f,0x72,0x69,0x67,0x68,0x74,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x75,0x66,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x75,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x74,0x6f,0x70,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x28,0x74,0x6f,0x70,0x5f,0x72,0x69,0x67,0x68,0x74,0x20,0x2d,0x20,0x74,0x6f,0x70,0x5f,0x6c,0x65,0x66,0x74,0x29,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x67,0x61,0x70,0x2c,0x20,0x74,0x6f,0x70,0x5f,0x6c,0x65,0x66,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x62,0x6f,0x74,0x74,0x6f,0x6d,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x28,0x62,0x6f,0x74,0x74,0x6f,0x6d,0x5f,0x72,0x69,0x67,0x68,0x74,0x20,0x2d,0x20,0x62,0x6f,0x74,0x74,0x6f,0x6d,0x5f,0x6c,0x65,0x66,0x74,0x29,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x67,0x61,0x70,0x2c,0x20,0x62,0x6f,0x74,0x7
4,0x6f,0x6d,0x5f,0x6c,0x65,0x66,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x28,0x62,0x6f,0x74,0x74,0x6f,0x6d,0x20,0x2d,0x20,0x74,0x6f,0x70,0x29,0x2c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x67,0x61,0x70,0x2c,0x20,0x74,0x6f,0x70,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x29,0x2c,0x20,0x6f,0x75,0x74,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "crop", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x7
3,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x72,0x6f,0x70,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x48,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x57,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x42,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x68,0x48,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x57,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x
73,0x65,0x74,0x43,0x34,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x48,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x57,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x48,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x57,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x
67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x49,0x6e,0x64,0x65,0x78,0x43,0x34,0x20,0x3d,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x43,0x34,0x20,0x2b,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x49,0x6e,0x64,0x65,0x78,0x57,0x20,0x20,0x3d,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x57,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x49,0x6e,0x64,0x65,0x78,0x42,0x20,0x20,0x3d,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x42,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x49,0x6e,0x64,0x65,0x78,0x48,0x20,0x20,0x3d,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x68,0x48,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x
53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x74,0x70,0x75,0x74,0x57,0x20,0x2a,0x20,0x73,0x72,0x63,0x49,0x6e,0x64,0x65,0x78,0x43,0x34,0x20,0x2b,0x20,0x73,0x72,0x63,0x49,0x6e,0x64,0x65,0x78,0x57,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x48,0x20,0x2a,0x20,0x73,0x72,0x63,0x49,0x6e,0x64,0x65,0x78,0x42,0x20,0x2b,0x20,0x73,0x72,0x63,0x49,0x6e,0x64,0x65,0x78,0x48,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x63,0x72,0x6f,0x70,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "unary", - { 0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x75,0x6e,0x61,0x72,0x79,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6
f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x62,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x2c,0x20,0x68,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x69,0x6e,0x20,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6
f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20,0x4f,0x50,0x45,0x52,0x41,0x54,0x4f,0x52,0x3b,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x2c,0x20,0x6f,0x75,0x74,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "spatial_product", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0
x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x73,0x70,0x61,0x74,0x69,0x61,0x6c,0x5f,0x70,0x72,0x6f,0x64,0x75,0x63,0x74,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0
x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x62,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x2c,0x20,0x68,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x68,0x62,0x20,0x25,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0
x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x5f,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x5f,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x2c,0x20,0x68,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x20,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x30,0x20,0x2a,0x20,0x69,0x6e,0x5f,0x31,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x2c,0x20,0x6f,0x75,0x74,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "blit", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x6c,0x69,0x74,0x28,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,
0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x57,0x48,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x57,0x48,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x3d,0x20,0x77,0x68,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x20,0x3d,0x20,0x77,0x68,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x78,0x79,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x2f,0x2f,0x4e,0x2c,0x20,0x43,0x2c,0x20,0x48,0x2c,0x20,0x57,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x78,0x79,0x2e,0x79,0x2f,0x68,0x2c,0x20,0x78,0x79,0x2e,0x78,0x2f,0x77,0x2c,0x20,0x78,0x79,0x2e,0x79,0x25,0x68,0x2c,0x20,0x78,0x79,0x2e,0x78,0x25,0x77,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2e,0x78,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2e,0x79,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x70,0x6f,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70
,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x70,0x6f,0x73,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x77,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x57,0x48,0x2e,0x78,0x2c,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x57,0x48,0x2e,0x79,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x77,0x20,0x2b,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x79,0x2a,0x69,0x6e,0x70,0x75,0x74,0x57,0x48,0x2e,0x78,0x2c,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x57,0x48,0x2e,0x79,0x20,0x2b,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x7a,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x2c,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa, } - }, -{ - "roi_pooling", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0
x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x4d,0x49,0x4e,0x5f,0x56,0x41,0x4c,0x55,0x45,0x20,0x2d,0x46,0x4c,0x54,0x5f,0x4d,0x41,0x58,0xa,0xa,0x2f,0x2f,0x20,0x53,0x75,0x70,0x70,0x6f,0x72,0x74,0x65,0x64,0x20,0x64,0x61,0x74,0x61,0x20,0x74,0x79,0x70,0x65,0x3a,0x20,0x68,0x61,0x6c,0x66,0x2f,0x66,0x6c,0x6f,0x61,0x74,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x72,0x6f,0x69,0x5f,0x70,0x6f,0x6f,0x6c,0x69,0x6e,0x67,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x72,0x6f,0x69,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x6f,0x69,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x73,0x70,0x61,0x74,0x69,0x61,0x6c,0x5f,0x73,0x63,0x61,0x6c,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x62,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,
0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x68,0x62,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x6f,0x69,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x68,0x62,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x72,0x6f,0x69,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x68,0x62,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x6f,0x69,0x5f,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x72,0x6f,0x69,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x30,0x2c,0x20,0x72,0x6f,0x69,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x6f,0x69,0x5f,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x72,0x6f,0x69,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x31,0x2c,0x20,0x72,0x6f,0x69,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x6f,0x69,
0x5f,0x32,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x72,0x6f,0x69,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x32,0x2c,0x20,0x72,0x6f,0x69,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x6f,0x69,0x5f,0x33,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x72,0x6f,0x69,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x33,0x2c,0x20,0x72,0x6f,0x69,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x6f,0x69,0x5f,0x34,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x72,0x6f,0x69,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x34,0x2c,0x20,0x72,0x6f,0x69,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x20,0x3d,0x20,0x72,0x6f,0x69,0x5f,0x30,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x78,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x6f,0x75,0x6e,0x64,0x28,0x72,0x6f,0x69,0x5f,0x31,0x2e,0x78,0x20,0x2a,0x20,0x73,0x70,0x61,0x74,0x69,0x61,0x6c,0x5f,0x73,0x63,0x61,0x6c,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x79,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x6f,0x75,0x6e,0x64,0x28,0x72,0x6f,0x69,0x5f,0x32,0x2e,0x78,0x20,0x2a,0x20,0x73,0x70,0x61,0x74,0x69,0x61,0x6c,0x5f,0x73,0x63,0x61,0x6c,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x78,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x6f,0x75,0x6e,0x64,0x28,0x72,0x6f,0x69,0x5f,0x33,0x2e,0x78,0x20,0x2a,0x20,0x73,0x70,0x61,0x74,0x69,0x61,0x6c,0x5f,0x73,0x63,0x61,0x6c,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x79,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x6f,0x75,0x6e,0x6
4,0x28,0x72,0x6f,0x69,0x5f,0x34,0x2e,0x78,0x20,0x2a,0x20,0x73,0x70,0x61,0x74,0x69,0x61,0x6c,0x5f,0x73,0x63,0x61,0x6c,0x65,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x72,0x6f,0x69,0x57,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x78,0x32,0x20,0x2d,0x20,0x78,0x31,0x20,0x2b,0x20,0x31,0x2c,0x20,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x72,0x6f,0x69,0x48,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x79,0x32,0x20,0x2d,0x20,0x79,0x31,0x20,0x2b,0x20,0x31,0x2c,0x20,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x62,0x69,0x6e,0x53,0x69,0x7a,0x65,0x57,0x20,0x3d,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x29,0x72,0x6f,0x69,0x57,0x20,0x2f,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x29,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x62,0x69,0x6e,0x53,0x69,0x7a,0x65,0x48,0x20,0x3d,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x29,0x72,0x6f,0x69,0x48,0x20,0x2f,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x29,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6d,0x61,0x78,0x28,0x79,0x31,0x20,0x2b,0x20,0x28,0x69,0x6e,0x74,0x29,0x66,0x6c,0x6f,0x6f,0x72,0x28,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x62,0x69,0x6e,0x53,0x69,0x7a,0x65,0x48,0x29,0x2c,0x20,0x30,0x29,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x45,0x6e,0x64,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6d,0x61,0x78,0x28,0x79,0x31,0x20,0x2b,0x20,0x28,0x69,0x6e,0x74,0x29,0x63,0x65,0x69,0x6c,0x28,0x28,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x29,0x20,0x2a,0x20,0x62,0x69,0x6e,0x53,0x69,0x7a,0x65,0x48,0x29,0x2c,0x20,0x30,0x29,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x4c,0x65,0x6e,0x20,0x20,0x20,0x3d,0x20,0x6
8,0x45,0x6e,0x64,0x20,0x2d,0x20,0x68,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6d,0x61,0x78,0x28,0x78,0x31,0x20,0x2b,0x20,0x28,0x69,0x6e,0x74,0x29,0x66,0x6c,0x6f,0x6f,0x72,0x28,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x62,0x69,0x6e,0x53,0x69,0x7a,0x65,0x57,0x29,0x2c,0x20,0x30,0x29,0x2c,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x45,0x6e,0x64,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6d,0x61,0x78,0x28,0x78,0x31,0x20,0x2b,0x20,0x28,0x69,0x6e,0x74,0x29,0x63,0x65,0x69,0x6c,0x28,0x28,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x29,0x20,0x2a,0x20,0x62,0x69,0x6e,0x53,0x69,0x7a,0x65,0x57,0x29,0x2c,0x20,0x30,0x29,0x2c,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x4c,0x65,0x6e,0x20,0x20,0x20,0x3d,0x20,0x77,0x45,0x6e,0x64,0x20,0x2d,0x20,0x77,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x7a,0x65,0x72,0x6f,0x5f,0x76,0x65,0x63,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x77,0x4c,0x65,0x6e,0x20,0x3c,0x3d,0x20,0x30,0x20,0x7c,0x7c,0x20,0x68,0x4c,0x65,0x6e,0x20,0x3c,0x3d,0x20,0x30,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x6f,0x75,0x74,0x5
f,0x68,0x62,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x7a,0x65,0x72,0x6f,0x5f,0x76,0x65,0x63,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x4d,0x49,0x4e,0x5f,0x56,0x41,0x4c,0x55,0x45,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x20,0x20,0x3d,0x20,0x68,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x20,0x20,0x20,0x3d,0x20,0x77,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3d,0x20,0x30,0x3b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3c,0x20,0x68,0x4c,0x65,0x6e,0x3b,0x20,0x2b,0x2b,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x30,0x3b,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3c,0x20,0x77,0x4c,0x65,0x6e,0x3b,0x20,0x2b,0x2b,0x77,0x69,0x64,0x74,0x68,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x69,0x6e,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x68,0x62,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "normalize", - { 
0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x6e,0x6f,0x72,0x6d,0x61,0x6c,0x69,0x7a,0x65,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x73,0x63,0x61,0x6c,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x65,0x70,0x73,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,
0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x5f,0x62,0x6c,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x62,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x63,0x68,0x61,0x6e,0x5f,0x62,0x6c,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x68,0x62,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x5f,0x62,0x6c,0x6b,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x30,0x20,0x3d,0x3d,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x6e,0x5f,0x62,0x6c,0x6b,0x73,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x6e,0x5f,0x62,0x6c,0x6b,0x73,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x2d,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x
a,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x73,0x75,0x6d,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x64,0x61,0x74,0x61,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x73,0x68,0x6f,0x72,0x74,0x20,0x69,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x20,0x3c,0x20,0x63,0x68,0x61,0x6e,0x5f,0x62,0x6c,0x6b,0x73,0x3b,0x20,0x2b,0x2b,0x69,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x78,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x79,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x7a,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x77,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x20,0x2b,0x3d,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0
x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x73,0x77,0x69,0x74,0x63,0x68,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x61,0x73,0x65,0x20,0x31,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x78,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x79,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x7a,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x61,0x73,0x65,0x20,0x32,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x78,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x79,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x61,0x73,0x65,0x20,0x33,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x64,0x61,0x74,0x61,0x2e,0x78,0x20,0x2a,0x20,0x64,0x61,0x74,0x61,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x73,0x75,0x6d,0x20,0x3d,0x20,0x31,0x2e,0x30,0x66,0x20,0x2f,0x20,0x73,0x71,0x72,0x74,0x28,0x73,0x75,0x6d,0x20,0x2b,0x20,0x65,0x70,0x73,0x29,0x3b,0xa,0xa,0x20,0x20,0x2
0,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x5f,0x62,0x6c,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x64,0x61,0x74,0x61,0x20,0x20,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x63,0x68,0x61,0x6e,0x5f,0x62,0x6c,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x73,0x75,0x6d,0x5f,0x76,0x65,0x63,0x20,0x3d,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x34,0x29,0x28,0x73,0x75,0x6d,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x64,0x61,0x74,0x61,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x64,0x61,0x74,0x61,0x20,0x2a,0x20,0x73,0x75,0x6d,0x5f,0x76,0x65,0x63,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x64,0x61,0x74,0x61,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "binary", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x69,0x6e,0x61,0x72,0x79,0x5f,0x73,0x61,0x6d,0x65,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x72,0x6f,0x61,0x64,0x63,0x61,0x73,0x74,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0
x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6e,0x68,0x77,0x63,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x70,0x6f,0x73,0x2e,0x79,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6e,0x68,0x77,0x63,0x2e,0x78,0x20,0x3e,0x3d,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x6e,0x68,0x77,0x63,0x2e,0x77,0x20,0x3e,0x3d,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x29,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x30,0x2c,0x20,0x69,0x6e,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x30,0x2c,0x20,0x70,0x6f,0x73,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x78,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0x20,0x2f,0x2f,0x20,0x54,0x65,0x6e,0x73,0x6f,0x72,0x20,0x30,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x6c,0x65,0x6e,0x67,0x74,0x68,0x20,0x31,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x30,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x78,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0
x73,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x79,0x20,0x21,0x3d,0x20,0x31,0x29,0x20,0x3f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x20,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x79,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0x20,0x2f,0x2f,0x20,0x54,0x65,0x6e,0x73,0x6f,0x72,0x20,0x30,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x6c,0x65,0x6e,0x67,0x74,0x68,0x20,0x31,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x30,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x20,0x21,0x3d,0x20,0x31,0x29,0x20,0x3f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77
,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x20,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x78,0x20,0x3d,0x3d,0x20,0x31,0x20,0x26,0x26,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x79,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x30,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x78,0x2c,0x20,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x70,0x6f,0x73,0x2c,0x20,0x4f,0x50,0x45,0x52,0x41,0x54,0x4f,0x52,0x29,0x3b,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,
0x69,0x6e,0x61,0x72,0x79,0x5f,0x31,0x74,0x6f,0x4d,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x72,0x6f,0x61,0x64,0x63,0x61,0x73,0x74,0x5f,0x6f,0x6e,0x5f,0x61,0x77,0x68,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6e,0x68,0x77,0x63,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x70,0x6f,0x73,0x2e,0x79,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6e,0x68,0x77,0x63,0x2e,0x78,0x20,0x3e,0x3d,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x6e,0x68,0x77,0x63,0x2e,0x77,0x20,0x3e,0x3d,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x29,0xa,0x20,0x20,0x20,0
x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x30,0x2c,0x20,0x69,0x6e,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x30,0x2c,0x20,0x70,0x6f,0x73,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x30,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x76,0x61,0x6c,0x75,0x65,0x2e,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x20,0x21,0x3d,0x20,0x31,0x20,0x26,0x26,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x79,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x20,0x3d,0x3d,0x20,0x31,0x20,0x26,0x26,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x79,0x20,0x21,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x2
8,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x20,0x3d,0x3d,0x20,0x31,0x20,0x26,0x26,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x79,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x2c,0x20,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x70,0x6f,0x73,0x2c,0x20,0x4f,0x50,0x45,0x52,0x41,0x54,0x4f,0x52,0x29,0x3b,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x69,0x6e,0x61,0x72,0x79,0x5f,0x31,0x74,0x6f,0x4d,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x72,0x6f,0x61,0x64,0x63,0x61,0x73,0x74,0x5f,0x6f,0x6e,0x5f,0x31,0x77,0x68,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6e,0x68,0x77,0x63,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x70,0x6f,0x73,0x2e,0x79,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6e,0x68,0x77,0x63,0x2e,0x78,0x20,0x3e,0x3d,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x6e,0x68,0x77,0x63,0x2e,0x77,0x20,0x3e,0x3d,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x29,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x30,0x2c,0x20,0x69,0x6e,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x30,0x2c,0x20,0x70,0x6f,0x73,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x78,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0x20,0x2f,0x2f,0x20,0x54,0x65,0x6e,0x73,0x6f,0x72,0x20,0x30,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x6c,0x65,0x6e,0x67,0x74,0x68,0x20,0x31,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x30,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x30,0x2c,0x20
,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x76,0x61,0x6c,0x75,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x79,0x20,0x21,0x3d,0x20,0x31,0x29,0x20,0x3f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x20,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x79,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0x20,0x2f,0x2f,0x20,0x54,0x65,0x6e,0x73,0x6f,0x72,0x20,0x30,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x6c,0x65,0x6e,0x67,0x74,0x68,0x20,0x31,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x30,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x30,0x29,0x3b,0xa,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x76,0x61,0x6c,0x75,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x20,0x21,0x3d,0x20,0x31,0x29,0x20,0x3f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x20,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x78,0x20,0x3d,0x3d,0x20,0x31,0x20,0x26,0x26,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x30,0x2e,0x79,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x30,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x30,0x2c,0x20,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x76,0x61,0x6c,0x75,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x2e,0x77,0x2a,0x77,0x68,0
x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x2e,0x78,0x2a,0x77,0x68,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x70,0x6f,0x73,0x2c,0x20,0x4f,0x50,0x45,0x52,0x41,0x54,0x4f,0x52,0x29,0x3b,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x69,0x6e,0x61,0x72,0x79,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x4e,0x48,0x57,0x43,0x53,0x74,0x65,0x70,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6e,0x68,0x77,0x63,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x70,0x6f,0x73,0x2e,0x79,
0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6e,0x68,0x77,0x63,0x2e,0x78,0x20,0x3c,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x6e,0x68,0x77,0x63,0x2e,0x77,0x20,0x3c,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6e,0x68,0x77,0x63,0x31,0x20,0x3d,0x20,0x6e,0x68,0x77,0x63,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x4e,0x48,0x57,0x43,0x53,0x74,0x65,0x70,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x31,0x2e,0x77,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x31,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x31,0x2e,0x78,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x31,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x70,0x6f,0x73,0x2c,0x20,0x4f,0x50,0x45,0x52,0x41,0x54,0x4f,0x52,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa,0x5f,
0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x69,0x6e,0x61,0x72,0x79,0x5f,0x76,0x61,0x6c,0x75,0x65,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x4e,0x48,0x57,0x43,0x53,0x74,0x65,0x70,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6e,0x68,0x77,0x63,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x70,0x6f,0x73,0x2e,0x79,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x25,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x2f,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6e,0x68,0x77,0x63,0x2e,0x78,0x20,0x3c,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x6e,0x68,0x77,0x63,0x2e,0x77,0x20,0x3c,0x20,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6e,0x68,0x77,0x63,0x31,0x20,0x3d,0x20,0x6e,0x68,0x77,0x63,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x31,0x4e,0x48,0x57,0x43,0x53,
0x74,0x65,0x70,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x31,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6e,0x68,0x77,0x63,0x31,0x2e,0x77,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x78,0x2b,0x6e,0x68,0x77,0x63,0x31,0x2e,0x7a,0x2c,0x20,0x6e,0x68,0x77,0x63,0x31,0x2e,0x78,0x2a,0x77,0x68,0x49,0x6e,0x70,0x75,0x74,0x31,0x2e,0x79,0x2b,0x6e,0x68,0x77,0x63,0x31,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x30,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x70,0x6f,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x31,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x70,0x6f,0x73,0x2c,0x20,0x4f,0x50,0x45,0x52,0x41,0x54,0x4f,0x52,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "scale", - { 
0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x73,0x63,0x61,0x6c,0x65,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x73,0x63,0x61,0x6c,0x65,0x2c,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x48,0x41,0x53,0x5f,0x42,0x49,0x41,0x53,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x62,0x69,0x61,0x73,0x2c,0x20,0x2f,0x2a,0x20,0x63,0x6f,0x75,0x74,0x25,0x34,0x20,0x2a,0x20,0x63,0x6f,0x75,0x74,0x2f,0x34,0x20,0x2a,0x2f,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0
x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x62,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x2c,0x20,0x68,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x77,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x69,0x6e,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0
x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x48,0x41,0x53,0x5f,0x42,0x49,0x41,0x53,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x62,0x69,0x61,0x73,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x62,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6e,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x2b,0x20,0x62,0x69,0x61,0x73,0x5f,0x76,0x61,0x6c,0x75,0x65,0x3b,0xa,0x23,0x65,0x6c,0x73,0x65,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20,0x69,0x6e,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x76,0x61,0x6c,0x75,0x65,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x2c,0x20,0x6f,0x75,0x74,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "copy_buffer_to_image2d", - { 
0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x70,0x79,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x74,0x6f,0x5f,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x79,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x78,0x20,0x3c,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x79,0x20,0x3c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0
x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x78,0x2c,0x20,0x79,0x29,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5b,0x78,0x20,0x2b,0x20,0x79,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x5d,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "space_to_batch", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x73,0x70,0x61,0x63,0x65,0x5f,0x74,0x6f,0x5f,0x62,0x61,0x74,0x63,0x68,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2c,
0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x33,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x33,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x79,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2f,0x2f,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2d,0x3e,0x20,0x77,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2d,0x3e,0x20,0x68,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x7a,0x20,0x2d,0x3e,0x20,0x63,0x34,0x20,0x2a,0x20,0x62,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x7a,0x20,0x2f,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x49,0x6e,0x64,0x65,0x78,0x20,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0
x7a,0x20,0x25,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x25,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x77,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2f,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x77,0x29,0x20,0x25,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x68,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2f,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x77,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x76,0x61,0x6c,0x69,0x64,0x48,0x65,0x69,0x67,0x68,0x74,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x28,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x78,0x20,0x2d,0x20,0x73,0x68,0x20,0x2b,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x76,0x61,0x6c,0x69,0x64,0x48,0x65,0x69,0x67,0x68,0x74,0x45,0x6e,0x64,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x79,0x2c,0x20,0x28,0x28,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x79,0x20,0x2b,0x20,0x70,0x61,0x64,0
x64,0x69,0x6e,0x67,0x2e,0x78,0x20,0x2d,0x20,0x73,0x68,0x20,0x2b,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x76,0x61,0x6c,0x69,0x64,0x57,0x69,0x64,0x74,0x68,0x53,0x74,0x61,0x72,0x74,0x20,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x28,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x79,0x20,0x2d,0x20,0x73,0x77,0x20,0x2b,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x76,0x61,0x6c,0x69,0x64,0x57,0x69,0x64,0x74,0x68,0x45,0x6e,0x64,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x78,0x2c,0x20,0x28,0x28,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x78,0x20,0x2b,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x79,0x20,0x2d,0x20,0x73,0x77,0x20,0x2b,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x50,0x6f,0x73,0x58,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2a,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x73,0x77,0x20,0x2d,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x50,0x6f,0x73,0x59,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2a,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2b,0x20,0x73,0x68,0x20,0x2d,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x50,0x6f,0x73,0x
5a,0x20,0x3d,0x20,0x69,0x6e,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x7a,0x20,0x2b,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x49,0x6e,0x64,0x65,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x58,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x50,0x6f,0x73,0x58,0x20,0x2b,0x20,0x69,0x6e,0x50,0x6f,0x73,0x5a,0x20,0x2a,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x78,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x76,0x61,0x6c,0x69,0x64,0x57,0x69,0x64,0x74,0x68,0x53,0x74,0x61,0x72,0x74,0x20,0x7c,0x7c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3e,0x3d,0x20,0x76,0x61,0x6c,0x69,0x64,0x57,0x69,0x64,0x74,0x68,0x45,0x6e,0x64,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x59,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x50,0x6f,0x73,0x59,0x20,0x2b,0x20,0x69,0x6e,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x76,0x61,0x6c,0x69,0x64,0x48,0x65,0x69,0x67,0x68,0x74,0x53,0x74,0x61,0x72,0x74,0x20,0x7c,0x7c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3e,0x3d,0x20,0x76,0x61,0x6c,0x69,0x64,0x48,0x65,0x69,0x67,0x68,0x74,0x45,0x6e,0x64,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x58,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74
,0x32,0x29,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2b,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x78,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2b,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x79,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "depthwise_deconv2d", - { 0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x69,0x2c,0x20,0x62,0x61,0x73,0x65,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x23,0x23,0x69,0x20,0x2b,0x20,0x62,0x61,0x73,0x65,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x23,0x23,0x69,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x62,0x5f,0x76,0x61,0x6c,0x75,0x65,0x29,0x29,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x69,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x23,0x23,0x69,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x23,0x23,0x69,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x23,0x23,0x69,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x6f,0x75,0x
74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x23,0x23,0x69,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65
,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x64,0x65,0x70,0x74,0x68,0x77,0x69,0x73,0x65,0x5f,0x64,0x65,0x63,0x6f,0x6e,0x76,0x32,0x64,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x62,0x69,0x61,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x61,0x6c,0x69,0x67,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x6
2,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x62,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x78,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x61,0x6c,0x69,0x67,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x20,0x2f,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x79,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x61,0x6c,0x69,0x67,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x20,0x2f,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0xa,0x
20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x65,0x61,0x6c,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2d,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x65,0x61,0x6c,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x79,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2d,0x20,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x69,0x6e,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x5f,0x69,0x64,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x5f,0x79,0x20,0x3d,0x20,0x64,0x65,0x61,0x6c,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x69,0x64,0x78,0x5f,0x68,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,
0x73,0x74,0x61,0x72,0x74,0x5f,0x79,0x3b,0x20,0x6b,0x5f,0x79,0x20,0x3e,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x5f,0x79,0x20,0x2d,0x3d,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x69,0x64,0x78,0x5f,0x68,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x69,0x64,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x69,0x64,0x78,0x5f,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x62,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x5f,0x69,0x64,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x69,0x64,0x78,0x5f,0x68,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x64,0x78,0x5f,0x68,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x5f,0x78,0x20,0x3d,0x20,0x64,0x65,0x61,0x6c,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x78,0x3b,0x20,0x6b,0x5f,0x78,0x20,0x3e,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x5f,0x78,0x20,0x2d,0x3d,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0
x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x30,0x2c,0x20,0x30,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x5f,0x79,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x6b,0x5f,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x30,0x2c
,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x34,0x29,0x30,0x2c,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x34,0x29,0x36,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "depthwise_conv_2d_int8", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x64,0x65,0x70,0x74,0x68,0x77,0x69,0x73,0x65,0x5f,0x63,0x6f,0x6e,0x76,0x5f,0x32,0x64,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x69,0x6e,0x74,0x2a,0x20,0x62,0x69,0x61,0x73,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x2
0,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x
2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x62,0x69,0x61,0x73,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0xa,0x2f,0x2f,0x64,0x65,0x61,0x6c,0x20,0x77,0x69,0x74,0x68,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x73,0x69,0x7a,0x65,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0xa,0x2f,0x2f,0x64,0x65,0x61,0x6c,0x20,0x77,0x69,0x74,0x68,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x73,0x69,0x7a,0x65,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,
0x69,0x79,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x79,0x20,0x3c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0x20,0x69,0x79,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x78,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x78,0x20,0x3c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0x20,0x69,0x78,0x2b,0x2b,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x69,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x69,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x28,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x3e,0x3d,0x20,0x30,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x3e,0x3d,0x20,0x30,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x73,0x61,0x74,0x28,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x69,0x6e,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x20,0x2a,0x29,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x79,0x20,0x2a,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2a,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x20,0x2b,0x20,0x69,0x78,0x2a,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x28,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x20,0x2a,0x29,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x70,0x74,0x72,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x69,0x6e,0x30,0x2a,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x20,0x2b,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x6d,0
x61,0x78,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x73,0x63,0x61,0x6c,0x65,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x2a,0x29,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x30,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x30,0x5f,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x30,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "performance", - { 
0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x4d,0x41,0x44,0x5f,0x56,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x79,0x2c,0x20,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x79,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x78,0x2c,0x20,0x79,0x2c,0x20,0x78,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x79,0x2c,0x20,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x79,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x78,0x2c,0x20,0x79,0x2c,0x20,0x78,0x29,0x3b,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x36,0x28,0x78,0x2c,0x20,0x79,0x29,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x4d,0x41,0x44,0x5f,0x56,0x36,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x36,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x36,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x36,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x36,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x32,0x38,0x28,0x78,0x2c,0x20,0x79,0x29,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x36,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x4
1,0x44,0x5f,0x56,0x36,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x36,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x36,0x34,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x78,0x2c,0x20,0x79,0x29,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x32,0x38,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x32,0x38,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x32,0x38,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x31,0x32,0x38,0x28,0x78,0x2c,0x20,0x79,0x29,0x3b,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x70,0x72,0x65,0x63,0x69,0x73,0x69,0x6f,0x6e,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x6d,0x75,0x6c,0x5f,0x76,0x61,0x6c,0x75,0x65,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x6d,0x75,0x6c,0x5f,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x5f,0x76,0x61,0x6c,0x75,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x20,0x3d,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x29,0x67,0x65,0x74,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x5
f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20
,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28
,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x5d,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x3b,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x68,0x61,0x6c,0x66,0x34,0x5f,0x70,0x72,0x65,0x63,0x69,0x73,0x69,0x6f,0x6e,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x68,0x61,0x6c,0x66,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x6d,0x75,0x6c,0x5f,0x76,0x61,0x6c,0x75,0x65,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x68,0x61,0x6c,0x66,0x20,0x6d,0x75,0x6c,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x68,0x61,0x6c,0x66,0x29,0x6d,0x75,0x6c,0x5f,0x76,0x61,0x6c,0x75,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x68,0x61,0x6c,0x66,0x34,0x20,0x6d,0x75,0x6c,0x5f,0x78,0x20,0x3d,0x20,0x28,0x68,0x61,0x6c,0x66,0x34,0x29,0x28,0x6d,0x75,0x6c,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x68,0x61,0x6c,0x66,0x34,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x20,0x3d,0x20,0x28,0x68,0x61,0x6c,0x66,0x34,0x29,0x67,0x65,0x74,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5
f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x4d,0x41,0x44,0x5f,0x56,0x32,0x35,0x36,0x28,0x6d,0x75,0x6c,0x5f,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x5f,0x79,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x6
9,0x64,0x28,0x30,0x29,0x5d,0x20,0x3d,0x20,0x28,0x6d,0x75,0x6c,0x5f,0x79,0x2e,0x53,0x30,0x29,0x20,0x2b,0x20,0x28,0x6d,0x75,0x6c,0x5f,0x79,0x2e,0x53,0x31,0x29,0x20,0x2b,0x20,0x28,0x6d,0x75,0x6c,0x5f,0x79,0x2e,0x53,0x32,0x29,0x20,0x2b,0x20,0x28,0x6d,0x75,0x6c,0x5f,0x79,0x2e,0x53,0x33,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "gemm", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x67,0x65,0x6d,0x6d,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4b,0x65,0x72,0x6e,0x65,0x6c,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0
x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6d,0x75,0x6c,0x74,0x69,0x4c,0x65,0x6e,0x67,0x74,0x68,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x61,0x6c,0x70,0x68,0x61,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x77,0x69,0x64,0x74,0x68,0x2a,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x61,0x6c,0x70,0x68,0x61,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x5f,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x25,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x5f,0x79,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2f,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x5f,0x7a,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x30,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x31,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x32,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x33,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x6e,0x65,0x72,0x6c,0x59,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x70,0x6f,0x73,0x5f,0x7a,0x2c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x70,0x6f,0x73,0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x59,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x70,0x6f,0x73,0x5f,0x7a,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x70,0x6f,0x73,0x5f,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x20,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x20,0x3c,0x20,0x6d,0x75,0x6c,0x74,0x69,0x4c,0x65,0x6e,0x67,0x74,0x68,0x3b,0x20,0x2b,0x2b,0x6b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6b,0x2c,0x20,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6b,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x4b,0x65,0x72,0x6e,0x65,0x6c,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x64,0x65,0x78,0x2c,0x20,0x6b,0x65,0x6e,0x65,0x72,0x6c,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6b,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x4b,0x65,0x72,0x6e,0x65,0x6c,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x64,0x65,0x78,0x2b,0x31,0x2c,0x20,0x6b,0x65,0x6e,
0x65,0x72,0x6c,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6b,0x32,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x4b,0x65,0x72,0x6e,0x65,0x6c,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x64,0x65,0x78,0x2b,0x32,0x2c,0x20,0x6b,0x65,0x6e,0x65,0x72,0x6c,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6b,0x33,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x4b,0x65,0x72,0x6e,0x65,0x6c,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x64,0x65,0x78,0x2b,0x33,0x2c,0x20,0x6b,0x65,0x6e,0x65,0x72,0x6c,0x59,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x73,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x64,0x65,0x78,0x2c,0x20,0x73,0x72,0x63,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x73,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x64,0x65,0x78,0x2b,0x31,0x2c,0x20,0x73,0x72,0x63,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x73,0x32,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x64,0x65,0x78,0x2b,0x32,0x2c,0x20,0x73,0x72,0x63,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x73,0x33,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x
20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x64,0x65,0x78,0x2b,0x33,0x2c,0x20,0x73,0x72,0x63,0x59,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x30,0x2e,0x78,0x2c,0x20,0x6b,0x30,0x2c,0x20,0x6f,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x30,0x2e,0x79,0x2c,0x20,0x6b,0x31,0x2c,0x20,0x6f,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x30,0x2e,0x7a,0x2c,0x20,0x6b,0x32,0x2c,0x20,0x6f,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x30,0x2e,0x77,0x2c,0x20,0x6b,0x33,0x2c,0x20,0x6f,0x30,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x31,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x31,0x2e,0x78,0x2c,0x20,0x6b,0x30,0x2c,0x20,0x6f,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x31,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x31,0x2e,0x79,0x2c,0x20,0x6b,0x31,0x2c,0x20,0x6f,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x31,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x31,0x2e,0x7a,0x2c,0x20,0x6b,0x32,0x2c,0x20,0x6f,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x31,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x31,0x2e,0x77,0x2c,0x20,0x6b,0x33,0x2c,0x20,0x6f,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x32,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x32,0x2e,0x78,0x2c,0x20,0x6b,0x30,0x2c,0x20,0x6f,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x32,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x32,0x2e,0x79,0x2c,0x20,0x6b,0x31,0x2c,0x20,0x6f,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x32,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x32,0x2e,0x7a,0x2c,0x20,0x6b,0x32,0x2c,0x20,0x6f,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x32,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x32,0x2e,0x77,0x2c,0x20,0x6b,0x33,0x2c,0x20,0x6f,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x33,0x2e,0x78,0x2c,0x20,0x6b,0x30,0x2c,0x20,0x6f,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x33,0x2e,0x79,0x2c,0x20,0x6b,0x31,0x2c,0x20,0x6f,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x33,0x2e,0x7a,0x2c,0x20,0x6b,0x32,0x2c,0x20,0x6f,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x73,0x33,0x2e,0x77,0x2c,0x20,0x6b,0x33,0x2c,0x20,0x6f,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x79,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x70,0x6f,0x73,0x5f,0x79,0x2c,0x20,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x73,0x72,0x63,0x59,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x79,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x73,0x72,0x63,0x59,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x79,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x29,0x2c,0x20,0x6f,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x2
0,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x73,0x72,0x63,0x59,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x79,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x29,0x2c,0x20,0x6f,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x73,0x72,0x63,0x59,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x79,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x33,0x29,0x2c,0x20,0x6f,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "batch_to_space", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0x2f,0x2f,0x20,0x52,0x65,0x76,0x65,0x72,0x74,0x20,0x73,0x70,0x61,0x63,0x65,0x5f,0x74,0x6f,0x5f,0x62,0x61,0x74,0x63,0x68,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x74,0x6f,0x5f,0x73,0x70,0x61,0x63,0x65,0x28,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x33,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x33,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x79,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2f,0x2f,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2d,0x3e,0x20,0x77,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2d,0x3e,0x20,0x68,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x7a,0x20,0x2d,0x3e,0x20,0x63,0x34,0x20,0x2a,0x20,0x62,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x20,0x20,0x2
0,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x7a,0x20,0x2f,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x49,0x6e,0x64,0x65,0x78,0x20,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x7a,0x20,0x25,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x25,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x77,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2f,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x77,0x29,0x20,0x25,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x68,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2f,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x77,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x76,0x61,0x6c,0x69,0x64,0x48,0x65,0x69,0x67,0x68,0x74,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x28,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x78,0x20,0x2d,0x20,0x73,0x68,0x20,0x2b,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74
,0x20,0x76,0x61,0x6c,0x69,0x64,0x48,0x65,0x69,0x67,0x68,0x74,0x45,0x6e,0x64,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x79,0x2c,0x20,0x28,0x28,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x79,0x20,0x2b,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x78,0x20,0x2d,0x20,0x73,0x68,0x20,0x2b,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x76,0x61,0x6c,0x69,0x64,0x57,0x69,0x64,0x74,0x68,0x53,0x74,0x61,0x72,0x74,0x20,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x28,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x79,0x20,0x2d,0x20,0x73,0x77,0x20,0x2b,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x76,0x61,0x6c,0x69,0x64,0x57,0x69,0x64,0x74,0x68,0x45,0x6e,0x64,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x78,0x2c,0x20,0x28,0x28,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x78,0x20,0x2b,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x79,0x20,0x2d,0x20,0x73,0x77,0x20,0x2b,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x50,0x6f,0x73,0x58,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2a,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x73,0x77,0x20,0x2d,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x50,0x6f,0x73,0x59
,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2a,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2b,0x20,0x73,0x68,0x20,0x2d,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x50,0x6f,0x73,0x5a,0x20,0x3d,0x20,0x69,0x6e,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x7a,0x20,0x2b,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x49,0x6e,0x64,0x65,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x58,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x50,0x6f,0x73,0x58,0x20,0x2b,0x20,0x69,0x6e,0x50,0x6f,0x73,0x5a,0x20,0x2a,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x78,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x76,0x61,0x6c,0x69,0x64,0x57,0x69,0x64,0x74,0x68,0x53,0x74,0x61,0x72,0x74,0x20,0x7c,0x7c,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3e,0x3d,0x20,0x76,0x61,0x6c,0x69,0x64,0x57,0x69,0x64,0x74,0x68,0x45,0x6e,0x64,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x59,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x50,0x6f,0x73,0x59,0x20,0x2b,0x20,0x69,0x6e,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x69,0x6e,0x49,0x6d,0x61,0x67,0x65,0x53,0x69,0x7a,0x65,0x2e,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x76,0x61,0x6c,0x69,0x64,0x48,0x65,0x69,0x67,0x68,0x74,0x53,0x74,0x61,0x72,0x74,0x20,0x7c,0x7c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3e,0x3d,0x20,0x76,0x61,0x6c,0x69,0x64,0x48,0x65,0x69,0x67,0x68,0x74,0x45,0x6e,0x64,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2b,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x78,0x2c,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2b,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x49,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x58,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x59,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "buffer_to_image", - { 0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x
61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x6e,0x63,0x34,0x68,0x77,0x34,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x74,0x6f,0x5f,0x69,0x6d,0x61,0x67,0x65,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x75,0x70,0x5f,0x34,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e
,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e
,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x28,0x28,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x75,0x70,0x5f,0x34,0x20,0x2b,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x30,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x63,0x6f,0x6f,0x72,0x64,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x63,0x6f,0x6f,0x72,0x64,0x2c,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x74,0x6f,0x5f,0x6e,0x63,0x34,0x68,0x77,0x34,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x47,0x4c,0x4f,0x42
,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x2f,0x2a,0x20,0x6e,0x63,0x68,0x77,0x20,0x2a,0x2f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x75,0x70,0x5f,0x34,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0
x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x28,0x28,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x75,0x70,0x5f,0x34,0x20,0x2b,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x77,0x69
,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x63,0x6f,0x6f,0x72,0x64,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x63,0x6f,0x6f,0x72,0x64,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x76,0x61,0x6c,0x75,0x65,0x73,0x2c,0x20,0x30,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x3a,0x20,0x66,0x72,0x6f,0x6d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x6f,0x69,0x20,0x29,0x20,0x74,0x6f,0x20,0x69,0x6d,0x61,0x67,0x65,0x28,0x6f,0x63,0x2c,0x20,0x69,0x63,0x2f,0x34,0x29,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x32,0x64,0x31,0x78,0x31,0x5f,0x6f,0x70,0x74,0x5f,0x66,0x69,0x6c,0x74,0x65,0x72,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x74,0x6f,0x5f,0x69,0x6d,0x61,0x67,0x65,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69
,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x63,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0x20,0x2f,0x2f,0x20,0x69,0x63,0x2f,0x34,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x63,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0x20,0x2f,0x2f,0x20,0x6f,0x63,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x63,0x5f,0x34,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x63,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x63,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x69,0x63,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20
,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x6f,0x63,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x2b,0x20,0x69,0x63,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x63,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x2d,0x20,0x69,0x63,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x32,0x
29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,
0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x
20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x63,0x5f,0x34,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x63,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x3a,0x20,0x66,0x72,0x6f,0x6d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x6f,0x69,0x68,0x77,0x29,0x20,0x74,0x6f,0x20,0x69,0x6d,0x61,0x67,0x65,0x28,0x6f,0x63,0x2f,0x34,0x20,0x68,0x20,0x77,0x20,0x2c,0x20,0x69,0x63,0x20,0x6f,0x63,0x34,0x29,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x32,0x64,0x5f,0x66,0x69,0x6c,0x74,0x65,0x72,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x74,0x6f,0x5f,0x69,0x6d,0x61,0x67,0x65,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5
f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0x20,0x2f,0x2f,0x20,0x69,0x63,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0x20,0x2f,0x2f,0x20,0x6f,0x63,0x2f,0x34,0x20,0x68,0x20,0x77,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x29,0x20,0x2a,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0
x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x20,0x2b,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x20,0x2b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x2
0,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x2d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x31,0x2c,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x31,0x2c,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74
,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x31,0x2c,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x6f,0x6e,0x6c,0x79,0x20,0x66,0x6f,0x72,0x20,0x64,0x65,0x62,0x75,0x67,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x3a,0x20,0x66,0x72,0x6f,0x6d,0x20,0x69,0x6d,0x61,0x67,0x65,0x28,0x6f,0x63,0x2f,0x34,0x20,0x68,0x20,0x77,0x20,0x2c,0x20,0x69,0x63,0x20,0x6f,0x63,0x34,0x29,0x20,0x74,0x6f,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x6f,0x69,0x68,0x77,0x29,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x32,0x64,0x5f,0x66,0x69,0x6c,0x74,0x65,0x72,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x74,0x6f,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0
x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x20,0x2a,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e
,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x20,0x2b,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x20,0x2b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x63,0x6f,0x6f,0x72,0x64,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x63,0x6f,0x6f,0x72,0x64,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,
0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x2d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x31,0x2c,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x3b
,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x31,0x2c,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x32,0x29
,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x31,0x2c,0x20,0x69,0x63,0x5f,0x68,0x5f,0x77,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x66,0x72,0x6f,0x6d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x6d,0x69,0x68,0x77,0x29,0x20,0x74,0x6f,0x20,0x69,0x6d,0x61,0x67,0x65,0x28,0x69,0x63,0x2f,0x34,0
x2c,0x20,0x69,0x63,0x34,0x20,0x68,0x20,0x77,0x20,0x6d,0x29,0xa,0x2f,0x2f,0x20,0x62,0x75,0x74,0x20,0x6e,0x6f,0x77,0x20,0x64,0x77,0x20,0x6f,0x6e,0x6c,0x79,0x20,0x73,0x75,0x70,0x70,0x6f,0x72,0x74,0x20,0x6d,0x20,0x3d,0x3d,0x20,0x31,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x64,0x77,0x5f,0x66,0x69,0x6c,0x74,0x65,0x72,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x74,0x6f,0x5f,0x69,0x6d,0x61,0x67,0x65,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x34,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x
68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x7a,0x2c,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x77,0x2c,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,
0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x7
5,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x64,0x61,0x74,0x61,0x20,0x66,0x72,0x6f,0x6d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x6e,0x68,0x77,0x63,0x29,0x20,0x74,0x6f,0x20,0x69,0x6d
,0x61,0x67,0x65,0x28,0x62,0x20,0x68,0x2c,0x20,0x69,0x63,0x2f,0x34,0x20,0x77,0x20,0x69,0x63,0x34,0x29,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x6e,0x68,0x77,0x63,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x74,0x6f,0x5f,0x69,0x6d,0x61,0x67,0x65,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0
x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x77,0x69,0x64,0x74,0x68,0x29,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x28,0x28,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x2b,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e
,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x2d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x75,0x72,0x72,0x65,0x6e,0x74,0x5f,0x70,0x74,0x72,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x30,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x75,0x72,0x72,0x65,0x6e,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x2
0,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x64,0x61,0x74,0x61,0x20,0x66,0x72,0x6f,0x6d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x6e,0x63,0x68,0x77,0x29,0x20,0x74,0x6f,0x20,0x69,0x6d,0x61,0x67,0x65,0x28,0x62,0x20,0x68,0x2c,0x20,0x69,0x63,0x2f,0x34,0x20,0x77,0x20,0x69,0x63,0x34,0x29,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x6e,0x63,0x68,0x77,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x74,0x6f,0x5f,0x69,0x6d,0x61,0x67,0x65,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x2f,0x2a,0x20,0x6e,0x63,0x68,0x77,0x20,0x2a,0x2f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73
,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x68,0x65,0x
69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x28,0x28,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x2b,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x20,0x20,0x20,0x3d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x2d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x20,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0
x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,
0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x76,0x61,0x6c,0x75,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x6f,0x6e,0x6c,0x79,0x20,0x66,0x6f,0x72,0x20,0x64,0x65,0x62,0x75,0x67,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x64,0x61,0x74,0x61,0x20,0x66,0x72,0x6f,0x6d,0x20,0x69,0x6d,0x61,0x67,0x65,0x28,0x62,0x20,0x68,0x2c,0x20,0x69,0x63,0x2f,0x34,0x20,0x77,0x20,0x69,0x63,0x34,0x29,0x20,0x74,0x6f,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x6e,0x68,0x77,0x63,0x29,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x74,0x6f,0x5f,0x6e,0x68,0x77,0x63,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x
47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x2f,0x2a,0x20,0x6e,0x68,0x77,0x63,0x20,0x2a,0x2f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,
0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x77,0x69,0x64,0x74,0x68,0x29,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x28,0x28,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x2b,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x63,0x6f,0x6f,0x72,0x64,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0
x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x63,0x6f,0x6f,0x72,0x64,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x2d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x76,0x61,0x6c,0x75,0x65,0x73,0x2c,0x20,0x30,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0
x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x6f,0x6e,0x6c,0x79,0x20,0x66,0x6f,0x72,0x20,0x64,0x65,0x62,0x75,0x67,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x64,0x61,0x74,0x61,0x20,0x66,0x72,0x6f,0x6d,0x20,0x69,0x6d,0x61,0x67,0x65,0x28,0x62,0x20,0x68,0x2c,0x20,0x69,0x63,0x2f,0x34,0x20,0x77,0x20,0x69,0x63,0x34,0x29,0x20,0x74,0x6f,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x6e,0x68,0x77,0x63,
0x29,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x74,0x6f,0x5f,0x6e,0x63,0x68,0x77,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x2f,0x2a,0x20,0x6e,0x63,0x68,0x77,0x20,0x2a,0x2f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x
67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x77,0x69,0x64,0x74,0x68,0x29,0x20,0x2a,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x28,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x2b,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x20,0x20,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75
,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x20,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x2d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0
x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x
74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x20,0x61,0x72,0x67,0x20,0x61,0x73,0x20,0x34,0x20,0x61,0x6c,0x69,0x67,0x6e,0x6d,0x65,0x6e,0x74,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x61,0x72,0x67,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x74,0x6f,0x5f,0x69,0x6d,0x61,0x67,0x65,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x6f,0x75,0x6e,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x63,0x6f,0x75,0x6e,0x74,0x20,0x2d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,
0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x30,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x
6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x20,0x3d,0x20,0x2a,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x29,0x3b,0xa,0x7d,0xa,0xa,0x2f,0x2f,0x20,0x6f,0x6e,0x6c,0x79,0x20,0x66,0x6f,0x72,0x20,0x64,0x65,0x62,0x75,0x67,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x61,0x72,0x67,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x74,0x6f,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x6f,0x75,0x6e,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x
65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x63,0x6f,0x6f,0x72,0x64,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x63,0x6f,0x6f,0x72,0x64,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x20,0x3d,0x20,0x63,0x6f,0x75,0x6e,0x74,0x20,0x2d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3c,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x77,0x69,0x74,0x63,0x68,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x61,0x73,0x65,0x20,0x33,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b
,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x32,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x73,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x61,0x73,0x65,0x20,0x32,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x31,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x73,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x61,0x73,0x65,0x20,0x31,0x3a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x73,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x76,0x61,0x6c,0x75,0x65,0x73,0x2c,0x20,0x30,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x76,0x61,0x6c,0x75,0x65,0x73,0x2c,0x20,0x30,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x
66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x5f,0x34,0x5f,0x6f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,
0x6f,0x66,0x66,0x73,0x65,0x74,0x5d,0x20,0x3d,0x20,0x76,0x61,0x6c,0x75,0x65,0x73,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "winogradTransformDest2_3_1", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x77,0x69,0x6e,0x6f,0x67,0x72,0x61,0x64,0x54,0x72,0x61,0x6e,0x73,0x66,0x6f,0x72,0x6d,0x44,0x65,0x73,0x74,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x2f,0x2f,0x20,0x30,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x42,0x69,0x61,0x73,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2f,0x2f,0x20,0x33,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x2f,0x2f,0x20,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x58,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x59,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0
x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x2a,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x25,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2f,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x58,0x2c,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x59,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x20,0x20,0x3d,0x20,0x28,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x33,0x29,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x58,0x4f,0x7
2,0x69,0x67,0x69,0x6e,0x20,0x3d,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x58,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x59,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2a,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2b,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x25,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x7a,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x25,0x20,0x64,0x73,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x62,0x69,0x61,0x73,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x42,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x7a,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2f,0x20,0x64,0x73,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x62,0x61,0x74,0x63,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x2e,0x79,0x20,0x2a,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x7
4,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x2e,0x78,0x20,0x2a,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x30,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x33,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x2
0,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x34,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x35,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x36,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x37,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x38,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x5
3,0x31,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x39,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x30,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x31,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x32,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x33,0x2c,0x20,0x64,0x73,0x74,0x59,0x
29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x34,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x35,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x30,0x30,0x20,0x2b,0x20,0x53,0x30,0x31,0x20,0x2b,0x20,0x53,0x30,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x31,0x30,0x20,0x2b,0x20,0x53,0x31,0x31,0x20,0x2b,0x20,0x53,0x31,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x32,0x30,0x20,0x2b,0x20,0x53,0x32,0x31,0x20,0x2b,0x20,0x53,0x32,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x33,0x30,0x20,0x2b,0x20,0x53,0x33,0x31,0x20,0x2b,0x20,0x53,0x33,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x30,0x31,0x20,0x2d,0x20,0x53,0x30,0x32,0x20,0x2b,0x20,0x53,0x30,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x31,0x31,0x20,0x2d,0x20,0x53,0x31,0x32,0x20,0x2b,0x20,0x53,0x31,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x32,0x31,0x20,0x2d,0x20,0x53,0x32,0x32,0x20,0x2b,0x20,0x53,0x32,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x33,0x31,0x20,0x2d,0x20,0x53,0x33,0x32,0x20,0x2b,0x20,0x53,0x33,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x78,0x20,0x3c,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x6f,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x20,0x2b,0x20,0x6f,0x7a,0x20,0x2a,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20
,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x20,0x3d,0x20,0x62,0x69,0x61,0x73,0x20,0x2b,0x20,0x6d,0x30,0x30,0x20,0x2b,0x20,0x6d,0x31,0x30,0x20,0x2b,0x20,0x6d,0x32,0x30,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x36,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x
6f,0x78,0x20,0x3c,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x6f,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x20,0x2b,0x20,0x6f,0x7a,0x20,0x2a,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x20,0x3d,0x20,0x62,0x69,0x61,0x73,0x20,0x2b,0x20,0x6d,0x31,0x30,0x20,0x2d,0x20,0x6d,0x32,0x30,0x20,0x2b,0x20,0x6d,0x33,0x30,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x36,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x
28,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x78,0x20,0x3c,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x6f,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x20,0x2b,0x20,0x6f,0x7a,0x20,0x2a,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x20,0x3d,0x20,0x62,0x69,0x61,0x73,0x20,0x2b,0x20,0x6d,0x30,0x31,0x20,0x2b,0x20,0x6d,0x31,0x31,0x20,0x2b,0x20,0x6d,0x32,0x31,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x36,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x78,0x20,0x3c,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x6f,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x20,
0x3d,0x20,0x6f,0x78,0x20,0x2b,0x20,0x6f,0x7a,0x20,0x2a,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x20,0x3d,0x20,0x62,0x69,0x61,0x73,0x20,0x2b,0x20,0x6d,0x31,0x31,0x20,0x2d,0x20,0x6d,0x32,0x31,0x20,0x2b,0x20,0x6d,0x33,0x31,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x36,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0x
a,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "conv_2d", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x69,0x2c,0x20,0x62,0x61,0x73,0x65,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x23,0x23,0x69,0x20,0x2b,0x20,0x62,0x61,0x73,0x65,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x23,0x23,0x69,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x62,0x5f,0x76,0x61,0x6c,0x75,0x65,0x29,0x29,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x69,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x23,0x23,0x69,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x23,0x23,0x69,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x23,0x23,0x69,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,
0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x23,0x23,0x69,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x20,0x20,0x20,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x5f,0x4f,0x50,0x54,0x28,0x69,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x5f,0x73,0x6d,0x23,0x23,0x69,0x5b,0x6c,0x6f,0x63,0x61,0x6c,0x5f,0x69,0x64,0x78,0x5d,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x5f,0x73,0x6d,0x23,0x23,0x69,0x5b,0x6c,0x6f,0x63,0x61,0x6c,0x5f,0x69,0x64,0x78,0x5d,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x5f,0x73,0x6d,0x23,0x23,0x69,0x5b,0x6c,0x6f,0x63,0x61,0x6c,0x5f,0x69,0x64,0x78,0x5d,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x5f,0x73,0x6d,0x23,0x23,0x69,0x5b,0x6c,0x6f,0x63,0x61,0x6c,0x5f,0x69,0x64,0x78,0x5d,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x6f,0x75,0x74,0x23,0x23,0x69,0x29,0x3b,0x20,0x20,0x20,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5
f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6
e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x55,0x4e,0x49,0x54,0x20,0x34,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x5f,0x32,0x64,0x5f,0x31,0x78,0x31,0x5f,0x6d,0x61,0x6c,0x69,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x62,0x69,0x61,0x73,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x2c,0xa,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0x20,0x2f,0x2f,0x63,0x2f,0x34,0x20,0x77,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0x20,0x2f,0x2f,0x62,0x20,0x68,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x34,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63
,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x29,0x62,0x69,0x61,0x73,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x30,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x31,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x33,0x3b,0x20,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x31,0x36,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x31,0x36,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x30,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x6
4,0x74,0x68,0x5f,0x69,0x64,0x78,0x32,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x33,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x33,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x3b,0x20,0x2b,0x2b,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x2c,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x2a,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x30,0x2c,0x20,
0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x31,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x32,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x32,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x33,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x33,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x66,0x66,0x73,0x65,0x74,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x29,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0
x31,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x29,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x32,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x29,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x33,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x29,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x2e,0x78,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x69,0x6e,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x2e,0x79,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x69,0x6e,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x2e,0x7a,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x69,0x6e,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x2e,0x77,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x69,0x6e,0x30,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x2e,0x78,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x69,0x6e,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x2e,0x79,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x69,0x6e,0x31,0x29,0x3b,0xa,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x2e,0x7a,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x69,0x6e,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x2e,0x77,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x69,0x6e,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x2e,0x78,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x69,0x6e,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x2e,0x79,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x69,0x6e,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x2e,0x7a,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x69,0x6e,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x2e,0x77,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x69,0x6e,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x2e,0x78,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x69,0x6e,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x2e,0x79,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x69,0x6e,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x2e,0x7a,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x69,0x6e,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x2e,0x77,0x20,0x2b,0x3d,0x20,0x64,0x6f,0x74,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x69,0x6e,0x33,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x2
0,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x62,0x61,0x73,0x65,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x2a,0x6f,0x75,0x74,0x5f,0x77,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x6
9,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x20,0x2d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x33,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0
x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x6
4,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x5f,0x32,0x64,0x5f,0x31,0x78,0x31,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x62,0x69,0x61,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63
,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x6f,0x77,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x6f,0x6c,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0x20,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0x20,0x2f,0x2f,0x63,0x2f,0x34,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0x20,0x2f,0x2f,0x77,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0x20,0x2f,0x2f,0x62,0x20,0x68,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x34,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x62,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0
x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x5f,0x73,0x6d,0x30,0x5b,0x55,0x4e,0x49,0x54,0x2a,0x55,0x4e,0x49,0x54,0x5d,0x3b,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x5f,0x73,0x6d,0x31,0x5b,0x55,0x4e,0x49,0x54,0x2a,0x55,0x4e,0x49,0x54,0x5d,0x3b,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x5f,0x73,0x6d,0x32,0x5b,0x55,0x4e,0x49,0x54,0x2a,0x55,0x4e,0x49,0x54,0x5d,0x3b,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x6c,0x6f,0x63,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x5f,0x73,0x6d,0x33,0x5b,0x55,0x4e,0x49,0x54,0x2a,0x55,0x4e,0x49,0x54,0x5d,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x74,0x69,0x6c,0x65,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x20,0x2b,0x20,0x55,0x4e,0x49,0x54,0x20,0x2d,0x31,0x29,0x2f,0x20,0x55,0x4e,0x49,0x54,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x6f,0x6c,0x5f,0x78,0x5f,0x75,0x6e,0x69,0x74,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x63,0x6f,0x6c,0x2c,0x20,0x55,0x4e,0x49,0x54,0x29,0x3b,0xa,0x20,0x2
0,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x63,0x6f,0x6c,0x5f,0x78,0x5f,0x75,0x6e,0x69,0x74,0x20,0x2b,0x20,0x72,0x6f,0x77,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x74,0x20,0x3d,0x20,0x30,0x3b,0x20,0x74,0x20,0x3c,0x20,0x74,0x69,0x6c,0x65,0x73,0x3b,0x20,0x2b,0x2b,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x74,0x2c,0x20,0x55,0x4e,0x49,0x54,0x2c,0x20,0x72,0x6f,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x73,0x6d,0x30,0x5b,0x69,0x6e,0x5f,0x69,0x6e,0x64,0x65,0x78,0x5d,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x73,0x6d,0x31,0x5b,0x69,0x6e,0x5f,0x69,0x6e,0x64,0x65,0x78,0x5d,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2b,0x31,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x73,0x6d,0x32,0x5b,0x69,0x6e,0x5f,0x69,0x6e,0x64,0x65,0x78,0x5d,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x
28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2b,0x32,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x73,0x6d,0x33,0x5b,0x69,0x6e,0x5f,0x69,0x6e,0x64,0x65,0x78,0x5d,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x5f,0x63,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2b,0x33,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x62,0x61,0x72,0x72,0x69,0x65,0x72,0x28,0x43,0x4c,0x4b,0x5f,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x4d,0x45,0x4d,0x5f,0x46,0x45,0x4e,0x43,0x45,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x69,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x74,0x2c,0x20,0x55,0x4e,0x49,0x54,0x2a,0x34,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x28,0x69,0x6e,0x74,0x20,0x6b,0x20,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x20,0x3c,0x20,0x55,0x4e,0x49,0x54,0x3b,0x20,0x6b,0x2b,0x2b,0x29,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x63,0x78,0x34,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x2c,0x20,0x34,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x69,0x6e,0x64,0x65,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x69,0x6e,0x74,0x20,0x6c,0x6f,0x63,0x61,0x6c,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x63,0x6f,0x6c,0x5f,0x78,0x5f,0x75,0x6e,0x69,0x74,0x20,0x2b,0x20,0x6b,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73
,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x63,0x78,0x34,0x2b,0x2b,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x63,0x78,0x34,0x2b,0x2b,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x63,0x78,0x34,0x2b,0x2b,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x63,0x78,0x34,0x2b,0x2b,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x5f,0x4f,0x50,0x54,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x5f,0x4f,0x50,0x54,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x5f,0x4f,0x50,0x54,0x28,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x5f,0x4f,0x50,0x54,0x28,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x62,0x61,0x72,0x72,0x69,0x65,0x72,0x28,0x43,0x4c,0x4b,0x5f,0x4c,0x4f,0x43,0x41,0x4c,0x5f,0x4d,0x45,0x4d,0x5f,0x46,0x45,0x4e,0x43,0x45,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x2
0,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x62,0x61,0x73,0x65,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x69,0x64,0x78,0x2a,0x6f,0x75,0x74,0x5f,0x77,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x20,0x2d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20
,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x33,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0
x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x5f,0x32,0x64,0x5f,0x31,0x78,0x31,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x62,0x69,0x61,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0
x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x34,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x7
8,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x62,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x30,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x6
4,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2a,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x31,0x20,0x3d,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x30,0x20,0x2b,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x32,0x20,0x3d,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x31,0x20,0x2b,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x33,0x20,0x3d,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x32,0x20,0x2b,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x30,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x30,0x2c,0x20,0x49,0x4e,0x54,0x5f,0x4d,0x49,0x4e,0x2c,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x30,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x31,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x31,0x2c,0x20,0x49,0x4e,0x54,0x5f,0x4d,0x49,0x4e,0x2c,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x31,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70
,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x32,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x32,0x2c,0x20,0x49,0x4e,0x54,0x5f,0x4d,0x49,0x4e,0x2c,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x32,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x33,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x33,0x2c,0x20,0x49,0x4e,0x54,0x5f,0x4d,0x49,0x4e,0x2c,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x33,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x6e,0x64,0x65,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e
,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x3b,0x20,0x2b,0x2b,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x2
0,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x30,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x32,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x33,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x69,0x6e,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x33,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0
x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x30,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x33,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0
x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f
,0x75,0x74,0x32,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x62,0x61,0x73,0x65,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f
,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x33,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,
0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,
0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x5f,0x32,0x64,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x62,0x69,0x61,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x6c,0x65,0x6e,0x67,0x74,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f
,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x62,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x6f,0x75,0x74,0x30,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3c,0x3c,0x32,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x20,0x2b,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x20,0x2b,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2a,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x20,0x2b,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2a,0x20,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x20,0x20,
0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x30,0x2c,0x20,0x28,0x2d,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x3c,0x20,0x30,0x29,0x2c,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x65,0x6e,0x64,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x6d,0x61,0x64,0x32,0x34,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x29,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6d,0x75,0x6c,0x32,0x34
,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x29,0x20,0x2b,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x30,0x2c,0x20,0x28,0x2d,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x31,0x29,0x20,0x2f,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x3c,0x20,0x30,0x29,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x30,0x2c,0x20,0x69,0x6e,0x31,0x2c,0x20,0x69,0x6e,0x32,0x2c,0x20,0x69,0x6e,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x6c,0x65,0x6e,0x67,0x74,0x68,0x3b,0x20,0x2b,0x2b,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20
,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x78,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x79,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x79,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x3b,0x20,0x69,0x79,0x20,0x3c,0x20,0x69,0x6e,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x65,0x6e,0x64,0x3b,0x20,0x69,0x79,0x20,0x2b,0x3d,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x62,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x69,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x77,0x20,0x3d,0x20,0x30,0x3b,0x20,0x77,0x20,0x3c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0x20,0x77,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x77,0x2c,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0
x30,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x33,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x61,0x73,0x65,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x78,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x79,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x78,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x79,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x20,0x3
d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x78,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x79,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x78,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x33,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x79,0x5f,0x69,0x64,0x78,0x2b,0x2b,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x66,0x6d,0
x61,0x78,0x28,0x6f,0x75,0x74,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x31,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x32,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x33,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x62,0x61,0x73,0x65,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f
,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x62,0x61,0x73,0x65,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x78,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x
29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x33,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x2
9,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d, } - }, -{ - "lrn", - { 0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x6c,0x72,0x6e,0x5f,0x62,0x75,0x66,0x66,0x65,0x72,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x54,0x65,0x6d,0x70,0x50,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x54,0x65,0x6d,0x70,0x50,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x
6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6c,0x6f,0x63,0x61,0x6c,0x53,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x61,0x6c,0x70,0x68,0x61,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x66,0x6c,0x6f,0x61,0x74,0x20,0x62,0x65,0x74,0x61,0x29,0xa,0x7b,0xa,0x9,0x69,0x6e,0x74,0x33,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x33,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x29,0x3b,0xa,0x9,0x69,0x6e,0x74,0x33,0x20,0x69,0x6d,0x67,0x49,0x6e,0x66,0x6f,0x20,0x3d,0x20,0x69,0x6d,0x67,0x53,0x69,0x7a,0x65,0x2e,0x78,0x79,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x77,0x20,0x3d,0x20,0x69,0x6d,0x67,0x49,0x6e,0x66,0x6f,0x2e,0x78,0x2a,0x69,0x6d,0x67,0x49,0x6e,0x66,0x6f,0x2e,0x79,0x3b,0xa,0x9,0xa,0x9,0x69,0x66,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x69,0x6d,0x67,0x49,0x6e,0x66,0x6f,0x2e,0x78,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x69,0x6d,0x67,0x49,0x6e,0x66,0x6f,0x2e,0x79,0x29,0xa,0x9,0x7b,0xa,0x9,0x9,0x66,0x6c,0x6f,0x61,0x74,0x20,0x73,0x75,0x6d,0x20,0x3d,0x20,0x30,0x2e,0x30,0x66,0x3b,0xa,0x9,0x9,0x69,0x6e,0x74,0x20,0x68,0x61,0x6c,0x66,0x53,0x69,0x7a,0x65,0x20,0x3d,0x20,0x6c,0x6f,0x63,0x61,0x6c,0x53,0x69,0x7a,0x65,0x2f,0x32,0x3b,0xa,0x9,0x9,0x66,0x6f,0x72,0x28,0x69,0x6e,0x74,0x20,0x63,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x7a,0x20,0x2d,0x20,0x68,0x61,0x6c,0x66,0x53,0x69,0x7a,0x65,0x3b,0x20,0x63,0x20,0x3c,0x20,0x70,0x6f,0x73,0x2e,0x7a,0x20,0x2b,
0x20,0x68,0x61,0x6c,0x66,0x53,0x69,0x7a,0x65,0x3b,0x20,0x63,0x2b,0x2b,0x29,0xa,0x9,0x9,0x7b,0xa,0x9,0x9,0x9,0x69,0x66,0x28,0x63,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x63,0x20,0x3e,0x3d,0x20,0x69,0x6d,0x67,0x49,0x6e,0x66,0x6f,0x2e,0x7a,0x29,0x20,0x63,0x6f,0x6e,0x74,0x69,0x6e,0x75,0x65,0x3b,0xa,0x9,0x9,0x9,0x69,0x6e,0x74,0x20,0x69,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2b,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2a,0x20,0x69,0x6d,0x67,0x49,0x6e,0x66,0x6f,0x2e,0x78,0x20,0x2b,0x20,0x63,0x20,0x2a,0x20,0x68,0x77,0x3b,0xa,0x9,0x9,0x9,0x73,0x75,0x6d,0x20,0x2b,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x54,0x65,0x6d,0x70,0x50,0x74,0x72,0x5b,0x69,0x6e,0x64,0x65,0x78,0x5d,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x54,0x65,0x6d,0x70,0x50,0x74,0x72,0x5b,0x69,0x6e,0x64,0x65,0x78,0x5d,0x3b,0xa,0x9,0x9,0x7d,0xa,0xa,0x9,0x9,0x69,0x6e,0x74,0x20,0x64,0x61,0x74,0x61,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2b,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2a,0x20,0x69,0x6d,0x67,0x49,0x6e,0x66,0x6f,0x2e,0x78,0x20,0x2b,0x20,0x70,0x6f,0x73,0x2e,0x7a,0x20,0x2a,0x20,0x68,0x77,0x3b,0xa,0x9,0x9,0x6f,0x75,0x74,0x70,0x75,0x74,0x54,0x65,0x6d,0x70,0x50,0x74,0x72,0x5b,0x64,0x61,0x74,0x61,0x49,0x6e,0x64,0x65,0x78,0x5d,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x54,0x65,0x6d,0x70,0x50,0x74,0x72,0x5b,0x64,0x61,0x74,0x61,0x49,0x6e,0x64,0x65,0x78,0x5d,0x20,0x2a,0x20,0x70,0x6f,0x77,0x28,0x31,0x2e,0x30,0x66,0x20,0x2b,0x20,0x61,0x6c,0x70,0x68,0x61,0x20,0x2a,0x20,0x73,0x75,0x6d,0x2c,0x20,0x2d,0x62,0x65,0x74,0x61,0x29,0x3b,0xa,0x9,0x7d,0xa,0x7d,0xa, } - }, -{ - "conv_2d_int8", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x5f,0x32,0x64,0x31,0x78,0x31,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x69,0x6e,0x74,0x2a,0x20,0x62,0x69,0x61,0x73,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x
5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,
0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x34,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x62,0x69,0x61,0x73,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x31,0x36,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20,0x7b,0x6f,0x75,0x74,0x30,0x2c,0x20,0x6f,0x75,0x74,0x30,0x2c,0x20,0x6f,0x75,0x74,0x30,0x2c,0x20,0x6f,0x75,0x74,0x30,0x7d,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x31,0x36,0x20,0x69,0x6e,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x31,0x36,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62
,0x6c,0x6f,0x63,0x6b,0x73,0x3b,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2b,0x2b,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x77,0x5f,0x69,0x64,0x78,0x29,0x2a,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x31,0x36,0x28,0x76,0x6c,0x6f,0x61,0x64,0x31,0x36,0x28,0x30,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x29,0x2c,0x31,0x36,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x31,0x36,0x28,0x76,0x6c,0x6f,0x61,0x64,0x31,0x36,0x28,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x30,0x31,0x32,0x33,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x20,
0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x34,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x30,0x31,0x32,0x33,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x38,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x30,0x31,0x32,0x33,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x63,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x30,0x31,0x32,0x33,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x31,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x34,0x35,0x36,0x37,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x35,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x34,0x35,0x36,0x37,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x39,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x34,0x35,0x36,0x37,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x64,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x34,0x35,0x36,0x37,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x29,0x
3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x32,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x38,0x39,0x61,0x62,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x36,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x38,0x39,0x61,0x62,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x61,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x38,0x39,0x61,0x62,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x65,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x38,0x39,0x61,0x62,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x33,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x63,0x64,0x65,0x66,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x37,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x63,0x64,0x65,0x66,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x62,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0
x74,0x73,0x2e,0x73,0x63,0x64,0x65,0x66,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x2e,0x73,0x66,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x63,0x64,0x65,0x66,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x31,0x36,0x29,0x30,0x29,0x3b,0xa,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x73,0x63,0x61,0x6c,0x65,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x2a,0x29,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x31,0x36,0x20,0x73,0x63,0x61,0x6c,0x65,0x31,0x36,0x20,0x3d,0x20,0x7b,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x73,0x63,0x61,0x6c,0x65,0x7d,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x77,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x2d,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0xa,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x31,0x36,0x20,0x6f,0x75,0x74,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x31,0x36,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x31,0x36,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x31,0x36,0x20,0x6f,0x75,0x74,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x31,0x36,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x31,0x36,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x5f,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x31,0x36,0x28,0x6f,0x75,0x74,0x5f,0x63,0x2c,0x20,0x30,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2a,0x34,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x31,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x32,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,
0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x30,0x5f,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x31,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x31,0x5f,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x32,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x32,0x5f,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x30,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x31,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2b,0x31,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x32,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2b,0x32,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x2
0,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x31,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x30,0x5f,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x31,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x31,0x5f,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x30,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x31,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2b,0x31,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0
x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x30,0x5f,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x30,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x5f,0x32,0x64,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x69,0x6e,0x74,0x2a,0x20,0x62,0x69,0x61,0x73,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,
0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x5f,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x5f,0x69,0x64,0x78,0x2c,0x20,0x34,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x62,0x69,0x61,0x73,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x31,0x36,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20,0x7b,0x6f,0x75,0x74,0x30,0x2c,0x20,0x6f,0x75,0x74,0x30,0x2c,0x20,0x6f,0x75,0x74,0x30,0x2c,0x20,0x6f,0x75,0x74,0x30,0x7d,0x3b,0xa,0xa,0x2f,0x2f,0x64,0x65,0x61,0x6c,0x20,0x77,0x69,0x74,0x68,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x73,0x69,0x7a,0x65,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x31,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x2b,0x31,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,
0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x32,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x2b,0x32,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x2b,0x33,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0xa,0x2f,0x2f,0x64,0x65,0x61,0x6c,0x20,0x77,0x69,0x74,0x68,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x73,0x69,0x7a,0x65,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x30,0x2c,0x20,0x69,0x6e,0x31,0x2c,0x20,0x69,0x6e,0x32,0x2c,0x20,0x69,0x6e,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x31,0x36,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x3b,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2b,0x2b
,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x79,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x79,0x20,0x3c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0x20,0x69,0x79,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x78,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x78,0x20,0x3c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0x20,0x69,0x78,0x2b,0x2b,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x69,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x30,0x20,0x3d,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x30,0x20,0x2b,0x20,0x69,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x31,0x20,0x3d,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x31,0x20,0x2b,0x20,0x69,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x32,0x20,0x3d,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x32,0x20,0x2b,0x20,0x69,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x33,0x20,0x3d,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x33,0x20,0x2b,0x20,0x69,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x28,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20
,0x3e,0x3d,0x20,0x30,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x30,0x20,0x3e,0x3d,0x20,0x30,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x30,0x20,0x3c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x31,0x20,0x3e,0x3d,0x20,0x30,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x31,0x20,0x3c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x32,0x20,0x3e,0x3d,0x20,0x30,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x33,0x20,0x3c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x33,0x20,0x3e,0x3d,0x20,0x30,0x20,0x26,0x26,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x33,0x20,0x3c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x30,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x31,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0
x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x32,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x33,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x68,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x5f,0x69,0x64,0x78,0x33,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x73,0x61,0x74,0x28,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x69,0x6e,0x5f,0x69,0x64,0x78,0x30,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x20,0x2a,0x29,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x31,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0
x74,0x34,0x5f,0x73,0x61,0x74,0x28,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x69,0x6e,0x5f,0x69,0x64,0x78,0x31,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x20,0x2a,0x29,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x32,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x73,0x61,0x74,0x28,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x69,0x6e,0x5f,0x69,0x64,0x78,0x32,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x20,0x2a,0x29,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x33,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x73,0x61,0x74,0x28,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x69,0x6e,0x5f,0x69,0x64,0x78,0x33,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x20,0x2a,0x29,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x28,0x69,0x79,0x20,0x2a,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2a,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2a,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x20,0x2b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x78,0x2a,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2a,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0
x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x20,0x2b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2a,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x29,0x2a,0x31,0x36,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x31,0x36,0x28,0x76,0x6c,0x6f,0x61,0x64,0x31,0x36,0x28,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x30,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x30,0x31,0x32,0x33,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x31,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x30,0x31,0x32,0x33,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x32,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x30,0x31,0x32,0x33,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x3b,0xa,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x33,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x30,0x31,0x32,0x33,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x30,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x34,0x35,0x36,0x37,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x31,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x34,0x35,0x36,0x37,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x32,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x34,0x35,0x36,0x37,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x33,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x34,0x35,0x36,0x37,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0
x30,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x38,0x39,0x61,0x62,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x31,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x38,0x39,0x61,0x62,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x32,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x38,0x39,0x61,0x62,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x33,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x38,0x39,0x61,0x62,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x30,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x63,0x64,0x65,0x66,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x31,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x63,0x64,0x65,0x66,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x32,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x63,0x64,0x65,0x66,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x69,0x6e,0x33,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2e,0x73,0x63,0x64,0x65,0x66,0x2c,0x20,0x6f,0x75,0x74,0x2e,0x73,0x63,0x64,0x65,0x66,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x31,0x36,0x29,0x30,0x29,0x3b,0xa,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x73,0x63,0x61,0x6c,0x65,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x66,0x6c,0x6f,0x61,0x74,0x2a,0x29,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x31,0x36,0x20,0x73,0x63,0x61,0x6c,0x65,0x31,0x36,0x20,0x3d,0x20,0x7b,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x73,0x63,0x61,0x6c,0x65,0x2c,0x20,0x73,0x63,0x61,0x6c,0x65,0x7d,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x6f,0x75
,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x63,0x5f,0x62,0x5f,0x69,0x64,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x68,0x5f,0x69,0x64,0x78,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x77,0x34,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x31,0x36,0x20,0x6f,0x75,0x74,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x31,0x36,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x31,0x36,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x31,0x36,0x20,0x6f,0x75,0x74,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x31,0x36,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x31,0x36,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x5f,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x31,0x36,0x28,0x6f,0x75,0x74,0x5f,0x63,0x2c,0x20,0x30,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2a,0x34,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x30,
0x31,0x32,0x33,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x31,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x32,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x38,0x39,0x61,0x62,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x30,0x5f,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x31,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x31,0x5f,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x32,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x32,0x5f,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x30,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5
f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x31,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2b,0x31,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x32,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2b,0x32,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x31,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x34,0x35,0x36,0x37,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x30,0x5f,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x31,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0
x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x31,0x5f,0x66,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x30,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x31,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2b,0x31,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x66,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x34,0x5f,0x72,0x74,0x70,0x28,0x6f,0x75,0x74,0x2e,0x73,0x30,0x31,0x32,0x33,0x29,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x30,0x5f,0x63,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x6f,0x75,0x74,0x30,0x5f,0x66,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x30,0x5f,0x63,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x29,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "depthwise_conv2d", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x69,0x2c,0x20,0x62,0x61,0x73,0x65,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x4f,0x66,0x66,0x73,0x65,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x23,0x23,0x69,0x20,0x2b,0x20,0x62,0x61,0x73,0x65,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x4f,0x66,0x66,0x73,0x65,0x74,0x23,0x23,0x69,0x20,0x3d,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x43,0x75,0x72,0x49,0x64,0x78,0x20,0x2b,0x20,0x69,0x6e,0x4f,0x66,0x66,0x73,0x65,0x74,0x23,0x23,0x69,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x4f,0x66,0x66,0x73,0x65,0x74,0x23,0x23,0x69,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x4f,0x66,0x66,0x73,0x65,0x74,0x23,0x23,0x69,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x4f,0x66,0x66,0x73,0x65,0x74,0x23,0x23,0x69,0x2c,0x20,0x69,0x6e,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x43,0x41,0x4c,0x43,0x55,0x4c,0x41,0x54,0x45,0x5f,0x4f,0x55,0x54,0x50,0x55,0x54,0x28,0x69,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x6f,0x7
5,0x74,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x23,0x23,0x69,0x29,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x29,0x20,0x7b,0x20,0x5c,0xa,0
x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x64,0x65,0x70,0x74,0x68,0x77,0x69,0x73,0x65,0x5f,0x63,0x6f,0x6e,0x76,0x32,0x64,0x5f,0x73,0x31,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x62,0x69,0x61,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x53,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x53,0x68,0x61,0x70,0x65,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x6f,0x75,0x74,0x43,0x68,
0x61,0x6e,0x6e,0x65,0x6c,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x77,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x33,0x29,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2f,0x20,0x6f,0x77,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x25,0x20,0x6f,0x77,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x62,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x6f,0x75,0x74,0x56,0x61
,0x6c,0x75,0x65,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x3d,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x34,0x20,0x3d,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x34,0x20,0x2d,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x20,0x2b,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x20,0x2b,0x20,0x33,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x4
8,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x28,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x43,0x75,0x72,0x49,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x69,0x6e,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x30,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x43,0x75,0x72,0x49,0x64,0x78,0x20,0x2b,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x31,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x43,0x75,0x72,0x49,0x64,0x78,0x20,0x2b,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x31,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x7
3,0x65,0x74,0x31,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x31,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x32,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x43,0x75,0x72,0x49,0x64,0x78,0x20,0x2b,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x32,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x32,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x32,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x31,0x2c,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x32,0x2c,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x68,0x20,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x68,0x20,0x3c,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0x20,0x6b,0x68,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x2b,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x64,0x78,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x2b,0x2b,0x3b,0xa,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x30,0x2c,0x20,0x69,0x6e,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x31,0x2c,0x20,0x69,0x6e,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x32,0x2c,0x20,0x69,0x6e,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x77,0x20,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x77,0x20,0x3c,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0x20,0x6b,0x77,0x2b,0x2b,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x49,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x68,0x2c,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x6b,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x
6e,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x33,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x3d,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x33,0x20,0x2b,0x20,0x6b,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x43,0x75,0x72,0x49,0x64,0x78,0x20,0x2b,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x69,0x6e,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x66,0x69,0x6c,0x74,0x65,0x72,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x66,0x69,0x6c,0x74,0x65,0x72,0x49,0x64,0x78,0x2c,0x20,0x69,0x6e,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0
x65,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x31,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x32,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x33,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x
56,0x61,0x6c,0x75,0x65,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x2
9,0x20,0x2b,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x33,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,
0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0
x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x64,0x65,0x70,0x74,0x68,0x77,0x69,0x73,0x65,0x5f,0x63,0x6f,0x6e,0x76,0x32,0x64,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x32,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x62,0x69,0x61,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x53,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x53,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x53,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x53,0x68,0x61,0x70,0x65,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0
x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x32,0x28,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x77,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2b,0x20,0x33,0x29,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2f,0x20,0x6f,0x77,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x25,0x20,0x6f,0x77,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x62,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0
x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x3d,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x3c,0x3c,0x20,0x32,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x31,0x20,0x20,0x3d,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x30,0x20,0x2b,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x32,0x20,0x20,0x3d,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x31,0x20,0x2b,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x33,0x20,0x20,0x3d,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x32,0x20,0x2b,0x20,0x73,0x74,0x72,0x69
,0x64,0x65,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x2d,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x28,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x43,0x75,0x72,0x49,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x69,0x6e,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x68,0x20,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x68,0x20,0x3c,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0x20,0x6b,0x68,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x2b,0x20,0x6f,0x75,0x74,0x42,0x61,0x74,0x63,0x68,0x49,0x64,0x78,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0
x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x20,0x2b,0x3d,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x53,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x77,0x20,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x77,0x20,0x3c,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0x20,0x6b,0x77,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x49,0x64,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x68,0x2c,0x20,0x66,0x69,0x6c,0x74,0x65,0x72,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x6b,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x31,0x2c,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x32,0x2c,0x20,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6b,0x77,0x2c,0x20,0x64,0x69,0x6c,0x61,0x74,0x69,0x6f,0x6e,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x30,0x2c,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x31,0x2c,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x32,0x2c,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,
0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x52,0x45,0x41,0x44,0x5f,0x49,0x4e,0x50,0x55,0x54,0x5f,0x49,0x4d,0x41,0x47,0x45,0x28,0x33,0x2c,0x20,0x69,0x6e,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x66,0x69,0x6c,0x74,0x65,0x72,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x66,0x69,0x6c,0x74,0x65,0x72,0x49,0x64,0x78,0x2c,0x20,0x69,0x6e,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x31,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x32,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x56,0x61,0x6c,0x75,0x65,0x33,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x
45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x2c,0x20,0x2
8,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x30,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x36,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x42,0x6c,0x6f,0x63,0x6b,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x20,0x2b,0x20,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x42,0x6c,0x6f,0x63,0x6b,0x69,0x64,0x78,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3e,0x3d,0x20,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x29,0x3b,0xa,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x33,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x32,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x7
2,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x20,0x2b,0x20,0x31,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x57,0x69,0x64,0x74,0x68,0x49,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x49,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x56,0x61,0x6c,0x75,0x65,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "winogradTransformSource2_3_1", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x77,0x69,0x6e,0x6f,0x67,0x72,0x61,0x64,0x54,0x72,0x61,0x6e,0x73,0x66,0x6f,0x72,0x6d,0x53,0x6f,0x75,0x72,0x63,0x65,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x2f,0x2f,0x20,0x30,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0
x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x2f,0x2f,0x20,0x33,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x61,0x64,0x58,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x61,0x64,0x59,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2f,0x2f,0x20,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x58,0x2c,0x20,0x2f,0x2f,0x20,0x39,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x59,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x2a,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x73,0x72,0x63,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x25,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2f,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x58,0x2c,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x59,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x3b,0xa,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2f,0x20,0x73,0x72,0x63,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x5a,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x25,0x20,0x73,0x72,0x63,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x59,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x3d,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x20,0x3d,0x20,0x28,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x33,0x29,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x59,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x64,0x73,0x74,0x59,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x58,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x64,0x73,0x74,0x59,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x25,0x20,0x34,0x20,0x2b,0x20,0x34,0x20,0x2a,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x62,0x61,0x74,0x63,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x28,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x2e,0x78,0x29,0x
20,0x2a,0x20,0x32,0x20,0x2d,0x20,0x70,0x61,0x64,0x58,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x28,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x2e,0x79,0x29,0x20,0x2a,0x20,0x32,0x20,0x2d,0x20,0x70,0x61,0x64,0x59,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,
0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20
,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20
,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69
,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0
x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2
c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,
0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,
0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x
48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c
,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x30,0x20,0x3d,0x20,0x2b,0x53,0x30,0x30,0x20,0x2d,0x20,0x53,0x30,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x30,0x20,0x3d,0x20,0x2b,0x53,0x31,0x30,0x20,0x2d,0x20,0x53,0x31,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x30,0x20,0x3d,0x20,0x2b,0x53,0x32,0x30,0x20,0x2d,0x20,0x53,0x32,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x30,0x20,0x3d,0x20,0x2b,0x53,0x33,0x30,0x20,0x2d,0x20,0x53,0x33,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x30,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x30,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x31,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x31,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x32,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x32,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x33,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x33,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x30,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x30,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x31,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x31,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x32,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x32,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x33,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x53,0x33,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34
,0x20,0x6d,0x30,0x33,0x20,0x3d,0x20,0x2d,0x53,0x30,0x31,0x20,0x2b,0x20,0x53,0x30,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x33,0x20,0x3d,0x20,0x2d,0x53,0x31,0x31,0x20,0x2b,0x20,0x53,0x31,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x33,0x20,0x3d,0x20,0x2d,0x53,0x32,0x31,0x20,0x2b,0x20,0x53,0x32,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x33,0x20,0x3d,0x20,0x2d,0x53,0x33,0x31,0x20,0x2b,0x20,0x53,0x33,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x30,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x30,0x20,0x2d,0x20,0x6d,0x32,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x31,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x32,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x29,0x2c,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x31,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,
0x66,0x20,0x2a,0x20,0x6d,0x32,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x33,0x29,0x2c,0x20,0x2d,0x6d,0x31,0x30,0x20,0x2b,0x20,0x6d,0x33,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x34,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x31,0x20,0x2d,0x20,0x6d,0x32,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x35,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x31,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x32,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x36,0x29,0x2c,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x31,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x32,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,
0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x37,0x29,0x2c,0x20,0x2d,0x6d,0x31,0x31,0x20,0x2b,0x20,0x6d,0x33,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x38,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x32,0x20,0x2d,0x20,0x6d,0x32,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x39,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x31,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x32,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x30,0x29,0x2c,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x31,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x32,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x31,0x29,0x2c,0x20,0x2d,0x6d,0x31,0x32,0x20,0x2b,0x20,0x6d,0x33,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x32,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x33,0x20,0x2d,0x20,0x6d,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x33,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x31,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x34,0x29,0x2c,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x31,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x35,0x66,0x20,0x2a,0x20,0x6d,0x32,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x35,0x29,0x2c,0x20,0x2d,0x6d,0x31,0x33,0x20,0x2b,0x20,0x6d,0x33,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "winogradTransformDest2_5_1", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x77,0x69,0x6e,0x6f,0x67,0x72,0x61,0x64,0x54,0x72,0x61,0x6e,0x73,0x66,0x6f,0x72,0x6d,0x44,0x65,0x73,0x74,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x2f,0x2f,0x20,0x30,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x42,0x69,0x61,0x73,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0
x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2f,0x2f,0x20,0x33,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x2f,0x2f,0x20,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x58,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x59,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0
x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x2a,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x25,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2f,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x58,0x2c,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x59,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x20,0x20,0x3d,0x20,0x28,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x33,0x29,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x3d,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x
75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x58,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x59,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2a,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2b,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x25,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x7a,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x25,0x20,0x64,0x73,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x62,0x69,0x61,0x73,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x42,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x7a,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2f,0x20,0x64,0x73,0x74,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x62,0x61,0x74,0x63,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x2e,0x79,0x20,0x2a,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x2e,0x78,0x20,0x2a,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x30,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x33,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x34,0x2c,0x
20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x30,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x35,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x36,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x37,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x38,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x
73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x39,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x30,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x31,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x31,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x32,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x33,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x
6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x34,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x35,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x36,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x32,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x37,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x38,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x31,0x39,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x30,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x31,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x32,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x33,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0
x20,0x2a,0x20,0x32,0x33,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x34,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x34,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x34,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x35,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x34,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x36,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x34,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x37,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x34,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0
x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x38,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x34,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x32,0x39,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x35,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x33,0x30,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x35,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x33,0x31,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x35,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x33,0x32,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x35,0
x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x33,0x33,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x35,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x33,0x34,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x35,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x20,0x2b,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x33,0x35,0x2c,0x20,0x64,0x73,0x74,0x59,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x30,0x30,0x20,0x2b,0x20,0x53,0x30,0x31,0x20,0x2b,0x20,0x53,0x30,0x32,0x20,0x2b,0x20,0x53,0x30,0x33,0x20,0x2b,0x20,0x53,0x30,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x31,0x30,0x20,0x2b,0x20,0x53,0x31,0x31,0x20,0x2b,0x20,0x53,0x31,0x32,0x20,0x2b,0x20,0x53,0x31,0x33,0x20,0x2b,0x20,0x53,0x31,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x32,0x30,0x20,0x2b,0x20,0x53,0x32,0x31,0x20,0x2b,0x20,0x53,0x32,0x32,0x20,0x2b,0x20,0x53,0x32,0x33,0x20,0x2b,0x20,0x53,0x32,0x34,0x3b,0xa,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x33,0x30,0x20,0x2b,0x20,0x53,0x33,0x31,0x20,0x2b,0x20,0x53,0x33,0x32,0x20,0x2b,0x20,0x53,0x33,0x33,0x20,0x2b,0x20,0x53,0x33,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x34,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x34,0x30,0x20,0x2b,0x20,0x53,0x34,0x31,0x20,0x2b,0x20,0x53,0x34,0x32,0x20,0x2b,0x20,0x53,0x34,0x33,0x20,0x2b,0x20,0x53,0x34,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x35,0x30,0x20,0x20,0x3d,0x20,0x2b,0x53,0x35,0x30,0x20,0x2b,0x20,0x53,0x35,0x31,0x20,0x2b,0x20,0x53,0x35,0x32,0x20,0x2b,0x20,0x53,0x35,0x33,0x20,0x2b,0x20,0x53,0x35,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x30,0x31,0x20,0x2d,0x20,0x53,0x30,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x30,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x30,0x34,0x20,0x2b,0x20,0x53,0x30,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x31,0x31,0x20,0x2d,0x20,0x53,0x31,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x31,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x31,0x34,0x20,0x2b,0x20,0x53,0x31,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x32,0x31,0x20,0x2d,0x20,0x53,0x32,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x32,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x
2a,0x20,0x53,0x32,0x34,0x20,0x2b,0x20,0x53,0x32,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x33,0x31,0x20,0x2d,0x20,0x53,0x33,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x33,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x33,0x34,0x20,0x2b,0x20,0x53,0x33,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x34,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x34,0x31,0x20,0x2d,0x20,0x53,0x34,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x34,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x34,0x34,0x20,0x2b,0x20,0x53,0x34,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x35,0x31,0x20,0x20,0x3d,0x20,0x2b,0x53,0x35,0x31,0x20,0x2d,0x20,0x53,0x35,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x35,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x53,0x35,0x34,0x20,0x2b,0x20,0x53,0x35,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x78,0x20,0x3c,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x6f,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x20
,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x20,0x2b,0x20,0x6f,0x7a,0x20,0x2a,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x20,0x3d,0x20,0x62,0x69,0x61,0x73,0x20,0x2b,0x20,0x6d,0x30,0x30,0x20,0x2b,0x20,0x6d,0x31,0x30,0x20,0x2b,0x20,0x6d,0x32,0x30,0x20,0x2b,0x20,0x6d,0x33,0x30,0x20,0x2b,0x20,0x6d,0x34,0x30,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x36,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x29,0x2c,0x20,0x72
,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x78,0x20,0x3c,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x6f,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x20,0x2b,0x20,0x6f,0x7a,0x20,0x2a,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x20,0x3d,0x20,0x62,0x69,0x61,0x73,0x20,0x2b,0x20,0x6d,0x31,0x30,0x20,0x2d,0x20,0x6d,0x32,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x33,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x34,0x30,0x20,0x2b,0x20,0x6d,0x35,0x30,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20
,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x36,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x78,0x20,0x3c,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x6f,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x20,0x2b,0x20,0x6f,0x7a,0x20,0x2a,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x20,0x3d,0x20,0x62,0x69,0x61,0x73,0x20,0x2b,0x20,0x6d,0x30,0x31,0x20,0x2b,0x20,0x6d,0x31,0x31,0x20,0x2b,0x20,0x6d,0x32,0x31,0x20,0x2b,0x20,0x6d,0x33,0x31,0x20,0x2b,0x20,0x6d,0x34,0x31,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x36,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x78,0x20,0x3c,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x26,0x26,0x20,0x6f,0x79,0x20,0x3c,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x20,0x3d,0x20,0x6f,0x78,0x20,0x2b,0x20,0x6f,0x7a,0x20,0x2a,0x20,0x64,0x73,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x20,0x3d,0x20,0x6f,0x79,0x20,0x2b,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x20,0x20,0x3d,0x20,0x62,0x69,0x61,0x73,0x20,0x2b,0x20,0x6d,0x31,0x31,0x20,0x2d,0x20,0x6d,0x32,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x33,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x32,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x34,0x31,0x20,0x2b,0x20,0x6d,0x35,0x31,0x3b,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x73,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x72,0x65,0x73,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x30,0x29,0x2c,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x36,0x29,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x4f,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x4f,0x79,0x29,0x2c,0x20,0x72,0x65,0x73,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "convert", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x28,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x
20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x62,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x26,0x26,0x20,0x77,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x26,0x26,0x20,0x68,0x62,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,
0x20,0x69,0x6e,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20,0x69,0x6e,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x2c,0x20,0x6f,0x75,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0xa,0x7d,0xa, } - }, -{ - "FloatToInt8", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x74,0x6f,0x5f,0x69,0x6e,0x74,0x38,0x28,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,
0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x62,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0
x20,0x20,0x20,0x69,0x66,0x20,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x26,0x26,0x20,0x77,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x26,0x26,0x20,0x68,0x62,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x73,0x63,0x61,0x6c,0x65,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x29,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x75,0x6c,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x20,0x3d,0x20,0x69,0x6e,0x20,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x72,0x65,0x73,0x75,0x6c,0x74,0x5f,0x69,0x6e,0x74,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x72,0x65,0x73,0x75,0x6c,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x6f,0x75,0x74,0x20,0x3d,0x20,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x63,0x68,0x61,0x72,0x34,0x5f,0x73,0x61,0x74,0x28,0x72,0x65,0x73,0x75,0x6c,0x74,0x5f,0x69,0x6e,0x74,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2a,0x68,0x65,0x69,0x67,0x68,0x74,0x2a,0x77,0x69,0x64,0x74,0x68,0x20,0x2b,0x20,0x68,0x62,0x2a,0x77,0x69,0x64,0x74,0x68,0x20,0x2b,0x20,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x76,0x73,0x74,0x6f,0x72,0x65,0x34,0x28,0x6f,0x75,0x74,0x2c,0x20,0x69,0x6e,0x64,0x65,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0xa,0x7d,0xa, } - }, -{ - "deconv_2d", - { 0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x64,0x65,0x63,0x6f,0x6e,0x76,0x5f,0x32,0x64,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x
2c,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x62,0x69,0x61,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x61,0x6c,0x69,0x67,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72
,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0
x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x62,0x69,0x61,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x30,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x25,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x78,0x20,0x3d,0x20,0x28,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x61,0x6c,0x69,0x67,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x20,0x2f,0x20,0x73,0x74,0x72,0x69,0x64,
0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x79,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x28,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x61,0x6c,0x69,0x67,0x6e,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x20,0x2f,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x65,0x61,0x6c,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x20,0x2d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2d,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x65,0x61,0x6c,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x20,0x2d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x79,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x70,0x61,0x64,0x64,0x69,0x6e,0x67,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x20,0x2b,0x20,0x6f,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2d,0x20,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x30,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x31,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x32,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x33,0x2c,0x20,0x6b,0x65,0x72,0
x6e,0x65,0x6c,0x5f,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x69,0x6e,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x34,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x69,0x63,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x63,0x20,0x3c,0x20,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x3b,0x20,0x69,0x63,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x30,0x20,0x3d,0x20,0x69,0x63,0x20,0x3c,0x3c,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x31,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x30,0x20,0x2b,0x20,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x32,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x30,0x20,0x2b,0x20,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x33,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x30,0x20,0x2b,0x20,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x5f,0x79,0x20,0x3d,0x20,0x64,0x65,0x61,0x6c,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x69,0x64,0x78,0x5f,0x68,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x79,0x3b,0x20,0x6b,0x5f,0x79,0x20,0x3e,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x5f,0x79,0x20,0x2d,0x3d,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x69,0x64,0x78,0x5f,0x68,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x79,0x20,
0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x69,0x64,0x78,0x5f,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x68,0x62,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x5f,0x69,0x64,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x69,0x64,0x78,0x5f,0x68,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x64,0x78,0x5f,0x68,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x20,0x20,0x20,0x3d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x5f,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x6b,0x5f,0x78,0x20,0x3d,0x20,0x64,0x65,0x61,0x6c,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x3b,0x20,0x6b,0x5f,0x78,0x20,0x3e,0x3d,0x20,0x30,0x3b,0x20,0x6b,0x5f,0x78,0x20,0x2d,0x3d,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x79,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6b,0x5f,0x79,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x6b,0x5f,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x79,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,
0x5f,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x30,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x31,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x32,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x78,0x5f,0x33,0x2c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x79,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x69,0x63,0x2c,0x20,0x69,0x6e,0x70,0x75,0
x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x30,0x20,0x3d,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x30,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x30,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x30,0x20,0x3d,0x20,0x72,0x65,0x61,0x64,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,
0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x76,0x61,0x6c,0x75,0x65,0x30,0x2c,0x20,0x69,0x6e,0x5f,0x68,0x62,0x5f,0x76,0x61,0x6c,0x75,0x65,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x30,0x2e,0x78,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x30,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x30,0x2e,0x79,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x31,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x30,0x2e,0x7a,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x32,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x6d,0x61,0x64,0x28,0x69,0x6e,0x30,0x2e,0x77,0x2c,0x20,0x77,0x65,0x69,0x67,0x68,0x74,0x73,0x33,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x5f,0x77,0x69,0x64,0x74,0x68,0x30,0x2b,0x2b,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x34,0x29,0x30,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x52,0x45,0x4c,0x55,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x30,0x20,0x3d,0x20,0x63,0x6c,0x61,0x6d,0x70,0x28,0x6f,0x75,0x74,0x30,0x2c,0x20,
0x28,0x66,0x6c,0x6f,0x61,0x74,0x34,0x29,0x30,0x2c,0x20,0x28,0x66,0x6c,0x6f,0x61,0x74,0x34,0x29,0x36,0x29,0x3b,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x73,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x77,0x72,0x69,0x74,0x65,0x5f,0x69,0x6d,0x61,0x67,0x65,0x66,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x5f,0x69,0x6d,0x61,0x67,0x65,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "winogradTransformSource2_5_1", - { 
0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x77,0x69,0x6e,0x6f,0x67,0x72,0x61,0x64,0x54,0x72,0x61,0x6e,0x73,0x66,0x6f,0x72,0x6d,0x53,0x6f,0x75,0x72,0x63,0x65,0x28,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x2f,0x2f,0x20,0x30,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0
x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x2f,0x2f,0x20,0x33,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x61,0x64,0x58,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x61,0x64,0x59,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2f,0x2f,0x20,0x36,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x58,0x2c,0x20,0x2f,0x2f,0x20,0x39,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x59,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0x20,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x2a,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x73,0x72,0x63,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x25,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x78,0x20,0x2f,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x20,0x20,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x58,0x2c,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x6f,0x66,0x66,0x73,0x65,0x74,0x59,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x3b,0xa,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x2f,0x20,0x73,0x72,0x63,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x72,0x63,0x5a,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x25,0x20,0x73,0x72,0x63,0x43,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x43,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x59,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x3d,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x20,0x3d,0x20,0x28,0x75,0x6e,0x69,0x74,0x57,0x69,0x64,0x74,0x68,0x20,0x2a,0x20,0x75,0x6e,0x69,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x33,0x29,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x59,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x64,0x73,0x74,0x59,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x2f,0x20,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x64,0x73,0x74,0x58,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x64,0x73,0x74,0x59,0x4f,0x72,0x69,0x67,0x69,0x6e,0x20,0x25,0x20,0x34,0x20,0x2b,0x20,0x34,0x20,0x2a,0x20,0x64,0x73,0x74,0x58,0x4f,0x72,0x69,0x67,0x69,0x6e,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x62,0x61,0x74,0x63,0x68,0x4f,0x66,0x66,0x73,0x65,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x28,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x2e,0x78,0x29,0x
20,0x2a,0x20,0x32,0x20,0x2d,0x20,0x70,0x61,0x64,0x58,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x28,0x72,0x65,0x61,0x6c,0x50,0x6f,0x73,0x2e,0x79,0x29,0x20,0x2a,0x20,0x32,0x20,0x2d,0x20,0x70,0x61,0x64,0x59,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x31,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x32,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x33,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x30,0x35,0x3b,0xa,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x31,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x32,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x33,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x34,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x53,0x35,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69
,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x20,0x53,0x31,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28
,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0
x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x34,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x2
9,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x35,0x30,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x
32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x
3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x34,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x35,0x31,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x
78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20
,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0
x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2
9,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0
x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x34,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x6
1,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x35,0x32,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,
0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x
20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x33,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x34,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20
,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x35,0x33,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0
x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,
0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x
61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x34,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x35,0x34,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x79,0x
53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x30,0x35,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x31,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x31,0x35,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x32,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x
63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x32,0x35,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x33,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64
,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x33,0x35,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x34,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0
x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x34,0x35,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x78,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x73,0x79,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x35,0x20,0x2b,0x20,0x73,0x79,0x53,0x74,0x61,0x72,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x73,0x78,0x20,0x2b,0x20,0x73,0x72,0x63,0x5a,0x20,0x2a,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x78,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x57,0x69,0x64,0x74,0x68,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x62,0x61,0x74,0x63,0x68,0x49,0x6e,0x64,0x65,0x78,0x20,0x2a,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2b,0x20,0x73,0x79,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x73,0x79,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x73,0x79,0x20,0x3e,0x3d,0x20,0x73,0x72,0x63,0x48,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x53,0x35,0x35,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x75,0x49,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6d,0x61,0x67,0x65,0x53,0x78,0x2c,0x20,0x69,0x6d,0x61,0x67,0x65,0x53,0x79,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x30,0x20,0x3d,0x20,0x2b,0x53,0x30,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x30,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x30,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x30,0x20,0x3d,0x20,0x2b,0x53,0x31,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x31,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x31,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x30,0x20,0x3d,0x20,0x2b,0x53,0x32,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x32,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x32,0x34,0x3b,0xa,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x30,0x20,0x3d,0x20,0x2b,0x53,0x33,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x33,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x33,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x34,0x30,0x20,0x3d,0x20,0x2b,0x53,0x34,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x34,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x34,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x35,0x30,0x20,0x3d,0x20,0x2b,0x53,0x35,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x35,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x53,0x35,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31
,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x34,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x
31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x35,0x31,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x32,0x20,0x3d,0
x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x34,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x35,0x32,0x20,0x3d,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36
,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x33,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x30,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x30,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x33,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x31,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x31,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x33,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,
0x20,0x53,0x32,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x32,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x33,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x33,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x33,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x34,0x33,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x34,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x34,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x35,0x33,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x35,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x35,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x34,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x30,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x30,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x30,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x34,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x31,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x31,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x31,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0
x54,0x34,0x20,0x6d,0x32,0x34,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x32,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x32,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x32,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x34,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x33,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x33,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x33,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x34,0x34,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x34,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x34,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x34,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0
x20,0x53,0x34,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x35,0x34,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x35,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x53,0x35,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x53,0x35,0x34,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x30,0x35,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x53,0x30,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x53,0x30,0x33,0x20,0x2b,0x20,0x53,0x30,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x31,0x35,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x53,0x31,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x53,0x31,0x33,0x20,0x2b,0x20,0x53,0x31,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x32,0x35,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x53,0x32,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x53,0x32,0x33,0x20,0x2b,0x20,0x53,0x32,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x33,0x35,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x53,0x33,0x31,0x20,0x2d,0x20,0x28,0x46,0x
4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x53,0x33,0x33,0x20,0x2b,0x20,0x53,0x33,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x34,0x35,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x53,0x34,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x53,0x34,0x33,0x20,0x2b,0x20,0x53,0x34,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6d,0x35,0x35,0x20,0x3d,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x53,0x35,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x53,0x35,0x33,0x20,0x2b,0x20,0x53,0x35,0x35,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x30,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x32,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x34,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x
41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x33,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x30,0x29,0x3b,0xa,0x20,0x
20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x34,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x30,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x35,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x31,0x30,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x33,0x30,0x20,0x2b,0x20,0x6d,0x35,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x36,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x32,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x34,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x37,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x38,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,
0x20,0x2a,0x20,0x39,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x30,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x31,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x31,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x31,0x31,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a
,0x20,0x6d,0x33,0x31,0x20,0x2b,0x20,0x6d,0x35,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x32,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x32,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x34,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x33,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x34,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31
,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x35,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x36,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x3
3,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x32,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x37,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x31,0x32,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x33,0x32,0x20,0x2b,0x20,0x6d,0x35,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x38,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x32,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x34,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x31,0x39,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x
2a,0x20,0x6d,0x33,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x30,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x31,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x
20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x32,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x33,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x33,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x31,0x33,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x33,0x33,0x20,0x2b,0x20,0x6d,0x35,0x33,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x34,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x34,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x32,0x34,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x34,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0
x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x35,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x34,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x34,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x34,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x36,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x34,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x34,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x34,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x37,
0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x34,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x34,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x34,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x38,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x34,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x34,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x34,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x32,0x39,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x31,0x34,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x33,0x34
,0x20,0x2b,0x20,0x6d,0x35,0x34,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x33,0x30,0x29,0x2c,0x20,0x2b,0x6d,0x30,0x35,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x31,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x32,0x35,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x32,0x35,0x20,0x2a,0x20,0x6d,0x34,0x35,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x33,0x31,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x35,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x35,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x35,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x35,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x33,0x32,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x31,0x35,0x20,0x2b,0x20
,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x36,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x35,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x33,0x35,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x31,0x36,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x35,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x33,0x33,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2d,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x35,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x35,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x33,0x35,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x35,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x33,0x34,0x29,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x20,0x2a,0x20,0x6d,0x31,0x35,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x32,0x35,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x38,0x33,0x33,0x33,0x33,0x33,0x2
0,0x2a,0x20,0x6d,0x33,0x35,0x20,0x2b,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x30,0x2e,0x30,0x34,0x31,0x36,0x36,0x36,0x37,0x20,0x2a,0x20,0x6d,0x34,0x35,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x75,0x4f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x64,0x73,0x74,0x58,0x2c,0x20,0x64,0x73,0x74,0x59,0x20,0x2b,0x20,0x64,0x73,0x74,0x48,0x65,0x69,0x67,0x68,0x74,0x20,0x2a,0x20,0x33,0x35,0x29,0x2c,0x20,0x2b,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x34,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x31,0x35,0x20,0x2d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x35,0x2e,0x30,0x20,0x2a,0x20,0x6d,0x33,0x35,0x20,0x2b,0x20,0x6d,0x35,0x35,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa, } - }, -{ - "Int8ToFloat", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x69,0x6e,0x74,0x38,0x5f,0x74,0x6f,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x28,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x
61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x63,0x68,0x61,0x72,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x2a,0x20,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x68,0x62,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x26,0x26,0x20,0x77,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x26,0x26,0x20,0x68,0x62,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x64,0x65,0x78,0x20,0x3d,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2a,0x68,0x65,0x69,0x67,0x68,0x74,0x2a,0x77,0x69,0x64,0x74,0x68,0x20,0x2b,0x20,0x68,0x62,0x2a,0x77,0x69,0x64,0x74,0x68,0x20,0x2b,0x20,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x68,0x61,0x72,0x34,0x20,0x69,0x6e,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x69,0x6e,0x64,0x65,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x73,0x63,0x61,0x6c,0x65,0x20,0x3d,0x20,0x76,0x6c,0x6f,0x61,0x64,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x28,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x29,0x73,0x63,0x61,0x6c,0x65,0x5f,0x70,0x74,0x72,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x72,0x65,0x73,0x75,0x6c,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x20,0x3d,0x20,0x43,0x4f,0x4e,0x56,0x45,0x52,0x54,0x5f,0x46,0x4c,0x4f,0x41,0x54,0x34,0x28,0x63,0x6f,0x6e,0x76,0x65,0x72,0x74,0x5f,0x69,0x6e,0x74,0x34,0x5f,0x72,0x74,0x65,0x28,0x69,0x6e,0x29,0x29,0x20,0x2a,0x20,0x73,0x63,
0x61,0x6c,0x65,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x70,0x6f,0x73,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x2c,0x20,0x68,0x62,0x29,0x2c,0x20,0x72,0x65,0x73,0x75,0x6c,0x74,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0xa,0x7d,0xa, } - }, -{ - "pooling", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,
0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x70,0x6f,0x6f,0x6c,0x69,0x6e,0x67,0x28,0x47,0x4c,0
x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x70,0x61,0x64,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x32,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x
6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2f,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x2d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f
,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x2d,0x70,0x61,0x64,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x73,0x74,0x72,0x69,0x64,0x65,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x2d,0x70,0x61,0x64,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0xa,0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x50,0x4f,0x4f,0x4c,0x5f,0x41,0x56,0x47,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0
x73,0x75,0x6c,0x74,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3d,0x20,0x30,0x3b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x30,0x3b,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0x20,0x77,0x69,0x64,0x74,0x68,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x2
0,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x
63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x30,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x65,0x6e,0x64,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x65,0x6e,0x64,0x20,0x20,0x20,0x20,0x3d,0x20,0x6d,0x69,0x6e,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x73,0x69,0x7a,0x65,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x65,0x6e,0x64,0x20,0x2d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x29,0x2c,0x20,0x28,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x65,0x6e,0x64,0x20,0x2d,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x72,0x65,0x71,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0
x54,0x29,0x31,0x2e,0x30,0x66,0x20,0x2f,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x29,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x73,0x69,0x7a,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2a,0x20,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x72,0x65,0x71,0x3b,0xa,0x23,0x65,0x6c,0x73,0x65,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x3d,0x20,0x28,0x46,0x4c,0x4f,0x41,0x54,0x34,0x29,0x28,0x2d,0x46,0x4c,0x54,0x5f,0x4d,0x41,0x58,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3d,0x20,0x30,0x3b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x20,0x3c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x3b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x68,0x65,0x69,0x67,0x68,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x2c,0x20,0x2d,0x31,0x2c,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x78,0x29,0x29,0x3b,0xa,0x20,0x20
,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x21,0x3d,0x20,0x2d,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3d,0x20,0x30,0x3b,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x3c,0x20,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x3b,0x20,0x77,0x69,0x64,0x74,0x68,0x2b,0x2b,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x77,0x69,0x64,0x74,0x68,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x73,0x65,0x6c,0x65,0x63,0x74,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x73,0x74,0x61,0x72,0x74,0x20,0x2b,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x2d,0x31,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3c,0x20,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3e,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x73,0x68,0x61,0x70,0x65,0x2e,0x79,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x21,0x3d,0x20,0x2d,0x31,0x29,0x20,
0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x66,0x6d,0x61,0x78,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x62,0x61,0x74,0x63,0x68,
0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "softmax", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x45,0x58,0x50,0x20,0x65,0x78,0x70,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x2c,0xa,0xa,0x23,0x64,0x65,0x66,0x69,0x6e,0x65,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x29,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x69,0x6e,0x70,0x75,0x74,0x31,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x6
1,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x32,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x7c,0x7c,0x20,0x69,0x6e,0x70,0x75,0x74,0x33,0x20,0x3e,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x32,0x29,0x20,0x7b,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x72,0x65,0x74,0x75,0x72,0x6e,0x3b,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5c,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x73,0x6f,0x66,0x74,0x6d,0x61,0x78,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x28,0x47,0x4c,0x4f,0x42,0x41,0x4c,0x5f,0x53,0x49,0x5a,0x45,0x5f,0x33,0x5f,0x44,0x49,0x4d,0x53,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x5f,0x5f,0x70,0x72,0x69,
0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x70,0x72,0x69,0x76,0x61,0x74,0x65,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x29,0x20,0x7b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x32,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x44,0x45,0x41,0x4c,0x5f,0x4e,0x4f,0x4e,0x5f,0x55,0x4e,0x49,0x46,0x4f,0x52,0x4d,0x5f,0x44,0x49,0x4d,0x33,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x2c,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x77,0x69,0x64,0x74,0x68,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0
x6c,0x75,0x65,0x20,0x3d,0x20,0x2d,0x46,0x4c,0x54,0x5f,0x4d,0x41,0x58,0x3b,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x73,0x68,0x6f,0x72,0x74,0x20,0x69,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x2d,0x20,0x31,0x3b,0x20,0x2b,0x2b,0x69,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x69,0x20,0x2a,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5
f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x28,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x2d,0x20,0x31,0x29,0x20,0x2a,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x20,0x2c,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x3d,0x3d,0x20,0x30,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0
x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6
c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x20,0x3d,0x20,0x6d,0x61,0x78,0x28,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x3d,0x20,0x30,0x3b,0xa,0x20,0x20,0x20,0x20,0x66,0x6f,0x72,0x20,0x28,0x73,0x68,0x6f,0x72,0x74,0x20,0x69,0x20,0x3d,0x20,0x30,0x3b,0x20,0x69,0x20,0x3c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x2d,0x20,0x31,0x3b,0x20,0x2b,0x2b,0x69,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x69,0x20,0x2a,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x2d,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x
20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x20,0x2b,0x20,0x28,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x30,0x20,0x2d,0x20,0x31,0x29,0x20,0x2a,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x2d,0x3d,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x3d,0x3d,0x20,0x30,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x
75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0
x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x72,0x65,0x6d,0x61,0x69,0x6e,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x20,0x2b,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x63,0x75,0x72,0x5f,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x70,0x6f,0x73,0x20,0x20,0x3d,0x20,0x6d,0x61,0x64,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x73,0x69,0x7a,0x65,0x5f,0x64,0x69,0x6d,0x31,0x2c,0x20,0x77,0x69,0x64,0x74,0x68,0x5f,0x69,0x64,0x78,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x63,0x75,0x72,0x5f,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x70,0x6f,0x73,0x2c,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x29,0x20,0x2d,0x20,0x66,0x6c,0x6f,0x61,0x74,0x5f,0x6d,0x61,0x78,0x5f,0x76,0x61,0x6c,0x75,0x65,0x3b,0xa,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x20,0x6d,0x75,0x6c,0x32,0x34,0x28,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x5f,0x62,0x6c,0x6f,0x63,0x6b,0x5f,0x69,0x64,0x78,0x2c,0x20,0x34,0x29,0x20,0x2d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x63,0x68,0x61,0x6e,0x6e,0x65,0x6c,0x73,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x31,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0
x74,0x61,0x2e,0x7a,0x20,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x7a,0x29,0x20,0x2f,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x20,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x20,0x2f,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x20,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x20,0x2f,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x32,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x20,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x79,0x29,0x20,0x2f,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x20,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x20,0x2f,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x20,0x69,0x66,0x20,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x5f,0x72,0x65,0x6d,0x61,0x69,0x6e,0x20,0x3d,0x3d,0x20,0x33,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x20,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x2e,0x78,0x29,0x20,0x2f,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x3b,0xa,0x20,0x20
,0x20,0x20,0x7d,0x20,0x65,0x6c,0x73,0x65,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x20,0x3d,0x20,0x45,0x58,0x50,0x28,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x29,0x20,0x2f,0x20,0x61,0x63,0x63,0x75,0x6d,0x5f,0x72,0x65,0x73,0x75,0x6c,0x74,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0xa,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x63,0x75,0x72,0x5f,0x6f,0x75,0x74,0x5f,0x77,0x69,0x64,0x74,0x68,0x5f,0x70,0x6f,0x73,0x2c,0x20,0x62,0x61,0x74,0x63,0x68,0x5f,0x68,0x65,0x69,0x67,0x68,0x74,0x5f,0x69,0x64,0x78,0x29,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x5f,0x64,0x61,0x74,0x61,0x29,0x3b,0xa,0x7d,0xa, } - }, -{ - "blitBuffer", - { 0x23,0x69,0x66,0x64,0x65,0x66,0x20,0x4d,0x4e,0x4e,0x5f,0x53,0x55,0x50,0x50,0x4f,0x52,0x54,0x5f,0x46,0x50,0x31,0x36,0xa,0x23,0x70,0x72,0x61,0x67,0x6d,0x61,0x20,0x4f,0x50,0x45,0x4e,0x43,0x4c,0x20,0x45,0x58,0x54,0x45,0x4e,0x53,0x49,0x4f,0x4e,0x20,0x63,0x6c,0x5f,0x6b,0x68,0x72,0x5f,0x66,0x70,0x31,0x36,0x20,0x3a,0x20,0x65,0x6e,0x61,0x62,0x6c,0x65,0xa,0x23,0x65,0x6e,0x64,0x69,0x66,0xa,0x5f,0x5f,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x20,0x73,0x61,0x6d,0x70,0x6c,0x65,0x72,0x5f,0x74,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x20,0x3d,0x20,0x43,0x4c,0x4b,0x5f,0x4e,0x4f,0x52,0x4d,0x41,0x4c,0x49,0x5a,0x45,0x44,0x5f,0x43,0x4f,0x4f,0x52,0x44,0x53,0x5f,0x46,0x41,0x4c,0x53,0x45,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x41,0x44,0x44,0x52,0x45,0x53,0x53,0x5f,0x43,0x4c,0x41,0x4d,0x50,0x20,0x7c,0x20,0x43,0x4c,0x4b,0x5f,0x46,0x49,0x4c,0x54,0x45,0x52,0x5f,0x4e,0x45,0x41,0x52,0x45,0x53,0x54,0x3b,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x6c,0x69,0x74,0x42,0x75,0x66,0x66,0x65,0x72,0x28,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6e,0x73,0x74,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x69,0x6e,
0x70,0x75,0x74,0x2c,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x3d,0x20,0x77,0x68,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x20,0x3d,0x20,0x77,0x68,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x78,0x79,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x2f,0x2f,0x4e,0x2c,0x20,0x43,0x2c,0x20,0x48,0x2c,0x20,0x57,0xa,0x20,0x20
,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x78,0x79,0x2e,0x79,0x2f,0x68,0x2c,0x20,0x78,0x79,0x2e,0x78,0x2f,0x77,0x2c,0x20,0x78,0x79,0x2e,0x79,0x25,0x68,0x2c,0x20,0x78,0x79,0x2e,0x78,0x25,0x77,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2e,0x78,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2e,0x79,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x70,0x6f,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x70,0x6f,0x73,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x20,0x3d,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x78,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x7a,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x7a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x77,0x20,0x2a,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,
0x73,0x20,0x3d,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x78,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x78,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x79,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x7a,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x7a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x77,0x20,0x2a,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x5d,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5b,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x5d,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x6c,0x69,0x74,0x49,0x6d,0x61,0x67,0x65,0x54,0x6f,0x42,0x75,0x66,0x66,0x65,0x72,0x28,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x72,0x65,0x61,0x64,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2
0,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x57,0x48,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x69,0x7a,0x65,0x2f,0x2a,0x6e,0x68,0x77,0x63,0x2a,0x2f,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x69,0x7a,0x65,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x69,0x7a,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x63,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x69,0x7a,0x65,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6e,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x69,0x7a,0x65,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x78,0x79,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x2f,0x2f,0x4e,0x2c,0x20,0x43,0x2c,0x20,0x48,0x2c,0x20,0x57,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x78,0x79,0x2e,0x79,0x2f,0x68,0x2c,0
x20,0x78,0x79,0x2e,0x78,0x2f,0x77,0x2c,0x20,0x78,0x79,0x2e,0x79,0x25,0x68,0x2c,0x20,0x78,0x79,0x2e,0x78,0x25,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x50,0x6f,0x73,0x20,0x3d,0x20,0x70,0x6f,0x73,0x20,0x2a,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x31,0x2c,0x20,0x34,0x2c,0x20,0x31,0x2c,0x20,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2e,0x78,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2e,0x79,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x70,0x6f,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x50,0x6f,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x77,0x20,0x2b,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x79,0x2a,0x69,0x6e,0x70,0x75,0x74,0x57,0x48,0x2e,0x78,0x2c,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x57,0x48,0x2e,0x79,0x20,0x2b,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x7a,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x20,0x3d,0x20,0x52,0x49,0x5f,0x46,0x28,0x69,0x6e,0x70,0x75,0x74,0x2c,0x20,0x53,0x41,0x4d,0x50,0x4c,0x45,0x52,0x2c,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0
x3d,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x78,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x7a,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x7a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x77,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x77,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x30,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x2b,0x20,0x30,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x30,0x5d,0x20,0x3d,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x20,0x2b,0x20,0x31,0x20,0x3c,0x20,0x63,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x2b,0x20,0x31,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x31,0x5d,0x20,0x3d,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x2e,
0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x20,0x2b,0x20,0x32,0x20,0x3c,0x20,0x63,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x2b,0x20,0x32,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x31,0x5d,0x20,0x3d,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x2e,0x7a,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x20,0x2b,0x20,0x33,0x20,0x3c,0x20,0x63,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x31,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x2b,0x20,0x33,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x5b,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x31,0x5d,0x20,0x3d,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x2e,0x77,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x7d,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa,0x5f,0x5f,0x6b,0x65,0x72,0x6e,0x65,0x6c,0x20,0x76,0x6f,0x69,0x64,0x20,0x62,0x6c,0x69,0x74,0x42,0x75,0x66,0x66,0x65,0x72,0x54,0x6f,0x49,0x6d,0x61,0x67,0x65,0x28,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x20,0x46,0x4c,0x4f,0x41,0x54,0x20,0x2a,0x69,0x6e,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x5f,0x5f,0x77,0x72,0x69,0x74,0x65,0x5f,0x6f,0x6e,0x6c,0x79,0x20,0x69,0x6d,0x61,0x67,0x65,0x32,0x64,0x5f,0x74,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x57,0x48,0x2c,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x77,0x68,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x77,0x20,0x3d,0x20,0x77,0x68,0x2e,0x78,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x68,0x20,0x3d,0x20,0x77,0x68,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x78,0x79,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x30,0x29,0x2c,0x20,0x67,0x65,0x74,0x5f,0x67,0x6c,0x6f,0x62,0x61,0x6c,0x5f,0x69,0x64,0x28,0x31,0x29,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x2f,0x2f,0x4e,0x2c,0x20,0x43,0x2c,0x20,0x48,0x2c,0x20,0x57,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x3
4,0x20,0x70,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x78,0x79,0x2e,0x79,0x2f,0x68,0x2c,0x20,0x78,0x79,0x2e,0x78,0x2f,0x77,0x2c,0x20,0x78,0x79,0x2e,0x79,0x25,0x68,0x2c,0x20,0x78,0x79,0x2e,0x78,0x25,0x77,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x50,0x6f,0x73,0x20,0x3d,0x20,0x70,0x6f,0x73,0x20,0x2a,0x20,0x28,0x69,0x6e,0x74,0x34,0x29,0x28,0x31,0x2c,0x20,0x34,0x2c,0x20,0x31,0x2c,0x20,0x31,0x29,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x69,0x66,0x20,0x28,0x70,0x6f,0x73,0x2e,0x78,0x20,0x3c,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2e,0x78,0x20,0x26,0x26,0x20,0x70,0x6f,0x73,0x2e,0x79,0x20,0x3c,0x20,0x72,0x65,0x67,0x69,0x6f,0x6e,0x2e,0x79,0x29,0x20,0x7b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x62,0x75,0x66,0x66,0x65,0x72,0x50,0x6f,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x34,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x20,0x3d,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x4f,0x66,0x66,0x73,0x65,0x74,0x20,0x2b,0x20,0x70,0x6f,0x73,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x32,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x20,0x3d,0x20,0x28,0x69,0x6e,0x74,0x32,0x29,0x28,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x77,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x79,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x57,0x48,0x2e,0x78,0x2c,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x78,0x2a,0x6f,0x75,0x74,0x70,0x75,0x74,0x57,0x48,0x2e,0x79,0x20,0x2b,0x20,0x70,0x6f,0x73,0x4f,0x75,0x74,0x70,0x75,0x74,0x2e,0x7a,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x3d,0x20,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x78,0x2a,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x78,0x20,0xa,0x20,0
x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x79,0x2a,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x7a,0x2a,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x7a,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x2b,0x70,0x6f,0x73,0x49,0x6e,0x70,0x75,0x74,0x2e,0x77,0x2a,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x77,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x30,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x2b,0x20,0x30,0x2a,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x31,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x2b,0x20,0x31,0x2a,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x32,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x2b,0x20,0x32,0x2a,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x69,0x6e,0x74,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x33,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x42,0x61,0x73,0x69,0x63,0x20,0x2b,0x20,0x33,0x2a,0x69,0x6e,0x70,0x75,0x74,0x53,0x74,0x72,0x69,0x64,0x65,0x2e,0x79,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x46,0x4c,0x4f,0x41,0x54,0x34,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x2e,0x78,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5b,0
x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x30,0x5d,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x2e,0x79,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5b,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x31,0x5d,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x2e,0x7a,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5b,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x32,0x5d,0x3b,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x2e,0x77,0x20,0x3d,0x20,0x69,0x6e,0x70,0x75,0x74,0x5b,0x69,0x6e,0x70,0x75,0x74,0x50,0x6f,0x73,0x33,0x5d,0x3b,0xa,0xa,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x57,0x49,0x5f,0x46,0x28,0x6f,0x75,0x74,0x70,0x75,0x74,0x2c,0x20,0x6f,0x75,0x74,0x70,0x75,0x74,0x50,0x6f,0x73,0x2c,0x20,0x63,0x6f,0x6c,0x6f,0x72,0x29,0x3b,0xa,0x20,0x20,0x20,0x20,0x7d,0xa,0x7d,0xa,0xa, } - }, - }; -} diff --git a/source/backend/opencl/execution/cl/conv2d_backprop.cl b/source/backend/opencl/execution/cl/conv2d_backprop.cl new file mode 100644 index 000000000..49097c9ee --- /dev/null +++ b/source/backend/opencl/execution/cl/conv2d_backprop.cl @@ -0,0 +1,68 @@ +#ifdef MNN_SUPPORT_FP16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif + +__constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; + +__kernel void conv2d_backprop_filter(__read_only image2d_t input, __read_only image2d_t grad, __global float* output_ptr, int batch, int outputChannel, int inputChannel, int2 inputShape, int2 shape, int2 kernelShape, int2 strides, int2 pads, int2 dilates) { + const int oc_block = get_global_id(0), ic_block = get_global_id(1); + if (oc_block * 4 >= outputChannel || ic_block * 4 >= inputChannel) { + return; + } + + const int h = get_global_id(2) / kernelShape.x, w = get_global_id(2) % kernelShape.x; + int temp = pads.y - h * dilates.y; + const int ohStart = ceil((float)temp / strides.y); + const int ohEnd = floor((float)(inputShape.y - 1 + temp) / strides.y); + const int ihStart = 
ohStart * strides.y - temp; + temp = pads.x - w * dilates.x; + const int owStart = ceil((float)temp / strides.x); + const int owEnd = floor((float)(inputShape.x - 1 + temp) / strides.x); + const int iwStart = owStart * strides.x - temp; + const int i_offset_0 = ic_block * inputShape.x, g_offset_0 = oc_block * shape.x; + + FLOAT4 grad0 = 0, grad1 = 0, grad2 = 0, grad3 = 0; + for (int b = 0; b < batch; ++b) { + const int i_offset_1 = b * inputShape.y, g_offset_1 = b * shape.y; + for (int oh = ohStart, ih = ihStart; oh <= ohEnd; ++oh, ih += strides.y) { + for (int ow = owStart, iw = iwStart; ow <= owEnd; ++ow, iw += strides.x) { + FLOAT4 in0 = RI_F(input, SAMPLER, (int2)(i_offset_0 + iw, i_offset_1 + ih)); + FLOAT4 in1 = RI_F(grad, SAMPLER, (int2)(g_offset_0 + ow, g_offset_1 + oh)); + grad0 = mad(in0, (FLOAT4)in1.x, grad0); + grad1 = mad(in0, (FLOAT4)in1.y, grad1); + grad2 = mad(in0, (FLOAT4)in1.z, grad2); + grad3 = mad(in0, (FLOAT4)in1.w, grad3); + } + } + } + + // save image kernel into buffer + { +#define FILL_OUTPUT(grad, index, offset) \ + const int remain_channel_ = inputChannel - ic_block * 4; \ + if (remain_channel_ >= 1) output_ptr[index] = grad.x; \ + if (remain_channel_ >= 2) output_ptr[index + offset] = grad.y; \ + if (remain_channel_ >= 3) output_ptr[index + offset * 2] = grad.z; \ + if (remain_channel_ >= 4) output_ptr[index + offset * 3] = grad.w; + + const int remain_channel = outputChannel - oc_block * 4; + const int kernelSize = kernelShape.x * kernelShape.y; + int index = (oc_block * inputChannel + ic_block) * 4 * kernelSize + h * kernelShape.x + w; + if (remain_channel >= 1) { + FILL_OUTPUT(grad0, index, kernelSize); + } + if (remain_channel >= 2) { + index += kernelSize * inputChannel; + FILL_OUTPUT(grad1, index, kernelSize); + } + if (remain_channel >= 3) { + index += kernelSize * inputChannel; + FILL_OUTPUT(grad2, index, kernelSize); + } + if (remain_channel >= 4) { + index += kernelSize * inputChannel; + FILL_OUTPUT(grad3, index, kernelSize); + 
} +#undef FILL_OUTPUT + } +} diff --git a/source/backend/opencl/execution/cl/conv_2d.cl b/source/backend/opencl/execution/cl/conv_2d.cl index e49c22e2a..7c41c3bfe 100644 --- a/source/backend/opencl/execution/cl/conv_2d.cl +++ b/source/backend/opencl/execution/cl/conv_2d.cl @@ -449,4 +449,4 @@ __kernel void conv_2d(GLOBAL_SIZE_2_DIMS __read_only image2d_t input, __read_onl } else if (remain == 1) { WI_F(output, (int2)(output_idx, output_batch_height_idx), out0); } -} \ No newline at end of file +} diff --git a/source/backend/opencl/execution/cl/deconv_2d.cl b/source/backend/opencl/execution/cl/deconv_2d.cl index 49332c2a3..67bd4ba9d 100644 --- a/source/backend/opencl/execution/cl/deconv_2d.cl +++ b/source/backend/opencl/execution/cl/deconv_2d.cl @@ -1,6 +1,8 @@ #define GLOBAL_SIZE_3_DIMS \ __private const int global_size_dim0, __private const int global_size_dim1, __private const int global_size_dim2, - +#ifdef MNN_SUPPORT_FP16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif #define DEAL_NON_UNIFORM_DIM3(input1, input2, input3) \ if (input1 >= global_size_dim0 || input2 >= global_size_dim1 || input3 >= global_size_dim2) { \ return; \ @@ -21,47 +23,47 @@ __kernel void deconv_2d(GLOBAL_SIZE_3_DIMS __read_only image2d_t input, __read_o __private const int in_channel_blocks, __private const int out_channel_blocks) { const int out_channel_blocks_idx = get_global_id(0); - const int out_width_idx = get_global_id(1); + const int out_w_idx = get_global_id(1); const int out_batch_height_idx = get_global_id(2); - DEAL_NON_UNIFORM_DIM3(out_channel_blocks_idx, out_width_idx, out_batch_height_idx); + DEAL_NON_UNIFORM_DIM3(out_channel_blocks_idx, out_w_idx, out_batch_height_idx); - float4 out0 = read_imagef(bias, SAMPLER, (int2)(out_channel_blocks_idx, 0)); + FLOAT4 out0 = RI_F(bias, SAMPLER, (int2)(out_channel_blocks_idx, 0)); - const int out_batch_idx = out_batch_height_idx / output_shape.x; - const int out_height_idx = out_batch_height_idx % output_shape.x; + const int 
out_b_idx = out_batch_height_idx / output_shape.x; + const int out_h_idx = out_batch_height_idx % output_shape.x; - int kernel_start_x = (out_width_idx + align_shape.y) / stride_shape.y; - int kernel_start_y = max(0, (out_height_idx + align_shape.x) / stride_shape.x); + int kernel_start_x = max(0, (out_w_idx + align_shape.y) / stride_shape.y); + int kernel_start_y = max(0, (out_h_idx + align_shape.x) / stride_shape.x); - int deal_kernel_width = kernel_shape.y - mad24(kernel_start_x, stride_shape.y, padding_shape.y) + out_width_idx - 1; - int deal_kernel_height = kernel_shape.x - mad24(kernel_start_y, stride_shape.x, padding_shape.x) + out_height_idx - 1; + int deal_kernel_width = kernel_shape.y - mad24(kernel_start_x, stride_shape.y, padding_shape.y) + out_w_idx - 1; + int deal_kernel_height = kernel_shape.x - mad24(kernel_start_y, stride_shape.x, padding_shape.x) + out_h_idx - 1; int kernel_x_0, kernel_x_1, kernel_x_2, kernel_x_3, kernel_y; - float4 in0; - float4 weights0, weights1, weights2, weights3; + FLOAT4 in0; + FLOAT4 weights0, weights1, weights2, weights3; for (int ic = 0; ic < in_channel_blocks; ic++) { kernel_x_0 = ic << 2; kernel_x_1 = kernel_x_0 + 1; kernel_x_2 = kernel_x_0 + 2; kernel_x_3 = kernel_x_0 + 3; for (int k_y = deal_kernel_height, idx_h = kernel_start_y; k_y >= 0; k_y -= stride_shape.x, idx_h++) { - int in_idy = mad24(out_batch_idx, input_shape.x, idx_h); + int in_idy = mad24(out_b_idx, input_shape.x, idx_h); int in_hb_value = select(in_idy, -1, idx_h < 0 || idx_h >= input_shape.x); int in_width0 = kernel_start_x; for (int k_x = deal_kernel_width; k_x >= 0; k_x -= stride_shape.y) { kernel_y = mad24(k_y, kernel_shape.y, k_x); kernel_y = mad24(out_channel_blocks_idx, kernel_size, kernel_y); - weights0 = read_imagef(weights, SAMPLER, (int2)(kernel_x_0, kernel_y)); - weights1 = read_imagef(weights, SAMPLER, (int2)(kernel_x_1, kernel_y)); - weights2 = read_imagef(weights, SAMPLER, (int2)(kernel_x_2, kernel_y)); - weights3 = read_imagef(weights, 
SAMPLER, (int2)(kernel_x_3, kernel_y)); + weights0 = RI_F(weights, SAMPLER, (int2)(kernel_x_0, kernel_y)); + weights1 = RI_F(weights, SAMPLER, (int2)(kernel_x_1, kernel_y)); + weights2 = RI_F(weights, SAMPLER, (int2)(kernel_x_2, kernel_y)); + weights3 = RI_F(weights, SAMPLER, (int2)(kernel_x_3, kernel_y)); int in_idx = mul24(ic, input_shape.y); int in_width_value0 = in_width0; \ in_width_value0 = \ select(in_idx + in_width_value0, -1, (in_width_value0 < 0 || in_width_value0 >= input_shape.y)); \ - in0 = read_imagef(input, SAMPLER, (int2)(in_width_value0, in_hb_value)); + in0 = RI_F(input, SAMPLER, (int2)(in_width_value0, in_hb_value)); out0 = mad(in0.x, weights0, out0); out0 = mad(in0.y, weights1, out0); @@ -70,16 +72,27 @@ __kernel void deconv_2d(GLOBAL_SIZE_3_DIMS __read_only image2d_t input, __read_o in_width0++; } } - + } #ifdef RELU - out0 = fmax(out0, (float4)0); + out0 = fmax(out0, (FLOAT4)0); #endif #ifdef RELU6 - out0 = clamp(out0, (float4)0, (float4)6); + out0 = clamp(out0, (FLOAT4)0, (FLOAT4)6); #endif - int out_image_width_idx = mad24(out_channel_blocks_idx, output_shape.y, out_width_idx); - write_imagef(output, (int2)(out_image_width_idx, out_batch_height_idx), out0); + int out_image_width_idx = mad24(out_channel_blocks_idx, output_shape.y, out_w_idx); + WI_F(output, (int2)(out_image_width_idx, out_batch_height_idx), out0); +} + +__kernel void iohw2oihw(__global const float* input_ptr, __global float* output_ptr, int plane_number, int input_channel, int output_channel) { + const int ic_index = get_global_id(0), oc_index = get_global_id(1); + if (ic_index >= input_channel || oc_index >= output_channel) { + return; + } + const int input_offset = (ic_index * output_channel + oc_index) * plane_number; + const int output_offset = (oc_index * input_channel + ic_index) * plane_number; + for (int i = 0; i < plane_number; ++i) { + output_ptr[output_offset + i] = input_ptr[input_offset + i]; } } diff --git a/source/backend/opencl/execution/cl/matmul.cl 
b/source/backend/opencl/execution/cl/matmul.cl new file mode 100644 index 000000000..1e77f1646 --- /dev/null +++ b/source/backend/opencl/execution/cl/matmul.cl @@ -0,0 +1,62 @@ +#ifdef MNN_SUPPORT_FP16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif + +#define GLOBAL_SIZE_2_DIMS \ +__private const int global_size_dim0, __private const int global_size_dim1, + +#define DEAL_NON_UNIFORM_DIM2(input1, input2) \ +if (input1 >= global_size_dim0 || input2 >= global_size_dim1) { \ +return; \ +} + +__constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; + +__kernel void matmul(GLOBAL_SIZE_2_DIMS __read_only image2d_t input_a, __read_only image2d_t input_b, + __write_only image2d_t output_c, __private const int channels, + __private const int channel_blocks) { + const int width_blocks_idx = get_global_id(0); + const int height_idx = get_global_id(1); + + DEAL_NON_UNIFORM_DIM2(width_blocks_idx, height_idx); + FLOAT4 a; + FLOAT4 b0 = 0, b1 = 0, b2 = 0, b3 = 0; + + FLOAT result0 = 0; + FLOAT result1 = 0; + FLOAT result2 = 0; + FLOAT result3 = 0; + + for (short pos = 0; pos < channel_blocks; pos += 1) { + a = RI_F(input_a, SAMPLER, (int2)(pos, height_idx)); + + short remain = (pos + 1) * 4 - channels; + + b0 = RI_F(input_b, SAMPLER, (int2)(width_blocks_idx, pos * 4)); + b1 = RI_F(input_b, SAMPLER, (int2)(width_blocks_idx, pos * 4 + 1)); + b2 = RI_F(input_b, SAMPLER, (int2)(width_blocks_idx, pos * 4 + 2)); + b3 = RI_F(input_b, SAMPLER, (int2)(width_blocks_idx, pos * 4 + 3)); + + if (remain == 3) { + b1 = 0; + b2 = 0; + b3 = 0; + } else if (remain == 2) { + b2 = 0; + b3 = 0; + } else if (remain == 1) { + b3 = 0; + } + + FLOAT4 btmp0 = (FLOAT4)(b0.s0, b1.s0, b2.s0, b3.s0); + FLOAT4 btmp1 = (FLOAT4)(b0.s1, b1.s1, b2.s1, b3.s1); + FLOAT4 btmp2 = (FLOAT4)(b0.s2, b1.s2, b2.s2, b3.s2); + FLOAT4 btmp3 = (FLOAT4)(b0.s3, b1.s3, b2.s3, b3.s3); + + result0 += dot(a, btmp0); + result1 += dot(a, btmp1); + result2 += dot(a, btmp2); + result3 += 
dot(a, btmp3); + } + WI_F(output_c, (int2)(width_blocks_idx, height_idx), (FLOAT4)(result0, result1, result2, result3)); +} diff --git a/source/backend/opencl/execution/cl/opencl_codegen.py b/source/backend/opencl/execution/cl/opencl_codegen.py index dc3a0e35b..ed9604d5f 100644 --- a/source/backend/opencl/execution/cl/opencl_codegen.py +++ b/source/backend/opencl/execution/cl/opencl_codegen.py @@ -1,4 +1,6 @@ import os +import sys +major_py_ver = sys.version_info.major def convert_string_to_hex_list(code_str): hex_list = [] @@ -8,8 +10,9 @@ def convert_string_to_hex_list(code_str): return hex_list def opencl_codegen(): - cl_kernel_dir = "./" - output_path = "./codegen/opencl_program.cc" + cl_kernel_dir = sys.argv[1] + output_path = sys.argv[2] + print("Generating OpenCL Kernels in "+cl_kernel_dir+" to "+output_path) if not os.path.exists(cl_kernel_dir): print(cl_kernel_dir + " doesn't exist!") @@ -54,7 +57,11 @@ def opencl_codegen(): opencl_source_map += "namespace MNN { \n" opencl_source_map += "extern const std::map> OpenCLProgramMap = \n { \n" - for file_name, file_source in opencl_code_maps.iteritems(): + if major_py_ver == 2: + items = opencl_code_maps.iteritems() + else: + items = opencl_code_maps.items() + for file_name, file_source in items: opencl_source_map += "{\n \"" opencl_source_map += file_name opencl_source_map += "\", \n" @@ -75,4 +82,3 @@ def opencl_codegen(): if __name__ == '__main__': opencl_codegen() - diff --git a/source/backend/opencl/execution/cl/pool_grad.cl b/source/backend/opencl/execution/cl/pool_grad.cl new file mode 100644 index 000000000..371cca5de --- /dev/null +++ b/source/backend/opencl/execution/cl/pool_grad.cl @@ -0,0 +1,54 @@ +#ifdef MNN_SUPPORT_FP16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif + +__constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; + +__kernel void maxpool_grad(__read_only image2d_t originInput, __read_only image2d_t originOutput, __read_only image2d_t 
inputGrad, __write_only image2d_t output, int2 shape, int2 poolShape, int2 kernelSize, int2 stride) { + const int2 pos = (int2)(get_global_id(1), get_global_id(0)); // read_imagef and write_imagef need (w, h) layout position + const int h = pos.y % shape.x, w = pos.x % shape.y; + const int hOffset_ = pos.y - h, wOffset_ = pos.x - w; + const int hOffset = (pos.y / shape.x) * poolShape.x, wOffset = (pos.x / shape.y) * poolShape.y; + const int hStart = ceil((float)(h - kernelSize.x + 1) / stride.x), hEnd = floor((float)h / stride.x); + const int wStart = ceil((float)(w - kernelSize.y + 1) / stride.y), wEnd = floor((float)w / stride.y); + FLOAT4 in0 = RI_F(originInput, SAMPLER, pos), res = 0; + for (int i = hStart; i <= hEnd; ++i) { + for (int j = wStart; j <= wEnd; ++j) { + FLOAT4 in1 = RI_F(originOutput, SAMPLER, (int2)(wOffset + j, hOffset + i)); + if (!any(isequal(in0, in1))) { + continue; + } + FLOAT4 grad = RI_F(inputGrad, SAMPLER, (int2)(wOffset + j, hOffset + i)); + FLOAT4 flag = 1; + const int hStart_ = i * stride.x; + const int wStart_ = j * stride.y, wEnd_ = wStart_ + kernelSize.y; + for (int i_ = hStart_; i_ < h; ++i_) { + for (int j_ = wStart_; j_ < wEnd_; ++j_) { + FLOAT4 in0_ = RI_F(originInput, SAMPLER, (int2)(wOffset_ + j_, hOffset_ + i_)); + flag = flag * select((FLOAT4)1, (FLOAT4)0, isequal(in1, in0_)); + } + } + for (int j_ = wStart_; j_ < w; ++j_) { + FLOAT4 in0_ = RI_F(originInput, SAMPLER, (int2)(wOffset_ + j_, hOffset_ + h)); + flag = flag * select((FLOAT4)1, (FLOAT4)0, isequal(in1, in0_)); + } + res = res + select((FLOAT4)0, grad * flag, isequal(in0, in1)); + } + } + WI_F(output, pos, res); +} + +__kernel void avepool_grad(__read_only image2d_t inputGrad, __write_only image2d_t output, int2 shape, int2 poolShape, int2 kernelSize, int2 stride) { + const int2 pos = (int2)(get_global_id(1), get_global_id(0)); + const int h = pos.y % shape.x, w = pos.x % shape.y; + const int hOffset = (pos.y / shape.x) * poolShape.x, wOffset = (pos.x / shape.y) * 
poolShape.y; + const int hStart = ceil((float)(h - kernelSize.x + 1) / stride.x), hEnd = floor((float)h / stride.x); + const int wStart = ceil((float)(w - kernelSize.y + 1) / stride.y), wEnd = floor((float)w / stride.y); + FLOAT4 sum = 0; + for (int i = hStart; i <= hEnd; ++i) { + for (int j = wStart; j <= wEnd; ++j) { + sum = sum + RI_F(inputGrad, SAMPLER, (int2)(wOffset + j, hOffset + i)); + } + } + WI_F(output, pos, sum / (kernelSize.x * kernelSize.y)); +} diff --git a/source/backend/opencl/execution/cl/reduction.cl b/source/backend/opencl/execution/cl/reduction.cl new file mode 100644 index 000000000..c1db5aa86 --- /dev/null +++ b/source/backend/opencl/execution/cl/reduction.cl @@ -0,0 +1,455 @@ +// TODO: use INIT_SCALAR_VALUE, OPERATOR, FINAL_OPERATOR_ON_CHANNEL macro abstract and simplify code +// TODO: support reduce dims include batch +// TODO: support keep_dim=False +// TODO: fix channel reduce result re-pack problem +#ifdef MNN_SUPPORT_FP16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif +#define GLOBAL_SIZE_3_DIMS \ + __private const int global_size_dim0, __private const int global_size_dim1, __private const int global_size_dim2, + +__constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; + +__kernel void reduce_sum_all(__read_only image2d_t input, __write_only image2d_t output, int width, int channel) { + if (get_global_id(0) != 0) { + return; + } + const int total_h = get_image_dim(input).y, channelDiv4 = channel / 4; + FLOAT sum = 0; + for (int c = 0; c < channelDiv4; ++c) { + for (int i = 0; i < total_h; ++i) { + for (int w = 0; w < width; ++w) { + FLOAT4 in = RI_F(input, SAMPLER, (int2)(c * width + w, i)); + sum = sum + in.x + in.y + in.z + in.w; + } + } + } + const int remain = channel % 4; + if (remain != 0) { + const int offset = channelDiv4 * width; + for (int i = 0; i < total_h; ++i) { + for (int w = 0; w < width; ++w) { + FLOAT4 in = RI_F(input, SAMPLER, (int2)(offset + w, i)); + if (remain == 
1) { + sum = sum + in.x; + } else if (remain == 2) { + sum = sum + in.x + in.y; + } else if (remain == 3) { + sum = sum + in.x + in.y + in.z; + } + } + } + } + WI_F(output, (int2)(0, 0), (FLOAT4)sum); +} + +__kernel void reduce_along_channel(__read_only image2d_t input, __write_only image2d_t output, int width, int meanAggregation) { + const int channel_block_idx = get_global_id(0), total_h = get_image_dim(input).y; + const int w_offset = channel_block_idx * width; + FLOAT4 out = (FLOAT4){0, 0, 0, 0}; + for (int i = 0; i < total_h; ++i) { + for (int j = 0; j < width; ++j) { + FLOAT4 in = RI_F(input, SAMPLER, (int2)(w_offset + j, i)); + out = out + in; + } + } + int2 pos = (int2)(0, channel_block_idx); + if (get_image_dim(output).y == 1) { + pos = (int2)(channel_block_idx, 0); + } + if (meanAggregation) { + out = out / (total_h * width); + } + WI_F(output, pos, out); +} + +__kernel void reduce_sum_use_local_along_channel(__read_only image2d_t input, __write_only image2d_t output, int width, int meanAggregation, int step, __local float* results, int local_size) { + const int tile_index = get_global_id(0), channel_block_idx = get_global_id(1); + const int h_start = tile_index * step, h_end = min((tile_index + 1) * step, get_image_dim(input).y); + const int w_offset = channel_block_idx * width; + FLOAT4 out = (FLOAT4){0, 0, 0, 0}; + for (int i = h_start; i < h_end; ++i) { + for (int j = 0; j < width; ++j) { + FLOAT4 in = RI_F(input, SAMPLER, (int2)(w_offset + j, i)); + out = out + in; + } + } + results[tile_index * 4 + 0] = out.x; + results[tile_index * 4 + 1] = out.y; + results[tile_index * 4 + 2] = out.z; + results[tile_index * 4 + 3] = out.w; + barrier(CLK_LOCAL_MEM_FENCE); + if (tile_index == 0) { + out = 0; + for (int i = 0; i < local_size; i += 4) { + out.x = out.x + results[i]; + out.y = out.y + results[i + 1]; + out.z = out.z + results[i + 2]; + out.w = out.w + results[i + 3]; + } + if (meanAggregation) { + out = out / (get_image_dim(input).y * width); + } + 
int2 pos = (int2)(0, channel_block_idx); + if (get_image_dim(output).y == 1) { + pos = (int2)(channel_block_idx, 0); + } + WI_F(output, pos, out); + } +} + +__kernel void reduct_1d(GLOBAL_SIZE_3_DIMS + __read_only image2d_t input, + __write_only image2d_t output, + __private const int groupWorkSize, + __private const int computeNum, + __private const int lastNum, + __private const int reductSize, + __private const int workNum, + __private const int groupNum, + __private const int channels + ) { + const int w = get_local_id(0); + const int h = get_local_id(1); + const int bg= get_global_id(2); + const int width = get_local_size(0); + const int index = mad24(h, width, w); + const int b = bg / groupNum; + const int group_index = mad24(b, -groupNum, bg); + const int remain_channel = channels % 4; + + FLOAT4 in; + FLOAT4 scale; + int pos_x, pos_y; +// MAX +#if REDUCE_TYPE == 1 + FLOAT4 tempResult = (FLOAT4){-MAXFLOAT, -MAXFLOAT, -MAXFLOAT, -MAXFLOAT}; + FLOAT4 out = (FLOAT4){-MAXFLOAT, -MAXFLOAT, -MAXFLOAT, -MAXFLOAT}; +// MIN +#elif REDUCE_TYPE == 2 + FLOAT4 tempResult = (FLOAT4){MAXFLOAT, MAXFLOAT, MAXFLOAT, MAXFLOAT}; + FLOAT4 out = (FLOAT4){MAXFLOAT, MAXFLOAT, MAXFLOAT, MAXFLOAT}; +// PROD +#elif REDUCE_TYPE == 3 + FLOAT4 tempResult = (FLOAT4){1, 1, 1, 1}; + FLOAT4 out = (FLOAT4){1, 1, 1, 1}; +#else +// MEAN or SUM + FLOAT4 tempResult = (FLOAT4){0, 0, 0, 0}; + FLOAT4 out = (FLOAT4){0, 0, 0, 0}; +#endif + const bool greater_last = (lastNum > 0 && index >= lastNum); + const int actual_computeNum = select(computeNum, computeNum - 1, greater_last); + if (actual_computeNum == 0) + return; + const int base_offset = mul24(index, actual_computeNum); + const int offset= select(base_offset, base_offset + lastNum, greater_last); + scale = (FLOAT4)(1.f / reductSize); +#ifdef REDUCTION_C + scale = (FLOAT4)(1.f / channels); +#endif +#pragma unroll + for (int i = 0; i < actual_computeNum; ++i) { + int element_idx = offset + i; +#pragma unroll + for (int j = 0; j < reductSize; j++) 
{ +#ifdef REDUCTION_H + pos_x = mad24(group_index, workNum, element_idx); + pos_y = mad24(b, reductSize, j); + in = RI_F(input, SAMPLER, (int2)(pos_x, pos_y)); +#endif +#ifdef REDUCTION_W + pos_x = mad24(group_index, reductSize, j); + pos_y = mad24(b, workNum, element_idx); + in = RI_F(input, SAMPLER, (int2)(pos_x, pos_y)); +#endif +#ifdef REDUCTION_C + pos_x = mad24(j, workNum, element_idx); + pos_y = mad24(b, groupNum, group_index); + in = RI_F(input, SAMPLER, (int2)(pos_x, pos_y)); + if (remain_channel != 0 && j == (reductSize - 1)) { + if (remain_channel == 1) { +#if REDUCE_TYPE == 1 + in = (FLOAT4){in.x, -MAXFLOAT, -MAXFLOAT, -MAXFLOAT}; +#elif REDUCE_TYPE == 2 + in = (FLOAT4){in.x, MAXFLOAT, MAXFLOAT, MAXFLOAT}; +#elif REDUCE_TYPE == 3 + in = (FLOAT4){in.x, 1, 1, 1}; +#else + in = (FLOAT4){in.x, 0, 0, 0}; +#endif + } else if (remain_channel == 2) { +#if REDUCE_TYPE == 1 + in = (FLOAT4){in.x, in.y, -MAXFLOAT, -MAXFLOAT}; +#elif REDUCE_TYPE == 2 + in = (FLOAT4){in.x, in.y, MAXFLOAT, MAXFLOAT}; +#elif REDUCE_TYPE == 3 + in = (FLOAT4){in.x, in.y, 1, 1}; +#else + in = (FLOAT4){in.x, in.y, 0, 0}; +#endif + } else if (remain_channel == 3) { +#if REDUCE_TYPE == 1 + in.w = -MAXFLOAT; +#elif REDUCE_TYPE == 2 + in.w = MAXFLOAT; +#elif REDUCE_TYPE == 3 + in.w = 1; +#else + in.w = 0; +#endif + } + } +#endif +#if REDUCE_TYPE == 1 + tempResult = fmax(tempResult, in); +#elif REDUCE_TYPE == 2 + tempResult = fmin(tempResult, in); +#elif REDUCE_TYPE == 3 + tempResult = tempResult * in; +#else + tempResult = tempResult + in; +#endif + } + +#if REDUCE_TYPE == 0 + tempResult = tempResult * scale; +#endif + out = tempResult; +#ifdef REDUCTION_H + WI_F(output, (int2)(pos_x, b), out); +#endif +#ifdef REDUCTION_W + WI_F(output, (int2)(group_index, pos_y), out); +#endif +#ifdef REDUCTION_C +#if REDUCE_TYPE == 1 + float tmp_value = fmax(out.x, out.y); + tmp_value = fmax(out.z, tmp_value); + out.x = fmax(out.w, tmp_value); +#elif REDUCE_TYPE == 2 + float tmp_value = fmin(out.x, out.y); + 
tmp_value = fmin(out.z, tmp_value); + out.x = fmin(out.w, tmp_value); +#elif REDUCE_TYPE == 3 + out.x = out.x * out.y * out.z * out.w; +#else + out.x = out.x + out.y + out.z + out.w; +#endif + out = (FLOAT4){out.x, 0, 0, 0}; + WI_F(output, (int2)(pos_x % workNum, pos_y), out); +#endif + } +} + +__kernel void reduct_2d(GLOBAL_SIZE_3_DIMS + __read_only image2d_t input, + __write_only image2d_t output, + __global FLOAT4 *groupBuffer, + __global FLOAT *leftBuffer, + __private const int groupWorkSize, + __private const int computeNum, + __private const int lastNum, + __private const int inputHeight, + __private const int inputWidth, + __private const int leftSize, + __private const int channels + ) { + const int w = get_local_id(0); + const int h = get_local_id(1); + const int bl= get_global_id(2); + const int width = get_local_size(0); + const int index = mad24(h, width, w); + const int b = bl / leftSize; + const int left_index = mad24(b, -leftSize, bl); + const int remain_channel = channels % 4; + + FLOAT4 in; + bool channel_flag; + FLOAT4 scale; +// MAX +#if REDUCE_TYPE == 1 + FLOAT4 tempResult = (FLOAT4){-MAXFLOAT, -MAXFLOAT, -MAXFLOAT, -MAXFLOAT}; + FLOAT4 allResult = (FLOAT4){-MAXFLOAT, 0, 0, 0}; +// MIN +#elif REDUCE_TYPE == 2 + FLOAT4 tempResult = (FLOAT4){MAXFLOAT, MAXFLOAT, MAXFLOAT, MAXFLOAT}; + FLOAT4 allResult = (FLOAT4){MAXFLOAT, 0, 0, 0}; +// PROD +#elif REDUCE_TYPE == 3 + FLOAT4 tempResult = (FLOAT4){1, 1, 1, 1}; + FLOAT4 allResult = (FLOAT4){1, 0, 0, 0}; +#else +// MEAN or SUM + FLOAT4 tempResult = (FLOAT4){0, 0, 0, 0}; + FLOAT4 allResult = (FLOAT4){0, 0, 0, 0}; +#endif + const bool greater_last = (lastNum > 0 && index >= lastNum); + // After last index, each kernel only computes (computeNum - 1) elements. 
+ const int actual_computeNum = select(computeNum, computeNum - 1, greater_last); + const int base_offset = mul24(index, actual_computeNum); + const int offset= select(base_offset, base_offset + lastNum, greater_last); +#pragma unroll + for (int i = 0; i < actual_computeNum; ++i) { + int element_idx = offset + i; +#ifdef REDUCTION_HW + int h_idx = element_idx / inputWidth; + int w_idx = mad24(h_idx, -inputWidth, element_idx); + int pos_x = mad24(left_index, inputWidth, w_idx); + int pos_y = mad24(b, inputHeight, h_idx); + in = RI_F(input, SAMPLER, (int2)(pos_x, pos_y)); +#endif +#ifdef REDUCTION_HC + int h_idx = element_idx / inputWidth; + int w_idx = mad24(h_idx, -inputWidth, element_idx); + int pos_x = mad24(w_idx, leftSize, left_index); + int pos_y = mad24(b, inputHeight, h_idx); + in = RI_F(input, SAMPLER, (int2)(pos_x, pos_y)); + channel_flag = (remain_channel != 0 && w_idx == (inputWidth - 1)); +#endif +#ifdef REDUCTION_WC + int c_idx = element_idx / inputWidth; + int pos_x = element_idx; + int pos_y = mad24(b, leftSize, left_index); + in = RI_F(input, SAMPLER, (int2)(pos_x, pos_y)); + channel_flag = (remain_channel != 0 && c_idx == (inputHeight - 1)); +#endif +#ifndef REDUCTION_HW + if (channel_flag) { + if (remain_channel == 1) { +#if REDUCE_TYPE == 1 + in = (FLOAT4){in.x, -MAXFLOAT, -MAXFLOAT, -MAXFLOAT}; +#elif REDUCE_TYPE == 2 + in = (FLOAT4){in.x, MAXFLOAT, MAXFLOAT, MAXFLOAT}; +#elif REDUCE_TYPE == 3 + in = (FLOAT4){in.x, 1, 1, 1}; +#else + in = (FLOAT4){in.x, 0, 0, 0}; +#endif + } else if (remain_channel == 2) { +#if REDUCE_TYPE == 1 + in = (FLOAT4){in.x, in.y, -MAXFLOAT, -MAXFLOAT}; +#elif REDUCE_TYPE == 2 + in = (FLOAT4){in.x, in.y, MAXFLOAT, MAXFLOAT}; +#elif REDUCE_TYPE == 3 + in = (FLOAT4){in.x, in.y, 1, 1}; +#else + in = (FLOAT4){in.x, in.y, 0, 0}; +#endif + } else if (remain_channel == 3) { +#if REDUCE_TYPE == 1 + in.w = -MAXFLOAT; +#elif REDUCE_TYPE == 2 + in.w = MAXFLOAT; +#elif REDUCE_TYPE == 3 + in.w = 1; +#else + in.w = 0; +#endif + } + } 
+#endif +#if REDUCE_TYPE == 1 + tempResult = fmax(tempResult, in); +#elif REDUCE_TYPE == 2 + tempResult = fmin(tempResult, in); +#elif REDUCE_TYPE == 3 + tempResult = tempResult * in; +#else + tempResult = tempResult + in; +#endif + } +#ifdef REDUCTION_HW + scale = (FLOAT4)(1.f / (inputHeight * inputWidth)); +#endif +#ifdef REDUCTION_HC +#if REDUCE_W == 1 + scale = (FLOAT4)(1.f / (inputHeight * channels * leftSize)); +#else + scale = (FLOAT4)(1.f / (inputHeight * channels)); +#endif +#endif +#ifdef REDUCTION_WC + scale = (FLOAT4)(1.f / (inputWidth * channels)); +#endif + +// MEAN +#if REDUCE_TYPE == 0 + tempResult = tempResult * scale; +#endif + groupBuffer[index] = tempResult; + +#ifdef NON_QUALCOMM_ADRENO + barrier(CLK_LOCAL_MEM_FENCE); +#endif + + if (w == 0 && h == 0) { +#if REDUCE_TYPE == 1 + FLOAT4 out = (FLOAT4){-MAXFLOAT, -MAXFLOAT, -MAXFLOAT, -MAXFLOAT}; +#elif REDUCE_TYPE == 2 + FLOAT4 out = (FLOAT4){MAXFLOAT, MAXFLOAT, MAXFLOAT, MAXFLOAT}; +#elif REDUCE_TYPE == 3 + FLOAT4 out = (FLOAT4){1, 1, 1, 1}; +#else + FLOAT4 out = (FLOAT4){0, 0, 0, 0}; +#endif +#pragma unroll + for (int i = 0; i < groupWorkSize; ++i) { +#if REDUCE_TYPE == 1 + out = fmax(out, groupBuffer[i]); +#elif REDUCE_TYPE == 2 + out = fmin(out, groupBuffer[i]); +#elif REDUCE_TYPE == 3 + out = out * groupBuffer[i]; +#else + out = out + groupBuffer[i]; +#endif + } +#ifdef REDUCTION_HW + WI_F(output, (int2)(left_index, b), out); +#endif +#ifndef REDUCTION_HW +#if REDUCE_TYPE == 1 + float tmp_value = fmax(out.x, out.y); + tmp_value = fmax(out.z, tmp_value); + out.x = fmax(out.w, tmp_value); +#elif REDUCE_TYPE == 2 + float tmp_value = fmin(out.x, out.y); + tmp_value = fmin(out.z, tmp_value); + out.x = fmin(out.w, tmp_value); +#elif REDUCE_TYPE == 3 + out.x = out.x * out.y * out.z * out.w; +#else + out.x = out.x + out.y + out.z + out.w; +#endif + out = (FLOAT4){out.x, 0, 0, 0}; +#endif +#ifdef REDUCTION_HC +#if REDUCE_W == 1 + leftBuffer[left_index] = out.x; +#ifdef NON_QUALCOMM_ADRENO + 
barrier(CLK_LOCAL_MEM_FENCE); +#endif + if (left_index == 0) { + for (int i = 0; i < leftSize; ++i) { +#if REDUCE_TYPE == 1 + allResult.x = fmax(allResult.x, leftBuffer[i]); +#elif REDUCE_TYPE == 2 + allResult.x = fmin(allResult.x, leftBuffer[i]); +#elif REDUCE_TYPE == 3 + allResult.x = allResult.x * leftBuffer[i]; +#else + allResult.x = allResult.x + leftBuffer[i]; +#endif + } + WI_F(output, (int2)(0, 0), allResult); + } +#else + WI_F(output, (int2)(left_index, b), out); +#endif +#endif +#ifdef REDUCTION_WC + WI_F(output, (int2)(0, b * leftSize + left_index), out); +#endif + } +} diff --git a/source/backend/opencl/execution/cl/softmax.cl b/source/backend/opencl/execution/cl/softmax.cl index 24d70a054..4f0b49a8a 100644 --- a/source/backend/opencl/execution/cl/softmax.cl +++ b/source/backend/opencl/execution/cl/softmax.cl @@ -82,16 +82,16 @@ __kernel void softmax_channel(GLOBAL_SIZE_3_DIMS __read_only image2d_t input, __ int cur_out_width_pos = mad24(channel_block_idx, global_size_dim1, width_idx); input_data = RI_F(input, SAMPLER, (int2)(cur_out_width_pos, batch_height_idx)) - float_max_value; - const int output_remain = mul24(channel_block_idx, 4) - output_channels; + const int output_remain = output_channels - mul24(channel_block_idx, 4); if (output_remain == 1) { - input_data.z = EXP(input_data.z) / accum_result; - input_data.y = EXP(input_data.y) / accum_result; input_data.x = EXP(input_data.x) / accum_result; } else if (output_remain == 2) { input_data.y = EXP(input_data.y) / accum_result; input_data.x = EXP(input_data.x) / accum_result; } else if (output_remain == 3) { + input_data.z = EXP(input_data.z) / accum_result; + input_data.y = EXP(input_data.y) / accum_result; input_data.x = EXP(input_data.x) / accum_result; } else{ input_data = EXP(input_data) / accum_result; diff --git a/source/backend/opencl/execution/cl/softmax_grad.cl b/source/backend/opencl/execution/cl/softmax_grad.cl new file mode 100644 index 000000000..5eefca072 --- /dev/null +++ 
b/source/backend/opencl/execution/cl/softmax_grad.cl @@ -0,0 +1,53 @@ +#ifdef MNN_SUPPORT_FP16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif + +__constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; + +__kernel void softmax_grad(__read_only image2d_t input0, __read_only image2d_t input1, __write_only image2d_t output, int step, int number, int axisOnC4) { + const int width = get_image_width(output); + const int number4 = (number + 3) / 4, remain = number4 * 4 - number; + const int idx = get_global_id(0) * step * number4 + get_global_id(1); + FLOAT4 sum; + if (axisOnC4) { + FLOAT temp = 0; + for (int i = 0, _idx = idx; i < number4; ++i, _idx += step) { + int2 pos = (int2)(_idx % width, _idx / width); + FLOAT4 out = RI_F(input0, SAMPLER, pos) * RI_F(input1, SAMPLER, pos); + if (i < number4 - 1 || remain == 0) { + temp = temp + out.x + out.y + out.z + out.w; + } else if (remain == 1) { + temp = temp + out.x + out.y + out.z; + } else if (remain == 2) { + temp = temp + out.x + out.y; + } else { + temp = temp + out.x; + } + } + sum = (FLOAT4)(temp); + } else { + sum = 0; + for (int i = 0, _idx = idx; i < number4; ++i, _idx += step) { + int2 pos = (int2)(_idx % width, _idx / width); + FLOAT4 temp = RI_F(input0, SAMPLER, pos) * RI_F(input1, SAMPLER, pos); + if (i < number4 - 1 || remain == 0) { + sum = sum + temp; + } else if (remain == 1) { + sum.x = sum.x + temp.x; + sum.y = sum.y + temp.y; + sum.z = sum.z + temp.z; + } else if (remain == 2) { + sum.x = sum.x + temp.x; + sum.y = sum.y + temp.y; + } else { + sum.x = sum.x + temp.x; + } + } + } + for (int i = 0, _idx = idx; i < number4; ++i, _idx += step) { + int2 pos = (int2)(_idx % width, _idx / width); + FLOAT4 out = RI_F(input0, SAMPLER, pos) * (RI_F(input1, SAMPLER, pos) - sum); + WI_F(output, pos, out); + } +} + diff --git a/source/backend/opencl/execution/cl/space_to_batch.cl b/source/backend/opencl/execution/cl/space_to_batch.cl index a5885abe1..32f96f0b1 
100644 --- a/source/backend/opencl/execution/cl/space_to_batch.cl +++ b/source/backend/opencl/execution/cl/space_to_batch.cl @@ -1,33 +1,41 @@ #ifdef MNN_SUPPORT_FP16 #pragma OPENCL EXTENSION cl_khr_fp16 : enable #endif +#define DEAL_NON_UNIFORM_DIM3(input1, input2, input3) \ + if (input1 >= global_size_dim0 || input2 >= global_size_dim1 || input3 >= global_size_dim2) { \ + return; \ + } + +#define GLOBAL_SIZE_3_DIMS \ + __private const int global_size_dim0, __private const int global_size_dim1, __private const int global_size_dim2, __constant sampler_t SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; -__kernel void space_to_batch(__read_only image2d_t uInput, __write_only image2d_t uOutput, +__kernel void space_to_batch(GLOBAL_SIZE_3_DIMS __read_only image2d_t uInput, __write_only image2d_t uOutput, __private const int4 inImageSize, __private const int4 outImgSize, __private const int2 padding, __private const int2 blockShape) { - int3 pos = (int3)(get_global_id(0), get_global_id(1), get_global_id(2)); - if (pos.x < outImgSize.x && pos.y < outImgSize.y) { - // pos.x -> w, pos.y -> h, pos.z -> c4 * b; - int outBatchIndex = pos.z / outImgSize.z; - int outChannelIndex = pos.z % outImgSize.z; - int inBatchIndex = outBatchIndex % inImageSize.w; - int sw = (outBatchIndex / inImageSize.w) % blockShape.y; - int sh = (outBatchIndex / inImageSize.w) / blockShape.y; - int validHeightStart = max(0, ((padding.x - sh + blockShape.x - 1) / blockShape.x)); - int validHeightEnd = min(outImgSize.y, ((inImageSize.y + padding.x - sh + blockShape.x - 1) / blockShape.x)); - int validWidthStart = max(0, ((padding.y - sw + blockShape.y - 1) / blockShape.y)); - int validWidthEnd = min(outImgSize.x, ((inImageSize.x + padding.y - sw + blockShape.y - 1) / blockShape.y)); - - int inPosX = pos.x * blockShape.y + sw - padding.y; - int inPosY = pos.y * blockShape.x + sh - padding.x; - int inPosZ = inBatchIndex * inImageSize.z + outChannelIndex; - - int inputX = 
select(inPosX + inPosZ * inImageSize.x, -1, pos.x < validWidthStart || pos.x >= validWidthEnd); - int inputY = - select(inPosY + inBatchIndex * inImageSize.y, -1, pos.y < validHeightStart || pos.y >= validHeightEnd); - - FLOAT4 res = RI_F(uInput, SAMPLER, (int2)(inputX, inputY)); - WI_F(uOutput, (int2)(pos.x + outChannelIndex * outImgSize.x, pos.y + outBatchIndex * outImgSize.y), res); - } + + const int out_c_idx = get_global_id(0); + const int ou_w_idx = get_global_id(1); + const int out_hb_idx = get_global_id(2); + + DEAL_NON_UNIFORM_DIM3(out_c_idx, ou_w_idx, out_hb_idx); + + const int out_b_idx = out_hb_idx / outImgSize.s1; + const int out_h_idx = out_hb_idx - mul24(out_b_idx, outImgSize.s1); + + const int r_b_idx = out_b_idx / inImageSize.s3; + const int in_b_idx = out_b_idx - mul24(r_b_idx, inImageSize.s3); + + const int r_b_w = r_b_idx / blockShape.s1; + const int in_h_idx = r_b_w + mul24(out_h_idx, blockShape.s0) - padding.s0; + const int in_w_idx = r_b_idx - mul24(r_b_w, blockShape.s1) + mul24(ou_w_idx, blockShape.s1) - padding.s1; + + const int input_x = select(mul24(out_c_idx, inImageSize.s0) + in_w_idx, -1, in_w_idx < 0 || in_w_idx >= inImageSize.s0); + const int input_y = select(mul24(in_b_idx, inImageSize.s1) + in_h_idx, -1, in_h_idx < 0 || in_h_idx >= inImageSize.s1); + + FLOAT4 value = RI_F(uInput, SAMPLER, (int2)(input_x, input_y)); + + WI_F(uOutput, (int2)(mul24(out_c_idx, outImgSize.s0) + ou_w_idx, out_hb_idx), value); } + + diff --git a/source/backend/opencl/execution/cl/unary.cl b/source/backend/opencl/execution/cl/unary.cl index f6ebb8298..eaaaf82a8 100644 --- a/source/backend/opencl/execution/cl/unary.cl +++ b/source/backend/opencl/execution/cl/unary.cl @@ -1,3 +1,6 @@ +#ifdef MNN_SUPPORT_FP16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#endif #define DEAL_NON_UNIFORM_DIM3(input1, input2, input3) \ if (input1 >= global_size_dim0 || input2 >= global_size_dim1 || input3 >= global_size_dim2) { \ return; \ diff --git 
a/source/backend/opengl/AllShader.cpp b/source/backend/opengl/AllShader.c similarity index 99% rename from source/backend/opengl/AllShader.cpp rename to source/backend/opengl/AllShader.c index 1f6371e97..f98133eec 100644 --- a/source/backend/opengl/AllShader.cpp +++ b/source/backend/opengl/AllShader.c @@ -1,4 +1,4 @@ -#include "../AllShader.hpp" +#include "AllShader.hpp" const char* glsl_convlutionDepthwise_glsl = "layout(std430) buffer;\n" "layout(FORMAT, binding=0) writeonly uniform mediump image3D uOutput;\n" diff --git a/source/backend/opengl/CMakeLists.txt b/source/backend/opengl/CMakeLists.txt index e5056c7fb..d04009657 100644 --- a/source/backend/opengl/CMakeLists.txt +++ b/source/backend/opengl/CMakeLists.txt @@ -1,35 +1,27 @@ -set(CMAKE_BUILD_TYPE release) +if(MNN_OPENGL) + FILE(GLOB_RECURSE MNN_OpenGL_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp ${CMAKE_CURRENT_LIST_DIR}/*.c) + option(MNN_OPENGL_REGEN "Regenerate OpenGL Shaders." OFF) -file(GLOB_RECURSE SRCS "*") -include_directories("../../../include/") -include_directories("./execution/") - -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti -fno-exceptions") -option(GL_BUILD_FOR_ANDROID "Build OpenGL For Android" OFF) -add_definitions(-fvisibility=hidden) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden") -if(SYSTEM.Android AND NOT MNN_BUILD_FOR_ANDROID_COMMAND) - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${NATIVE_LIBRARY_OUTPUT}/${ANDROID_ABI}) -endif() -add_library( - MNN_GL - SHARED - ${SRCS} -) - -target_include_directories(MNN_GL PRIVATE - ${CMAKE_CURRENT_BINARY_DIR} - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/.. 
-) - -target_link_libraries(MNN_GL - GLESv3 - android - EGL - log - m - MNN + IF(MNN_OPENGL_REGEN) + add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/AllShader.c" + COMMAND ${PYTHON_EXECUTABLE} + "${CMAKE_CURRENT_LIST_DIR}/makeshader.py" + "${CMAKE_CURRENT_LIST_DIR}/glsl/" + "${CMAKE_SOURCE_DIR}/include/MNN/backend/opengl/shaders/AllShader.h" + "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp" + COMMENT "OpenGL Code Generation" ) + add_custom_target (MNNOpenGLCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/AllShader.c") + ENDIF() + + add_library(MNNOpenGL OBJECT ${MNN_OpenGL_SRC} "${CMAKE_CURRENT_LIST_DIR}/AllShader.c") + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNOpenGL) + SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) + SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) + target_include_directories(MNNOpenGL PRIVATE ${CMAKE_CURRENT_LIST_DIR}/) + IF(MNN_OPENGL_REGEN) + add_dependencies(MNNOpenGL MNNOpenGLCodeGen) + ENDIF() +endif() diff --git a/source/backend/opengl/GLBackend.cpp b/source/backend/opengl/GLBackend.cpp index 9759df23a..f752fdd14 100644 --- a/source/backend/opengl/GLBackend.cpp +++ b/source/backend/opengl/GLBackend.cpp @@ -10,12 +10,12 @@ #include "AllShader.hpp" #include "GLSSBOBuffer.hpp" #include "GLTexture.hpp" -#include "AutoTime.hpp" +#include #include "GLBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #include -#include "Tensor.hpp" +#include namespace MNN { namespace OpenGL { @@ -69,7 +69,7 @@ bool GLBackend::isSupportHalf() const{ GLenum GLBackend::getTextrueFormat() const{ return mTextrueFormat; } - + std::string GLBackend::getImageFormat() const{ return mImageFormat; } @@ -102,12 +102,12 @@ GLBackend::GLBackend(BackendConfig::PrecisionMode precision, BackendConfig::Powe mRuntime->mNchw2ImageProgram = getTreatedProgram(glsl_nchw_buffer_to_image_glsl); mRuntime->mNc4hw42ImageProgram = getTreatedProgram(glsl_nc4hw4_buffer_to_image_glsl); 
mRuntime->mImage2Nc4hw4Program = getTreatedProgram(glsl_image_to_nc4hw4_buffer_glsl); - + std::vector prefix; setLocalSize(prefix, mLocalSize, 8, 8, 1); mRuntime->mNhwc2ImageProgram = getProgram("nhwc_buffer_to_image", glsl_nhwc_buffer_to_image_glsl, prefix); mRuntime->mImage2NhwcProgram = getProgram("image_to_nhwc_buffer", glsl_image_to_nhwc_buffer_glsl, prefix); - + const GLubyte* renderer = glGetString(GL_RENDERER); if(renderer != nullptr){ MNN_PRINT("gpu type : %s \n", (char*)renderer); @@ -119,7 +119,7 @@ GLBackend::GLBackend(BackendConfig::PrecisionMode precision, BackendConfig::Powe mGpuType = OTHER; } } - + const GLubyte* version = glGetString(GL_VERSION); if(version != nullptr){ MNN_PRINT("gl version : %s \n", version); @@ -151,11 +151,11 @@ void GLBackend::copyImageToNhwcBuffer(GLuint textureId, float *outputData, int w wait(); auto depthQuad = UP_DIV(channel, 4); auto size = depthQuad * 4 * width * height * sizeof(float); - + auto buffer = std::shared_ptr(new GLSSBOBuffer(size)); - + mRuntime->mImage2NhwcProgram->useProgram(); - + glBindImageTexture(0, textureId, 0, GL_TRUE, 0, GL_READ_ONLY, getTextrueFormat()); OPENGL_CHECK_ERROR; glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, buffer->getId()); @@ -166,29 +166,29 @@ void GLBackend::copyImageToNhwcBuffer(GLuint textureId, float *outputData, int w OPENGL_CHECK_ERROR; compute(UP_DIV(width, mLocalSize[0]), UP_DIV(height, mLocalSize[1]), UP_DIV(depthQuad, mLocalSize[2])); OPENGL_CHECK_ERROR; - + glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT); OPENGL_CHECK_ERROR; - + auto gpuoutput = buffer->map(GL_MAP_READ_BIT); if(gpuoutput != nullptr){ ::memcpy(outputData, gpuoutput, height * width * channel * sizeof(float)); } buffer->unmap(); } - + void GLBackend::copyNhwcBufferToImage(GLuint textureId, const float *inputData, int width, int height, int channel) const { - + int c_4 = UP_DIV(channel, 4); auto size = ROUND_UP(channel, 4) * width * height * sizeof(float); auto buffer = std::shared_ptr(new 
GLSSBOBuffer(size)); - + auto gpuoutput = buffer->map(GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT); if(gpuoutput != nullptr){ ::memcpy(gpuoutput, inputData, channel*height*width * sizeof(float)); } buffer->unmap(); - + mRuntime->mNhwc2ImageProgram->useProgram(); glBindImageTexture(0, textureId, 0, GL_TRUE, 0, GL_WRITE_ONLY, getTextrueFormat()); @@ -203,22 +203,22 @@ void GLBackend::copyNhwcBufferToImage(GLuint textureId, const float *inputData, OPENGL_CHECK_ERROR; } - + void GLBackend::wait() const { - + #ifdef USE_GL_FINISH glFinish(); #else glFlush(); #endif - + } - + void GLBackend::compute(int dim1, int dim2, int dim3, bool needWait) const { wait(); glDispatchCompute(dim1, dim2, dim3); } - + void GLBackend::download(GLuint textureId, float *outputData, int d1, int d2, int d3, bool align) const { wait(); auto depthQuad = UP_DIV(d3, 4); @@ -264,7 +264,7 @@ void GLBackend::upload(GLuint textureId, const float *inputData, int width, int mRuntime->mTempBuffer = std::shared_ptr(new GLSSBOBuffer(size)); } auto &buffer = mRuntime->mTempBuffer; - + auto gpuoutput = buffer->map(GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT); if(gpuoutput != nullptr){ if (align) { @@ -317,13 +317,13 @@ void GLBackend::onExecuteBegin() const { } void GLBackend::onCopyBuffer(const Tensor *srcTensor, const Tensor *dstTensor) const { - + std::vector inputShape = tensorShapeFormat(srcTensor); int ib = inputShape.at(0); int ih = inputShape.at(1); int iw = inputShape.at(2); int ic = inputShape.at(3); - + // OpenGL -> Host if (NULL == srcTensor->buffer().host && srcTensor->buffer().device > 0) { if(TensorUtils::getDescribe(dstTensor)->dimensionFormat == MNN_DATA_FORMAT_NHWC){ @@ -344,7 +344,7 @@ void GLBackend::onCopyBuffer(const Tensor *srcTensor, const Tensor *dstTensor) c }else{ MNN_ASSERT(false); } - + } bool GLBackend::onClearBuffer() { @@ -432,7 +432,7 @@ class GLBackendCreator : public BackendCreator { delete backend; } } - return nullptr; + return nullptr; } }; diff --git 
a/source/backend/opengl/GLBackend.hpp b/source/backend/opengl/GLBackend.hpp index 1b26a5d33..d25309ddc 100644 --- a/source/backend/opengl/GLBackend.hpp +++ b/source/backend/opengl/GLBackend.hpp @@ -12,15 +12,15 @@ #include #include #include -#include "Backend.hpp" -#include "GLContext.hpp" -#include "GLProgram.hpp" -#include "GLSSBOBuffer.hpp" -#include "GLTexture.hpp" +#include "core/Backend.hpp" +#include "backend/opengl/GLContext.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLSSBOBuffer.hpp" +#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" -#include "GLUtils.hpp" -#include "TensorUtils.hpp" -#include "GLHead.hpp" +#include "backend/opengl/GLUtils.hpp" +#include "core/TensorUtils.hpp" +#include "backend/opengl/GLHead.hpp" namespace MNN { namespace OpenGL { @@ -34,23 +34,23 @@ class GLBackend : public Backend { void copyImageToNhwcBuffer(GLuint textureId, float *outputData, int width, int height, int channel) const; void copyNhwcBufferToImage(GLuint textureId, const float *inputData, int width, int height, int channel) const; - + std::shared_ptr getProgram(const std::string& key, const char* content); std::shared_ptr getProgram(const std::string& key, const char* content, const std::vector& prefix); - + enum GPUType { ADRENO = 0, MALI = 1, OTHER = 2 }; inline GPUType gpuType() const { return mGpuType; } - + inline int glVersion() const { return mVersion; } - + void wait() const; - + void compute(int dim1, int dim2, int dim3, bool needWait = false) const; /*For Buffer alloc and release*/ @@ -92,7 +92,7 @@ class GLBackend : public Backend { std::shared_ptr mImage2NchwProgram; std::shared_ptr mNc4hw42ImageProgram; std::shared_ptr mImage2Nc4hw4Program; - + std::shared_ptr mNhwc2ImageProgram; std::shared_ptr mImage2NhwcProgram; @@ -107,7 +107,7 @@ class GLBackend : public Backend { GPUType mGpuType = OTHER; int mVersion = 0; int mLocalSize[3]; - bool mIsCreateError{false}; + bool mIsCreateError{false}; bool 
mIsSupportHalf{false}; GLenum mTextrueFormat{GL_RGBA32F}; std::string mImageFormat{"rgba32f"}; @@ -118,14 +118,14 @@ inline std::vector tensorShapeFormat(const Tensor *input) { int iC = std::max(1, input->channel()); int iH = std::max(1, input->height()); int iW = std::max(1, input->width()); - + if (input->dimensions() == 3) { iN = 1; iH = input->buffer().dim[0].extent; iW = input->buffer().dim[1].extent; iC = input->buffer().dim[2].extent; } - + if (input->dimensions() == 2) { iN = input->buffer().dim[0].extent; iH = 1; @@ -138,15 +138,15 @@ inline std::vector tensorShapeFormat(const Tensor *input) { iW = 1; iC = input->buffer().dim[0].extent; } - + #ifdef LOG_VERBOSE MNN_PRINT("dim %d : [%d, %d, %d, %d] \n",input->dimensions(), iN, iH, iW, iC); #endif std::vector shape_vec{iN, iH, iW, iC}; - + return shape_vec; } - + template class GLCreatorRegister { public: diff --git a/source/backend/opengl/execution/GLBinary.cpp b/source/backend/opengl/GLBinary.cpp similarity index 96% rename from source/backend/opengl/execution/GLBinary.cpp rename to source/backend/opengl/GLBinary.cpp index 49f6a8744..97d0c082e 100644 --- a/source/backend/opengl/execution/GLBinary.cpp +++ b/source/backend/opengl/GLBinary.cpp @@ -8,10 +8,10 @@ #include "GLBinary.hpp" #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenGL { @@ -20,9 +20,9 @@ GLBinary::GLBinary(const std::vector &inputs, const Op *op, Backend *b } GLBinary::~GLBinary() { - + } - + ErrorCode GLBinary::onResize(const std::vector &inputs, const std::vector &outputs) { std::vector prefix; setLocalSize(prefix, mLocalSize, 8, 8, 1); @@ -47,31 +47,31 @@ ErrorCode GLBinary::onResize(const std::vector &inputs, const std::vec ErrorCode GLBinary::onExecute(const std::vector &inputs, const std::vector 
&outputs) { MNN_ASSERT(2 == inputs.size()); MNN_ASSERT(1 == outputs.size()); - + auto input0 = inputs[0]; auto input1 = inputs[1]; auto output = outputs[0]; - + std::vector inputShape = tensorShapeFormat(input0); std::vector outputShape = tensorShapeFormat(output); - + int ib = inputShape.at(0); int ih = inputShape.at(1); int iw = inputShape.at(2); int ic = inputShape.at(3); int ic_4 = UP_DIV(ic, 4); - + int ob = outputShape.at(0); int oh = outputShape.at(1); int ow = outputShape.at(2); int oc = outputShape.at(3); int oc_4 = UP_DIV(oc, 4); - + MNN_ASSERT(input0->getType().code == halide_type_float); MNN_ASSERT(input0->dimensions() == input1->dimensions()); const auto intputFormat = TensorUtils::getDescribe(input0)->dimensionFormat; - + mProgram->useProgram(); glBindImageTexture(0, output->deviceId(), 0, GL_TRUE, 0, GL_WRITE_ONLY, ((GLBackend *)backend())->getTextrueFormat()); { @@ -94,13 +94,13 @@ ErrorCode GLBinary::onExecute(const std::vector &inputs, const std::ve return NO_ERROR; } - + class BinaryCreator : public GLBackend::Creator { public: virtual ~BinaryCreator() = default; virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const override { - + MNN_ASSERT(inputs.size() > 1); auto input0 = inputs[0]; // Don't support broatcast @@ -120,7 +120,7 @@ class BinaryCreator : public GLBackend::Creator { } } } - + return new GLBinary(inputs, op, backend); } }; diff --git a/source/backend/opengl/execution/GLBinary.hpp b/source/backend/opengl/GLBinary.hpp similarity index 87% rename from source/backend/opengl/execution/GLBinary.hpp rename to source/backend/opengl/GLBinary.hpp index f05904088..5a96e1a4d 100644 --- a/source/backend/opengl/execution/GLBinary.hpp +++ b/source/backend/opengl/GLBinary.hpp @@ -8,9 +8,9 @@ #ifndef GLBinary_H #define GLBinary_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" 
+#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/execution/GLConcat.cpp b/source/backend/opengl/GLConcat.cpp similarity index 95% rename from source/backend/opengl/execution/GLConcat.cpp rename to source/backend/opengl/GLConcat.cpp index 591a65760..f5bfd5620 100644 --- a/source/backend/opengl/execution/GLConcat.cpp +++ b/source/backend/opengl/GLConcat.cpp @@ -7,16 +7,16 @@ // #include "GLConcat.hpp" -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" namespace MNN { namespace OpenGL { GLConcat::GLConcat(const std::vector &inputs, const Op *op, Backend *bn): Execution(bn) { mAxis = op->main_as_Axis()->axis(); mProgram = ((GLBackend *)backend())->getProgram("blit", glsl_blit_glsl); } - + GLConcat::~GLConcat() { } @@ -30,11 +30,11 @@ ErrorCode GLConcat::onExecute(const std::vector &inputs, const std::ve auto inputTensor = inputs[i]; std::vector inputShape = tensorShapeFormat(inputTensor); - + int sy = inputShape.at(1); int sx = inputShape.at(2); int ic = inputShape.at(3); - + int sz = UP_DIV(ic, 4); mProgram->useProgram(); @@ -51,7 +51,7 @@ ErrorCode GLConcat::onExecute(const std::vector &inputs, const std::ve OPENGL_CHECK_ERROR; ((GLBackend *)backend())->compute(UP_DIV(sx, 4), UP_DIV(sy, 4), UP_DIV(sz, 4)); - + OPENGL_CHECK_ERROR; if (sx != outputShape.at(2)) { @@ -62,16 +62,16 @@ ErrorCode GLConcat::onExecute(const std::vector &inputs, const std::ve dz += sz; } } - + return NO_ERROR; } - + class ConcatCreator : public GLBackend::Creator { public: virtual ~ConcatCreator() = default; virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const override { - + auto axis = op->main_as_Axis()->axis(); if (0 > axis) { axis = outputs[0]->dimensions() + axis; @@ -90,11 +90,11 @@ class 
ConcatCreator : public GLBackend::Creator { } } } - + return new GLConcat(inputs, op, backend); } }; - + GLCreatorRegister __concat_op(OpType_Concat); } // namespace OpenGL } // namespace MNN diff --git a/source/backend/opengl/execution/GLConcat.hpp b/source/backend/opengl/GLConcat.hpp similarity index 84% rename from source/backend/opengl/execution/GLConcat.hpp rename to source/backend/opengl/GLConcat.hpp index db7eb3eed..6f7a73b3c 100644 --- a/source/backend/opengl/execution/GLConcat.hpp +++ b/source/backend/opengl/GLConcat.hpp @@ -9,9 +9,9 @@ #ifndef MNN_GLCONCAT_H #define MNN_GLCONCAT_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" namespace MNN { namespace OpenGL { class GLConcat : public Execution { diff --git a/source/backend/opengl/GLContext.cpp b/source/backend/opengl/GLContext.cpp index c02f8dac5..53be99675 100644 --- a/source/backend/opengl/GLContext.cpp +++ b/source/backend/opengl/GLContext.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLContext.hpp" +#include "backend/opengl/GLContext.hpp" namespace MNN { namespace OpenGL { GLContext::GLContext() { diff --git a/source/backend/opengl/GLContext.hpp b/source/backend/opengl/GLContext.hpp index 7b76a5e19..9800d0229 100644 --- a/source/backend/opengl/GLContext.hpp +++ b/source/backend/opengl/GLContext.hpp @@ -9,7 +9,7 @@ #ifndef GLCONTEXT_H #define GLCONTEXT_H -#include "GLHead.hpp" +#include "backend/opengl/GLHead.hpp" #include #include #include @@ -24,7 +24,7 @@ class GLContext { EGLContext mContext; EGLDisplay mDisplay; EGLSurface mSurface; - bool mIsCreateError{false}; + bool mIsCreateError{false}; }; } // namespace OpenGL } // namespace MNN diff --git a/source/backend/opengl/execution/GLConverter.cpp b/source/backend/opengl/GLConverter.cpp similarity index 92% rename from source/backend/opengl/execution/GLConverter.cpp 
rename to source/backend/opengl/GLConverter.cpp index e618d613a..9cd207510 100644 --- a/source/backend/opengl/execution/GLConverter.cpp +++ b/source/backend/opengl/GLConverter.cpp @@ -7,20 +7,20 @@ // #include "GLConverter.hpp" -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenGL { GLConverter::GLConverter(const std::vector &inputs, const Op *op, Backend *bn) : Execution(bn) { } - + GLConverter::~GLConverter() { } - + ErrorCode GLConverter::onResize(const std::vector &inputs, const std::vector &outputs) { std::vector prefix; setLocalSize(prefix, mLocalSize, 8, 8, 1); @@ -31,15 +31,15 @@ ErrorCode GLConverter::onResize(const std::vector &inputs, const std:: ErrorCode GLConverter::onExecute(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; - + std::vector inputShape = tensorShapeFormat(input); - + int ib = inputShape.at(0); int ih = inputShape.at(1); int iw = inputShape.at(2); int ic = inputShape.at(3); int ic_4 = UP_DIV(ic, 4); - + mProgram->useProgram(); glBindImageTexture(0, output->deviceId(), 0, GL_TRUE, 0, GL_WRITE_ONLY, ((GLBackend *)backend())->getTextrueFormat()); OPENGL_CHECK_ERROR; diff --git a/source/backend/opengl/execution/GLConverter.hpp b/source/backend/opengl/GLConverter.hpp similarity index 87% rename from source/backend/opengl/execution/GLConverter.hpp rename to source/backend/opengl/GLConverter.hpp index ff7dc1bbd..36065d4d2 100644 --- a/source/backend/opengl/execution/GLConverter.hpp +++ b/source/backend/opengl/GLConverter.hpp @@ -9,9 +9,9 @@ #ifndef MNNDEMO_GLConverter_H #define MNNDEMO_GLConverter_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include 
"backend/opengl/GLTexture.hpp" #include "MNN_generated.h" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/execution/GLConvolution.cpp b/source/backend/opengl/GLConvolution.cpp similarity index 98% rename from source/backend/opengl/execution/GLConvolution.cpp rename to source/backend/opengl/GLConvolution.cpp index fa0492864..a4c57cd1d 100644 --- a/source/backend/opengl/execution/GLConvolution.cpp +++ b/source/backend/opengl/GLConvolution.cpp @@ -7,11 +7,11 @@ // #include "GLConvolution.hpp" -#include "AutoTime.hpp" +#include #include -#include "AllShader.hpp" -#include "Macro.h" +#include "backend/opengl/shaders/AllShader.h" +#include "core/Macro.h" #include "GLConvolutionIm2col.hpp" namespace MNN { namespace OpenGL { @@ -81,7 +81,7 @@ GLConvolution::GLConvolution(const std::vector &inputs, const Op *conv ::memset(dest, 0, alignedWeightSize * sizeof(float)); const float *source = convOp->main_as_Convolution2D()->weight()->data(); int cur = 0; - + //weight : oc ic h w -> oc/4, ic/4 ky kx ic4 oc4 for (int b = 0; b < mCommon->outputCount(); ++b) { int b_4 = b / unit; @@ -101,14 +101,14 @@ GLConvolution::GLConvolution(const std::vector &inputs, const Op *conv } } } - + mKernelBuffer->unmap(); - + int ic_4 = UP_DIV(mInputDepth, unit); //weight image : ky kx, oc/4, ic/4*ic4 oc4 mKernelTexture = std::shared_ptr(new GLTexture(ic_4 * unit, oc_4, fw * fh, ((GLBackend *)backend())->getTextrueFormat() , GL_TEXTURE_3D, false)); - + auto transform = extra->getProgram("transform_kernel_image_adreno", glsl_kernel2image_adreno_glsl); transform->useProgram(); glBindImageTexture(0, mKernelTexture->id(), 0, GL_TRUE, 0, GL_WRITE_ONLY, ((GLBackend *)backend())->getTextrueFormat()); @@ -117,11 +117,11 @@ GLConvolution::GLConvolution(const std::vector &inputs, const Op *conv glUniform1i(3, fw * fh); glUniform1i(4, ic_4); OPENGL_CHECK_ERROR; - + ((GLBackend *)backend())->compute(ic_4, oc_4, fw * fh); OPENGL_CHECK_ERROR; } - + ErrorCode GLConvolution::onResize(const 
std::vector &inputs, const std::vector &outputs) { GPUConvolution::onResize(inputs, outputs); auto extra = (GLBackend *)backend(); @@ -132,16 +132,16 @@ ErrorCode GLConvolution::onResize(const std::vector &inputs, const std if (mCommon->relu6()) { prefix.push_back("#define RELU6"); } - + auto dstDepthQuad = UP_DIV(outputs[0]->channel(), 4); - + setLocalSize(prefix, mLocalSize, 1, 1, dstDepthQuad); if (1 == mCommon->kernelY() && 1 == mCommon->kernelX() && 1 == mCommon->strideY() && 1 == mCommon->strideX() && 0 == mCommon->padX() && 0 == mCommon->padY()) { mIs1x1 = true; } - + if (mIs1x1) { mProgram = extra->getProgram("convolution1x1", glsl_convolution1x1_glsl, prefix); } else { @@ -153,11 +153,11 @@ ErrorCode GLConvolution::onResize(const std::vector &inputs, const std mDy = mCommon->dilateY(); mProgram = extra->getProgram("convolution", glsl_convolution_glsl, prefix); } - + return NO_ERROR; } - + ErrorCode GLConvolution::onExecute(const std::vector &inputs, const std::vector &outputs) { { auto convLayer = mCommon; @@ -203,7 +203,7 @@ ErrorCode GLConvolution::onExecute(const std::vector &inputs, const st ((GLBackend *)backend())->compute(UP_DIV(output->width(), UNIT*mLocalSize[0]), UP_DIV(output->height(), mLocalSize[1]), UP_DIV(oc_4, mLocalSize[2])); - + OPENGL_CHECK_ERROR; } @@ -217,7 +217,7 @@ class ConvolutionCreator : public GLBackend::Creator { virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const override { auto common = op->main_as_Convolution2D()->common(); - + //TODO: bugfix if(common->padX() == 1 || common->strideX() != 1){ return new GLConvolution(inputs, op, backend); @@ -233,7 +233,7 @@ class ConvolutionCreator : public GLBackend::Creator { } } }; - + GLCreatorRegister __gl_conv_op(OpType_Convolution); } // namespace OpenGL } // namespace MNN diff --git a/source/backend/opengl/execution/GLConvolution.hpp b/source/backend/opengl/GLConvolution.hpp similarity index 88% rename from 
source/backend/opengl/execution/GLConvolution.hpp rename to source/backend/opengl/GLConvolution.hpp index 2e6d52917..97f2ea8f6 100644 --- a/source/backend/opengl/execution/GLConvolution.hpp +++ b/source/backend/opengl/GLConvolution.hpp @@ -10,12 +10,12 @@ #define MNNDEMO_GLCONVOLUTION_H #include -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLSSBOBuffer.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLSSBOBuffer.hpp" +#include "backend/opengl/GLTexture.hpp" +#include "backend/opengl/GLBackend.hpp" #include "MNN_generated.h" -#include "GLBackend.hpp" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/execution/GLConvolutionDepthwise.cpp b/source/backend/opengl/GLConvolutionDepthwise.cpp similarity index 97% rename from source/backend/opengl/execution/GLConvolutionDepthwise.cpp rename to source/backend/opengl/GLConvolutionDepthwise.cpp index 9fa5aaa82..a22e9b512 100644 --- a/source/backend/opengl/execution/GLConvolutionDepthwise.cpp +++ b/source/backend/opengl/GLConvolutionDepthwise.cpp @@ -7,12 +7,12 @@ // #include "GLConvolutionDepthwise.hpp" -#include "AutoTime.hpp" +#include #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" namespace MNN { namespace OpenGL { @@ -88,7 +88,7 @@ GLConvolutionDepthwise::GLConvolutionDepthwise(const std::vector &inpu ((GLBackend *)backend())->compute(srcDepthQuad, fw, fh); OPENGL_CHECK_ERROR; - + } ErrorCode GLConvolutionDepthwise::onResize(const std::vector &inputs, const std::vector &outputs) { diff --git a/source/backend/opengl/execution/GLConvolutionDepthwise.hpp b/source/backend/opengl/GLConvolutionDepthwise.hpp similarity index 97% rename from source/backend/opengl/execution/GLConvolutionDepthwise.hpp rename to source/backend/opengl/GLConvolutionDepthwise.hpp index 
75288c242..14347de49 100644 --- a/source/backend/opengl/execution/GLConvolutionDepthwise.hpp +++ b/source/backend/opengl/GLConvolutionDepthwise.hpp @@ -9,7 +9,7 @@ #ifndef MNNDEMO_GLCONVOLUTIONDEPTHWISE_H #define MNNDEMO_GLCONVOLUTIONDEPTHWISE_H -#include "Execution.hpp" +#include "core/Execution.hpp" #include "GLConvolution.hpp" #include "MNN_generated.h" diff --git a/source/backend/opengl/execution/GLConvolutionIm2col.cpp b/source/backend/opengl/GLConvolutionIm2col.cpp similarity index 97% rename from source/backend/opengl/execution/GLConvolutionIm2col.cpp rename to source/backend/opengl/GLConvolutionIm2col.cpp index 80b1b2066..4b2093c2c 100644 --- a/source/backend/opengl/execution/GLConvolutionIm2col.cpp +++ b/source/backend/opengl/GLConvolutionIm2col.cpp @@ -7,14 +7,14 @@ // #include "GLConvolution.hpp" -#include "AutoTime.hpp" +#include #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" #include "GLConvolutionIm2col.hpp" -#include "GLUtils.hpp" +#include "backend/opengl/GLUtils.hpp" namespace MNN { namespace OpenGL { @@ -37,7 +37,7 @@ GLConvolutionIm2col::GLConvolutionIm2col(const std::vector &inputs, co ::memset(dest, 0, totalWeightSize * sizeof(float)); const float *source = convOp->main_as_Convolution2D()->weight()->data(); int cur = 0; - + //weight : oc ic -> oc/4 ic/4 ic4 oc4 //weight image : oc_4, ic_4 * ic4 oc4 int alignedWeightSize = ic_4 * fw * fh * UNIT2; @@ -78,7 +78,7 @@ GLConvolutionIm2col::GLConvolutionIm2col(const std::vector &inputs, co OPENGL_CHECK_ERROR; ((GLBackend *)backend())->compute(UP_DIV(imageWidth, 4), UP_DIV(oc_4, 4), 1); OPENGL_CHECK_ERROR; - + //bias mBiasBuffer.reset(new GLSSBOBuffer(sizeof(float) * ALIGN_UP4(mCommon->outputCount()))); float* bias = (float*)(mBiasBuffer->map(GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT)); @@ -89,7 +89,7 @@ GLConvolutionIm2col::GLConvolutionIm2col(const 
std::vector &inputs, co } mBiasBuffer->unmap(); } - + ErrorCode GLConvolutionIm2col::onResize(const std::vector &inputs, const std::vector &outputs) { GPUConvolution::onResize(inputs, outputs); std::vector im2colPrefix; @@ -111,19 +111,19 @@ ErrorCode GLConvolutionIm2col::onResize(const std::vector &inputs, con int oc = outputs[0]->channel(); int oh = outputs[0]->height(); int ow = outputs[0]->width(); - + int ic = inputs[0]->channel(); obxohxow_4 = UP_DIV(ob*oh*ow, 4); int fw = mCommon->kernelX(); int fh = mCommon->kernelY(); - + //input : temp image : (ib*oh*ow)/ 4, ic/4*(ib*oh*ow)%4*ic4 //output : temp image : oc/4 * (ob*oh*ow)%4, (ob*oh*ow)/4 * oc4 mSrcTexture = std::shared_ptr(new GLTexture(UP_DIV(ic, 4)*UNIT*fw*fh, obxohxow_4, 1, ((GLBackend *)backend())->getTextrueFormat(), GL_TEXTURE_2D, false)); mDstTexture = std::shared_ptr(new GLTexture(obxohxow_4, UP_DIV(oc, 4) * UNIT, 1, ((GLBackend *)backend())->getTextrueFormat(), GL_TEXTURE_2D, false)); - + auto transform = mGLBackend->getProgram("clear_texture", glsl_clear_texture_glsl); transform->useProgram(); glBindImageTexture(0, mSrcTexture->id(), 0, GL_TRUE, 0, GL_WRITE_ONLY, ((GLBackend *)backend())->getTextrueFormat()); @@ -134,7 +134,7 @@ ErrorCode GLConvolutionIm2col::onResize(const std::vector &inputs, con OPENGL_CHECK_ERROR; ((GLBackend *)backend())->compute(UP_DIV(UP_DIV(ic, 4)*UNIT*fw*fh, 4), UP_DIV(obxohxow_4, 4), 1); OPENGL_CHECK_ERROR; - + if (true == mIsConv1x1) { setLocalSize(im2colPrefix, mIm2colSize, 8, 8, 1); mIm2ColProgram = mGLBackend->getProgram("image2col1x1", glsl_im2col1x1_glsl, im2colPrefix); @@ -155,7 +155,7 @@ ErrorCode GLConvolutionIm2col::onResize(const std::vector &inputs, con glUniform2i(5, mCommon->dilateX(), mCommon->dilateY()); }; } - + return NO_ERROR; } ErrorCode GLConvolutionIm2col::onExecute(const std::vector &inputs, const std::vector &outputs) { @@ -164,20 +164,20 @@ ErrorCode GLConvolutionIm2col::onExecute(const std::vector &inputs, co auto output = outputs[0]; auto 
inputTexture = input->deviceId(); auto outputTexture = output->deviceId(); - + int iw = input->width(); int ih = input->height(); int ic = input->channel(); int ib = input->batch(); - + int ow = output->width(); int oh = output->height(); int oc = output->channel(); int ob = output->batch(); - + int ic_4 = UP_DIV(ic, 4); int oc_4 = UP_DIV(oc, 4); - + // image2col { mIm2ColProgram->useProgram(); @@ -189,7 +189,7 @@ ErrorCode GLConvolutionIm2col::onExecute(const std::vector &inputs, co glBindTexture(GL_TEXTURE_3D, inputTexture); OPENGL_CHECK_ERROR; } - + if (mIsConv1x1) { glUniform1i(5, ic_4); glUniform1i(6, ow); @@ -203,7 +203,7 @@ ErrorCode GLConvolutionIm2col::onExecute(const std::vector &inputs, co ((GLBackend *)backend())->compute(UP_DIV(ow, mIm2colSize[0]), UP_DIV(oh, mIm2colSize[1]), UP_DIV(ic_4*ib, mIm2colSize[2])); OPENGL_CHECK_ERROR; } - + //gemm { mGemm16x16Program->useProgram(); @@ -221,7 +221,7 @@ ErrorCode GLConvolutionIm2col::onExecute(const std::vector &inputs, co ((GLBackend *)backend())->compute(UP_DIV(obxohxow_4, mGemmSize[0]), UP_DIV(oc_4, mGemmSize[1]), 1); OPENGL_CHECK_ERROR; } - + //col2image { mCol2ImProgram->useProgram(); @@ -241,9 +241,9 @@ ErrorCode GLConvolutionIm2col::onExecute(const std::vector &inputs, co ((GLBackend *)backend())->compute(UP_DIV(ow, mCol2imSize[0]), UP_DIV(oh, mCol2imSize[1]), UP_DIV(oc_4*ob, mCol2imSize[2])); OPENGL_CHECK_ERROR; } - + return NO_ERROR; } - + } // namespace OpenGL } // namespace MNN diff --git a/source/backend/opengl/execution/GLConvolutionIm2col.hpp b/source/backend/opengl/GLConvolutionIm2col.hpp similarity index 89% rename from source/backend/opengl/execution/GLConvolutionIm2col.hpp rename to source/backend/opengl/GLConvolutionIm2col.hpp index b1a12b117..412c05ada 100644 --- a/source/backend/opengl/execution/GLConvolutionIm2col.hpp +++ b/source/backend/opengl/GLConvolutionIm2col.hpp @@ -10,10 +10,10 @@ #define GLCONVOLUTION_IM2COL_H #include -#include "Execution.hpp" -#include "GLProgram.hpp" -#include 
"GLSSBOBuffer.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLSSBOBuffer.hpp" +#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/GLDebug.hpp b/source/backend/opengl/GLDebug.hpp index 3ba1de931..5690a8599 100644 --- a/source/backend/opengl/GLDebug.hpp +++ b/source/backend/opengl/GLDebug.hpp @@ -12,7 +12,7 @@ #include #include #include -#include "Macro.h" +#include "core/Macro.h" #ifdef __cplusplus extern "C" { diff --git a/source/backend/opengl/execution/GLEltwise.cpp b/source/backend/opengl/GLEltwise.cpp similarity index 96% rename from source/backend/opengl/execution/GLEltwise.cpp rename to source/backend/opengl/GLEltwise.cpp index 251695bda..8d64ec69c 100644 --- a/source/backend/opengl/execution/GLEltwise.cpp +++ b/source/backend/opengl/GLEltwise.cpp @@ -8,9 +8,9 @@ #include "GLEltwise.hpp" #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" namespace MNN { namespace OpenGL { GLEltwise::GLEltwise(const std::vector &inputs, const Op *op, Backend *bn) : Execution(bn) { diff --git a/source/backend/opengl/execution/GLEltwise.hpp b/source/backend/opengl/GLEltwise.hpp similarity index 85% rename from source/backend/opengl/execution/GLEltwise.hpp rename to source/backend/opengl/GLEltwise.hpp index c3994e8d1..4f1c1d457 100644 --- a/source/backend/opengl/execution/GLEltwise.hpp +++ b/source/backend/opengl/GLEltwise.hpp @@ -8,9 +8,9 @@ #ifndef MNNDEMO_GLELTWISE_H #define MNNDEMO_GLELTWISE_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" namespace MNN { namespace OpenGL { diff --git 
a/source/backend/opengl/GLHead.hpp b/source/backend/opengl/GLHead.hpp index e0aef75c5..96be807f6 100644 --- a/source/backend/opengl/GLHead.hpp +++ b/source/backend/opengl/GLHead.hpp @@ -28,7 +28,7 @@ #include #endif #endif -#include "GLDebug.hpp" +#include "backend/opengl/GLDebug.hpp" #define OPENGL_ASSERT(x) assert(x) #endif diff --git a/source/backend/opengl/execution/GLInterp.cpp b/source/backend/opengl/GLInterp.cpp similarity index 95% rename from source/backend/opengl/execution/GLInterp.cpp rename to source/backend/opengl/GLInterp.cpp index 81ea64cd9..3e6df84fa 100644 --- a/source/backend/opengl/execution/GLInterp.cpp +++ b/source/backend/opengl/GLInterp.cpp @@ -8,9 +8,9 @@ #include "GLInterp.hpp" #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" namespace MNN { namespace OpenGL { GLInterp::GLInterp(const std::vector &inputs, const Op *op, Backend *bn) : Execution(bn) { @@ -18,7 +18,7 @@ GLInterp::GLInterp(const std::vector &inputs, const Op *op, Backend *b mAlignCorners = interpParam->alignCorners(); mResizeType = interpParam->resizeType(); } - + ErrorCode GLInterp::onResize(const std::vector &inputs, const std::vector &outputs) { std::vector prefix; setLocalSize(prefix, mLocalSize, 8, 8, 1); @@ -31,24 +31,24 @@ ErrorCode GLInterp::onResize(const std::vector &inputs, const std::vec } return NO_ERROR; } - + GLInterp::~GLInterp() { } ErrorCode GLInterp::onExecute(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; - + int iw = input->width(); int ih = input->height(); int ic_4 = UP_DIV(input->channel(), 4); int ib = input->batch(); - + int ow = output->width(); int oh = output->height(); int oc_4 = UP_DIV(output->channel(), 4); int ob = output->batch(); - + float xScale = 1; float yScale = 1; if (mAlignCorners) { @@ -73,7 +73,7 @@ ErrorCode GLInterp::onExecute(const 
std::vector &inputs, const std::ve glUniform2f(4, xScale, yScale); OPENGL_CHECK_ERROR; ((GLBackend *)backend())->compute(UP_DIV(ow, mLocalSize[0]), UP_DIV(oh, mLocalSize[1]), UP_DIV(oc_4*ob, mLocalSize[2])); - + return NO_ERROR; } GLCreatorRegister> __interp_op(OpType_Interp); diff --git a/source/backend/opengl/execution/GLInterp.hpp b/source/backend/opengl/GLInterp.hpp similarity index 87% rename from source/backend/opengl/execution/GLInterp.hpp rename to source/backend/opengl/GLInterp.hpp index a7c2fb6c5..26a93eeca 100644 --- a/source/backend/opengl/execution/GLInterp.hpp +++ b/source/backend/opengl/GLInterp.hpp @@ -8,9 +8,9 @@ #ifndef GLInterp_H #define GLInterp_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/GLLock.cpp b/source/backend/opengl/GLLock.cpp index 14c3997f9..957c869b9 100644 --- a/source/backend/opengl/GLLock.cpp +++ b/source/backend/opengl/GLLock.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLLock.hpp" +#include "backend/opengl/GLLock.hpp" #include #include namespace MNN { diff --git a/source/backend/opengl/GLLock.hpp b/source/backend/opengl/GLLock.hpp index b49d54d9f..d7f1c5ac9 100644 --- a/source/backend/opengl/GLLock.hpp +++ b/source/backend/opengl/GLLock.hpp @@ -8,7 +8,7 @@ #ifndef GLLOCK_H #define GLLOCK_H -#include "Macro.h" +#include "core/Macro.h" namespace MNN { namespace OpenGL { class GLLock { diff --git a/source/backend/opengl/execution/GLPermute.cpp b/source/backend/opengl/GLPermute.cpp similarity index 95% rename from source/backend/opengl/execution/GLPermute.cpp rename to source/backend/opengl/GLPermute.cpp index da52259fd..4fa182fe8 100644 --- a/source/backend/opengl/execution/GLPermute.cpp +++ b/source/backend/opengl/GLPermute.cpp @@ -8,15 +8,15 @@ 
#include "GLPermute.hpp" #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenGL { GLPermute::GLPermute(const std::vector &inputs, const Op *op, Backend *bn) : Execution(bn) { - + auto newDim = op->main_as_Permute()->dims(); for (int i = 0; i < newDim->size(); ++i) { mDims.push_back(newDim->data()[i]); @@ -24,9 +24,9 @@ GLPermute::GLPermute(const std::vector &inputs, const Op *op, Backend } GLPermute::~GLPermute() { - + } - + ErrorCode GLPermute::onResize(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; @@ -34,7 +34,7 @@ ErrorCode GLPermute::onResize(const std::vector &inputs, const std::ve setLocalSize(prefix, mLocalSize, 8, 8, 1); mSrcBuffer.reset(new GLSSBOBuffer(input->size())); mDstBuffer.reset(new GLSSBOBuffer(output->size())); - + mPermuteProgram = ((GLBackend *)backend())->getProgram("permute", glsl_permute_glsl, prefix); mSrcProgram = ((GLBackend *)backend())->getProgram("src", glsl_image_to_nchw_buffer_glsl, prefix); mDstProgram = ((GLBackend *)backend())->getProgram("dst", glsl_nchw_buffer_to_image_glsl, prefix); @@ -50,13 +50,13 @@ ErrorCode GLPermute::onExecute(const std::vector &inputs, const std::v int ic = input->channel(); int ib = input->batch(); int ic_4 = UP_DIV(ic, 4); - + int oh = output->height(); int ow = output->width(); int oc = output->channel(); int ob = output->batch(); int oc_4 = UP_DIV(oc, 4); - + //image -> buffer(nchw) { mSrcProgram->useProgram(); @@ -68,7 +68,7 @@ ErrorCode GLPermute::onExecute(const std::vector &inputs, const std::v ((GLBackend *)backend())->compute(UP_DIV(iw, mLocalSize[0]), UP_DIV(ih, mLocalSize[1]), UP_DIV(ic_4, mLocalSize[2])); OPENGL_CHECK_ERROR; } - + //do permute { mPermuteProgram->useProgram(); @@ -93,7 +93,7 @@ ErrorCode 
GLPermute::onExecute(const std::vector &inputs, const std::v ((GLBackend *)backend())->compute(UP_DIV(ow, mLocalSize[0]), UP_DIV(oh, mLocalSize[1]), UP_DIV(oc_4, mLocalSize[2])); OPENGL_CHECK_ERROR; } - + return NO_ERROR; } GLCreatorRegister> __permute_op(OpType_Permute); diff --git a/source/backend/opengl/execution/GLPermute.hpp b/source/backend/opengl/GLPermute.hpp similarity index 86% rename from source/backend/opengl/execution/GLPermute.hpp rename to source/backend/opengl/GLPermute.hpp index 6cdfc4f84..89aceb8ea 100644 --- a/source/backend/opengl/execution/GLPermute.hpp +++ b/source/backend/opengl/GLPermute.hpp @@ -8,11 +8,11 @@ #ifndef GLPermute_H #define GLPermute_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" +#include "backend/opengl/GLSSBOBuffer.hpp" #include "MNN_generated.h" -#include "GLSSBOBuffer.hpp" namespace MNN { namespace OpenGL { class GLPermute : public MNN::Execution { diff --git a/source/backend/opengl/execution/GLPool.cpp b/source/backend/opengl/GLPool.cpp similarity index 96% rename from source/backend/opengl/execution/GLPool.cpp rename to source/backend/opengl/GLPool.cpp index f44ae405b..ac2133a6a 100644 --- a/source/backend/opengl/execution/GLPool.cpp +++ b/source/backend/opengl/GLPool.cpp @@ -7,9 +7,9 @@ // #include "GLPool.hpp" -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" namespace MNN { namespace OpenGL { ErrorCode GLPool::onResize(const std::vector &inputs, const std::vector &outputs) { diff --git a/source/backend/opengl/execution/GLPool.hpp b/source/backend/opengl/GLPool.hpp similarity index 87% rename from source/backend/opengl/execution/GLPool.hpp rename to source/backend/opengl/GLPool.hpp index 53c76705a..ad841faac 100644 --- 
a/source/backend/opengl/execution/GLPool.hpp +++ b/source/backend/opengl/GLPool.hpp @@ -9,9 +9,9 @@ #ifndef MNNDEMO_GLPOOL_H #define MNNDEMO_GLPOOL_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/GLProgram.cpp b/source/backend/opengl/GLProgram.cpp index bda4e1de4..54ca3dabf 100644 --- a/source/backend/opengl/GLProgram.cpp +++ b/source/backend/opengl/GLProgram.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLProgram.hpp" +#include "backend/opengl/GLProgram.hpp" #include #include #include -#include "GLDebug.hpp" +#include "backend/opengl/GLDebug.hpp" using namespace std; namespace MNN { diff --git a/source/backend/opengl/GLProgram.hpp b/source/backend/opengl/GLProgram.hpp index 6cb6644a6..6f73cb533 100644 --- a/source/backend/opengl/GLProgram.hpp +++ b/source/backend/opengl/GLProgram.hpp @@ -10,8 +10,8 @@ #define GLPROGRAM_H #include -#include "GLHead.hpp" -#include "GLLock.hpp" +#include "backend/opengl/GLHead.hpp" +#include "backend/opengl/GLLock.hpp" namespace MNN { namespace OpenGL { class GLProgram { diff --git a/source/backend/opengl/execution/GLROIPooling.cpp b/source/backend/opengl/GLROIPooling.cpp similarity index 94% rename from source/backend/opengl/execution/GLROIPooling.cpp rename to source/backend/opengl/GLROIPooling.cpp index fe7355fe0..c9a6697fe 100644 --- a/source/backend/opengl/execution/GLROIPooling.cpp +++ b/source/backend/opengl/GLROIPooling.cpp @@ -7,9 +7,9 @@ // #include "GLROIPooling.hpp" -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" namespace MNN { namespace OpenGL { GLRoiPooling::GLRoiPooling(const std::vector &inputs, const Op 
*op, Backend *bn) : Execution(bn) { @@ -19,13 +19,13 @@ GLRoiPooling::GLRoiPooling(const std::vector &inputs, const Op *op, Ba mPoolProgram = extra->getProgram("roipooling", glsl_roiPooling_glsl, prefix); mSpatialScale = op->main_as_RoiPooling()->spatialScale(); } - + GLRoiPooling::~GLRoiPooling() { } - + ErrorCode GLRoiPooling::onResize(const std::vector &inputs, const std::vector &outputs) { - + return NO_ERROR; } @@ -33,7 +33,7 @@ ErrorCode GLRoiPooling::onExecute(const std::vector &inputs, const std auto output = outputs[0]; auto input = inputs[0]; auto roi = inputs[0]; - + int ob = output->batch(); int oc = output->channel(); int oh = output->height(); diff --git a/source/backend/opengl/execution/GLROIPooling.hpp b/source/backend/opengl/GLROIPooling.hpp similarity index 88% rename from source/backend/opengl/execution/GLROIPooling.hpp rename to source/backend/opengl/GLROIPooling.hpp index 8d833a512..e10d49758 100644 --- a/source/backend/opengl/execution/GLROIPooling.hpp +++ b/source/backend/opengl/GLROIPooling.hpp @@ -9,9 +9,9 @@ #ifndef GLRoiPooling_H #define GLRoiPooling_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/execution/GLRelu.cpp b/source/backend/opengl/GLRelu.cpp similarity index 96% rename from source/backend/opengl/execution/GLRelu.cpp rename to source/backend/opengl/GLRelu.cpp index d132c8e2d..e2c36ef51 100644 --- a/source/backend/opengl/execution/GLRelu.cpp +++ b/source/backend/opengl/GLRelu.cpp @@ -8,10 +8,10 @@ #include "GLRelu.hpp" #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { 
namespace OpenGL { @@ -25,9 +25,9 @@ GLRelu::GLRelu(const std::vector &inputs, const Op *op, Backend *bn) : } GLRelu::~GLRelu() { - + } - + ErrorCode GLRelu::onResize(const std::vector &inputs, const std::vector &outputs) { std::vector prefix; setLocalSize(prefix, mLocalSize, 8, 8, 1); @@ -61,7 +61,7 @@ ErrorCode GLRelu::onExecute(const std::vector &inputs, const std::vect int ih = input->height(); int ic_4 = UP_DIV(input->channel(), 4); int ib = input->batch(); - + if(OpType_PReLU == mType){ mProgram->useProgram(); glBindImageTexture(0, output->deviceId(), 0, GL_TRUE, 0, GL_WRITE_ONLY, ((GLBackend *)backend())->getTextrueFormat()); @@ -91,7 +91,7 @@ ErrorCode GLRelu::onExecute(const std::vector &inputs, const std::vect OPENGL_CHECK_ERROR; ((GLBackend *)backend())->compute(UP_DIV(iw, mLocalSize[0]), UP_DIV(ih, mLocalSize[1]), UP_DIV(ic_4, mLocalSize[2])); } - + return NO_ERROR; } GLCreatorRegister> __relu_op(OpType_ReLU); diff --git a/source/backend/opengl/execution/GLRelu.hpp b/source/backend/opengl/GLRelu.hpp similarity index 84% rename from source/backend/opengl/execution/GLRelu.hpp rename to source/backend/opengl/GLRelu.hpp index 0aa9699e2..38de65974 100644 --- a/source/backend/opengl/execution/GLRelu.hpp +++ b/source/backend/opengl/GLRelu.hpp @@ -8,11 +8,11 @@ #ifndef GLRelu_H #define GLRelu_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" +#include "backend/opengl/GLSSBOBuffer.hpp" #include "MNN_generated.h" -#include "GLSSBOBuffer.hpp" namespace MNN { namespace OpenGL { class GLRelu : public MNN::Execution { diff --git a/source/backend/opengl/execution/GLReshape.cpp b/source/backend/opengl/GLReshape.cpp similarity index 96% rename from source/backend/opengl/execution/GLReshape.cpp rename to source/backend/opengl/GLReshape.cpp index 4ab90da41..0edc18cf6 100644 --- a/source/backend/opengl/execution/GLReshape.cpp +++ 
b/source/backend/opengl/GLReshape.cpp @@ -8,10 +8,10 @@ #include "GLReshape.hpp" #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenGL { @@ -20,15 +20,15 @@ GLReshape::GLReshape(const std::vector &inputs, const Op *op, Backend } GLReshape::~GLReshape() { - + } - + ErrorCode GLReshape::onResize(const std::vector &inputs, const std::vector &outputs) { std::vector prefix; setLocalSize(prefix, mLocalSize, 8, 8, 1); mTempBuffer.reset(new GLSSBOBuffer(inputs[0]->size())); auto input = inputs[0]; - + if (mDimType == MNN_DATA_FORMAT_NCHW) { mSrcProgram = ((GLBackend *)backend())->getProgram("src", glsl_image_to_nchw_buffer_glsl, prefix); mDstProgram = ((GLBackend *)backend())->getProgram("dst", glsl_nchw_buffer_to_image_glsl, prefix); @@ -36,7 +36,7 @@ ErrorCode GLReshape::onResize(const std::vector &inputs, const std::ve mSrcProgram = ((GLBackend *)backend())->getProgram("src", glsl_image_to_nhwc_buffer_glsl, prefix); mDstProgram = ((GLBackend *)backend())->getProgram("dst", glsl_nhwc_buffer_to_image_glsl, prefix); } - + return NO_ERROR; } @@ -52,7 +52,7 @@ ErrorCode GLReshape::onExecute(const std::vector &inputs, const std::v int iw = inputShape.at(2); int ic = inputShape.at(3); int ic_4 = UP_DIV(ic, 4); - + int ob = outputShape.at(0); int oh = outputShape.at(1); int ow = outputShape.at(2); @@ -109,16 +109,16 @@ ErrorCode GLReshape::onExecute(const std::vector &inputs, const std::v OPENGL_CHECK_ERROR; } } - + return NO_ERROR; } - + class ReshapeCreator : public GLBackend::Creator { public: virtual ~ReshapeCreator() = default; virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const MNN::Op *op, Backend *backend) const override { - + if(inputs[0]->dimensions() == 3 || outputs[0]->dimensions() == 3){ 
MNN_PRINT("reshape not support dimensions == 3 \n"); return nullptr; diff --git a/source/backend/opengl/execution/GLReshape.hpp b/source/backend/opengl/GLReshape.hpp similarity index 85% rename from source/backend/opengl/execution/GLReshape.hpp rename to source/backend/opengl/GLReshape.hpp index 77860ce4b..2bfdd9216 100644 --- a/source/backend/opengl/execution/GLReshape.hpp +++ b/source/backend/opengl/GLReshape.hpp @@ -8,11 +8,11 @@ #ifndef GLReshape_H #define GLReshape_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" +#include "backend/opengl/GLSSBOBuffer.hpp" #include "MNN_generated.h" -#include "GLSSBOBuffer.hpp" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/GLSSBOBuffer.cpp b/source/backend/opengl/GLSSBOBuffer.cpp index bb3710199..ec35826fc 100644 --- a/source/backend/opengl/GLSSBOBuffer.cpp +++ b/source/backend/opengl/GLSSBOBuffer.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLSSBOBuffer.hpp" +#include "backend/opengl/GLSSBOBuffer.hpp" namespace MNN { namespace OpenGL { GLSSBOBuffer::GLSSBOBuffer(GLsizeiptr size, GLenum type, GLenum usage) { diff --git a/source/backend/opengl/GLSSBOBuffer.hpp b/source/backend/opengl/GLSSBOBuffer.hpp index 2d79a11e9..8b99e5bd3 100644 --- a/source/backend/opengl/GLSSBOBuffer.hpp +++ b/source/backend/opengl/GLSSBOBuffer.hpp @@ -9,7 +9,7 @@ #ifndef GLSSBOBUFFER_H #define GLSSBOBUFFER_H -#include "GLHead.hpp" +#include "backend/opengl/GLHead.hpp" namespace MNN { namespace OpenGL { class GLSSBOBuffer { diff --git a/source/backend/opengl/execution/GLSoftmax.cpp b/source/backend/opengl/GLSoftmax.cpp similarity index 97% rename from source/backend/opengl/execution/GLSoftmax.cpp rename to source/backend/opengl/GLSoftmax.cpp index 89987f2a0..d3b56624a 100644 --- a/source/backend/opengl/execution/GLSoftmax.cpp +++ 
b/source/backend/opengl/GLSoftmax.cpp @@ -8,9 +8,9 @@ #include "GLSoftmax.hpp" #include -#include "AllShader.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opengl/shaders/AllShader.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenGL { @@ -22,9 +22,9 @@ GLSoftmax::GLSoftmax(const std::vector &inputs, const Op *op, Backend } GLSoftmax::~GLSoftmax() { - + } - + ErrorCode GLSoftmax::onResize(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; @@ -49,22 +49,22 @@ ErrorCode GLSoftmax::onExecute(const std::vector &inputs, const std::v auto input = inputs[0]; auto output = outputs[0]; - + std::vector inputShape = tensorShapeFormat(input); std::vector outputShape = tensorShapeFormat(output); - + int ib = inputShape.at(0); int ih = inputShape.at(1); int iw = inputShape.at(2); int ic = inputShape.at(3); int ic_4 = UP_DIV(ic, 4); - + int ob = outputShape.at(0); int oh = outputShape.at(1); int ow = outputShape.at(2); int oc = outputShape.at(3); int oc_4 = UP_DIV(oc, 4); - + // NC4HW4 input mProgram->useProgram(); glBindImageTexture(0, output->deviceId(), 0, GL_TRUE, 0, GL_WRITE_ONLY, ((GLBackend *)backend())->getTextrueFormat()); @@ -88,10 +88,10 @@ ErrorCode GLSoftmax::onExecute(const std::vector &inputs, const std::v } else { MNN_ASSERT(false); } - + return NO_ERROR; } - + class SoftmaxCreator : public GLBackend::Creator { public: virtual ~SoftmaxCreator() = default; diff --git a/source/backend/opengl/execution/GLSoftmax.hpp b/source/backend/opengl/GLSoftmax.hpp similarity index 83% rename from source/backend/opengl/execution/GLSoftmax.hpp rename to source/backend/opengl/GLSoftmax.hpp index f4084f16b..aa7385e72 100644 --- a/source/backend/opengl/execution/GLSoftmax.hpp +++ b/source/backend/opengl/GLSoftmax.hpp @@ -8,11 +8,11 @@ #ifndef GLSoftmax_H #define GLSoftmax_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include 
"core/Execution.hpp" +#include "backend/opengl/GLBackend.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" -#include "GLBackend.hpp" namespace MNN { namespace OpenGL { class GLSoftmax : public MNN::Execution { diff --git a/source/backend/opengl/execution/GLSqueeze.cpp b/source/backend/opengl/GLSqueeze.cpp similarity index 93% rename from source/backend/opengl/execution/GLSqueeze.cpp rename to source/backend/opengl/GLSqueeze.cpp index 6d43bc788..5ce7e302f 100644 --- a/source/backend/opengl/execution/GLSqueeze.cpp +++ b/source/backend/opengl/GLSqueeze.cpp @@ -8,21 +8,21 @@ #include "GLSqueeze.hpp" #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenGL { GLSqueeze::GLSqueeze(const std::vector &inputs, const Op *op, Backend *bn) : Execution(bn) { - + } GLSqueeze::~GLSqueeze() { - + } - + ErrorCode GLSqueeze::onResize(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; @@ -36,15 +36,15 @@ ErrorCode GLSqueeze::onResize(const std::vector &inputs, const std::ve ErrorCode GLSqueeze::onExecute(const std::vector &inputs, const std::vector &outputs) { auto input = inputs[0]; auto output = outputs[0]; - + std::vector inputShape = tensorShapeFormat(input); - + int ib = inputShape.at(0); int ih = inputShape.at(1); int iw = inputShape.at(2); int ic = inputShape.at(3); int ic_4 = UP_DIV(ic, 4); - + mProgram->useProgram(); glBindImageTexture(0, output->deviceId(), 0, GL_TRUE, 0, GL_WRITE_ONLY, ((GLBackend *)backend())->getTextrueFormat()); { @@ -65,7 +65,7 @@ class SqueezeCreator : public GLBackend::Creator { virtual ~SqueezeCreator() = default; virtual Execution *onCreate(const std::vector &inputs, const std::vector &outputs, const 
MNN::Op *op, Backend *backend) const override { - + if(inputs[0]->dimensions() == 3 || outputs[0]->dimensions() == 3){ MNN_PRINT("reshape not support dimensions == 3 \n"); return nullptr; diff --git a/source/backend/opengl/execution/GLSqueeze.hpp b/source/backend/opengl/GLSqueeze.hpp similarity index 82% rename from source/backend/opengl/execution/GLSqueeze.hpp rename to source/backend/opengl/GLSqueeze.hpp index 8ae87aeb2..e5bfd4f4e 100644 --- a/source/backend/opengl/execution/GLSqueeze.hpp +++ b/source/backend/opengl/GLSqueeze.hpp @@ -8,11 +8,11 @@ #ifndef GLSqueeze_H #define GLSqueeze_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" +#include "backend/opengl/GLSSBOBuffer.hpp" #include "MNN_generated.h" -#include "GLSSBOBuffer.hpp" namespace MNN { namespace OpenGL { class GLSqueeze : public MNN::Execution { diff --git a/source/backend/opengl/GLTexture.cpp b/source/backend/opengl/GLTexture.cpp index e45103fd8..c40d08d82 100644 --- a/source/backend/opengl/GLTexture.cpp +++ b/source/backend/opengl/GLTexture.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLTexture.hpp" +#include "backend/opengl/GLTexture.hpp" namespace MNN { namespace OpenGL { -#include "AutoTime.hpp" +#include GLTexture::~GLTexture() { glDeleteTextures(1, &mId); @@ -35,7 +35,7 @@ GLTexture::GLTexture(int w, int h, int d, GLenum textrueFormat, GLenum target, b OPENGL_CHECK_ERROR; glTexParameteri(mTarget, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE); OPENGL_CHECK_ERROR; - + int realW = w; int realH = h; int realD = d; @@ -63,13 +63,13 @@ GLTexture::GLTexture(int w, int h, int d, GLenum textrueFormat, GLenum target, b OPENGL_CHECK_ERROR; glTexParameteri(mTarget, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE); OPENGL_CHECK_ERROR; - + int realW = w; int realH = h; glTexStorage2D(mTarget, 1, mTextrueFormat, realW, realH); OPENGL_CHECK_ERROR; } 
- + } void GLTexture::sample(GLuint unit, GLuint texId) { diff --git a/source/backend/opengl/GLTexture.hpp b/source/backend/opengl/GLTexture.hpp index 84540f94a..1c3ba44a5 100644 --- a/source/backend/opengl/GLTexture.hpp +++ b/source/backend/opengl/GLTexture.hpp @@ -9,7 +9,7 @@ #ifndef GLTEXTURE_H #define GLTEXTURE_H /*Basic GLTexture, has no mipmap and just support ARGB GLTexture*/ -#include "GLHead.hpp" +#include "backend/opengl/GLHead.hpp" namespace MNN { namespace OpenGL { class GLTexture { diff --git a/source/backend/opengl/execution/GLUnary.cpp b/source/backend/opengl/GLUnary.cpp similarity index 94% rename from source/backend/opengl/execution/GLUnary.cpp rename to source/backend/opengl/GLUnary.cpp index 43ae2bad6..7041250a3 100644 --- a/source/backend/opengl/execution/GLUnary.cpp +++ b/source/backend/opengl/GLUnary.cpp @@ -8,10 +8,10 @@ #include "GLUnary.hpp" #include -#include "AllShader.hpp" -#include "GLBackend.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLBackend.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { namespace OpenGL { @@ -20,9 +20,9 @@ GLUnary::GLUnary(const std::vector &inputs, const Op *op, Backend *bn) } GLUnary::~GLUnary() { - + } - + ErrorCode GLUnary::onResize(const std::vector &inputs, const std::vector &outputs) { std::vector prefix; setLocalSize(prefix, mLocalSize, 8, 8, 1); @@ -37,7 +37,7 @@ ErrorCode GLUnary::onResize(const std::vector &inputs, const std::vect } ErrorCode GLUnary::onExecute(const std::vector &inputs, const std::vector &outputs) { - + auto input = inputs[0]; auto output = outputs[0]; @@ -65,7 +65,7 @@ ErrorCode GLUnary::onExecute(const std::vector &inputs, const std::vec return NO_ERROR; } - + class UnaryCreator : public GLBackend::Creator { public: virtual ~UnaryCreator() = default; @@ -78,7 +78,7 @@ class UnaryCreator : public GLBackend::Creator { MNN_PRINT("Not Supported Unary Operation: %d\n", type); 
return nullptr; } - + } }; GLCreatorRegister __unary_op(OpType_UnaryOp); diff --git a/source/backend/opengl/execution/GLUnary.hpp b/source/backend/opengl/GLUnary.hpp similarity index 87% rename from source/backend/opengl/execution/GLUnary.hpp rename to source/backend/opengl/GLUnary.hpp index fd1e2fa65..7f305be39 100644 --- a/source/backend/opengl/execution/GLUnary.hpp +++ b/source/backend/opengl/GLUnary.hpp @@ -8,9 +8,9 @@ #ifndef GLUnary_H #define GLUnary_H -#include "Execution.hpp" -#include "GLProgram.hpp" -#include "GLTexture.hpp" +#include "core/Execution.hpp" +#include "backend/opengl/GLProgram.hpp" +#include "backend/opengl/GLTexture.hpp" #include "MNN_generated.h" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/GLUtils.cpp b/source/backend/opengl/GLUtils.cpp index 0a9a2c384..57bd1385f 100644 --- a/source/backend/opengl/GLUtils.cpp +++ b/source/backend/opengl/GLUtils.cpp @@ -5,13 +5,13 @@ // Created by MNN on 2019/01/31. // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLUtils.hpp" -#include "GLBackend.hpp" +#include "backend/opengl/GLUtils.hpp" +#include "backend/opengl/GLBackend.hpp" #include namespace MNN { namespace OpenGL { void setLocalSize(std::vector& prefix, int* localSize, int setLocalSizeX, int setLocalSizeY, int setLocalSizeZ){ - + GLint maxLocalSizeX, maxLocalSizeY, maxLocalSizeZ; glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_SIZE, 0, &maxLocalSizeX); glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_SIZE, 1, &maxLocalSizeY); @@ -19,7 +19,7 @@ namespace OpenGL { localSize[0] = setLocalSizeX < maxLocalSizeX ? setLocalSizeX : maxLocalSizeX; localSize[1] = setLocalSizeY < maxLocalSizeY ? setLocalSizeY : maxLocalSizeY; localSize[2] = setLocalSizeZ < maxLocalSizeZ ? 
setLocalSizeZ : maxLocalSizeZ; - + { std::ostringstream os; os << "#define XLOCAL " << localSize[0]; diff --git a/source/backend/opengl/GLUtils.hpp b/source/backend/opengl/GLUtils.hpp index 397714ee1..46a6b5142 100644 --- a/source/backend/opengl/GLUtils.hpp +++ b/source/backend/opengl/GLUtils.hpp @@ -8,7 +8,7 @@ #ifndef GLUTILS_H #define GLUTILS_H -#include "Macro.h" +#include "core/Macro.h" #include #include namespace MNN { diff --git a/source/backend/opengl/makeshader.py b/source/backend/opengl/makeshader.py index a5a119bd3..48622170b 100755 --- a/source/backend/opengl/makeshader.py +++ b/source/backend/opengl/makeshader.py @@ -1,8 +1,9 @@ #!/usr/bin/python -gDefaultPath = "glsl" -gOutputHeadFile = "AllShader.hpp" -gOutputSourceFile = "AllShader.cpp" +import sys import os +gDefaultPath = sys.argv[1]#"glsl" +gOutputHeadFile = sys.argv[2]#"AllShader.hpp" +gOutputSourceFile = sys.argv[3]#"AllShader.cpp" def findAllShader(path): cmd = "find " + path + " -name \"*.glsl\"" vexs = os.popen(cmd).read().split('\n') @@ -19,7 +20,7 @@ def getName(fileName): def generateFile(headfile, sourcefile, shaders): h = "#ifndef OPENGL_GLSL_SHADER_AUTO_GENERATE_H\n#define OPENGL_GLSL_SHADER_AUTO_GENERATE_H\n" - cpp = "#include \"../" + headfile +"\"\n" + cpp = "#include \"AllShader.hpp\"\n" for s in shaders: name = getName(s) print name diff --git a/source/backend/vulkan/CMakeLists.txt b/source/backend/vulkan/CMakeLists.txt index e1c16fe18..f421854be 100644 --- a/source/backend/vulkan/CMakeLists.txt +++ b/source/backend/vulkan/CMakeLists.txt @@ -1,34 +1,33 @@ -cmake_minimum_required(VERSION 2.8) -project(MNN_Vulkan) -file(GLOB_RECURSE SRCS *.hpp *.cpp) -include_directories("../../../include/") -include_directories("include/") -include_directories("component/") -include_directories("runtime") -include_directories("execution") -include_directories("backend") - -if(SYSTEM.Android AND NOT MNN_BUILD_FOR_ANDROID_COMMAND) - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY 
${NATIVE_LIBRARY_OUTPUT}/${ANDROID_ABI}) -endif() -add_library( - MNN_Vulkan - SHARED - ${SRCS} -) -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math") -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti -fno-exceptions") - -target_include_directories(MNN_Vulkan PRIVATE - ${CMAKE_CURRENT_BINARY_DIR} - ${CMAKE_CURRENT_SOURCE_DIR} -) - -target_link_libraries(MNN_Vulkan - MNN +if(MNN_VULKAN) + FILE(GLOB_RECURSE MNN_Vulkan_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) + option(MNN_VULKAN_REGEN "Regenerate Vulkan Shader binaries. Requires FULL glslang suite with spirv-tools linked" OFF) + if (CMAKE_SYSTEM_NAME MATCHES "^Android") + add_definitions(-DVK_USE_PLATFORM_ANDROID_KHR) + endif() + IF(MNN_VULKAN_REGEN) + add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp" + COMMAND ${PYTHON_EXECUTABLE} + "${CMAKE_CURRENT_LIST_DIR}/compiler/makeshader.py" + "${CMAKE_CURRENT_LIST_DIR}/execution/glsl/" + "${CMAKE_SOURCE_DIR}/include/MNN/backend/vulkan/shaders/AllShader.h" + "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp" + COMMENT "Vulkan Code Generation" ) + add_custom_target (MNNVulkanCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp") + ENDIF() + add_library( + MNNVulkan + OBJECT + ${MNN_Vulkan_SRC} + "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp" -if (SYSTEM.Android) - add_definitions(-DVK_USE_PLATFORM_ANDROID_KHR) + ) + target_include_directories(MNNVulkan PRIVATE "${CMAKE_CURRENT_LIST_DIR}/include/" "${CMAKE_CURRENT_LIST_DIR}/component/" "${CMAKE_CURRENT_LIST_DIR}/runtime" "${CMAKE_CURRENT_LIST_DIR}/execution" "${CMAKE_CURRENT_LIST_DIR}/backend") + IF(MNN_VULKAN_REGEN) + add_dependencies(MNNVulkan MNNVulkanCodeGen) + ENDIF() + list(APPEND MNN_OBJECTS_TO_LINK $) + list(APPEND MNN_TARGETS MNNVulkan) + SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" 
PARENT_SCOPE) + SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) endif() - diff --git a/source/backend/vulkan/backend/VulkanBackend.cpp b/source/backend/vulkan/backend/VulkanBackend.cpp index 7fd7aa579..ff5a3acc6 100644 --- a/source/backend/vulkan/backend/VulkanBackend.cpp +++ b/source/backend/vulkan/backend/VulkanBackend.cpp @@ -6,19 +6,19 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanBackend.hpp" +#include "backend/vulkan/backend/VulkanBackend.hpp" #include -#include "Execution.hpp" -#include "Macro.h" -#include "Tensor.hpp" -#include "TensorUtils.hpp" -#include "SizeComputer.hpp" -#include "VulkanDevice.hpp" -#include "VulkanImageConverter.hpp" -#include "VulkanInstance.hpp" -#include "VulkanBasicExecution.hpp" +#include "core/Execution.hpp" +#include "core/Macro.h" +#include +#include "core/TensorUtils.hpp" +#include "core/SizeComputer.hpp" +#include "backend/vulkan/component/VulkanDevice.hpp" +#include "backend/vulkan/execution/VulkanImageConverter.hpp" +#include "backend/vulkan/component/VulkanInstance.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include #ifdef MNN_USE_NEON #include #endif diff --git a/source/backend/vulkan/backend/VulkanBackend.hpp b/source/backend/vulkan/backend/VulkanBackend.hpp index 098e664a3..5ef665843 100644 --- a/source/backend/vulkan/backend/VulkanBackend.hpp +++ b/source/backend/vulkan/backend/VulkanBackend.hpp @@ -11,17 +11,17 @@ #include #include -#include "Backend.hpp" -#include "MNNSharedContext.h" +#include "core/Backend.hpp" +#include #include "MNN_generated.h" -#include "VulkanBuffer.hpp" -#include "VulkanCommandPool.hpp" -#include "VulkanDevice.hpp" -#include "VulkanFence.hpp" -#include "VulkanImage.hpp" -#include "VulkanInstance.hpp" -#include "VulkanPipeline.hpp" -#include "vulkan_wrapper.h" +#include "backend/vulkan/component/VulkanBuffer.hpp" +#include "backend/vulkan/component/VulkanCommandPool.hpp" +#include 
"backend/vulkan/component/VulkanDevice.hpp" +#include "backend/vulkan/component/VulkanFence.hpp" +#include "backend/vulkan/component/VulkanImage.hpp" +#include "backend/vulkan/component/VulkanInstance.hpp" +#include "backend/vulkan/component/VulkanPipeline.hpp" +#include "backend/vulkan/vulkan/vulkan_wrapper.h" namespace MNN { class VulkanImageConverter; diff --git a/source/backend/vulkan/compiler/AllShader.cpp b/source/backend/vulkan/compiler/AllShader.cpp index 1e94406be..1d058f3a3 100644 --- a/source/backend/vulkan/compiler/AllShader.cpp +++ b/source/backend/vulkan/compiler/AllShader.cpp @@ -1,4 +1,4 @@ -#include "AllShader.h" +#include "backend/vulkan/shaders/AllShader.h" const unsigned char glsl_nhwcToimage_comp[] = { 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x08, 0x00, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, @@ -19406,4 +19406,3 @@ const unsigned char glsl_softmaxChannel_comp[] = { 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00 }; unsigned int glsl_softmaxChannel_comp_len = 4964; - diff --git a/source/backend/vulkan/compiler/VulkanCodeGen.py b/source/backend/vulkan/compiler/VulkanCodeGen.py index 973e26bc5..8ecb29924 100755 --- a/source/backend/vulkan/compiler/VulkanCodeGen.py +++ b/source/backend/vulkan/compiler/VulkanCodeGen.py @@ -35,10 +35,10 @@ def run(self): return def genHeader(self): - + res = '//\n// xxxxx.hpp\n// MNN\n//\n//\n' + \ '#ifndef xxxxx_hpp\n#define xxxxx_hpp\n' + \ - '#include "VulkanBasicExecution.hpp"\n\n' + \ + '#include \n\n' + \ 'namespace MNN {\n\tclass xxxxx : public VulkanBasicExecution\n\t{\n\tpublic:\n' + \ '\t\txxxxx(const Op* op, Backend* bn);\n\t\tvirtual ~ xxxxx();\n' + \ '\t\tErrorCode onEncode(const std::vector &inputs, const std::vector &outputs, const VulkanCommandPool::Buffer* cmdBuffer) override;\n' + \ @@ -47,14 +47,14 @@ def genHeader(self): '\t\tconst VulkanPipeline* mxxxxxPipeline;\n' + \ '\t\tstd::shared_ptr mDescriptorSet;\n' + \ '\t};\n}\n#endif' - + return 
res.replace('xxxxx', self.cpp_class) def genCpp(self): - + res = '//\n// xxxxx.cpp\n// MNN\n//\n//\n//\n' + \ - '#include "xxxxx.hpp"\n' + \ - '#include "AllShader.h"\n#include "Macro.h"\n\n\n' + \ + '#include \n' + \ + '#include \n#include "Macro.h"\n\n\n' + \ 'namespace MNN {\n' + \ "\tstruct GpuParam {\n" @@ -64,7 +64,7 @@ def genCpp(self): # class construct function res += '\txxxxx::xxxxx(const Op* op, Backend* bn):VulkanBasicExecution(bn)\n\t{\n' + \ '\t\tstd::vector xxxxxTypes {\n' - + for i in range(self.input_size + 1): res += "\t\t\tVK_DESCRIPTOR_TYPE_STORAGE_BUFFER,\n" res += '\t\t\tVK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,\n\t\t};\n' + \ @@ -100,4 +100,4 @@ def genCpp(self): if len(sys.argv) >= 4: op_params = sys.argv[3].split(',') app = CodeGenerator(op_type, input_size, op_params) - app.run() \ No newline at end of file + app.run() diff --git a/source/backend/vulkan/compiler/VulkanShaderMap.cpp b/source/backend/vulkan/compiler/VulkanShaderMap.cpp index 61d90dc1b..0e9f81729 100644 --- a/source/backend/vulkan/compiler/VulkanShaderMap.cpp +++ b/source/backend/vulkan/compiler/VulkanShaderMap.cpp @@ -1,6 +1,6 @@ /*Auto Generated File, Don' Modified.*/ -#include "VulkanShaderMap.hpp" -#include "AllShader.h" +#include "backend/vulkan/shaders/VulkanShaderMap.hpp" +#include "backend/vulkan/shaders/AllShader.h" namespace MNN { void VulkanShaderMap::init() { mMaps.insert(std::make_pair("glsl_nhwcToimage_comp", std::make_pair(glsl_nhwcToimage_comp,glsl_nhwcToimage_comp_len))); diff --git a/source/backend/vulkan/compiler/makeshader.py b/source/backend/vulkan/compiler/makeshader.py index 30d1df19e..1ba30d108 100755 --- a/source/backend/vulkan/compiler/makeshader.py +++ b/source/backend/vulkan/compiler/makeshader.py @@ -1,9 +1,5 @@ #!/usr/bin/python # -*- coding: UTF-8 -*- - -gDefaultPath = "../execution/glsl" -gOutputHeadFile = "AllShader.h" -gOutputSourceFile = "AllShader.cpp" import os import json import sys # sys.argv @@ -12,7 +8,9 @@ import ConfigParser # ini (python 
3.0 use configparser) import fcntl # file lock import datetime # format file modify time - +gDefaultPath = sys.argv[1] #"../execution/glsl" +gOutputHeadFile = sys.argv[2]#"AllShader.h" +gOutputSourceFile = sys.argv[3]#"AllShader.cpp" def findAllShader(path): @@ -28,6 +26,7 @@ def findAllShader(path): def getName(fileName): s1 = fileName.replace("/", "_") s1 = s1.replace(".", "_") + s1=s1.replace("__","_") return s1 @@ -210,7 +209,7 @@ def setupShaderCache(self,objs): # cache invalid ,we setup cache file path if not self.cache_valid : self.__setupSpirvCacheFiles__(objs) - return + return for obj in objs: shader = obj.getShaderFile() @@ -314,7 +313,7 @@ def genShaderFileObjs(shaders, macros): shaderObjs.append(obj) simplename = fileName.split('/') simplename = simplename[len(simplename)-1] - + if macros.has_key(simplename): for macro in macros[simplename]: newName = fileName.replace(".comp", "") + "_" + macro + ".comp" @@ -365,8 +364,8 @@ def removeRefCompFiles(objs): def genMapFile(objs): mapFile = "VulkanShaderMap.cpp" cpp = '/*Auto Generated File, Don\' Modified.*/\n' - cpp += "#include \"VulkanShaderMap.hpp\"\n" - cpp += "#include \"AllShader.h\"\n" + cpp += "#include \n" + cpp += "#include \n" cpp += 'namespace MNN {\n' cpp += 'void VulkanShaderMap::init() {\n' for obj in objs: diff --git a/source/backend/vulkan/component/VulkanBuffer.cpp b/source/backend/vulkan/component/VulkanBuffer.cpp index 1a550f9d6..23692557b 100644 --- a/source/backend/vulkan/component/VulkanBuffer.cpp +++ b/source/backend/vulkan/component/VulkanBuffer.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanBuffer.hpp" +#include "backend/vulkan/component/VulkanBuffer.hpp" #include namespace MNN { diff --git a/source/backend/vulkan/component/VulkanBuffer.hpp b/source/backend/vulkan/component/VulkanBuffer.hpp index 529a29f20..566c56c67 100644 --- a/source/backend/vulkan/component/VulkanBuffer.hpp +++ b/source/backend/vulkan/component/VulkanBuffer.hpp @@ 
-8,7 +8,7 @@ #ifndef VulkanBuffer_hpp #define VulkanBuffer_hpp -#include "VulkanMemoryPool.hpp" +#include "backend/vulkan/component/VulkanMemoryPool.hpp" namespace MNN { class VulkanBuffer : public NonCopyable { public: diff --git a/source/backend/vulkan/component/VulkanCommandPool.cpp b/source/backend/vulkan/component/VulkanCommandPool.cpp index e175d3d70..730204d23 100644 --- a/source/backend/vulkan/component/VulkanCommandPool.cpp +++ b/source/backend/vulkan/component/VulkanCommandPool.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanCommandPool.hpp" +#include "backend/vulkan/component/VulkanCommandPool.hpp" #include #include -#include "VulkanFence.hpp" +#include "backend/vulkan/component/VulkanFence.hpp" namespace MNN { VulkanCommandPool::VulkanCommandPool(const VulkanDevice& dev) : mDevice(dev), mPool(VK_NULL_HANDLE) { CALL_VK(mDevice.createCommandPool(mPool)); diff --git a/source/backend/vulkan/component/VulkanCommandPool.hpp b/source/backend/vulkan/component/VulkanCommandPool.hpp index 414b7a1d9..090c8d9fe 100644 --- a/source/backend/vulkan/component/VulkanCommandPool.hpp +++ b/source/backend/vulkan/component/VulkanCommandPool.hpp @@ -9,9 +9,9 @@ #ifndef VulkanCommandPool_hpp #define VulkanCommandPool_hpp -#include "NonCopyable.hpp" -#include "VulkanDevice.hpp" -#include "vulkan_wrapper.h" +#include "core/NonCopyable.hpp" +#include "backend/vulkan/component/VulkanDevice.hpp" +#include "backend/vulkan/vulkan/vulkan_wrapper.h" namespace MNN { class VulkanCommandPool : public NonCopyable { public: diff --git a/source/backend/vulkan/component/VulkanDevice.cpp b/source/backend/vulkan/component/VulkanDevice.cpp index 8d21b0df3..427256208 100644 --- a/source/backend/vulkan/component/VulkanDevice.cpp +++ b/source/backend/vulkan/component/VulkanDevice.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanDevice.hpp" +#include "backend/vulkan/component/VulkanDevice.hpp" #include namespace 
MNN { diff --git a/source/backend/vulkan/component/VulkanDevice.hpp b/source/backend/vulkan/component/VulkanDevice.hpp index 75446cbd1..bb1358204 100644 --- a/source/backend/vulkan/component/VulkanDevice.hpp +++ b/source/backend/vulkan/component/VulkanDevice.hpp @@ -11,9 +11,9 @@ #include #include -#include "NonCopyable.hpp" -#include "VulkanInstance.hpp" -#include "vulkan_wrapper.h" +#include "core/NonCopyable.hpp" +#include "backend/vulkan/component/VulkanInstance.hpp" +#include "backend/vulkan/vulkan/vulkan_wrapper.h" namespace MNN { class VulkanDevice : public NonCopyable { diff --git a/source/backend/vulkan/component/VulkanFence.cpp b/source/backend/vulkan/component/VulkanFence.cpp index b8f85381b..e4467090e 100644 --- a/source/backend/vulkan/component/VulkanFence.cpp +++ b/source/backend/vulkan/component/VulkanFence.cpp @@ -6,12 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanFence.hpp" +#include "backend/vulkan/component/VulkanFence.hpp" #if VK_FENCE_WAIT_FD_IF_SUPPORT #include #include #include -#include "MNNDefine.h" +#include #endif namespace MNN { diff --git a/source/backend/vulkan/component/VulkanFence.hpp b/source/backend/vulkan/component/VulkanFence.hpp index 4bae10dc2..dd85b9869 100644 --- a/source/backend/vulkan/component/VulkanFence.hpp +++ b/source/backend/vulkan/component/VulkanFence.hpp @@ -10,9 +10,9 @@ #define VulkanFence_hpp #include -#include "NonCopyable.hpp" -#include "VulkanDevice.hpp" -#include "vulkan_wrapper.h" +#include "core/NonCopyable.hpp" +#include "backend/vulkan/component/VulkanDevice.hpp" +#include "backend/vulkan/vulkan/vulkan_wrapper.h" // if support Fence FD ,force use FD Wait function, this macro only used for test purpose, // if frameworks is blocked and not async , does not enable this macro diff --git a/source/backend/vulkan/component/VulkanImage.cpp b/source/backend/vulkan/component/VulkanImage.cpp index ca4a5a6b6..d1df8e831 100644 --- a/source/backend/vulkan/component/VulkanImage.cpp +++ 
b/source/backend/vulkan/component/VulkanImage.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanImage.hpp" +#include "backend/vulkan/component/VulkanImage.hpp" #include namespace MNN { VulkanSampler::VulkanSampler(const VulkanDevice& dev, VkFilter filter, VkSamplerAddressMode mode) : mDevice(dev) { diff --git a/source/backend/vulkan/component/VulkanImage.hpp b/source/backend/vulkan/component/VulkanImage.hpp index 79de79896..def45f862 100644 --- a/source/backend/vulkan/component/VulkanImage.hpp +++ b/source/backend/vulkan/component/VulkanImage.hpp @@ -8,9 +8,9 @@ #ifndef VulkanImage_hpp #define VulkanImage_hpp -#include "Tensor.hpp" -#include "VulkanBuffer.hpp" -#include "VulkanMemoryPool.hpp" +#include +#include "backend/vulkan/component/VulkanBuffer.hpp" +#include "backend/vulkan/component/VulkanMemoryPool.hpp" namespace MNN { class VulkanSampler : public NonCopyable { public: diff --git a/source/backend/vulkan/component/VulkanInstance.cpp b/source/backend/vulkan/component/VulkanInstance.cpp index f2c288559..e3583652a 100644 --- a/source/backend/vulkan/component/VulkanInstance.cpp +++ b/source/backend/vulkan/component/VulkanInstance.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanInstance.hpp" +#include "backend/vulkan/component/VulkanInstance.hpp" #include namespace MNN { @@ -66,4 +66,4 @@ const bool VulkanInstance::supportVulkan() const { } return true; } -} // namespace MNN \ No newline at end of file +} // namespace MNN diff --git a/source/backend/vulkan/component/VulkanInstance.hpp b/source/backend/vulkan/component/VulkanInstance.hpp index 73c5a3047..70c4562a9 100644 --- a/source/backend/vulkan/component/VulkanInstance.hpp +++ b/source/backend/vulkan/component/VulkanInstance.hpp @@ -9,8 +9,8 @@ #ifndef VulkanInstance_hpp #define VulkanInstance_hpp -#include "NonCopyable.hpp" -#include "vulkan_wrapper.h" +#include "core/NonCopyable.hpp" +#include 
"backend/vulkan/vulkan/vulkan_wrapper.h" namespace MNN { class VulkanInstance : public NonCopyable { diff --git a/source/backend/vulkan/component/VulkanMemoryPool.cpp b/source/backend/vulkan/component/VulkanMemoryPool.cpp index b2b837229..0ae27ee92 100644 --- a/source/backend/vulkan/component/VulkanMemoryPool.cpp +++ b/source/backend/vulkan/component/VulkanMemoryPool.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanMemoryPool.hpp" +#include "backend/vulkan/component/VulkanMemoryPool.hpp" namespace MNN { VulkanMemory::VulkanMemory(const VulkanDevice& dev, const VkMemoryAllocateInfo& info) : mDevice(dev) { CALL_VK(mDevice.allocMemory(mMemory, info)); diff --git a/source/backend/vulkan/component/VulkanMemoryPool.hpp b/source/backend/vulkan/component/VulkanMemoryPool.hpp index c1d61a8a8..7e542d7ff 100644 --- a/source/backend/vulkan/component/VulkanMemoryPool.hpp +++ b/source/backend/vulkan/component/VulkanMemoryPool.hpp @@ -12,9 +12,9 @@ #include #include #include -#include "NonCopyable.hpp" -#include "VulkanDevice.hpp" -#include "vulkan_wrapper.h" +#include "core/NonCopyable.hpp" +#include "backend/vulkan/component/VulkanDevice.hpp" +#include "backend/vulkan/vulkan/vulkan_wrapper.h" namespace MNN { diff --git a/source/backend/vulkan/component/VulkanPipeline.cpp b/source/backend/vulkan/component/VulkanPipeline.cpp index 786d69a7f..54efada53 100644 --- a/source/backend/vulkan/component/VulkanPipeline.cpp +++ b/source/backend/vulkan/component/VulkanPipeline.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanPipeline.hpp" +#include "backend/vulkan/component/VulkanPipeline.hpp" #include #include namespace MNN { diff --git a/source/backend/vulkan/component/VulkanPipeline.hpp b/source/backend/vulkan/component/VulkanPipeline.hpp index 00207ac16..7c7b22c81 100644 --- a/source/backend/vulkan/component/VulkanPipeline.hpp +++ b/source/backend/vulkan/component/VulkanPipeline.hpp @@ -13,10 +13,10 @@ 
#include #include #include -#include "NonCopyable.hpp" -#include "VulkanDevice.hpp" -#include "VulkanShaderMap.hpp" -#include "vulkan_wrapper.h" +#include "core/NonCopyable.hpp" +#include "backend/vulkan/component/VulkanDevice.hpp" +#include "backend/vulkan/shaders/VulkanShaderMap.hpp" +#include "backend/vulkan/vulkan/vulkan_wrapper.h" namespace MNN { class VulkanPipeline : public NonCopyable { public: diff --git a/source/backend/vulkan/component/VulkanSemaphore.cpp b/source/backend/vulkan/component/VulkanSemaphore.cpp index a42f7dc00..bf32f4dee 100644 --- a/source/backend/vulkan/component/VulkanSemaphore.cpp +++ b/source/backend/vulkan/component/VulkanSemaphore.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanSemaphore.hpp" +#include "backend/vulkan/component/VulkanSemaphore.hpp" namespace MNN { VulkanSemaphore::VulkanSemaphore(const VulkanDevice& dev) : mDevice(dev) { CALL_VK(mDevice.createSemaphore(mSemaphore)); diff --git a/source/backend/vulkan/component/VulkanSemaphore.hpp b/source/backend/vulkan/component/VulkanSemaphore.hpp index 549af3bf3..4a1260720 100644 --- a/source/backend/vulkan/component/VulkanSemaphore.hpp +++ b/source/backend/vulkan/component/VulkanSemaphore.hpp @@ -11,9 +11,9 @@ #include #include -#include "NonCopyable.hpp" -#include "VulkanDevice.hpp" -#include "vulkan_wrapper.h" +#include "core/NonCopyable.hpp" +#include "backend/vulkan/component/VulkanDevice.hpp" +#include "backend/vulkan/vulkan/vulkan_wrapper.h" namespace MNN { class VulkanSemaphore : public NonCopyable { diff --git a/source/backend/vulkan/execution/VulkanBasicExecution.cpp b/source/backend/vulkan/execution/VulkanBasicExecution.cpp index bfcd2aea7..ce2bfa444 100644 --- a/source/backend/vulkan/execution/VulkanBasicExecution.cpp +++ b/source/backend/vulkan/execution/VulkanBasicExecution.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanBasicExecution.hpp" -#include "VulkanBackend.hpp" +#include 
"backend/vulkan/execution/VulkanBasicExecution.hpp" +#include "backend/vulkan/backend/VulkanBackend.hpp" namespace MNN { VulkanBasicExecutionDirect::VulkanBasicExecutionDirect(std::shared_ptr encoder) : Execution(encoder->backend()) { mEncoder = encoder; @@ -47,7 +47,7 @@ VulkanBasicExecutionInDirect::VulkanBasicExecutionInDirect(std::shared_ptr &inputs, const std::vector &outputs) { - + auto extra = static_cast(backend()); auto mCmdBuffer = extra->getSingleCommand(); for (auto input : inputs) { diff --git a/source/backend/vulkan/execution/VulkanBasicExecution.hpp b/source/backend/vulkan/execution/VulkanBasicExecution.hpp index d7a7834ee..1ca3ab308 100644 --- a/source/backend/vulkan/execution/VulkanBasicExecution.hpp +++ b/source/backend/vulkan/execution/VulkanBasicExecution.hpp @@ -9,8 +9,8 @@ #ifndef VulkanBasicExecution_hpp #define VulkanBasicExecution_hpp -#include "Execution.hpp" -#include "VulkanBackend.hpp" +#include "core/Execution.hpp" +#include "backend/vulkan/backend/VulkanBackend.hpp" namespace MNN { class VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanBatchToSpaceND.cpp b/source/backend/vulkan/execution/VulkanBatchToSpaceND.cpp index d3830fa79..a3e315d25 100644 --- a/source/backend/vulkan/execution/VulkanBatchToSpaceND.cpp +++ b/source/backend/vulkan/execution/VulkanBatchToSpaceND.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanBatchToSpaceND.hpp" +#include "backend/vulkan/execution/VulkanBatchToSpaceND.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanBatchToSpaceND.hpp b/source/backend/vulkan/execution/VulkanBatchToSpaceND.hpp index 6b71f2d26..0f1e1215a 100644 --- a/source/backend/vulkan/execution/VulkanBatchToSpaceND.hpp +++ b/source/backend/vulkan/execution/VulkanBatchToSpaceND.hpp @@ -10,7 +10,7 @@ #define VulkanBatchToSpaceND_hpp #include -#include 
"VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanBinary.cpp b/source/backend/vulkan/execution/VulkanBinary.cpp index d93d69b22..8b23dea76 100644 --- a/source/backend/vulkan/execution/VulkanBinary.cpp +++ b/source/backend/vulkan/execution/VulkanBinary.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanBinary.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanBinary.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanBinary.hpp b/source/backend/vulkan/execution/VulkanBinary.hpp index 0b3840e58..5d28c0312 100644 --- a/source/backend/vulkan/execution/VulkanBinary.hpp +++ b/source/backend/vulkan/execution/VulkanBinary.hpp @@ -10,7 +10,7 @@ #define VulkanBinary_hpp #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanBinary : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanConcat.cpp b/source/backend/vulkan/execution/VulkanConcat.cpp index 7b3e463cb..efe12cf61 100644 --- a/source/backend/vulkan/execution/VulkanConcat.cpp +++ b/source/backend/vulkan/execution/VulkanConcat.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanConcat.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanConcat.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { struct ConcatParam { ivec4 inImageSize; diff --git a/source/backend/vulkan/execution/VulkanConcat.hpp b/source/backend/vulkan/execution/VulkanConcat.hpp index 228b09a19..72df65af8 100644 --- a/source/backend/vulkan/execution/VulkanConcat.hpp +++ b/source/backend/vulkan/execution/VulkanConcat.hpp @@ -8,8 +8,8 @@ #ifndef VulkanConcat_hpp #define 
VulkanConcat_hpp -#include "VulkanBasicExecution.hpp" -#include "VulkanImageConverter.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanImageConverter.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanConvolution.cpp b/source/backend/vulkan/execution/VulkanConvolution.cpp index 958191609..3e130d174 100644 --- a/source/backend/vulkan/execution/VulkanConvolution.cpp +++ b/source/backend/vulkan/execution/VulkanConvolution.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanConvolution.hpp" -#include "Macro.h" -#include "VulkanConvolutionImpl.hpp" +#include "backend/vulkan/execution/VulkanConvolution.hpp" +#include "core/Macro.h" +#include "backend/vulkan/execution/VulkanConvolutionImpl.hpp" //#define MNN_USE_1x1 namespace MNN { std::string VulkanConvolutionCommon::getPostTreatMacro(const Convolution2DCommon* common) { @@ -201,10 +201,10 @@ class VulkanConvolutionCreator : public VulkanBackend::Creator { const int fw = common->kernelX(); int srcCount = 0; const float *source = nullptr; - + srcCount = convReal->weight()->size() / (outputCount * fh * fw); source = convReal->weight()->data(); - + if (op->type() == OpType_Convolution) { auto convCommonParam = op->main_as_Convolution2D()->common(); const int group = convCommonParam->group(); diff --git a/source/backend/vulkan/execution/VulkanConvolution.hpp b/source/backend/vulkan/execution/VulkanConvolution.hpp index 1fe2e7004..e66aed37a 100644 --- a/source/backend/vulkan/execution/VulkanConvolution.hpp +++ b/source/backend/vulkan/execution/VulkanConvolution.hpp @@ -9,7 +9,7 @@ #ifndef VulkanConvolution_hpp #define VulkanConvolution_hpp -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanConvolutionCommon : public VulkanBasicExecution { public: diff --git a/source/backend/vulkan/execution/VulkanConvolutionImpl.cpp 
b/source/backend/vulkan/execution/VulkanConvolutionImpl.cpp index 41e609b99..48fd6bb6d 100644 --- a/source/backend/vulkan/execution/VulkanConvolutionImpl.cpp +++ b/source/backend/vulkan/execution/VulkanConvolutionImpl.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanConvolutionImpl.hpp" -#include "Macro.h" -#include "VulkanConvolution.hpp" -#include "VulkanConvolutionWinograd.hpp" -#include "VulkanMatrixMultier.hpp" +#include "backend/vulkan/execution/VulkanConvolutionImpl.hpp" +#include "core/Macro.h" +#include "backend/vulkan/execution/VulkanConvolution.hpp" +#include "backend/vulkan/execution/VulkanConvolutionWinograd.hpp" +#include "backend/vulkan/execution/VulkanMatrixMultier.hpp" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include namespace MNN { static int gPretreatLocalSize[3] = {16, 16, 1}; std::shared_ptr VulkanConvolutionImpl::createBufferForSlideWindow(const VulkanBackend* extra, diff --git a/source/backend/vulkan/execution/VulkanConvolutionImpl.hpp b/source/backend/vulkan/execution/VulkanConvolutionImpl.hpp index 6f1a8949c..6fe2b6b9d 100644 --- a/source/backend/vulkan/execution/VulkanConvolutionImpl.hpp +++ b/source/backend/vulkan/execution/VulkanConvolutionImpl.hpp @@ -8,7 +8,7 @@ #ifndef VulkanConvolutionImpl_hpp #define VulkanConvolutionImpl_hpp -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanConvolutionImpl { public: diff --git a/source/backend/vulkan/execution/VulkanConvolutionWinograd.cpp b/source/backend/vulkan/execution/VulkanConvolutionWinograd.cpp index 2a2b0e128..1472261f2 100644 --- a/source/backend/vulkan/execution/VulkanConvolutionWinograd.cpp +++ b/source/backend/vulkan/execution/VulkanConvolutionWinograd.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanConvolutionWinograd.hpp" +#include "backend/vulkan/execution/VulkanConvolutionWinograd.hpp" #include 
-#include "Macro.h" -#include "WingoradGenerater.hpp" +#include "core/Macro.h" +#include "math/WingoradGenerater.hpp" #define COMPUT_SIZE 4 -#define COMPUT_SIZE2 16 -#include "VulkanConvolution.hpp" +#define COMPUT_SIZE2 1 +#include "backend/vulkan/execution/VulkanConvolution.hpp" namespace MNN { struct WinogradConst { ivec4 inputSize; diff --git a/source/backend/vulkan/execution/VulkanConvolutionWinograd.hpp b/source/backend/vulkan/execution/VulkanConvolutionWinograd.hpp index 739d6fc5c..d567ec881 100644 --- a/source/backend/vulkan/execution/VulkanConvolutionWinograd.hpp +++ b/source/backend/vulkan/execution/VulkanConvolutionWinograd.hpp @@ -9,8 +9,8 @@ #ifndef VulkanConvolutionWinograd_hpp #define VulkanConvolutionWinograd_hpp -#include "VulkanConvolutionImpl.hpp" -#include "VulkanMatrixMultier.hpp" +#include "backend/vulkan/execution/VulkanConvolutionImpl.hpp" +#include "backend/vulkan/execution/VulkanMatrixMultier.hpp" namespace MNN { class VulkanConvolutionWinograd : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanCrop.cpp b/source/backend/vulkan/execution/VulkanCrop.cpp index 164db5765..5add1819e 100644 --- a/source/backend/vulkan/execution/VulkanCrop.cpp +++ b/source/backend/vulkan/execution/VulkanCrop.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanCrop.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanCrop.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { VulkanCrop::VulkanCrop(const Op* op, Backend* bn) : VulkanBasicExecution(bn) { diff --git a/source/backend/vulkan/execution/VulkanCrop.hpp b/source/backend/vulkan/execution/VulkanCrop.hpp index 081d35698..601397006 100644 --- a/source/backend/vulkan/execution/VulkanCrop.hpp +++ b/source/backend/vulkan/execution/VulkanCrop.hpp @@ -10,7 +10,7 @@ #define VulkanCrop_hpp #include -#include "VulkanBasicExecution.hpp" +#include 
"backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanDeconvolution.cpp b/source/backend/vulkan/execution/VulkanDeconvolution.cpp index f75021aa4..87fa733f3 100644 --- a/source/backend/vulkan/execution/VulkanDeconvolution.cpp +++ b/source/backend/vulkan/execution/VulkanDeconvolution.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanDeconvolution.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanDeconvolution.hpp" +#include "core/Macro.h" namespace MNN { VulkanDeconvolution::VulkanDeconvolution(Backend* bn, const Convolution2D* conv) : VulkanBasicExecution(bn) { mConvCommonOption = conv->common(); diff --git a/source/backend/vulkan/execution/VulkanDeconvolution.hpp b/source/backend/vulkan/execution/VulkanDeconvolution.hpp index 06d2082ee..8480e7f55 100644 --- a/source/backend/vulkan/execution/VulkanDeconvolution.hpp +++ b/source/backend/vulkan/execution/VulkanDeconvolution.hpp @@ -8,9 +8,9 @@ #ifndef VulkanDeconvolution_hpp #define VulkanDeconvolution_hpp -#include "VulkanBasicExecution.hpp" -#include "VulkanConvolution.hpp" -#include "VulkanMatrixMultier.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanConvolution.hpp" +#include "backend/vulkan/execution/VulkanMatrixMultier.hpp" namespace MNN { class VulkanDeconvolution : public VulkanBasicExecution { public: diff --git a/source/backend/vulkan/execution/VulkanDeconvolutionDepthwise.cpp b/source/backend/vulkan/execution/VulkanDeconvolutionDepthwise.cpp index 0dd645f68..948032e4d 100644 --- a/source/backend/vulkan/execution/VulkanDeconvolutionDepthwise.cpp +++ b/source/backend/vulkan/execution/VulkanDeconvolutionDepthwise.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanDeconvolutionDepthwise.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanDeconvolutionDepthwise.hpp" +#include 
"core/Macro.h" namespace MNN { VulkanDeconvolutionDepthwise::VulkanDeconvolutionDepthwise(Backend* bn, const Convolution2D* conv) : VulkanBasicExecution(bn) { diff --git a/source/backend/vulkan/execution/VulkanDeconvolutionDepthwise.hpp b/source/backend/vulkan/execution/VulkanDeconvolutionDepthwise.hpp index 43fe5bf83..d9ce0a2be 100644 --- a/source/backend/vulkan/execution/VulkanDeconvolutionDepthwise.hpp +++ b/source/backend/vulkan/execution/VulkanDeconvolutionDepthwise.hpp @@ -8,8 +8,8 @@ #ifndef VulkanDeconvolutionDepthwise_hpp #define VulkanDeconvolutionDepthwise_hpp -#include "VulkanBasicExecution.hpp" -#include "VulkanDeconvolution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanDeconvolution.hpp" namespace MNN { class VulkanDeconvolutionDepthwise : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanElementWise.cpp b/source/backend/vulkan/execution/VulkanElementWise.cpp index 2bf3cce9a..471da7b16 100644 --- a/source/backend/vulkan/execution/VulkanElementWise.cpp +++ b/source/backend/vulkan/execution/VulkanElementWise.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanElementWise.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanElementWise.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanElementWise.hpp b/source/backend/vulkan/execution/VulkanElementWise.hpp index 3da0252c1..efeba7db3 100644 --- a/source/backend/vulkan/execution/VulkanElementWise.hpp +++ b/source/backend/vulkan/execution/VulkanElementWise.hpp @@ -10,7 +10,7 @@ #define VulkanElementWise_hpp #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanElementWise : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanImageConverter.cpp b/source/backend/vulkan/execution/VulkanImageConverter.cpp index 
d00d97931..0787b9fc4 100644 --- a/source/backend/vulkan/execution/VulkanImageConverter.cpp +++ b/source/backend/vulkan/execution/VulkanImageConverter.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanImageConverter.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "VulkanBackend.hpp" +#include "backend/vulkan/execution/VulkanImageConverter.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "backend/vulkan/backend/VulkanBackend.hpp" namespace MNN { VulkanTensorConvert::VulkanTensorConvert(const VulkanBackend* bn) : mVulkanBackend(bn) { diff --git a/source/backend/vulkan/execution/VulkanImageConverter.hpp b/source/backend/vulkan/execution/VulkanImageConverter.hpp index c5f09e803..788fe776d 100644 --- a/source/backend/vulkan/execution/VulkanImageConverter.hpp +++ b/source/backend/vulkan/execution/VulkanImageConverter.hpp @@ -8,12 +8,12 @@ #ifndef VulkanImageConverter_hpp #define VulkanImageConverter_hpp -#include "Tensor.hpp" +#include #include "Tensor_generated.h" -#include "VulkanBuffer.hpp" -#include "VulkanCommandPool.hpp" -#include "VulkanImage.hpp" -#include "VulkanPipeline.hpp" +#include "backend/vulkan/component/VulkanBuffer.hpp" +#include "backend/vulkan/component/VulkanCommandPool.hpp" +#include "backend/vulkan/component/VulkanImage.hpp" +#include "backend/vulkan/component/VulkanPipeline.hpp" namespace MNN { class VulkanBackend; class VulkanTensorConvert { diff --git a/source/backend/vulkan/execution/VulkanInterp.cpp b/source/backend/vulkan/execution/VulkanInterp.cpp index a65d631c8..89c2e0ec2 100644 --- a/source/backend/vulkan/execution/VulkanInterp.cpp +++ b/source/backend/vulkan/execution/VulkanInterp.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanInterp.hpp" +#include "backend/vulkan/execution/VulkanInterp.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanInterp.hpp 
b/source/backend/vulkan/execution/VulkanInterp.hpp index 24483d7ec..a62d7c1f0 100644 --- a/source/backend/vulkan/execution/VulkanInterp.hpp +++ b/source/backend/vulkan/execution/VulkanInterp.hpp @@ -11,7 +11,7 @@ #include -#include "VulkanResize.hpp" +#include "backend/vulkan/execution/VulkanResize.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanLRN.cpp b/source/backend/vulkan/execution/VulkanLRN.cpp index 198b9e921..5a78dc39c 100644 --- a/source/backend/vulkan/execution/VulkanLRN.cpp +++ b/source/backend/vulkan/execution/VulkanLRN.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanLRN.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanLRN.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { struct GpuParam { diff --git a/source/backend/vulkan/execution/VulkanLRN.hpp b/source/backend/vulkan/execution/VulkanLRN.hpp index 1bc6eca14..bfaf34fc0 100644 --- a/source/backend/vulkan/execution/VulkanLRN.hpp +++ b/source/backend/vulkan/execution/VulkanLRN.hpp @@ -8,7 +8,7 @@ #ifndef VulkanLRN_hpp #define VulkanLRN_hpp -#include "VulkanReshape.hpp" +#include "backend/vulkan/execution/VulkanReshape.hpp" namespace MNN { class VulkanLRN : public VulkanReshape { diff --git a/source/backend/vulkan/execution/VulkanLSTM.cpp b/source/backend/vulkan/execution/VulkanLSTM.cpp index a861d7c0d..aa41d4129 100644 --- a/source/backend/vulkan/execution/VulkanLSTM.cpp +++ b/source/backend/vulkan/execution/VulkanLSTM.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanLSTM.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanLSTM.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanLSTM.hpp b/source/backend/vulkan/execution/VulkanLSTM.hpp index ebfd72fc3..f6a82e04d 100644 --- a/source/backend/vulkan/execution/VulkanLSTM.hpp +++ 
b/source/backend/vulkan/execution/VulkanLSTM.hpp @@ -8,7 +8,7 @@ #ifndef VulkanLSTM_hpp #define VulkanLSTM_hpp -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanMatrixMultier.cpp b/source/backend/vulkan/execution/VulkanMatrixMultier.cpp index 609f004c2..a5e39eba7 100644 --- a/source/backend/vulkan/execution/VulkanMatrixMultier.cpp +++ b/source/backend/vulkan/execution/VulkanMatrixMultier.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanMatrixMultier.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanMatrixMultier.hpp" +#include "core/Macro.h" namespace MNN { struct constUniform { ivec4 outputSize; diff --git a/source/backend/vulkan/execution/VulkanMatrixMultier.hpp b/source/backend/vulkan/execution/VulkanMatrixMultier.hpp index 226f8838d..2f4fcf54f 100644 --- a/source/backend/vulkan/execution/VulkanMatrixMultier.hpp +++ b/source/backend/vulkan/execution/VulkanMatrixMultier.hpp @@ -10,7 +10,7 @@ #define VulkanMatrixMultier_hpp #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanMatrixMultier : public NonCopyable { public: diff --git a/source/backend/vulkan/execution/VulkanNormalize.cpp b/source/backend/vulkan/execution/VulkanNormalize.cpp index d9ce4fa9d..2ffba8b6b 100644 --- a/source/backend/vulkan/execution/VulkanNormalize.cpp +++ b/source/backend/vulkan/execution/VulkanNormalize.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanNormalize.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanNormalize.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { struct GpuParam { diff --git a/source/backend/vulkan/execution/VulkanNormalize.hpp b/source/backend/vulkan/execution/VulkanNormalize.hpp index 
407b8026c..f49021c36 100644 --- a/source/backend/vulkan/execution/VulkanNormalize.hpp +++ b/source/backend/vulkan/execution/VulkanNormalize.hpp @@ -8,7 +8,7 @@ #ifndef VulkanNormalize_hpp #define VulkanNormalize_hpp -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanNormalize : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanPermute.cpp b/source/backend/vulkan/execution/VulkanPermute.cpp index 37a210d0a..b065dccaf 100644 --- a/source/backend/vulkan/execution/VulkanPermute.cpp +++ b/source/backend/vulkan/execution/VulkanPermute.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanPermute.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanPermute.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { struct GpuParam { diff --git a/source/backend/vulkan/execution/VulkanPermute.hpp b/source/backend/vulkan/execution/VulkanPermute.hpp index c085478ea..ed53d8cf8 100644 --- a/source/backend/vulkan/execution/VulkanPermute.hpp +++ b/source/backend/vulkan/execution/VulkanPermute.hpp @@ -8,8 +8,8 @@ #ifndef VulkanPermute_hpp #define VulkanPermute_hpp -#include "VulkanBasicExecution.hpp" -#include "VulkanImageConverter.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanImageConverter.hpp" namespace MNN { class VulkanPermute : public VulkanBasicExecution { public: diff --git a/source/backend/vulkan/execution/VulkanPool.cpp b/source/backend/vulkan/execution/VulkanPool.cpp index aacae5997..93ece95df 100644 --- a/source/backend/vulkan/execution/VulkanPool.cpp +++ b/source/backend/vulkan/execution/VulkanPool.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanPool.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanPool.hpp" +#include "core/Macro.h" namespace 
MNN { struct ConstBuffer { ivec4 inputSize; @@ -62,11 +62,11 @@ ErrorCode VulkanPool::onEncode(const std::vector& inputs, const std::ve pool->outputSize[0] = ow; pool->outputSize[1] = oh; pool->outputSize[2] = ocDiv4 * output->batch(); + int padWidth = mCommon->padX(); + int padHeight = mCommon->padY(); int strideWidth = mCommon->strideX(); int strideHeight = mCommon->strideY(); - int padWidth = mCommon->padX(); - int padHeight = mCommon->padY(); // edit const if global int kernelWidth = std::min(mCommon->kernelX(), iw); @@ -85,6 +85,8 @@ ErrorCode VulkanPool::onEncode(const std::vector& inputs, const std::ve int padNeededHeight = (output->height() - 1) * strideHeight + kernelHeight - input->height(); padWidth = padNeededWidth > 0 ? padNeededWidth / 2 : 0; padHeight = padNeededHeight > 0 ? padNeededHeight / 2 : 0; + } else if (mCommon->padType() == PoolPadType_VALID) { + padWidth = padHeight = 0; } pool->pad[0] = padWidth; diff --git a/source/backend/vulkan/execution/VulkanPool.hpp b/source/backend/vulkan/execution/VulkanPool.hpp index c378bd31e..b4e68683e 100644 --- a/source/backend/vulkan/execution/VulkanPool.hpp +++ b/source/backend/vulkan/execution/VulkanPool.hpp @@ -9,7 +9,7 @@ #ifndef VulkanPool_hpp #define VulkanPool_hpp -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanPool : public VulkanBasicExecution { public: diff --git a/source/backend/vulkan/execution/VulkanROIPooling.cpp b/source/backend/vulkan/execution/VulkanROIPooling.cpp index 22a45447e..58c308c9a 100644 --- a/source/backend/vulkan/execution/VulkanROIPooling.cpp +++ b/source/backend/vulkan/execution/VulkanROIPooling.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanROIPooling.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanROIPooling.hpp" +#include "core/Macro.h" namespace MNN { struct GpuParam { diff --git a/source/backend/vulkan/execution/VulkanROIPooling.hpp 
b/source/backend/vulkan/execution/VulkanROIPooling.hpp index e5d1fff5e..68ea4d8a6 100644 --- a/source/backend/vulkan/execution/VulkanROIPooling.hpp +++ b/source/backend/vulkan/execution/VulkanROIPooling.hpp @@ -8,7 +8,7 @@ #ifndef VulkanROIPooling_hpp #define VulkanROIPooling_hpp -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanROIPooling : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanRelu.cpp b/source/backend/vulkan/execution/VulkanRelu.cpp index 05b0c611d..25931a0ae 100644 --- a/source/backend/vulkan/execution/VulkanRelu.cpp +++ b/source/backend/vulkan/execution/VulkanRelu.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanRelu.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanRelu.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { struct GpuReluParam { diff --git a/source/backend/vulkan/execution/VulkanRelu.hpp b/source/backend/vulkan/execution/VulkanRelu.hpp index f1a4cbd9e..d091fbdde 100644 --- a/source/backend/vulkan/execution/VulkanRelu.hpp +++ b/source/backend/vulkan/execution/VulkanRelu.hpp @@ -11,7 +11,7 @@ #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanReshape.cpp b/source/backend/vulkan/execution/VulkanReshape.cpp index 64aaeeb0e..d6afb8300 100644 --- a/source/backend/vulkan/execution/VulkanReshape.cpp +++ b/source/backend/vulkan/execution/VulkanReshape.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanReshape.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanReshape.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanReshape.hpp 
b/source/backend/vulkan/execution/VulkanReshape.hpp index fd0c88d4d..2ed7bc5c3 100644 --- a/source/backend/vulkan/execution/VulkanReshape.hpp +++ b/source/backend/vulkan/execution/VulkanReshape.hpp @@ -8,8 +8,8 @@ #ifndef VulkanReshape_hpp #define VulkanReshape_hpp -#include "VulkanBasicExecution.hpp" -#include "VulkanImageConverter.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanImageConverter.hpp" namespace MNN { class VulkanReshape : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanResize.cpp b/source/backend/vulkan/execution/VulkanResize.cpp index 1e12d77f2..43192ba4f 100644 --- a/source/backend/vulkan/execution/VulkanResize.cpp +++ b/source/backend/vulkan/execution/VulkanResize.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanResize.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanResize.hpp" +#include "core/Macro.h" namespace MNN { struct GpuParam { diff --git a/source/backend/vulkan/execution/VulkanResize.hpp b/source/backend/vulkan/execution/VulkanResize.hpp index 000a419ad..94cd8587d 100644 --- a/source/backend/vulkan/execution/VulkanResize.hpp +++ b/source/backend/vulkan/execution/VulkanResize.hpp @@ -8,7 +8,7 @@ #ifndef VulkanResize_hpp #define VulkanResize_hpp -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanResize : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanScale.cpp b/source/backend/vulkan/execution/VulkanScale.cpp index da156be10..d484c048d 100644 --- a/source/backend/vulkan/execution/VulkanScale.cpp +++ b/source/backend/vulkan/execution/VulkanScale.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanScale.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanScale.hpp" +#include "core/Macro.h" +#include 
"core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanScale.hpp b/source/backend/vulkan/execution/VulkanScale.hpp index 695914110..22e417841 100644 --- a/source/backend/vulkan/execution/VulkanScale.hpp +++ b/source/backend/vulkan/execution/VulkanScale.hpp @@ -10,7 +10,7 @@ #define VulkanScale_hpp #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanScale : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanSigmoid.cpp b/source/backend/vulkan/execution/VulkanSigmoid.cpp index 8d236aa0d..09c20b11b 100644 --- a/source/backend/vulkan/execution/VulkanSigmoid.cpp +++ b/source/backend/vulkan/execution/VulkanSigmoid.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanSigmoid.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanSigmoid.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanSigmoid.hpp b/source/backend/vulkan/execution/VulkanSigmoid.hpp index 2737b3d4d..d8dd5c459 100644 --- a/source/backend/vulkan/execution/VulkanSigmoid.hpp +++ b/source/backend/vulkan/execution/VulkanSigmoid.hpp @@ -8,7 +8,7 @@ #ifndef MNN_VULKANSIGMOID_H #define MNN_VULKANSIGMOID_H -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanSlice.cpp b/source/backend/vulkan/execution/VulkanSlice.cpp index f49fa00ad..bb62a52f5 100644 --- a/source/backend/vulkan/execution/VulkanSlice.cpp +++ b/source/backend/vulkan/execution/VulkanSlice.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanSlice.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanSlice.hpp" +#include "core/Macro.h" +#include 
"core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanSlice.hpp b/source/backend/vulkan/execution/VulkanSlice.hpp index 88c1c6194..6dc2eae5e 100644 --- a/source/backend/vulkan/execution/VulkanSlice.hpp +++ b/source/backend/vulkan/execution/VulkanSlice.hpp @@ -11,8 +11,8 @@ #include -#include "VulkanBasicExecution.hpp" -#include "VulkanImageConverter.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanImageConverter.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanSoftmax.cpp b/source/backend/vulkan/execution/VulkanSoftmax.cpp index fb1bc28cf..d5545920a 100644 --- a/source/backend/vulkan/execution/VulkanSoftmax.cpp +++ b/source/backend/vulkan/execution/VulkanSoftmax.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanSoftmax.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanSoftmax.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanSoftmax.hpp b/source/backend/vulkan/execution/VulkanSoftmax.hpp index eeb3a8891..e2c762fe4 100644 --- a/source/backend/vulkan/execution/VulkanSoftmax.hpp +++ b/source/backend/vulkan/execution/VulkanSoftmax.hpp @@ -10,7 +10,7 @@ #define VulkanSoftmax_hpp #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanSoftmax : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanSpaceToBatchND.cpp b/source/backend/vulkan/execution/VulkanSpaceToBatchND.cpp index f5c36c5d5..13ba52a1d 100644 --- a/source/backend/vulkan/execution/VulkanSpaceToBatchND.cpp +++ b/source/backend/vulkan/execution/VulkanSpaceToBatchND.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanSpaceToBatchND.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" 
+#include "backend/vulkan/execution/VulkanSpaceToBatchND.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanSpaceToBatchND.hpp b/source/backend/vulkan/execution/VulkanSpaceToBatchND.hpp index a5742b87f..a9191b9c1 100644 --- a/source/backend/vulkan/execution/VulkanSpaceToBatchND.hpp +++ b/source/backend/vulkan/execution/VulkanSpaceToBatchND.hpp @@ -10,7 +10,7 @@ #define VulkanSpaceToBatchND_hpp #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanSpatialProduct.cpp b/source/backend/vulkan/execution/VulkanSpatialProduct.cpp index f93b42930..1a0c44515 100644 --- a/source/backend/vulkan/execution/VulkanSpatialProduct.cpp +++ b/source/backend/vulkan/execution/VulkanSpatialProduct.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanSpatialProduct.hpp" -#include "Macro.h" +#include "backend/vulkan/execution/VulkanSpatialProduct.hpp" +#include "core/Macro.h" namespace MNN { struct GpuParam { diff --git a/source/backend/vulkan/execution/VulkanSpatialProduct.hpp b/source/backend/vulkan/execution/VulkanSpatialProduct.hpp index 1364d0009..2fd58b825 100644 --- a/source/backend/vulkan/execution/VulkanSpatialProduct.hpp +++ b/source/backend/vulkan/execution/VulkanSpatialProduct.hpp @@ -8,7 +8,7 @@ #ifndef VulkanSpatialProduct_hpp #define VulkanSpatialProduct_hpp -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanSpatialProduct : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanSqueeze.cpp b/source/backend/vulkan/execution/VulkanSqueeze.cpp index ea671a275..645a4f540 100644 --- a/source/backend/vulkan/execution/VulkanSqueeze.cpp +++ b/source/backend/vulkan/execution/VulkanSqueeze.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group 
Holding Limited // -#include "VulkanSqueeze.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanSqueeze.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { VulkanSqueeze::VulkanSqueeze(Backend* bn) : VulkanBasicExecution(bn) { } diff --git a/source/backend/vulkan/execution/VulkanSqueeze.hpp b/source/backend/vulkan/execution/VulkanSqueeze.hpp index ee4aa69f1..443a5e677 100644 --- a/source/backend/vulkan/execution/VulkanSqueeze.hpp +++ b/source/backend/vulkan/execution/VulkanSqueeze.hpp @@ -10,7 +10,7 @@ #define VulkanSqueeze_hpp #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { class VulkanSqueeze : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanTensorConvert.cpp b/source/backend/vulkan/execution/VulkanTensorConvert.cpp index 0849892eb..0eee8aad7 100644 --- a/source/backend/vulkan/execution/VulkanTensorConvert.cpp +++ b/source/backend/vulkan/execution/VulkanTensorConvert.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanTensorConvert.hpp" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanTensorConvert.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanTensorConvert.hpp b/source/backend/vulkan/execution/VulkanTensorConvert.hpp index bfcced472..dd4d52fbd 100644 --- a/source/backend/vulkan/execution/VulkanTensorConvert.hpp +++ b/source/backend/vulkan/execution/VulkanTensorConvert.hpp @@ -10,11 +10,11 @@ #define VulkanTensorConvert_hpp #include -#include "Macro.h" -#include "Macro.h" +#include "core/Macro.h" +#include "core/Macro.h" -#include "VulkanBasicExecution.hpp" -#include "VulkanImageConverter.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanImageConverter.hpp" namespace MNN { class 
VulkanTensorConvertVulkanBasicExecution : public VulkanBasicExecution { diff --git a/source/backend/vulkan/execution/VulkanUnary.cpp b/source/backend/vulkan/execution/VulkanUnary.cpp index 9ffc912e8..3f27f0672 100644 --- a/source/backend/vulkan/execution/VulkanUnary.cpp +++ b/source/backend/vulkan/execution/VulkanUnary.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "VulkanUnary.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "backend/vulkan/execution/VulkanUnary.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/backend/vulkan/execution/VulkanUnary.hpp b/source/backend/vulkan/execution/VulkanUnary.hpp index 3c5a5a4e4..367805f81 100644 --- a/source/backend/vulkan/execution/VulkanUnary.hpp +++ b/source/backend/vulkan/execution/VulkanUnary.hpp @@ -10,7 +10,7 @@ #define VulkanUnary_hpp #include -#include "VulkanBasicExecution.hpp" +#include "backend/vulkan/execution/VulkanBasicExecution.hpp" namespace MNN { diff --git a/source/backend/vulkan/runtime/vulkan_wrapper.cpp b/source/backend/vulkan/runtime/vulkan_wrapper.cpp index 3121f64f7..e6fd998fe 100644 --- a/source/backend/vulkan/runtime/vulkan_wrapper.cpp +++ b/source/backend/vulkan/runtime/vulkan_wrapper.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // This file is generated. 
-#include "vulkan_wrapper.h" +#include "backend/vulkan/vulkan/vulkan_wrapper.h" #include int InitVulkan(void) { diff --git a/source/backend/vulkan/compiler/AllShader.h b/source/backend/vulkan/shaders/AllShader.h similarity index 99% rename from source/backend/vulkan/compiler/AllShader.h rename to source/backend/vulkan/shaders/AllShader.h index 4ca3908e4..99b5c07c9 100644 --- a/source/backend/vulkan/compiler/AllShader.h +++ b/source/backend/vulkan/shaders/AllShader.h @@ -162,4 +162,4 @@ extern const unsigned char glsl_nhwcTonc4hw4_comp[]; extern unsigned int glsl_nhwcTonc4hw4_comp_len; extern const unsigned char glsl_softmaxChannel_comp[]; extern unsigned int glsl_softmaxChannel_comp_len; -#endif \ No newline at end of file +#endif diff --git a/source/backend/vulkan/include/VulkanShaderMap.hpp b/source/backend/vulkan/shaders/VulkanShaderMap.hpp similarity index 100% rename from source/backend/vulkan/include/VulkanShaderMap.hpp rename to source/backend/vulkan/shaders/VulkanShaderMap.hpp diff --git a/source/backend/vulkan/include/vulkan/vk_platform.h b/source/backend/vulkan/vulkan/vk_platform.h similarity index 100% rename from source/backend/vulkan/include/vulkan/vk_platform.h rename to source/backend/vulkan/vulkan/vk_platform.h diff --git a/source/backend/vulkan/include/vulkan/vulkan.h b/source/backend/vulkan/vulkan/vulkan.h similarity index 100% rename from source/backend/vulkan/include/vulkan/vulkan.h rename to source/backend/vulkan/vulkan/vulkan.h diff --git a/source/backend/vulkan/include/vulkan/vulkan_android.h b/source/backend/vulkan/vulkan/vulkan_android.h similarity index 100% rename from source/backend/vulkan/include/vulkan/vulkan_android.h rename to source/backend/vulkan/vulkan/vulkan_android.h diff --git a/source/backend/vulkan/include/vulkan/vulkan_core.h b/source/backend/vulkan/vulkan/vulkan_core.h similarity index 100% rename from source/backend/vulkan/include/vulkan/vulkan_core.h rename to source/backend/vulkan/vulkan/vulkan_core.h diff --git 
a/source/backend/vulkan/runtime/vulkan_wrapper.h b/source/backend/vulkan/vulkan/vulkan_wrapper.h similarity index 99% rename from source/backend/vulkan/runtime/vulkan_wrapper.h rename to source/backend/vulkan/vulkan/vulkan_wrapper.h index 74f2e4919..764dfbdcf 100644 --- a/source/backend/vulkan/runtime/vulkan_wrapper.h +++ b/source/backend/vulkan/vulkan/vulkan_wrapper.h @@ -17,7 +17,7 @@ #define VULKAN_WRAPPER_H #define VK_NO_PROTOTYPES 1 -#include "Macro.h" +#include "core/Macro.h" #include "vulkan/vulkan.h" // Vulkan call wrapper #define CALL_VK(func) \ diff --git a/source/core/AutoTime.cpp b/source/core/AutoTime.cpp index 4b7424f12..b4f1be569 100644 --- a/source/core/AutoTime.cpp +++ b/source/core/AutoTime.cpp @@ -13,41 +13,59 @@ #else #include #endif -#include "AutoTime.hpp" -#include "Macro.h" +#include +#include "core/Macro.h" namespace MNN { -AutoTime::AutoTime(int line, const char* func) { + +Timer::Timer() { + reset(); +} + +Timer::~Timer() { + // do nothing +} + +void Timer::reset() { + #if defined(_MSC_VER) + LARGE_INTEGER time, freq; + QueryPerformanceFrequency(&freq); + QueryPerformanceCounter(&time); + uint64_t sec = time.QuadPart / freq.QuadPart; + uint64_t usec = (time.QuadPart % freq.QuadPart) * 1000000 / freq.QuadPart; + mLastResetTime = sec * 1000000 + usec; + #else + struct timeval Current; + gettimeofday(&Current, nullptr); + mLastResetTime = Current.tv_sec * 1000000 + Current.tv_usec; + #endif +} + +uint64_t Timer::durationInUs() { + #if defined(_MSC_VER) + LARGE_INTEGER time, freq; + QueryPerformanceCounter(&time); + QueryPerformanceFrequency(&freq); + uint64_t sec = time.QuadPart / freq.QuadPart; + uint64_t usec = (time.QuadPart % freq.QuadPart) * 1000000 / freq.QuadPart; + auto lastTime = sec * 1000000 + usec; + #else + struct timeval Current; + gettimeofday(&Current, nullptr); + auto lastTime = Current.tv_sec * 1000000 + Current.tv_usec; + #endif + + return lastTime - mLastResetTime; +} + +AutoTime::AutoTime(int line, const char* func) : 
Timer() { mName = ::strdup(func); mLine = line; -#if defined(_MSC_VER) - LARGE_INTEGER time, freq; - QueryPerformanceFrequency(&freq); - QueryPerformanceCounter(&time); - uint64_t sec = time.QuadPart / freq.QuadPart; - uint64_t usec = (time.QuadPart % freq.QuadPart) * 1000000 / freq.QuadPart; - mCurrentTime = sec * 1000000 + usec; -#else - struct timeval Current; - gettimeofday(&Current, nullptr); - mCurrentTime = Current.tv_sec * 1000000 + Current.tv_usec; -#endif } AutoTime::~AutoTime() { -#if defined(_MSC_VER) - LARGE_INTEGER time, freq; - QueryPerformanceCounter(&time); - QueryPerformanceFrequency(&freq); - uint64_t sec = time.QuadPart / freq.QuadPart; - uint64_t usec = (time.QuadPart % freq.QuadPart) * 1000000 / freq.QuadPart; - auto lastTime = sec * 1000000 + usec; -#else - struct timeval Current; - gettimeofday(&Current, nullptr); - auto lastTime = Current.tv_sec * 1000000 + Current.tv_usec; -#endif - - MNN_PRINT("%s, %d, cost time: %f ms\n", mName, mLine, (float)(lastTime - mCurrentTime) / 1000.0f); + auto timeInUs = durationInUs(); + MNN_PRINT("%s, %d, cost time: %f ms\n", mName, mLine, (float)timeInUs / 1000.0f); free(mName); } + } // namespace MNN diff --git a/source/core/Backend.cpp b/source/core/Backend.cpp index d04d31b7b..2ae579c0c 100644 --- a/source/core/Backend.cpp +++ b/source/core/Backend.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Backend.hpp" #include #include #include "MNN_generated.h" -#include "Macro.h" +#include "core/Macro.h" +#include "core/Backend.hpp" namespace MNN { diff --git a/source/core/Backend.hpp b/source/core/Backend.hpp index fb5da9e66..fc330c02c 100644 --- a/source/core/Backend.hpp +++ b/source/core/Backend.hpp @@ -13,10 +13,10 @@ #include #include #include -#include "ErrorCode.hpp" -#include "MNNForwardType.h" +#include +#include #include "NonCopyable.hpp" -#include "Tensor.hpp" +#include namespace MNN { diff --git a/source/core/BackendFactory.cpp b/source/core/BackendFactory.cpp 
index a910442c1..ddcf9e821 100644 --- a/source/core/BackendFactory.cpp +++ b/source/core/BackendFactory.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "BackendFactory.hpp" -#include "CPUBackend.hpp" -#include "Macro.h" +#include "core/BackendFactory.hpp" +//#include #include diff --git a/source/core/Execution.hpp b/source/core/Execution.hpp index 43133d570..c4f65a724 100644 --- a/source/core/Execution.hpp +++ b/source/core/Execution.hpp @@ -11,10 +11,10 @@ #include #include -#include "ErrorCode.hpp" -#include "MNNForwardType.h" +#include +#include #include "NonCopyable.hpp" -#include "Tensor.hpp" +#include namespace MNN { class Backend; diff --git a/source/core/FileLoader.cpp b/source/core/FileLoader.cpp index 874e36e5d..8898d8a7f 100644 --- a/source/core/FileLoader.cpp +++ b/source/core/FileLoader.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "FileLoader.hpp" +#include "core/FileLoader.hpp" namespace MNN { FileLoader::FileLoader(const char* file) { mFile = fopen(file, "rb"); diff --git a/source/core/FileLoader.hpp b/source/core/FileLoader.hpp index d53b90732..ff49814f6 100644 --- a/source/core/FileLoader.hpp +++ b/source/core/FileLoader.hpp @@ -7,7 +7,7 @@ // #include -#include "AutoStorage.h" +#include "core/AutoStorage.h" namespace MNN { class MNN_PUBLIC FileLoader { public: diff --git a/source/core/Interpreter.cpp b/source/core/Interpreter.cpp index 6ce73d3ef..01006e449 100644 --- a/source/core/Interpreter.cpp +++ b/source/core/Interpreter.cpp @@ -6,15 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" #include #include #include #include -#include "AutoStorage.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "FileLoader.hpp" +#include "core/AutoStorage.h" +#include +#include "core/Session.hpp" +#include "core/FileLoader.hpp" namespace MNN { struct Content { diff --git a/source/core/MNNMemoryUtils.c b/source/core/MNNMemoryUtils.cpp 
similarity index 82% rename from source/core/MNNMemoryUtils.c rename to source/core/MNNMemoryUtils.cpp index b4541c48e..96f7dce3b 100644 --- a/source/core/MNNMemoryUtils.c +++ b/source/core/MNNMemoryUtils.cpp @@ -6,16 +6,16 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "MNNMemoryUtils.h" #include #include -#include "Macro.h" +#include "core/MNNMemoryUtils.h" +#include "core/Macro.h" static inline void **alignPointer(void **ptr, size_t alignment) { return (void **)((intptr_t)((unsigned char *)ptr + alignment - 1) & -alignment); } -void *MNNMemoryAllocAlign(size_t size, size_t alignment) { +extern "C" void *MNNMemoryAllocAlign(size_t size, size_t alignment) { MNN_ASSERT(size > 0); #ifdef MNN_DEBUG_MEMORY @@ -33,7 +33,7 @@ void *MNNMemoryAllocAlign(size_t size, size_t alignment) { #endif } -void *MNNMemoryCallocAlign(size_t size, size_t alignment) { +extern "C" void *MNNMemoryCallocAlign(size_t size, size_t alignment) { MNN_ASSERT(size > 0); #ifdef MNN_DEBUG_MEMORY @@ -50,7 +50,7 @@ void *MNNMemoryCallocAlign(size_t size, size_t alignment) { #endif } -void MNNMemoryFreeAlign(void *aligned) { +extern "C" void MNNMemoryFreeAlign(void *aligned) { #ifdef MNN_DEBUG_MEMORY free(aligned); #else diff --git a/source/core/MNNMemoryUtils.h b/source/core/MNNMemoryUtils.h index 4edbc3293..bd48bcc5e 100644 --- a/source/core/MNNMemoryUtils.h +++ b/source/core/MNNMemoryUtils.h @@ -10,7 +10,7 @@ #define MNNMemoryUtils_h #include -#include "Macro.h" +#include "core/Macro.h" #ifdef __cplusplus extern "C" { diff --git a/source/core/Macro.h b/source/core/Macro.h index af6f2abc7..fba6a478a 100644 --- a/source/core/Macro.h +++ b/source/core/Macro.h @@ -8,7 +8,7 @@ #ifndef macro_h #define macro_h -#include "MNNDefine.h" +#include #define ALIMIN(x, y) ((x) < (y) ? (x) : (y)) #define ALIMAX(x, y) ((x) > (y) ? 
(x) : (y)) diff --git a/source/core/Pipeline.cpp b/source/core/Pipeline.cpp index fad7680d6..ba81c9b52 100644 --- a/source/core/Pipeline.cpp +++ b/source/core/Pipeline.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Pipeline.hpp" -#include "Backend.hpp" -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" -#include "WrapExecution.hpp" +#include "core/Pipeline.hpp" +#include "core/Backend.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" +#include "core/WrapExecution.hpp" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include //#define MNN_DEBUG_TENSOR_SIZE namespace MNN { OperatorInfo::OperatorInfo() { @@ -38,7 +38,7 @@ float OperatorInfo::flops() const { static Backend::StorageType _getTensorStorageType(const Tensor* tensor) { auto des = TensorUtils::getDescribe(tensor); - if (des->isConst || des->isInput) { + if (des->isConst || des->isInput || des->isTrainableParameter) { return Backend::DYNAMIC_SEPERATE; } if (des->handleType != Tensor::HANDLE_NONE) { @@ -52,7 +52,7 @@ static Backend::StorageType _getTensorReleaseStorageType(const Tensor* tensor) { if (des->handleType != Tensor::HANDLE_NONE) { return Backend::DYNAMIC_SEPERATE; } - if (des->isConst) { + if (des->isConst || des->isTrainableParameter) { return Backend::DYNAMIC_SEPERATE; } return Backend::DYNAMIC; @@ -222,6 +222,12 @@ ErrorCode Pipeline::Unit::prepare(Backend* bn, Backend* cpuBn) { break; } } + if (mType == OpType_TrainableParam) { + for (auto t : mOutputs) { + TensorUtils::getDescribe(t)->isTrainableParameter = true; + } + mConst = false; + } if (mConst) { for (auto t : mOutputs) { @@ -309,7 +315,8 @@ ErrorCode Pipeline::prepare() { ErrorCode Pipeline::execute() { mBackend->onExecuteBegin(); - for (auto& u : mUnits) { + for (int i=0; iexecute(); if (code != NO_ERROR) { mBackend->onExecuteEnd(); diff --git a/source/core/Pipeline.hpp b/source/core/Pipeline.hpp index 
f36d8a6bc..f4045fe46 100644 --- a/source/core/Pipeline.hpp +++ b/source/core/Pipeline.hpp @@ -9,7 +9,7 @@ #ifndef Pipeline_hpp #define Pipeline_hpp -#include "Execution.hpp" +#include "core/Execution.hpp" #include "Schedule.hpp" #include "MNN_generated.h" diff --git a/source/core/Schedule.cpp b/source/core/Schedule.cpp index 702d7fa0a..0eee0371b 100644 --- a/source/core/Schedule.cpp +++ b/source/core/Schedule.cpp @@ -6,17 +6,17 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Schedule.hpp" +#include "core/Schedule.hpp" #include #include #include #include -#include "DirectedAcyclicGraph.hpp" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "SizeComputer.hpp" +#include "core/DirectedAcyclicGraph.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include "core/SizeComputer.hpp" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include //#define MNN_AUTO_CHECK_COST namespace MNN { diff --git a/source/core/Schedule.hpp b/source/core/Schedule.hpp index c67338c15..794c5fb7e 100644 --- a/source/core/Schedule.hpp +++ b/source/core/Schedule.hpp @@ -13,8 +13,8 @@ #include #include #include -#include "Backend.hpp" -#include "Interpreter.hpp" +#include "core/Backend.hpp" +#include namespace MNN { diff --git a/source/core/Session.cpp b/source/core/Session.cpp index 0937096a8..67cd8ef83 100644 --- a/source/core/Session.cpp +++ b/source/core/Session.cpp @@ -6,18 +6,16 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Session.hpp" +#include "core/Session.hpp" #include #include #include -#include "AutoStorage.h" -#include "AutoTime.hpp" -#include "BackendFactory.hpp" -#include "CPUBackend.hpp" -#include "CommonOptFunction.h" +#include "core/AutoStorage.h" +#include +#include "core/BackendFactory.hpp" #include "MNN_generated.h" -#include "TensorUtils.hpp" -#include "WrapExecution.hpp" +#include "core/TensorUtils.hpp" +#include "core/WrapExecution.hpp" using namespace std; @@ -187,7 +185,10 @@ ErrorCode 
Session::updateToModel(Net* net) const { int opSize = net->oplists()->size(); for (int i = 0; i < opSize; ++i) { auto op = net->oplists()->GetAs(i); - if (op->type() != OpType_Const) { + if (net->usage() == Usage_INFERENCE && op->type() != OpType_Const) { + continue; + } + if (net->usage() == Usage_TRAIN && op->type() != OpType_TrainableParam) { continue; } if (!op->outputIndexes() || op->outputIndexes()->size() != 1) { @@ -198,8 +199,15 @@ ErrorCode Session::updateToModel(Net* net) const { if (blob->dataType() != DataType_DT_FLOAT) { continue; } - ::memcpy((void*)blob->float32s()->data(), mTensors[index].second->host(), - mTensors[index].second->size()); + std::shared_ptr tensor = mTensors[index].second; + if (tensor->host() == nullptr && tensor->deviceId() != 0) { + tensor.reset(Tensor::createHostTensorFromDevice(tensor.get(), true)); + if (tensor.get() == nullptr) { + MNN_ERROR("failed to copy trained param from device to host\n"); + return INVALID_VALUE; + } + } + ::memcpy((void*)blob->float32s()->data(), tensor->host(), tensor->size()); } return NO_ERROR; diff --git a/source/core/Session.hpp b/source/core/Session.hpp index fd1d89056..65211dca2 100644 --- a/source/core/Session.hpp +++ b/source/core/Session.hpp @@ -12,12 +12,12 @@ #include #include #include -#include "Backend.hpp" -#include "Macro.h" +#include "core/Backend.hpp" +#include "core/Macro.h" #include "Pipeline.hpp" #include "Schedule.hpp" #include "SizeComputer.hpp" -#include "Tensor.hpp" +#include namespace MNN { struct Net; @@ -124,7 +124,7 @@ class MNN_PUBLIC Session { ErrorCode updateToModel(Net* net) const; protected: - const std::vector>& getPipelines() const { + const std::vector>& getPipelines() const { return this->mPipelines; } @@ -134,8 +134,8 @@ class MNN_PUBLIC Session { Backend* _getDefaultBackend(); private: - std::map> mBackends; - std::vector> mPipelines; + std::map> mBackends; + std::vector> mPipelines; std::vector>> mTensors; std::map mInputs; std::map mOutputs; diff --git 
a/source/core/SizeComputer.cpp b/source/core/SizeComputer.cpp index 6ccd4155d..a41468239 100644 --- a/source/core/SizeComputer.cpp +++ b/source/core/SizeComputer.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" +#include "core/SizeComputer.hpp" #include -#include "Macro.h" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #include namespace MNN { #ifdef MNN_CODEGEN_REGISTER @@ -83,7 +83,11 @@ float SizeComputer::computeFlops(const MNN::Op* op, const std::vector& if (nullptr != computer) { return computer->onComputeFlops(op, inputs, outputs); } - return (float)outputs[0]->elementSize() / 1024.0f / 1024.0f; + auto sumFlops = 0.0f; + for (auto output : outputs) { + sumFlops += (float)output->elementSize() / 1024.0f / 1024.0f; + } + return sumFlops; } bool SizeComputer::computeOutputSize(const MNN::Op* op, const std::vector& inputs, @@ -112,7 +116,7 @@ bool SizeComputer::computeOutputSize(const MNN::Op* op, const std::vectortype(), op->name()->c_str()); + MNN_PRINT("Can't compute size for %d, name=%s\n", op->type(), op->name() ? 
op->name()->c_str() : ""); return false; } diff --git a/source/core/SizeComputer.hpp b/source/core/SizeComputer.hpp index 0c06275b3..8b924860e 100644 --- a/source/core/SizeComputer.hpp +++ b/source/core/SizeComputer.hpp @@ -12,10 +12,10 @@ #include #include #include -#include "Execution.hpp" +#include "core/Execution.hpp" #include "MNN_generated.h" -#include "Tensor.hpp" -#include "TensorUtils.hpp" +#include +#include "core/TensorUtils.hpp" #define FLOPS_M 1000000.0f namespace MNN { diff --git a/source/core/Tensor.cpp b/source/core/Tensor.cpp index 74b3004a8..2910f9221 100644 --- a/source/core/Tensor.cpp +++ b/source/core/Tensor.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Tensor.hpp" +#include #include #include -#include "Backend.hpp" -#include "MNNMemoryUtils.h" +#include "core/Backend.hpp" +#include "core/MNNMemoryUtils.h" #include "MNN_generated.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" #define MAX_TENSOR_DIM 6 diff --git a/source/core/TensorUtils.cpp b/source/core/TensorUtils.cpp index 6be61c6b2..361f62558 100644 --- a/source/core/TensorUtils.cpp +++ b/source/core/TensorUtils.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" #include #include #include #include #include -#include "Backend.hpp" -#include "Macro.h" +#include "core/Backend.hpp" +#include "core/Macro.h" namespace MNN { Tensor::InsideDescribe* TensorUtils::getDescribe(const Tensor* tensor) { diff --git a/source/core/TensorUtils.hpp b/source/core/TensorUtils.hpp index 4fa4221a0..28f7d2f93 100644 --- a/source/core/TensorUtils.hpp +++ b/source/core/TensorUtils.hpp @@ -9,7 +9,7 @@ #ifndef TensorUtils_hpp #define TensorUtils_hpp -#include "Tensor.hpp" +#include #include "Tensor_generated.h" namespace MNN { @@ -30,6 +30,8 @@ struct Tensor::InsideDescribe { /** for HOST tensor only. 
host memory is owned by tensor or not */ bool ownHost = false; + /** Whether the tensor is a trainable parameter. Trainable parameter should be stored in a different area. */ + bool isTrainableParameter = false; /** for DEVICE tensor only. const data may be stored in different area on device. */ bool isConst = false; /** for DEVICE tensor only. backend used to manage tensor's device memory. */ diff --git a/source/core/WrapExecution.cpp b/source/core/WrapExecution.cpp index 1e125f56b..0d7ad1d45 100644 --- a/source/core/WrapExecution.cpp +++ b/source/core/WrapExecution.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "WrapExecution.hpp" -#include "TensorUtils.hpp" +#include "core/WrapExecution.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/core/WrapExecution.hpp b/source/core/WrapExecution.hpp index e8a4938ed..778f6829c 100644 --- a/source/core/WrapExecution.hpp +++ b/source/core/WrapExecution.hpp @@ -11,9 +11,9 @@ #include #include -#include "Backend.hpp" -#include "Execution.hpp" -#include "Macro.h" +#include "core/Backend.hpp" +#include "core/Execution.hpp" +#include "core/Macro.h" namespace MNN { diff --git a/source/cv/ImageBlitter.cpp b/source/cv/ImageBlitter.cpp index 72e3ae383..731fbb323 100644 --- a/source/cv/ImageBlitter.cpp +++ b/source/cv/ImageBlitter.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ImageBlitter.hpp" +#include "cv/ImageBlitter.hpp" #include #include -#include "Macro.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif @@ -395,6 +395,9 @@ void MNNNV21ToBGR(const unsigned char* source, unsigned char* dest, size_t count if (source == src && dest == dst) return func ImageBlitter::BLITTER ImageBlitter::choose(ImageFormat source, ImageFormat dest) { + if (source == YUV_NV12) { + source = YUV_NV21; + } CHECKFORMAT(RGBA, RGBA, _copyC4); CHECKFORMAT(RGBA, BGRA, _rgba2bgra); CHECKFORMAT(RGBA, BGR, _rgba2bgr); diff --git 
a/source/cv/ImageBlitter.hpp b/source/cv/ImageBlitter.hpp index ca37916cb..a52ff4de2 100644 --- a/source/cv/ImageBlitter.hpp +++ b/source/cv/ImageBlitter.hpp @@ -10,7 +10,7 @@ #define ImageBlitter_hpp #include -#include "ImageProcess.hpp" +#include namespace MNN { namespace CV { class ImageBlitter { diff --git a/source/cv/ImageFloatBlitter.cpp b/source/cv/ImageFloatBlitter.cpp index d0e31ed68..80f8b69d2 100644 --- a/source/cv/ImageFloatBlitter.cpp +++ b/source/cv/ImageFloatBlitter.cpp @@ -1,4 +1,4 @@ -// + // // ImageFloatBlitter.cpp // MNN // @@ -6,7 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ImageFloatBlitter.hpp" +#include "cv/ImageFloatBlitter.hpp" +#include "Tensor_generated.h" extern "C" { void MNNBlitC1ToFloatRGBA(const unsigned char* source, float* dest, const float* mean, const float* normal, size_t count); @@ -15,7 +16,7 @@ void MNNBlitC3ToFloatRGBA(const unsigned char* source, float* dest, const float* void MNNBlitC4ToFloatC4(const unsigned char* source, float* dest, const float* mean, const float* normal, size_t count); } -#include "Macro.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif diff --git a/source/cv/ImageFloatBlitter.hpp b/source/cv/ImageFloatBlitter.hpp index a15d71315..3e9014209 100644 --- a/source/cv/ImageFloatBlitter.hpp +++ b/source/cv/ImageFloatBlitter.hpp @@ -9,8 +9,7 @@ #ifndef ImageFloatBlitter_hpp #define ImageFloatBlitter_hpp -#include "ImageProcess.hpp" -#include "Tensor_generated.h" +#include namespace MNN { namespace CV { class ImageFloatBlitter { diff --git a/source/cv/ImageProcess.cpp b/source/cv/ImageProcess.cpp index 21417e5b6..36b8b68a3 100644 --- a/source/cv/ImageProcess.cpp +++ b/source/cv/ImageProcess.cpp @@ -6,17 +6,16 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ImageProcess.hpp" #include #include -#include "AutoStorage.h" -#include "Macro.h" -#include "TensorUtils.hpp" +#include "core/AutoStorage.h" +#include "core/Macro.h" +#include 
"core/TensorUtils.hpp" #define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" -#include "ImageBlitter.hpp" -#include "ImageFloatBlitter.hpp" -#include "ImageSampler.hpp" +#include +#include "cv/ImageBlitter.hpp" +#include "cv/ImageFloatBlitter.hpp" +#include "cv/ImageSampler.hpp" #define CACHE_SIZE 128 namespace MNN { namespace CV { diff --git a/source/cv/ImageSampler.cpp b/source/cv/ImageSampler.cpp index 597d4f1fd..e55351d6a 100644 --- a/source/cv/ImageSampler.cpp +++ b/source/cv/ImageSampler.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ImageSampler.hpp" +#include "cv/ImageSampler.hpp" #include -#include "Macro.h" +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif @@ -183,18 +183,60 @@ static void MNNSamplerNV21Nearest(const unsigned char* source, unsigned char* de auto dstY = dest + sta; auto dstUV = dest + (capacity) + (sta / 2) * 2; - - MNNSamplerC1Nearest(srcY, dstY, points, 0, count, capacity, iw, ih, iw); + auto stride = yStride; + if (yStride == 0) { + stride = iw; + } + MNNSamplerC1Nearest(srcY, dstY, points, 0, count, capacity, iw, ih, stride); Point uvPoints[2]; uvPoints[0].fX = (points[0].fX - 0.01f) / 2.0f; uvPoints[0].fY = (points[0].fY - 0.01f) / 2.0f; uvPoints[1].fX = points[1].fX; uvPoints[1].fY = points[1].fY; + if (yStride == 0) { + stride = ((iw + 1) / 2) * 2; + } + MNNSamplerNearest(srcUV, dstUV, uvPoints, 0, (count + 1) / 2, (iw + 1) / 2, (ih + 1) / 2, stride, 2); +} - MNNSamplerNearest(srcUV, dstUV, uvPoints, 0, (count + 1) / 2, (iw + 1) / 2, (ih + 1) / 2, ((iw + 1) / 2) * 2, 2); +static void _swapUV(const unsigned char* source, unsigned char* dest, size_t countC2) { + int sta = 0; +#ifdef MNN_USE_NEON + int countC2C16 = (int)countC2 / 16; + sta = countC2C16 * 16; + for (int i=0; i namespace MNN { namespace CV { class ImageSampler { diff --git a/source/cv/Matrix_CV.cpp b/source/cv/Matrix_CV.cpp index 3e1beef0f..62e0b6125 100644 --- a/source/cv/Matrix_CV.cpp +++ b/source/cv/Matrix_CV.cpp @@ -9,8 
+9,8 @@ #include #include #include -#include "Matrix.h" -#include "SkNx.h" +#include +#include "cv/SkNx.h" namespace MNN { namespace CV { diff --git a/source/cv/SkNx.h b/source/cv/SkNx.h index c962907b0..808bb4872 100644 --- a/source/cv/SkNx.h +++ b/source/cv/SkNx.h @@ -16,7 +16,7 @@ #include #include #include -#include "Macro.h" +#include "core/Macro.h" // Every single SkNx method wants to be fully inlined. (We know better than MSVC). #define AI inline diff --git a/source/math/Matrix.cpp b/source/math/Matrix.cpp index 4f905d88f..153aa319b 100644 --- a/source/math/Matrix.cpp +++ b/source/math/Matrix.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Matrix.hpp" -#include "MNNMemoryUtils.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "math.h" +#include "math/Matrix.hpp" +#include "core/MNNMemoryUtils.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include #ifdef MNN_USE_NEON #include @@ -146,18 +146,18 @@ void Matrix::add(Tensor* C, const Tensor* A, const Tensor* B) { float32x4_t b1 = vld1q_f32(b + i + 4); float32x4_t b2 = vld1q_f32(b + i + 8); float32x4_t b3 = vld1q_f32(b + i + 12); - + float32x4_t sum0 = vaddq_f32(a0, b0); float32x4_t sum1 = vaddq_f32(a1, b1); float32x4_t sum2 = vaddq_f32(a2, b2); float32x4_t sum3 = vaddq_f32(a3, b3); - + vst1q_f32(c + i, sum0); vst1q_f32(c + i + 4, sum1); vst1q_f32(c + i + 8, sum2); vst1q_f32(c + i + 12, sum3); } - + for (; i <= size - 4; i += 4) { float32x4_t aa = vld1q_f32(a + i); float32x4_t bb = vld1q_f32(b + i); diff --git a/source/math/Matrix.hpp b/source/math/Matrix.hpp index f777f8cdb..d8ae6f675 100644 --- a/source/math/Matrix.hpp +++ b/source/math/Matrix.hpp @@ -11,7 +11,7 @@ #include #include -#include "Tensor.hpp" +#include namespace MNN { namespace Math { class MNN_PUBLIC Matrix { diff --git a/source/math/Vec4.hpp b/source/math/Vec4.hpp index 217e03734..341300a1f 100644 --- a/source/math/Vec4.hpp +++ b/source/math/Vec4.hpp @@ -8,7 +8,7 @@ #ifndef 
Vec4_hpp #define Vec4_hpp -#include "Macro.h" +#include "core/Macro.h" #include // supply std::max and std::min #ifdef MNN_USE_NEON #include diff --git a/source/math/WingoradGenerater.cpp b/source/math/WingoradGenerater.cpp index 22474e065..a13f7662b 100644 --- a/source/math/WingoradGenerater.cpp +++ b/source/math/WingoradGenerater.cpp @@ -8,8 +8,8 @@ #include #include -#include "WingoradGenerater.hpp" -#include "Macro.h" +#include "math/WingoradGenerater.hpp" +#include "core/Macro.h" namespace MNN { namespace Math { diff --git a/source/math/WingoradGenerater.hpp b/source/math/WingoradGenerater.hpp index d012c0063..da0ac0ae9 100644 --- a/source/math/WingoradGenerater.hpp +++ b/source/math/WingoradGenerater.hpp @@ -9,7 +9,7 @@ #ifndef WingoradGenerater_hpp #define WingoradGenerater_hpp #include -#include "Matrix.hpp" +#include "math/Matrix.hpp" namespace MNN { namespace Math { class MNN_PUBLIC WinogradGenerater { diff --git a/source/shape/ShapeArgMax.cpp b/source/shape/ShapeArgMax.cpp index bfc991b1c..a8847e5d5 100644 --- a/source/shape/ShapeArgMax.cpp +++ b/source/shape/ShapeArgMax.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { @@ -30,12 +30,9 @@ class ArgMaxComputer : public SizeComputer { const auto inputDimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat; - // Argmax can accept intput dimension-format:NC4HW4/NHWC NOW! 
- MNN_ASSERT(inputDimensionFormat == MNN_DATA_FORMAT_NHWC || inputDimensionFormat == MNN_DATA_FORMAT_NC4HW4); - TensorUtils::getDescribe(outputs[0])->dimensionFormat = inputDimensionFormat; - if (inputDimensionFormat == MNN_DATA_FORMAT_NHWC) { + if (inputDimensionFormat != MNN_DATA_FORMAT_NC4HW4) { int axis = argMax->axis(); if(axis < 0){ axis = input.dimensions + axis; @@ -74,5 +71,6 @@ class ArgMaxComputer : public SizeComputer { }; REGISTER_SHAPE(ArgMaxComputer, OpType_ArgMax); +REGISTER_SHAPE(ArgMaxComputer, OpType_ArgMin); } // namespace MNN diff --git a/source/shape/ShapeAsString.cpp b/source/shape/ShapeAsString.cpp index 328938396..494ba1bd6 100644 --- a/source/shape/ShapeAsString.cpp +++ b/source/shape/ShapeAsString.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class AsStringComputer : public SizeComputer { diff --git a/source/shape/ShapeBatchMatMul.cpp b/source/shape/ShapeBatchMatMul.cpp index 415791a97..320473c2c 100644 --- a/source/shape/ShapeBatchMatMul.cpp +++ b/source/shape/ShapeBatchMatMul.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/shape/ShapeBatchToSpaceND.cpp b/source/shape/ShapeBatchToSpaceND.cpp index 6a2c54e6a..154704514 100644 --- a/source/shape/ShapeBatchToSpaceND.cpp +++ b/source/shape/ShapeBatchToSpaceND.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" +#include "core/SizeComputer.hpp" namespace MNN { class BatchToSpaceNDSizeComputer : public SizeComputer { public: diff --git a/source/shape/ShapeBinaryOp.cpp b/source/shape/ShapeBinaryOp.cpp index 31fc9d0d0..1ea04404a 100644 --- 
a/source/shape/ShapeBinaryOp.cpp +++ b/source/shape/ShapeBinaryOp.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class BinaryOpComputer : public SizeComputer { public: @@ -74,7 +74,7 @@ class BinaryOpComputer : public SizeComputer { input1Length = input1->length(i-diffDimension); } if (input0Length != input1Length && input1Length != 1 && input0Length != 1) { - MNN_PRINT("%d, %d\n", input1Length, input0Length); + MNN_PRINT("Don't support broadcast for binaryOp, i0=%d, i1=%d\n", input1Length, input0Length); return false; } buffer.dim[i].extent = std::max(input0Length, input1Length); diff --git a/source/shape/ShapeBroadcastTo.cpp b/source/shape/ShapeBroadcastTo.cpp new file mode 100644 index 000000000..9bbb26499 --- /dev/null +++ b/source/shape/ShapeBroadcastTo.cpp @@ -0,0 +1,46 @@ +// +// ShapeBroadcastTo.cpp +// MNN +// +// Created by MNN on 2019/12/2. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" + +namespace MNN { + +class ShapeBroadcastTo : public SizeComputer { + virtual bool onComputeSize(const MNN::Op* op, const std::vector& inputs, + const std::vector& outputs) const override { + MNN_ASSERT(inputs.size() == 2); + MNN_ASSERT(outputs.size() == 1); + + auto input = inputs[0]; + auto shape = inputs[1]; + auto output = outputs[0]; + + const int dimension = input->dimensions(); + MNN_CHECK(shape->elementSize() == dimension, "input dimension does not match given shape!"); + + output->buffer().dimensions = dimension; + const int* shapeData = shape->host(); + for (int i = 0; i < dimension; ++i) { + const int dim = input->length(i); + if (shapeData[i] != dim) { + MNN_CHECK(dim == 1, "for each dimension pair they are either equal or one of them is one."); + } + output->setLength(i, shapeData[i]); + } + output->buffer().type = input->buffer().type; + TensorUtils::getDescribe(output)->dimensionFormat = TensorUtils::getDescribe(input)->dimensionFormat; + + return true; + } +}; + +REGISTER_SHAPE_INPUTS(ShapeBroadcastTo, OpType_BroadcastTo, {1}); + +} // namespace MNN diff --git a/source/shape/ShapeCast.cpp b/source/shape/ShapeCast.cpp index d418fdbb2..ba7a9b1f9 100644 --- a/source/shape/ShapeCast.cpp +++ b/source/shape/ShapeCast.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeConcat.cpp b/source/shape/ShapeConcat.cpp index 3334c0c9f..e7e8f6d77 100644 --- a/source/shape/ShapeConcat.cpp +++ b/source/shape/ShapeConcat.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class ConcatSizeComputer : public SizeComputer { @@ -50,18 +50,19 @@ class 
ConcatSizeComputer : public SizeComputer { continue; } sum += t->buffer().dim[axis].extent; + ob.type = t->buffer().type; for (int i = 0; i < t->dimensions(); ++i) { if (axis == i) { continue; } if (t->length(i) != outputs[0]->length(i)) { - MNN_PRINT("Error for concat size of op %s, %d input not match output\n", op->name()->c_str(), i); + auto name = op->name() ? op->name()->c_str() : ""; + MNN_PRINT("Error for concat size of op %s, %d input not match output\n", name, i); return false; } } } ob.dim[axis].extent = sum; - ob.type = inputs[0]->buffer().type; TensorUtils::getDescribe(outputs[0])->dimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat; return true; diff --git a/source/shape/ShapeConst.cpp b/source/shape/ShapeConst.cpp index d2866599d..a17ca55c8 100644 --- a/source/shape/ShapeConst.cpp +++ b/source/shape/ShapeConst.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class ConstComputer : public SizeComputer { @@ -34,5 +34,6 @@ class ConstComputer : public SizeComputer { }; REGISTER_SHAPE(ConstComputer, OpType_Const); +REGISTER_SHAPE(ConstComputer, OpType_TrainableParam); } // namespace MNN diff --git a/source/shape/ShapeConvolution.cpp b/source/shape/ShapeConvolution.cpp index df5120a42..2341542a4 100644 --- a/source/shape/ShapeConvolution.cpp +++ b/source/shape/ShapeConvolution.cpp @@ -7,14 +7,14 @@ // #include -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class ConvolutionSizeComputer : public SizeComputer { public: virtual bool onComputeSize(const MNN::Op* op, const std::vector& inputs, const std::vector& outputs) const override { - MNN_ASSERT(1 == inputs.size() || 3 == inputs.size()); + MNN_ASSERT(inputs.size() >= 1); MNN_ASSERT(1 == outputs.size()); auto layer = 
op->main_as_Convolution2D()->common(); int kernel_width = layer->dilateX() * (layer->kernelX() - 1) + 1; @@ -75,8 +75,26 @@ class ConvolutionSizeComputer : public SizeComputer { } }; +class Dilation2DSizeComputer : public ConvolutionSizeComputer { +public: + virtual bool onComputeSize(const MNN::Op* op, const std::vector& inputs, + const std::vector& outputs) const override { + MNN_ASSERT(1 == inputs.size() && 1 == outputs.size()); + return ConvolutionSizeComputer::onComputeSize(op, inputs, outputs); + } + virtual float onComputeFlops(const MNN::Op* op, const std::vector& inputs, + const std::vector& outputs) const override { + auto output = outputs[0]; + auto layer = op->main_as_Convolution2D()->common(); + auto oSize = output->batch() * output->height() * output->width() * output->channel(); + auto flops = (float)oSize * layer->kernelY() * layer->kernelX() / FLOPS_M; + return flops; + } +}; + REGISTER_SHAPE(ConvolutionSizeComputer, OpType_Convolution); REGISTER_SHAPE(ConvolutionSizeComputer, OpType_ConvolutionDepthwise); REGISTER_SHAPE(ConvolutionSizeComputer, OpType_ConvInt8); REGISTER_SHAPE(ConvolutionSizeComputer, OpType_DepthwiseConvInt8); +REGISTER_SHAPE(Dilation2DSizeComputer, OpType_Dilation2D); } // namespace MNN diff --git a/source/shape/ShapeConvolution3D.cpp b/source/shape/ShapeConvolution3D.cpp index c594db851..76c5227c7 100644 --- a/source/shape/ShapeConvolution3D.cpp +++ b/source/shape/ShapeConvolution3D.cpp @@ -7,9 +7,9 @@ // #include -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class Convolution3DSizeComputer : public SizeComputer { public: diff --git a/source/shape/ShapeCosineSimilarity.cpp b/source/shape/ShapeCosineSimilarity.cpp index 954cc5d98..5320a330e 100644 --- a/source/shape/ShapeCosineSimilarity.cpp +++ b/source/shape/ShapeCosineSimilarity.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group 
Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class CosineSimilaritySize : public SizeComputer { diff --git a/source/shape/ShapeCrop.cpp b/source/shape/ShapeCrop.cpp index 7e0726156..b72eb6dfc 100644 --- a/source/shape/ShapeCrop.cpp +++ b/source/shape/ShapeCrop.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class CropSizeComputer : public SizeComputer { diff --git a/source/shape/ShapeCropAndResize.cpp b/source/shape/ShapeCropAndResize.cpp index 137b6382e..bb4ab0a97 100644 --- a/source/shape/ShapeCropAndResize.cpp +++ b/source/shape/ShapeCropAndResize.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeDeconvolution.cpp b/source/shape/ShapeDeconvolution.cpp index 6b6a48711..8d072b1cf 100644 --- a/source/shape/ShapeDeconvolution.cpp +++ b/source/shape/ShapeDeconvolution.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" +#include "core/SizeComputer.hpp" namespace MNN { class DeconvolutionSizeComputer : public SizeComputer { diff --git a/source/shape/ShapeDepthToSpace.cpp b/source/shape/ShapeDepthToSpace.cpp index 62d21babd..023b85bae 100644 --- a/source/shape/ShapeDepthToSpace.cpp +++ b/source/shape/ShapeDepthToSpace.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/shape/ShapeDequantize.cpp 
b/source/shape/ShapeDequantize.cpp index 0f82c146a..bb7b671c8 100644 --- a/source/shape/ShapeDequantize.cpp +++ b/source/shape/ShapeDequantize.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class ShapeDequantize : public SizeComputer { diff --git a/source/shape/ShapeDetectionOutput.cpp b/source/shape/ShapeDetectionOutput.cpp index 8bbbde872..0b2af5925 100644 --- a/source/shape/ShapeDetectionOutput.cpp +++ b/source/shape/ShapeDetectionOutput.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { // Size Computer diff --git a/source/shape/ShapeDetectionPostProcess.cpp b/source/shape/ShapeDetectionPostProcess.cpp index cf6563e92..a31adb552 100644 --- a/source/shape/ShapeDetectionPostProcess.cpp +++ b/source/shape/ShapeDetectionPostProcess.cpp @@ -5,7 +5,7 @@ // Created by MNN on 2019/10/29. 
// Copyright © 2018, Alibaba Group Holding Limited -#include "SizeComputer.hpp" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeEltwise.cpp b/source/shape/ShapeEltwise.cpp index 559481932..d4c0e61ff 100644 --- a/source/shape/ShapeEltwise.cpp +++ b/source/shape/ShapeEltwise.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { // Size Computer diff --git a/source/shape/ShapeExpandDims.cpp b/source/shape/ShapeExpandDims.cpp index 23683d548..3466f5433 100644 --- a/source/shape/ShapeExpandDims.cpp +++ b/source/shape/ShapeExpandDims.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class ExpandDimsComputer : public SizeComputer { diff --git a/source/shape/ShapeFill.cpp b/source/shape/ShapeFill.cpp index d8196306c..011a40462 100644 --- a/source/shape/ShapeFill.cpp +++ b/source/shape/ShapeFill.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/shape/ShapeGather.cpp b/source/shape/ShapeGather.cpp index 283a6a1e8..a1c4c7021 100644 --- a/source/shape/ShapeGather.cpp +++ b/source/shape/ShapeGather.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class GatherComputer : public SizeComputer { diff --git a/source/shape/ShapeGatherND.cpp b/source/shape/ShapeGatherND.cpp 
index 5d2275989..0e395131b 100644 --- a/source/shape/ShapeGatherND.cpp +++ b/source/shape/ShapeGatherND.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class GatherNDComputer : public SizeComputer { diff --git a/source/shape/ShapeGatherV2.cpp b/source/shape/ShapeGatherV2.cpp index c3e40a546..006a2a6b7 100644 --- a/source/shape/ShapeGatherV2.cpp +++ b/source/shape/ShapeGatherV2.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeInnerProduct.cpp b/source/shape/ShapeInnerProduct.cpp index 5f70cea9d..e916d731c 100644 --- a/source/shape/ShapeInnerProduct.cpp +++ b/source/shape/ShapeInnerProduct.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class InnerProductComputer : public SizeComputer { diff --git a/source/shape/ShapeInterp.cpp b/source/shape/ShapeInterp.cpp index 8cae24195..706470334 100644 --- a/source/shape/ShapeInterp.cpp +++ b/source/shape/ShapeInterp.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeLSTM.cpp b/source/shape/ShapeLSTM.cpp index 63ad6aa75..a4936cf6b 100644 --- a/source/shape/ShapeLSTM.cpp +++ b/source/shape/ShapeLSTM.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git 
a/source/shape/ShapeLinSpace.cpp b/source/shape/ShapeLinSpace.cpp new file mode 100644 index 000000000..6c27a7ad8 --- /dev/null +++ b/source/shape/ShapeLinSpace.cpp @@ -0,0 +1,45 @@ +// +// ShapeLinSpace.cpp +// MNN +// +// Created by MNN on 2019/12/11. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" + +namespace MNN { + +class LinSpaceSizeComputer : public SizeComputer { + virtual bool onComputeSize(const MNN::Op* op, const std::vector& inputs, + const std::vector& outputs) const override { + MNN_ASSERT(inputs.size() == 3); + MNN_ASSERT(outputs.size() == 1); + auto& ib1 = inputs[0]->buffer(); + auto& ib2 = inputs[1]->buffer(); + auto& ib3 = inputs[2]->buffer(); + auto& ob = outputs[0]->buffer(); + MNN_ASSERT(ib1.dimensions == 0); + MNN_ASSERT(ib2.dimensions == 0); + MNN_ASSERT(ib3.dimensions == 0); + + MNN_ASSERT(inputs[0]->getType() == halide_type_of()); + MNN_ASSERT(inputs[1]->getType() == halide_type_of()); + MNN_ASSERT(inputs[2]->getType() == halide_type_of()); + + int num = inputs[2]->host()[0]; + MNN_ASSERT(num > 0); + + ob.dimensions = 1; + ob.dim[0].extent = num; + outputs[0]->setType(DataType_DT_FLOAT); + TensorUtils::getDescribe(outputs[0])->dimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat; + + return true; + } +}; + +REGISTER_SHAPE_INPUTS(LinSpaceSizeComputer, OpType_LinSpace, {2}); +} // namespace MNN diff --git a/source/shape/ShapeMatMul.cpp b/source/shape/ShapeMatMul.cpp index 48e871872..545343122 100644 --- a/source/shape/ShapeMatMul.cpp +++ b/source/shape/ShapeMatMul.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/shape/ShapeMoments.cpp b/source/shape/ShapeMoments.cpp index d176453c7..420fb3750 
100644 --- a/source/shape/ShapeMoments.cpp +++ b/source/shape/ShapeMoments.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" +#include "core/SizeComputer.hpp" namespace MNN { class MomentsComputer : public SizeComputer { diff --git a/source/shape/ShapeNonMaxSuppressionV2.cpp b/source/shape/ShapeNonMaxSuppressionV2.cpp index 5e3e2d6e0..34b4d077e 100644 --- a/source/shape/ShapeNonMaxSuppressionV2.cpp +++ b/source/shape/ShapeNonMaxSuppressionV2.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeOneHot.cpp b/source/shape/ShapeOneHot.cpp new file mode 100644 index 000000000..d6fa83820 --- /dev/null +++ b/source/shape/ShapeOneHot.cpp @@ -0,0 +1,52 @@ +// +// ShapeOneHot.cpp +// MNN +// +// Created by MNN on 2019/11/29. +// Copyright © 2018, Alibaba Group Holding Limited +// + + +#include "core/Macro.h" +#include "core/SizeComputer.hpp" + +namespace MNN { +class ShapeOneHot : public SizeComputer { +public: + virtual bool onComputeSize(const MNN::Op* op, const std::vector& inputs, + const std::vector& outputs) const override { + MNN_ASSERT(4 == inputs.size()); + auto indices = inputs[0]; + auto depthTensor = inputs[1]; + + const int depth = depthTensor->host()[0]; + if (depth < 0) { + return false; + } + const int indicesDimension = indices->dimensions(); + const int outputDimension = indicesDimension + 1; + + auto param = op->main_as_OneHotParam(); + MNN_CHECK(param->dType() == DataType_DT_FLOAT, "TODO, support other data type!"); + int axis = param->axis(); + if (axis == -1) { + axis = outputDimension + axis; + } + auto output = outputs[0]; + output->buffer().dimensions = outputDimension; + for (int i = 0; i < outputDimension; ++i) { + if (i < axis) { + output->setLength(i, indices->length(i)); + } else if (i == axis) { + output->setLength(i, 
depth); + } else { + output->setLength(i, indices->length(i - 1)); + } + } + TensorUtils::getDescribe(output)->dimensionFormat = MNN_DATA_FORMAT_NHWC; + return true; + } +}; + +REGISTER_SHAPE_INPUTS(ShapeOneHot, OpType_OneHot, (std::vector{1})); +} // namespace MNN diff --git a/source/shape/ShapePack.cpp b/source/shape/ShapePack.cpp index ac4a51406..b7e66debd 100644 --- a/source/shape/ShapePack.cpp +++ b/source/shape/ShapePack.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapePadding.cpp b/source/shape/ShapePadding.cpp index c1488fd5c..c58adebec 100644 --- a/source/shape/ShapePadding.cpp +++ b/source/shape/ShapePadding.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class PaddingComputer : public SizeComputer { virtual bool onComputeSize(const MNN::Op* op, const std::vector& inputs, const std::vector& outputs) const override { - if (2 != inputs.size() || 1 != outputs.size()) { + if ((2 != inputs.size() && 3 != inputs.size()) || 1 != outputs.size()) { MNN_ERROR("Padding inputs or outputs number error: %d -> %d\n", (int)inputs.size(), (int)outputs.size()); return false; } diff --git a/source/shape/ShapePermute.cpp b/source/shape/ShapePermute.cpp index 70e66c184..c0e806e5d 100644 --- a/source/shape/ShapePermute.cpp +++ b/source/shape/ShapePermute.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class PermuteComputer : public SizeComputer { diff --git a/source/shape/ShapePool.cpp b/source/shape/ShapePool.cpp index d0eeee1c7..5f770f7ff 100644 --- a/source/shape/ShapePool.cpp +++ 
b/source/shape/ShapePool.cpp @@ -8,8 +8,8 @@ #include -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class PoolSizeComputer : public SizeComputer { diff --git a/source/shape/ShapePool3D.cpp b/source/shape/ShapePool3D.cpp index d31486793..d230355e7 100644 --- a/source/shape/ShapePool3D.cpp +++ b/source/shape/ShapePool3D.cpp @@ -8,8 +8,8 @@ #include -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class Pool3DSizeComputer : public SizeComputer { @@ -36,7 +36,7 @@ class Pool3DSizeComputer : public SizeComputer { if (layer->padType() == PoolPadType_CAFFE) { int pad = (*layer->pads())[i]; - outputLength = UP_DIV(inputLength + 2 * pad - kernel, stride) + 1; + outputLength = (inputLength + 2 * pad - kernel) / stride + 1; } else if (layer->padType() == PoolPadType_SAME) { outputLength = UP_DIV(inputLength, stride); } else if (layer->padType() == PoolPadType_VALID) { diff --git a/source/shape/ShapePriorbox.cpp b/source/shape/ShapePriorbox.cpp index dd3d9d986..199206e6e 100644 --- a/source/shape/ShapePriorbox.cpp +++ b/source/shape/ShapePriorbox.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class PriorBoxComputer : public SizeComputer { @@ -55,8 +55,8 @@ class PriorBoxComputer : public SizeComputer { stepH = (float)imageH / h; } - int minSizeCount = minSizes ? minSizes->size() : 0; - int maxSizeCount = maxSizes ? maxSizes->size() : 0; + int minSizeCount = minSizes ? (int)minSizes->size() : 0; + int maxSizeCount = maxSizes ? 
(int)maxSizes->size() : 0; std::vector aspectRatiosValue{1.0f}; if (aspectRatios != nullptr) { for (int i = 0; i < aspectRatios->size(); ++i) { diff --git a/source/shape/ShapeProposal.cpp b/source/shape/ShapeProposal.cpp index b45412984..c7717dbae 100644 --- a/source/shape/ShapeProposal.cpp +++ b/source/shape/ShapeProposal.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeQuantizedAvgPool.cpp b/source/shape/ShapeQuantizedAvgPool.cpp index f73537f25..ee0ca4a05 100644 --- a/source/shape/ShapeQuantizedAvgPool.cpp +++ b/source/shape/ShapeQuantizedAvgPool.cpp @@ -7,8 +7,8 @@ // #ifdef MNN_SUPPORT_TFLITE_QUAN #include -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class QuantizedAvgPoolComputer : public SizeComputer { diff --git a/source/shape/ShapeQuantizedMaxPool.cpp b/source/shape/ShapeQuantizedMaxPool.cpp index 495cf05a7..d9ee1c766 100644 --- a/source/shape/ShapeQuantizedMaxPool.cpp +++ b/source/shape/ShapeQuantizedMaxPool.cpp @@ -7,8 +7,8 @@ // #ifdef MNN_SUPPORT_TFLITE_QUAN #include -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class QuantizedMaxPoolComputer : public SizeComputer { diff --git a/source/shape/ShapeQuantizedReshape.cpp b/source/shape/ShapeQuantizedReshape.cpp index dd1af5684..56188276a 100644 --- a/source/shape/ShapeQuantizedReshape.cpp +++ b/source/shape/ShapeQuantizedReshape.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // #ifdef MNN_SUPPORT_TFLITE_QUAN -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class QuantizedReshapeComputer : public SizeComputer { diff --git a/source/shape/ShapeRNNSequenceGRU.cpp 
b/source/shape/ShapeRNNSequenceGRU.cpp index aec9dfc6f..3d38b1511 100644 --- a/source/shape/ShapeRNNSequenceGRU.cpp +++ b/source/shape/ShapeRNNSequenceGRU.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class RNNSequenceGRUComputer : public SizeComputer { diff --git a/source/shape/ShapeROIPooling.cpp b/source/shape/ShapeROIPooling.cpp index 97c0eef84..89f1c71f0 100644 --- a/source/shape/ShapeROIPooling.cpp +++ b/source/shape/ShapeROIPooling.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeRange.cpp b/source/shape/ShapeRange.cpp index bd66adcea..f0fcf0d65 100644 --- a/source/shape/ShapeRange.cpp +++ b/source/shape/ShapeRange.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" #include "math.h" namespace MNN { diff --git a/source/shape/ShapeRank.cpp b/source/shape/ShapeRank.cpp index 346b7ac79..f14a362fc 100644 --- a/source/shape/ShapeRank.cpp +++ b/source/shape/ShapeRank.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class RankComputer : public SizeComputer { diff --git a/source/shape/ShapeReduceJoin.cpp b/source/shape/ShapeReduceJoin.cpp index afa330656..84d38798c 100644 --- a/source/shape/ShapeReduceJoin.cpp +++ b/source/shape/ShapeReduceJoin.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace 
MNN { class ReduceJoinComputer : public SizeComputer { diff --git a/source/shape/ShapeReduction.cpp b/source/shape/ShapeReduction.cpp index d5d97c8ba..4a1f4cbc5 100644 --- a/source/shape/ShapeReduction.cpp +++ b/source/shape/ShapeReduction.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class ReductionComputer : public SizeComputer { diff --git a/source/shape/ShapeRegister.cpp b/source/shape/ShapeRegister.cpp index 5130b9dcb..a7a55843d 100644 --- a/source/shape/ShapeRegister.cpp +++ b/source/shape/ShapeRegister.cpp @@ -2,20 +2,24 @@ namespace MNN { #ifdef MNN_CODEGEN_REGISTER extern void ___ArgMaxComputer__OpType_ArgMax__(); +extern void ___ArgMaxComputer__OpType_ArgMin__(); extern void ___AsStringComputer__OpType_AsString__(); extern void ___BatchMatMulComputer__OpType_BatchMatMul__(); extern void ___BatchToSpaceNDSizeComputer__OpType_BatchToSpaceND__(); extern void ___BinaryOpComputer__OpType_BinaryOp__(); +extern void ___ShapeBroadcastTo__OpType_BroadcastTo__(); extern void ___CastSizeComputer__OpType_Cast__(); extern void ___CastSizeComputer__OpType_FloatToInt8__(); extern void ___CastSizeComputer__OpType_Int8ToFloat__(); extern void ___ConcatSizeComputer__OpType_Concat__(); extern void ___ConcatSizeComputer__OpType_QuantizedConcat__(); extern void ___ConstComputer__OpType_Const__(); +extern void ___ConstComputer__OpType_TrainableParam__(); extern void ___ConvolutionSizeComputer__OpType_Convolution__(); extern void ___ConvolutionSizeComputer__OpType_ConvolutionDepthwise__(); extern void ___ConvolutionSizeComputer__OpType_ConvInt8__(); extern void ___ConvolutionSizeComputer__OpType_DepthwiseConvInt8__(); +extern void ___Dilation2DSizeComputer__OpType_Dilation2D__(); extern void ___Convolution3DSizeComputer__OpType_Convolution3D__(); extern void 
___CosineSimilaritySize__OpType_CosineSimilarity__(); extern void ___CropSizeComputer__OpType_Crop__(); @@ -36,9 +40,11 @@ extern void ___GatherV2Computer__OpType_GatherV2__(); extern void ___InnerProductComputer__OpType_InnerProduct__(); extern void ___InterpComputer__OpType_Interp__(); extern void ___LSTMComputer__OpType_LSTM__(); +extern void ___LinSpaceSizeComputer__OpType_LinSpace__(); extern void ___MatMulSizeComputer__OpType_MatMul__(); extern void ___MomentsComputer__OpType_Moments__(); extern void ___NonMaxSuppressionV2Computer__OpType_NonMaxSuppressionV2__(); +extern void ___ShapeOneHot__OpType_OneHot__(); extern void ___PackComputer__OpType_Pack__(); extern void ___PaddingComputer__OpType_Padding__(); extern void ___PermuteComputer__OpType_Permute__(); @@ -58,6 +64,7 @@ extern void ___ReduceJoinComputer__OpType_ReduceJoin__(); extern void ___ReductionComputer__OpType_Reduction__(); extern void ___ReshapeComputer__OpType_Reshape__(); extern void ___ResizeComputer__OpType_Resize__(); +extern void ___ShapeScatterNd__OpType_ScatterNd__(); extern void ___SelectSizeComputer__OpType_Select__(); extern void ___ShapeSizeComputer__OpType_Shape__(); extern void ___SizeOpComputer__OpType_Size__(); @@ -75,24 +82,29 @@ extern void ___TileComputer__OpType_Tile__(); extern void ___TopKV2SizeComputer__OpType_TopKV2__(); extern void ___TransposeComputer__OpType_Transpose__(); extern void ___UnpackComputer__OpType_Unpack__(); +extern void ___UnravelIndexSize__OpType_UnravelIndex__(); extern void ___WhereSizeComputer__OpType_Where__(); void registerShapeOps() { ___ArgMaxComputer__OpType_ArgMax__(); +___ArgMaxComputer__OpType_ArgMin__(); ___AsStringComputer__OpType_AsString__(); ___BatchMatMulComputer__OpType_BatchMatMul__(); ___BatchToSpaceNDSizeComputer__OpType_BatchToSpaceND__(); ___BinaryOpComputer__OpType_BinaryOp__(); +___ShapeBroadcastTo__OpType_BroadcastTo__(); ___CastSizeComputer__OpType_Cast__(); ___CastSizeComputer__OpType_FloatToInt8__(); 
___CastSizeComputer__OpType_Int8ToFloat__(); ___ConcatSizeComputer__OpType_Concat__(); ___ConcatSizeComputer__OpType_QuantizedConcat__(); ___ConstComputer__OpType_Const__(); +___ConstComputer__OpType_TrainableParam__(); ___ConvolutionSizeComputer__OpType_Convolution__(); ___ConvolutionSizeComputer__OpType_ConvolutionDepthwise__(); ___ConvolutionSizeComputer__OpType_ConvInt8__(); ___ConvolutionSizeComputer__OpType_DepthwiseConvInt8__(); +___Dilation2DSizeComputer__OpType_Dilation2D__(); ___Convolution3DSizeComputer__OpType_Convolution3D__(); ___CosineSimilaritySize__OpType_CosineSimilarity__(); ___CropSizeComputer__OpType_Crop__(); @@ -113,9 +125,11 @@ ___GatherV2Computer__OpType_GatherV2__(); ___InnerProductComputer__OpType_InnerProduct__(); ___InterpComputer__OpType_Interp__(); ___LSTMComputer__OpType_LSTM__(); +___LinSpaceSizeComputer__OpType_LinSpace__(); ___MatMulSizeComputer__OpType_MatMul__(); ___MomentsComputer__OpType_Moments__(); ___NonMaxSuppressionV2Computer__OpType_NonMaxSuppressionV2__(); +___ShapeOneHot__OpType_OneHot__(); ___PackComputer__OpType_Pack__(); ___PaddingComputer__OpType_Padding__(); ___PermuteComputer__OpType_Permute__(); @@ -135,6 +149,7 @@ ___ReduceJoinComputer__OpType_ReduceJoin__(); ___ReductionComputer__OpType_Reduction__(); ___ReshapeComputer__OpType_Reshape__(); ___ResizeComputer__OpType_Resize__(); +___ShapeScatterNd__OpType_ScatterNd__(); ___SelectSizeComputer__OpType_Select__(); ___ShapeSizeComputer__OpType_Shape__(); ___SizeOpComputer__OpType_Size__(); @@ -152,6 +167,7 @@ ___TileComputer__OpType_Tile__(); ___TopKV2SizeComputer__OpType_TopKV2__(); ___TransposeComputer__OpType_Transpose__(); ___UnpackComputer__OpType_Unpack__(); +___UnravelIndexSize__OpType_UnravelIndex__(); ___WhereSizeComputer__OpType_Where__(); } #endif diff --git a/source/shape/ShapeReshape.cpp b/source/shape/ShapeReshape.cpp index aab269d83..248d0bb04 100644 --- a/source/shape/ShapeReshape.cpp +++ b/source/shape/ShapeReshape.cpp @@ -6,9 +6,9 @@ // Copyright 
© 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class ReshapeComputer : public SizeComputer { diff --git a/source/shape/ShapeResize.cpp b/source/shape/ShapeResize.cpp index 31938ca0a..4da38e2a0 100644 --- a/source/shape/ShapeResize.cpp +++ b/source/shape/ShapeResize.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { // Size Computer diff --git a/source/shape/ShapeScatterNd.cpp b/source/shape/ShapeScatterNd.cpp new file mode 100644 index 000000000..f1229ddf5 --- /dev/null +++ b/source/shape/ShapeScatterNd.cpp @@ -0,0 +1,50 @@ +// +// ShapeScatterNd.cpp +// MNN +// +// Created by MNN on 2019/11/27. +// Copyright © 2018, Alibaba Group Holding Limited +// + + +#include "core/Macro.h" +#include "core/SizeComputer.hpp" + +namespace MNN { +// Size Computer +class ShapeScatterNd : public SizeComputer { + bool onComputeSize(const MNN::Op *op, const std::vector &inputs, + const std::vector &outputs) const override { + MNN_ASSERT(3 == inputs.size()); + auto indices = inputs[0]; + auto updates = inputs[1]; + auto shape = inputs[2]; + auto output = outputs[0]; + MNN_CHECK(shape->dimensions() == 1, "shape rank should be one"); + const int indicesDimension = indices->dimensions(); + MNN_CHECK(indices->length(indicesDimension - 1) == 1, "indices.shape[-1] = shape.rank"); + + const int outerDims = indicesDimension - 1; + for (int i = 0; i < outerDims; ++i) { + MNN_CHECK(indices->length(i) == updates->length(i), "indices shape does not match updates'"); + } + + const int dimension = shape->length(0); + MNN_CHECK(updates->dimensions() == dimension, "updates dimension should be equal to given shape"); + + output->buffer().dimensions = dimension; + + auto shapeData = 
shape->host(); + for (int i = 0; i < dimension; ++i) { + output->setLength(i, shapeData[i]); + } + output->buffer().type = updates->buffer().type; + + TensorUtils::getDescribe(output)->dimensionFormat = TensorUtils::getDescribe(updates)->dimensionFormat; + + return true; + } +}; + +REGISTER_SHAPE_INPUTS(ShapeScatterNd, OpType_ScatterNd, (std::vector{2})); +} // namespace MNN diff --git a/source/shape/ShapeSelect.cpp b/source/shape/ShapeSelect.cpp index 1cae0e48e..916b06bf8 100644 --- a/source/shape/ShapeSelect.cpp +++ b/source/shape/ShapeSelect.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class SelectSizeComputer : public SizeComputer { public: diff --git a/source/shape/ShapeShape.cpp b/source/shape/ShapeShape.cpp index 3de04ef2b..a02cf9f78 100644 --- a/source/shape/ShapeShape.cpp +++ b/source/shape/ShapeShape.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/shape/ShapeSize.cpp b/source/shape/ShapeSize.cpp index e1d7ade11..2a925bc50 100644 --- a/source/shape/ShapeSize.cpp +++ b/source/shape/ShapeSize.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/shape/ShapeSlice.cpp b/source/shape/ShapeSlice.cpp index dfe52fb73..128a5071e 100644 --- a/source/shape/ShapeSlice.cpp +++ b/source/shape/ShapeSlice.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include 
"SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" #include namespace MNN { diff --git a/source/shape/ShapeSliceTf.cpp b/source/shape/ShapeSliceTf.cpp index f2e91e3fa..ae92f11bd 100644 --- a/source/shape/ShapeSliceTf.cpp +++ b/source/shape/ShapeSliceTf.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeSpaceToBatchND.cpp b/source/shape/ShapeSpaceToBatchND.cpp index 4264c5d18..fe12aba8a 100644 --- a/source/shape/ShapeSpaceToBatchND.cpp +++ b/source/shape/ShapeSpaceToBatchND.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class SpaceToBatchNDSizeComputer : public SizeComputer { diff --git a/source/shape/ShapeSpaceToDepth.cpp b/source/shape/ShapeSpaceToDepth.cpp index 35b9c9809..d765e9b94 100644 --- a/source/shape/ShapeSpaceToDepth.cpp +++ b/source/shape/ShapeSpaceToDepth.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/shape/ShapeSqueeze.cpp b/source/shape/ShapeSqueeze.cpp index 7a8e22de6..6cfeff775 100644 --- a/source/shape/ShapeSqueeze.cpp +++ b/source/shape/ShapeSqueeze.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class UnSqueezeSizeComputer : public SizeComputer { diff --git a/source/shape/ShapeStridedSlice.cpp b/source/shape/ShapeStridedSlice.cpp index 
7756ef43e..79819d898 100644 --- a/source/shape/ShapeStridedSlice.cpp +++ b/source/shape/ShapeStridedSlice.cpp @@ -8,11 +8,11 @@ #include #include -#include "CPUStridedSlice.hpp" -#include "CommonOptFunction.h" -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "backend/cpu/CPUStridedSlice.hpp" +#include "backend/cpu/compute/CommonOptFunction.h" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class StridedSliceComputer : public SizeComputer { @@ -21,8 +21,7 @@ class StridedSliceComputer : public SizeComputer { const std::vector &outputs) const override { MNN_ASSERT(4 == inputs.size()); MNN_ASSERT(1 == outputs.size()); - const std::string name = op->name()->c_str(); - + Tensor *input = inputs[0]; const int inputDimension = input->buffer().dimensions; if (inputDimension <= 0) { diff --git a/source/shape/ShapeTFQuantizedConv2D.cpp b/source/shape/ShapeTFQuantizedConv2D.cpp index 0f74c5fd3..1978d4b00 100644 --- a/source/shape/ShapeTFQuantizedConv2D.cpp +++ b/source/shape/ShapeTFQuantizedConv2D.cpp @@ -7,8 +7,8 @@ // #ifdef MNN_SUPPORT_TFLITE_QUAN #include -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { class TFQuantizedConv2DComputer : public SizeComputer { diff --git a/source/shape/ShapeTensorConvert.cpp b/source/shape/ShapeTensorConvert.cpp index 56d9281bd..78eaca75e 100644 --- a/source/shape/ShapeTensorConvert.cpp +++ b/source/shape/ShapeTensorConvert.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class TensorConvertSizeComputer : public SizeComputer { @@ -21,7 +21,7 @@ class TensorConvertSizeComputer : public SizeComputer { return false; } auto info = op->main_as_TensorConvertInfo(); - auto 
sourceFmt = info->source(); + auto sourceFmt = TensorUtils::getDescribe(inputs[0])->dimensionFormat; auto destFmt = info->dest(); TensorUtils::getDescribe(outputs[0])->dimensionFormat = destFmt; ob.type = ib.type; diff --git a/source/shape/ShapeTile.cpp b/source/shape/ShapeTile.cpp index 33b3485b4..a4f8a613a 100644 --- a/source/shape/ShapeTile.cpp +++ b/source/shape/ShapeTile.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { diff --git a/source/shape/ShapeTopKV2.cpp b/source/shape/ShapeTopKV2.cpp index b8d7d83ca..c45a40be0 100644 --- a/source/shape/ShapeTopKV2.cpp +++ b/source/shape/ShapeTopKV2.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeTranspose.cpp b/source/shape/ShapeTranspose.cpp index 8ec73ca0b..fa801f116 100644 --- a/source/shape/ShapeTranspose.cpp +++ b/source/shape/ShapeTranspose.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" -#include "TensorUtils.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" +#include "core/TensorUtils.hpp" namespace MNN { class TransposeComputer : public SizeComputer { diff --git a/source/shape/ShapeUnpack.cpp b/source/shape/ShapeUnpack.cpp index 53d47899c..64a2c506b 100644 --- a/source/shape/ShapeUnpack.cpp +++ b/source/shape/ShapeUnpack.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/source/shape/ShapeUnravelIndex.cpp b/source/shape/ShapeUnravelIndex.cpp new file mode 100644 index 
000000000..2d3b5b71a --- /dev/null +++ b/source/shape/ShapeUnravelIndex.cpp @@ -0,0 +1,41 @@ +// +// ShapeUnravelIndex.cpp +// MNN +// +// Created by MNN on 2019/11/26. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "core/Macro.h" +#include "core/SizeComputer.hpp" + +namespace MNN { + +class UnravelIndexSize : public SizeComputer { + virtual bool onComputeSize(const MNN::Op *op, const std::vector &inputs, + const std::vector &outputs) const override { + MNN_ASSERT(2 == inputs.size()); + + auto indices = inputs[0]; + auto dims = inputs[1]; + auto output = outputs[0]; + MNN_CHECK(dims->dimensions() == 1, "dims should be one dimension tensor!"); + + const int inputDimension = indices->dimensions(); + output->setType(DataType_DT_INT32); + if (inputDimension == 0) { + output->buffer().dimensions = 1; + output->setLength(0, dims->length(0)); + } else { + output->buffer().dimensions = 2; + output->setLength(0, dims->length(0)); + output->setLength(1, indices->elementSize()); + } + + return true; + } +}; + +REGISTER_SHAPE(UnravelIndexSize, OpType_UnravelIndex); + +} // namespace MNN diff --git a/source/shape/ShapeWhere.cpp b/source/shape/ShapeWhere.cpp index d96a2dc93..4fefb84e8 100644 --- a/source/shape/ShapeWhere.cpp +++ b/source/shape/ShapeWhere.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Macro.h" -#include "SizeComputer.hpp" +#include "core/Macro.h" +#include "core/SizeComputer.hpp" namespace MNN { diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 14872f3c0..673e84a7d 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,14 +1,17 @@ -# put output to build dir -SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../) - -file(GLOB_RECURSE Files "*.cpp") -include_directories(".") -include_directories("express/include") -if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - file(GLOB_RECURSE MMFiles "*.mm") - add_executable(run_test.out ${Files} ${MMFiles}) - 
target_link_libraries(run_test.out "-framework Foundation") -else() - add_executable(run_test.out ${Files}) -endif() -target_link_libraries(run_test.out ${MNN_DEPEND} MNN_Express) +IF(MNN_BUILD_TEST) + file(GLOB_RECURSE MNN_TEST_CPP_SOURCES ${CMAKE_CURRENT_LIST_DIR}/*.cpp) + file(GLOB_RECURSE MNN_TEST_OBJC_SOURCES ${CMAKE_CURRENT_LIST_DIR}/*.mm) + set(MNN_TEST_SRCS "") + LIST(APPEND MNN_TEST_SRCS ${MNN_TEST_CPP_SOURCES}) + IF(APPLE) + LIST(APPEND MNN_TEST_SRCS ${MNN_TEST_OBJC_SOURCES}) + ENDIF() + add_executable(run_test.out ${MNN_TEST_SRCS}) + target_link_libraries(run_test.out ${MNN_DEPS}) + target_include_directories(run_test.out PRIVATE ${CMAKE_CURRENT_LIST_DIR}/) + add_dependencies(run_test.out MNN_SCHEMA_GEN) + if(APPLE) + find_library(FOUNDATION Foundation REQUIRED) + target_link_libraries(run_test.out ${FOUNDATION}) + endif() +ENDIF() diff --git a/test/MNNTestSuite.cpp b/test/MNNTestSuite.cpp index 4b124cfbf..07fa6f0b3 100644 --- a/test/MNNTestSuite.cpp +++ b/test/MNNTestSuite.cpp @@ -63,6 +63,10 @@ void MNNTestSuite::runAll() { // Don't test for speed because cost continue; } + if (test->name.find("model") != std::string::npos) { + // Don't test for model because need resource + continue; + } printf("\trunning %s.\n", test->name.c_str()); auto res = test->run(); if (!res) { diff --git a/test/TestUtils.cpp b/test/TestUtils.cpp index ca56ca1ec..f2ec12cbc 100644 --- a/test/TestUtils.cpp +++ b/test/TestUtils.cpp @@ -7,9 +7,9 @@ // #include "TestUtils.h" -#include "Macro.h" -#include "TensorUtils.hpp" -#include "MNNDefine.h" +#include "core/Macro.h" +#include "core/TensorUtils.hpp" +#include using namespace MNN; diff --git a/test/TestUtils.h b/test/TestUtils.h index 1633e9765..f3514ede2 100644 --- a/test/TestUtils.h +++ b/test/TestUtils.h @@ -13,9 +13,9 @@ #include #include #include -#include "MNNForwardType.h" -#include "Session.hpp" -#include "Tensor.hpp" +#include +#include "core/Session.hpp" +#include #include /** @@ -57,4 +57,20 @@ bool checkVector(const 
T* result, const T* rightData, int size, T threshold){ return true; } +template +bool checkVectorByRelativeError(const T* result, const T* rightData, int size, float rtol) { + MNN_ASSERT(result != nullptr); + MNN_ASSERT(rightData != nullptr); + MNN_ASSERT(size >= 0); + for(int i = 0; i < size; ++i){ + if (fabs(rightData[i]) < 0.000001 && fabs(result[i]) < 0.000001) { + continue; + } + if (fabs(result[i] - rightData[i]) / rightData[i] > rtol) { + return false; + } + } + return true; +} + #endif /* TestUtils_h */ diff --git a/test/core/AutoStorageTest.cpp b/test/core/AutoStorageTest.cpp index 2b22e5f44..871cfb94d 100644 --- a/test/core/AutoStorageTest.cpp +++ b/test/core/AutoStorageTest.cpp @@ -7,7 +7,7 @@ // #include -#include "AutoStorage.h" +#include "core/AutoStorage.h" #include "MNNTestSuite.h" using namespace MNN; diff --git a/test/core/BackendTest.cpp b/test/core/BackendTest.cpp index f66abdc39..6c9053069 100644 --- a/test/core/BackendTest.cpp +++ b/test/core/BackendTest.cpp @@ -7,10 +7,10 @@ // #include -#include "Backend.hpp" -#include "MNNDefine.h" +#include "core/Backend.hpp" +#include #include "MNNTestSuite.h" -#include "Tensor.hpp" +#include using namespace MNN; diff --git a/test/core/BufferAllocatorTest.cpp b/test/core/BufferAllocatorTest.cpp index a53b40d85..07d6a024a 100644 --- a/test/core/BufferAllocatorTest.cpp +++ b/test/core/BufferAllocatorTest.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "BufferAllocator.hpp" +#include "core/BufferAllocator.hpp" #include "MNNTestSuite.h" using namespace MNN; diff --git a/test/core/DirectedAcyclicGraphTest.cpp b/test/core/DirectedAcyclicGraphTest.cpp index e2137be93..cca1748b8 100644 --- a/test/core/DirectedAcyclicGraphTest.cpp +++ b/test/core/DirectedAcyclicGraphTest.cpp @@ -8,8 +8,8 @@ #include #include -#include "DirectedAcyclicGraph.hpp" -#include "MNNDefine.h" +#include "core/DirectedAcyclicGraph.hpp" +#include #include "MNNTestSuite.h" using namespace MNN; diff --git 
a/test/core/MemoryUtilsTest.cpp b/test/core/MemoryUtilsTest.cpp index 2cc321db9..93263a318 100644 --- a/test/core/MemoryUtilsTest.cpp +++ b/test/core/MemoryUtilsTest.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "MNNMemoryUtils.h" +#include "core/MNNMemoryUtils.h" #include "MNNTestSuite.h" #ifndef MNN_DEBUG_MEMORY diff --git a/test/core/ScheduleTest.cpp b/test/core/ScheduleTest.cpp index 4940f4e25..30986aaeb 100644 --- a/test/core/ScheduleTest.cpp +++ b/test/core/ScheduleTest.cpp @@ -11,13 +11,13 @@ #include #include #include -#include "MNNDefine.h" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Pipeline.hpp" -#include "Schedule.hpp" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Pipeline.hpp" +#include "core/Schedule.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; @@ -45,7 +45,7 @@ class FakeSession : public Session { FakeSession(const Schedule::ScheduleInfo& info) : Session(info) { } - const std::vector>& getFakePipelines() const { + const std::vector>& getFakePipelines() const { return this->getPipelines(); } }; @@ -125,7 +125,7 @@ static Interpreter* createInterpreter(int b, int c, int h, int w, bool tensorflo * multi path expect A->B->C->D->X or X->A->B->C->D * */ static void TestScheduleSpec() { - unique_ptr instance(createInterpreter(0, 0, 0, 0, false)); + shared_ptr instance(createInterpreter(0, 0, 0, 0, false)); ScheduleConfig conf; conf.path.inputs.push_back("A"); conf.path.outputs.push_back("B"); @@ -137,7 +137,7 @@ static void TestScheduleSpec() { Session* session = instance->createSession(conf); FakeSession* fakeSession = reinterpret_cast(session); - const std::vector>& pipelines = fakeSession->getFakePipelines(); + const std::vector>& pipelines = fakeSession->getFakePipelines(); FakePipeline* fakePipeline = reinterpret_cast(pipelines[0].get()); const std::vector>& units = fakePipeline->getFakeUnit(); 
stringstream ss; @@ -176,7 +176,7 @@ static void TestScheduleSpec() { Session* sessionMulti = instance->createMultiPathSession(configs); fakeSession = reinterpret_cast(sessionMulti); - const std::vector>& multiPipelines = fakeSession->getFakePipelines(); + const std::vector>& multiPipelines = fakeSession->getFakePipelines(); fakePipeline = reinterpret_cast(multiPipelines[0].get()); const std::vector>& multiUnits = fakePipeline->getFakeUnit(); FakePipeline* fakePipeline2 = reinterpret_cast(multiPipelines[1].get()); @@ -217,7 +217,7 @@ static void TestScheduleSpec() { * multi path expect A->B->C->D->E->F * */ static void TestSchedule() { - unique_ptr instance(createInterpreter(0, 0, 0, 0, false)); + shared_ptr instance(createInterpreter(0, 0, 0, 0, false)); ScheduleConfig conf; conf.path.inputs.push_back("A"); conf.path.outputs.push_back("B"); @@ -229,7 +229,7 @@ static void TestSchedule() { Session* session = instance->createSession(conf); FakeSession* fakeSession = reinterpret_cast(session); - const std::vector>& pipelines = fakeSession->getFakePipelines(); + const std::vector>& pipelines = fakeSession->getFakePipelines(); FakePipeline* fakePipeline = reinterpret_cast(pipelines[0].get()); const std::vector>& units = fakePipeline->getFakeUnit(); stringstream ss; @@ -265,7 +265,7 @@ static void TestSchedule() { Session* sessionMulti = instance->createMultiPathSession(configs); fakeSession = reinterpret_cast(sessionMulti); - const std::vector>& multiPipelines = fakeSession->getFakePipelines(); + const std::vector>& multiPipelines = fakeSession->getFakePipelines(); fakePipeline = reinterpret_cast(multiPipelines[0].get()); const std::vector>& multiUnits = fakePipeline->getFakeUnit(); FakePipeline* fakePipeline2 = reinterpret_cast(multiPipelines[1].get()); @@ -304,14 +304,14 @@ static void TestSchedule() { * multi path expect A->B->C->D->E->F */ static void TestScheduleOneInputHaveBeginNoEnd() { - unique_ptr instance(createInterpreter(0, 0, 0, 0, false)); + shared_ptr 
instance(createInterpreter(0, 0, 0, 0, false)); ScheduleConfig conf; conf.path.inputs.push_back("A"); Session* session = instance->createSession(conf); FakeSession* fakeSession = reinterpret_cast(session); - const std::vector>& pipelines = fakeSession->getFakePipelines(); + const std::vector>& pipelines = fakeSession->getFakePipelines(); FakePipeline* fakePipeline = reinterpret_cast(pipelines[0].get()); const std::vector>& units = fakePipeline->getFakeUnit(); stringstream ss; @@ -338,7 +338,7 @@ static void TestScheduleOneInputHaveBeginNoEnd() { Session* sessionMulti = instance->createMultiPathSession(configs); fakeSession = reinterpret_cast(sessionMulti); - const std::vector>& multiPipelines = fakeSession->getFakePipelines(); + const std::vector>& multiPipelines = fakeSession->getFakePipelines(); fakePipeline = reinterpret_cast(multiPipelines[0].get()); const std::vector>& multiUnits = fakePipeline->getFakeUnit(); ss.str(""); @@ -362,7 +362,7 @@ static void TestScheduleOneInputHaveBeginNoEnd() { * multi path expect A->B->C->D->E->F */ static void TestScheduleMultiInputsHaveBeginNoEnd() { - unique_ptr instance(createInterpreter(0, 0, 0, 0, false)); + shared_ptr instance(createInterpreter(0, 0, 0, 0, false)); ScheduleConfig conf; conf.path.inputs.push_back("A"); conf.path.inputs.push_back("D"); @@ -370,7 +370,7 @@ static void TestScheduleMultiInputsHaveBeginNoEnd() { Session* session = instance->createSession(conf); FakeSession* fakeSession = reinterpret_cast(session); - const std::vector>& pipelines = fakeSession->getFakePipelines(); + const std::vector>& pipelines = fakeSession->getFakePipelines(); FakePipeline* fakePipeline = reinterpret_cast(pipelines[0].get()); const std::vector>& units = fakePipeline->getFakeUnit(); stringstream ss; @@ -398,7 +398,7 @@ static void TestScheduleMultiInputsHaveBeginNoEnd() { Session* sessionMulti = instance->createMultiPathSession(configs); fakeSession = reinterpret_cast(sessionMulti); - const std::vector>& multiPipelines = 
fakeSession->getFakePipelines(); + const std::vector>& multiPipelines = fakeSession->getFakePipelines(); fakePipeline = reinterpret_cast(multiPipelines[0].get()); const std::vector>& multiUnits = fakePipeline->getFakeUnit(); ss.str(""); @@ -493,13 +493,13 @@ static MNN::Tensor* createTensor(const MNN::Tensor* shape, const char* path) { } static void TestSqueezeNet() { - const unique_ptr net(Interpreter::createFromFile(model_file.c_str())); + const shared_ptr net(Interpreter::createFromFile(model_file.c_str())); ScheduleConfig config; config.type = MNN_FORWARD_CPU; Session* session = net->createSession(config); Tensor* inputTensor = net->getSessionInput(session, NULL); - const unique_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); + const shared_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); if (!givenTensor) { MNN_ERROR("[FAIL] TestSqueezeNetFailed to open input file %s.\n", input_file.c_str()); return; @@ -507,7 +507,7 @@ static void TestSqueezeNet() { net->getBackend(session, inputTensor)->onCopyBuffer(givenTensor.get(), inputTensor); Tensor* outputTensor = net->getSessionOutput(session, NULL); - unique_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); + shared_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); if (!expectTensor.get()) { MNN_ERROR("[FAIL] TestSqueezeNetFailed to open output file %s.\n", input_file.c_str()); return; @@ -523,14 +523,14 @@ static void TestSqueezeNet() { } static void TestSqueezeNetOnePath() { - const unique_ptr net(Interpreter::createFromFile(model_file.c_str())); + const shared_ptr net(Interpreter::createFromFile(model_file.c_str())); ScheduleConfig config; config.type = MNN_FORWARD_CPU; config.path.inputs.push_back("conv1"); Session* session = net->createSession(config); Tensor* inputTensor = net->getSessionInput(session, NULL); - const unique_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); + const shared_ptr givenTensor(createTensor(inputTensor, 
input_file.c_str())); if (!givenTensor) { MNN_ERROR("[FAIL] TestSqueezeNetOnePath to open input file %s.\n", input_file.c_str()); return; @@ -538,7 +538,7 @@ static void TestSqueezeNetOnePath() { net->getBackend(session, inputTensor)->onCopyBuffer(givenTensor.get(), inputTensor); Tensor* outputTensor = net->getSessionOutput(session, NULL); - unique_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); + shared_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); if (!expectTensor.get()) { MNN_ERROR("[FAIL] TestSqueezeNetOnePath to open output file %s.\n", input_file.c_str()); return; @@ -554,7 +554,7 @@ static void TestSqueezeNetOnePath() { } static void TestSqueezeNetOnePathFailed() { - const unique_ptr net(Interpreter::createFromFile(model_file.c_str())); + const shared_ptr net(Interpreter::createFromFile(model_file.c_str())); ScheduleConfig config; config.type = MNN_FORWARD_CPU; config.path.inputs.push_back("conv1"); @@ -562,7 +562,7 @@ static void TestSqueezeNetOnePathFailed() { Session* session = net->createSession(config); Tensor* inputTensor = net->getSessionInput(session, NULL); - const unique_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); + const shared_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); if (!givenTensor) { MNN_ERROR("[FAIL] TestSqueezeNetOnePathFailed to open input file %s.\n", input_file.c_str()); return; @@ -570,7 +570,7 @@ static void TestSqueezeNetOnePathFailed() { net->getBackend(session, inputTensor)->onCopyBuffer(givenTensor.get(), inputTensor); Tensor* outputTensor = net->getSessionOutput(session, NULL); - unique_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); + shared_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); if (!expectTensor.get()) { MNN_ERROR("[FAIL] TestSqueezeNetOnePathFailed to open output file %s.\n", input_file.c_str()); return; @@ -586,7 +586,7 @@ static void TestSqueezeNetOnePathFailed() { } static void 
TestScheduleSqueezeNetMultiPathFailed() { - const unique_ptr net(Interpreter::createFromFile(model_file.c_str())); + const shared_ptr net(Interpreter::createFromFile(model_file.c_str())); ScheduleConfig conf1; conf1.type = MNN_FORWARD_CPU; @@ -603,7 +603,7 @@ static void TestScheduleSqueezeNetMultiPathFailed() { Session* session = net->createMultiPathSession(configs); Tensor* inputTensor = net->getSessionInput(session, NULL); - const unique_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); + const shared_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); if (!givenTensor) { MNN_ERROR("[FAIL] TestScheduleSqueezeNetMultiPathFailed to open input file %s.\n", input_file.c_str()); return; @@ -611,7 +611,7 @@ static void TestScheduleSqueezeNetMultiPathFailed() { net->getBackend(session, inputTensor)->onCopyBuffer(givenTensor.get(), inputTensor); Tensor* outputTensor = net->getSessionOutput(session, NULL); - const unique_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); + const shared_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); if (!expectTensor.get()) { MNN_ERROR("[FAIL] TestScheduleSqueezeNetMultiPathFailed to open output file %s.\n", input_file.c_str()); return; @@ -627,7 +627,7 @@ static void TestScheduleSqueezeNetMultiPathFailed() { } static void TestScheduleSqueezeNetMultiPath() { - const unique_ptr net(Interpreter::createFromFile(model_file.c_str())); + const shared_ptr net(Interpreter::createFromFile(model_file.c_str())); ScheduleConfig conf1; conf1.type = MNN_FORWARD_CPU; @@ -643,7 +643,7 @@ static void TestScheduleSqueezeNetMultiPath() { Session* session = net->createMultiPathSession(configs); Tensor* inputTensor = net->getSessionInput(session, NULL); - const unique_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); + const shared_ptr givenTensor(createTensor(inputTensor, input_file.c_str())); if (!givenTensor) { MNN_ERROR("[FAIL] TestSqueezeNetFailed to open input file %s.\n", 
input_file.c_str()); return; @@ -651,7 +651,7 @@ static void TestScheduleSqueezeNetMultiPath() { net->getBackend(session, inputTensor)->onCopyBuffer(givenTensor.get(), inputTensor); Tensor* outputTensor = net->getSessionOutput(session, NULL); - const unique_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); + const shared_ptr expectTensor(createTensor(outputTensor, output_file.c_str())); if (!expectTensor.get()) { MNN_ERROR("[FAIL] TestSqueezeNetFailed to open output file %s.\n", input_file.c_str()); return; diff --git a/test/core/SizeComputerTest.cpp b/test/core/SizeComputerTest.cpp index 395249350..d74ca4860 100644 --- a/test/core/SizeComputerTest.cpp +++ b/test/core/SizeComputerTest.cpp @@ -7,7 +7,7 @@ // #include "MNNTestSuite.h" -#include "SizeComputer.hpp" +#include "core/SizeComputer.hpp" using namespace MNN; diff --git a/test/core/TensorTest.cpp b/test/core/TensorTest.cpp index e50b5684e..70fb81b0b 100644 --- a/test/core/TensorTest.cpp +++ b/test/core/TensorTest.cpp @@ -7,7 +7,7 @@ // #include "MNNTestSuite.h" -#include "Tensor.hpp" +#include using namespace MNN; diff --git a/test/core/TensorUtilsTest.cpp b/test/core/TensorUtilsTest.cpp index 0a3b7bee9..2cb893ec8 100644 --- a/test/core/TensorUtilsTest.cpp +++ b/test/core/TensorUtilsTest.cpp @@ -7,7 +7,7 @@ // #include "MNNTestSuite.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" using namespace MNN; diff --git a/test/core/ThreadPoolTest.cpp b/test/core/ThreadPoolTest.cpp index 8b06fda9b..a1b224d9f 100644 --- a/test/core/ThreadPoolTest.cpp +++ b/test/core/ThreadPoolTest.cpp @@ -7,9 +7,9 @@ // #ifdef MNN_USE_THREAD_POOL -#include "MNNDefine.h" +#include #include "MNNTestSuite.h" -#include "ThreadPool.hpp" +#include "backend/cpu/ThreadPool.hpp" using namespace MNN; diff --git a/test/cv/ImageProcessTest.cpp b/test/cv/ImageProcessTest.cpp index b411e22f2..83b6797b9 100644 --- a/test/cv/ImageProcessTest.cpp +++ b/test/cv/ImageProcessTest.cpp @@ -8,7 +8,7 @@ #include #include 
-#include "ImageProcess.hpp" +#include #include "MNNTestSuite.h" using namespace MNN; @@ -242,7 +242,7 @@ class ImageProcessRGBToBGRTest : public MNNTestCase { ImageProcess::Config config; config.sourceFormat = RGB; config.destFormat = BGR; - + std::shared_ptr process(ImageProcess::create(config)); process->convert(integers.data(), w, h, 0, tensor.get()); for (int i = 0; i < size; ++i) { @@ -512,9 +512,9 @@ class ImageProcessRGBAToBGRTest : public MNNTestCase { }; MNNTestSuiteRegister(ImageProcessRGBAToBGRTest, "cv/image_process/rgba_to_bgr"); -class ImageProcessNV12ToRGBTest : public MNNTestCase { +class ImageProcessNV21ToRGBTest : public MNNTestCase { public: - virtual ~ImageProcessNV12ToRGBTest() = default; + virtual ~ImageProcessNV21ToRGBTest() = default; virtual bool run() { ImageProcess::Config config; config.sourceFormat = YUV_NV21; @@ -586,8 +586,85 @@ class ImageProcessNV12ToRGBTest : public MNNTestCase { return true; } }; +MNNTestSuiteRegister(ImageProcessNV21ToRGBTest, "cv/image_process/nv21_to_rgb"); + +class ImageProcessNV12ToRGBTest : public MNNTestCase { +public: + virtual ~ImageProcessNV12ToRGBTest() = default; + virtual bool run() { + ImageProcess::Config config; + config.sourceFormat = YUV_NV12; + config.destFormat = RGB; + config.filterType = NEAREST; + config.wrap = CLAMP_TO_EDGE; + std::shared_ptr process(ImageProcess::create(config)); + + int sw = 1920; + int sh = 1080; + Matrix tr; + process->setMatrix(tr); + std::shared_ptr nv12(new unsigned char[sw * sh + (sw / 2) * (sh / 2) * 2]); + auto pixels = nv12.get(); + for (int y = 0; y < sh; ++y) { + auto pixelY = pixels + sw * y; + auto pixelUV = pixels + sw * sh + (y/2) * sw; + int magicY = ((sh - y) * (sh - y)) % 79; + for (int x = 0; x < sw; ++x) { + auto pixelX = pixelY + x; + int magicX = (x * x) % 113; + int magic = (magicX + magicY) % 255; + pixelX[0] = magic; + } + for (int x = 0; x < sw / 2; ++x) { + auto pixelX = pixelUV + 2 * x; + int magicX = (x * x * x * x) % 283; + int magic0 = 
(magicX + magicY) % 255; + int magic1 = (magicX + magicY * 179) % 255; + pixelX[0] = magic0; + pixelX[1] = magic1; + } + } + + std::shared_ptr tensor( + Tensor::create(std::vector{1, sh, sw, 3}, nullptr, Tensor::TENSORFLOW)); + process->convert(nv12.get(), sw, sh, 0, tensor.get()); + for (int y = 0; y < sh; ++y) { + auto dstY = tensor->host() + 3 * y * sw; + auto srcY_Y = nv12.get() + y * sw; + auto srcY_UV = nv12.get() + (y / 2) * (sw / 2) * 2 + sw * sh; + for (int x = 0; x < sw; ++x) { + auto dstX = dstY + 3 * x; + auto srcX_Y = srcY_Y + x; + auto srcX_UV = srcY_UV + (x / 2) * 2; + int Y = srcX_Y[0]; + int U = (int)srcX_UV[0] - 128; + int V = (int)srcX_UV[1] - 128; + + Y = Y << 6; + int r = (Y + 73 * V) >> 6; + int g = (Y - 25 * U - 37 * V) >> 6; + int b = (Y + 130 * U) >> 6; + + r = r < 0 ? 0 : r; + r = r > 255 ? 255 : r; + g = g < 0 ? 0 : g; + g = g > 255 ? 255 : g; + b = b < 0 ? 0 : b; + b = b > 255 ? 255 : b; + auto diff = [](int a, int b) { return abs(a - b) > 5; }; + if (diff(dstX[0], r) || diff(dstX[1], g) || diff(dstX[2], b)) { + MNN_ERROR("%d, Error for NV12 to RGB: %d: %d, %d, %d -> %d, %d, %d, wrong: %d, %d, %d\n", y, x, (int)srcX_Y[0], + U, V, r, g, b, dstX[0], dstX[1], dstX[2]); + return false; + } + } + } + return true; + } +}; MNNTestSuiteRegister(ImageProcessNV12ToRGBTest, "cv/image_process/nv12_to_rgb"); + class ImageProcessNV12ToRGBATest : public MNNTestCase { public: virtual ~ImageProcessNV12ToRGBATest() { @@ -663,7 +740,7 @@ class ImageProcessNV12ToRGBATest : public MNNTestCase { return true; } }; -MNNTestSuiteRegister(ImageProcessNV12ToRGBATest, "cv/image_process/nv12_to_rgba"); +MNNTestSuiteRegister(ImageProcessNV12ToRGBATest, "cv/image_process/nv21_to_rgba"); // Test for _blitC3ToFloatC3 class ImageProcessBGRToBGRFloatBlitterTest : public MNNTestCase { @@ -684,7 +761,7 @@ class ImageProcessBGRToBGRFloatBlitterTest : public MNNTestCase { ImageProcess::Config config; config.sourceFormat = BGR; config.destFormat = BGR; - + const float means[3] 
= {127.5f, 127.5f, 127.5f}; const float normals[3] = {2.0f / 255.0f, 2.0f / 255.0f, 2.0f / 255.0f}; memcpy(config.mean, means, sizeof(means)); @@ -724,7 +801,7 @@ class ImageProcessGrayToGrayFloatBlitterTest : public MNNTestCase { ImageProcess::Config config; config.sourceFormat = GRAY; config.destFormat = GRAY; - + const float means[1] = {127.5f}; const float normals[1] = {2.0f / 255.0f}; memcpy(config.mean, means, sizeof(means)); diff --git a/test/cv/MatrixTest.cpp b/test/cv/MatrixTest.cpp index bcf12156c..ce1b13873 100644 --- a/test/cv/MatrixTest.cpp +++ b/test/cv/MatrixTest.cpp @@ -7,7 +7,7 @@ // #include "MNNTestSuite.h" -#include "Matrix.h" +#include using namespace MNN::CV; diff --git a/test/expr/AllAnyTest.cpp b/test/expr/AllAnyTest.cpp index 3800ee6d6..e4f742203 100644 --- a/test/expr/AllAnyTest.cpp +++ b/test/expr/AllAnyTest.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" using namespace MNN::Express; @@ -20,8 +20,8 @@ class AllAnyTest : public MNNTestCase { std::vector seq2 = {0, 0, 0, 0}; auto yPtr = y->writeMap(); ::memcpy(yPtr, seq0.data(), seq0.size() * sizeof(int32_t)); - auto zAny = _Any(y, {0}); - auto zAll = _All(y, {0}); + auto zAny = _ReduceAny(y, {0}); + auto zAll = _ReduceAll(y, {0}); if (zAny->readMap()[0] != 1) { FUNC_PRINT(1); return false; diff --git a/test/expr/ExprResizeTest.cpp b/test/expr/ExprResizeTest.cpp index 079922bab..e292fda83 100644 --- a/test/expr/ExprResizeTest.cpp +++ b/test/expr/ExprResizeTest.cpp @@ -5,8 +5,7 @@ // Created by MNN on 2019/09/10. 
// Copyright © 2018, Alibaba Group Holding Limited // - -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" using namespace MNN::Express; @@ -19,11 +18,11 @@ class ExprResizeTest : public MNNTestCase { Variable::replace(x, newX); std::vector x0 = {0, 1, 2, 3, 4, 5, 6}; ::memcpy(x->writeMap(), x0.data(), x->getInfo()->size*sizeof(int32_t)); - auto y = _Sum(_Mul(x, x), {}); + auto y = _ReduceSum(_Multiply(x, x), {}); if (14 != y->readMap()[0]) { return false; } - + x->resize({5}); ::memcpy(x->writeMap(), x0.data(), x->getInfo()->size*sizeof(int32_t)); if (30 != y->readMap()[0]) { diff --git a/test/expr/ExtraTest.cpp b/test/expr/ExtraTest.cpp index d95cd82f2..229432f5b 100644 --- a/test/expr/ExtraTest.cpp +++ b/test/expr/ExtraTest.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" diff --git a/test/expr/GatherTest.cpp b/test/expr/GatherTest.cpp index 8e6443051..d58893e03 100644 --- a/test/expr/GatherTest.cpp +++ b/test/expr/GatherTest.cpp @@ -9,7 +9,7 @@ /* Test Case From https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/gather-nd */ -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" using namespace MNN::Express; diff --git a/test/expr/MatMulTest.cpp b/test/expr/MatMulTest.cpp index 0c05b06b7..ffd9e40e9 100644 --- a/test/expr/MatMulTest.cpp +++ b/test/expr/MatMulTest.cpp @@ -8,7 +8,7 @@ #include #include -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" using namespace MNN::Express; @@ -34,7 +34,8 @@ static bool checkMatMul(const float* C, const float* A, const float* B, int e, i expected += AY[k] * BX[k*e]; } auto diff = fabsf(expected-computed); - if (diff > 0.000001f) { + if (diff > 0.001f) { + MNN_PRINT("%f -> %f\n", expected, computed); return false; } } @@ -55,7 +56,7 @@ class MatMulTest : public MNNTestCase { auto matmulParam = op->main.AsMatMul(); 
matmulParam->transposeA = false; matmulParam->transposeB = false; - + auto x0 = _Input({}, NHWC, halide_type_of()); auto x1 = _Input({}, NHWC, halide_type_of()); auto y = Variable::create(Expr::create(op.get(), {x0, x1})); @@ -63,7 +64,7 @@ class MatMulTest : public MNNTestCase { x1->resize({l, e}); fillFloat(x0->writeMap(), h, l); fillFloat(x1->writeMap(), l, e); - + auto res = checkMatMul(y->readMap(), x0->readMap(), x1->readMap(), e, l, h); if (!res) { FUNC_PRINT(1); @@ -104,7 +105,7 @@ class MatMulTest : public MNNTestCase { auto param = op->main.AsBatchMatMulParam(); param->adjX = false; param->adjY = false; - + int batch = 5; auto x0 = _Input({}, NHWC, halide_type_of()); auto x1 = _Input({}, NHWC, halide_type_of()); @@ -126,7 +127,7 @@ class MatMulTest : public MNNTestCase { } } } - + return true; } }; diff --git a/test/expr/MatrixBandTest.cpp b/test/expr/MatrixBandTest.cpp index 3c44cd5d5..5cb68f311 100644 --- a/test/expr/MatrixBandTest.cpp +++ b/test/expr/MatrixBandTest.cpp @@ -9,7 +9,7 @@ /* Test Case From https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/matrix-band-part */ -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" using namespace MNN::Express; diff --git a/test/expr/MultiThreadLoad.cpp b/test/expr/MultiThreadLoad.cpp index 95da41cff..a22d47811 100644 --- a/test/expr/MultiThreadLoad.cpp +++ b/test/expr/MultiThreadLoad.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" -#include "Interpreter.hpp" +#include #include "MNN_generated.h" #include using namespace MNN::Express; @@ -27,7 +27,7 @@ class MultiThreadLoadTest : public MNNTestCase { builderOutput.Finish(len); int sizeOutput = builderOutput.GetSize(); auto bufferOutput = builderOutput.GetBufferPointer(); - + std::vector threads; for (int i=0; i<100; ++i) { threads.emplace_back([&](){ diff --git a/test/expr/PaddingTest.cpp b/test/expr/PaddingTest.cpp index 
ed13a7d10..7c589de1b 100644 --- a/test/expr/PaddingTest.cpp +++ b/test/expr/PaddingTest.cpp @@ -6,50 +6,92 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" +#include "TestUtils.h" + using namespace MNN::Express; using namespace MNN; static void fillVar(VARP x) { auto size = x->getInfo()->size; - auto ptr = x->writeMap(); - for (int i=0; iwriteMap(); + for (int i = 0; i < size; ++i) { ptr[i] = i + 1; } } static void printVar(VARP x) { auto size = x->getInfo()->size; - auto ptr = x->readMap(); - for (int i=0; ireadMap(); + for (int i = 0; i < size; ++i) { MNN_PRINT("%d, ", ptr[i]); } MNN_PRINT("\n"); } +template +bool CreateCaseSymmetric() { + const T tensorData[] = {1, 2, 3, 4, 5, 6}; + const int padData[] = {1, 1, 2, 2}; + + const T expectedData[] = {2, 1, 1, 2, 3, 3, 2, 2, 1, 1, 2, 3, 3, 2, 5, 4, 4, 5, 6, 6, 5, 5, 4, 4, 5, 6, 6, 5}; + + auto tensor = _Const(tensorData, {2, 3}, NHWC, halide_type_of()); + auto pad = _Const(padData, {4}, NHWC, halide_type_of()); + auto result = _Pad(tensor, pad, SYMMETRIC); + + const auto resultData = result->template readMap(); + const int size = result->getInfo()->size; + if (!checkVector(resultData, expectedData, size, 0)) { + return false; + } + + return true; +} + +template +bool CreateCaseReflect() { + const T tensorData[] = {1, 2, 3, 4, 5, 6}; + const int padData[] = {1, 1, 2, 2}; + + const T expectedData[] = {6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1, 6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1}; + + auto tensor = _Const(tensorData, {2, 3}, NHWC, halide_type_of()); + auto pad = _Const(padData, {4}, NHWC, halide_type_of()); + auto result = _Pad(tensor, pad, REFLECT); + + const auto resultData = result->template readMap(); + const int size = result->getInfo()->size; + if (!checkVector(resultData, expectedData, size, 0)) { + return false; + } + + return true; +} + class PaddingTest : public MNNTestCase { public: virtual bool run() { 
std::unique_ptr padding(new OpT); padding->type = OpType_Padding; { - auto x = _Input({4, 6}, NCHW, halide_type_of()); - auto pad = _Input({4}, NCHW, halide_type_of()); + auto x = _Input({4, 6}, NCHW, halide_type_of()); + auto pad = _Input({4}, NCHW, halide_type_of()); auto paddingPtr = pad->writeMap(); - paddingPtr[0] = 0; - paddingPtr[1] = 1; - paddingPtr[2] = 1; - paddingPtr[3] = 1; + paddingPtr[0] = 0; + paddingPtr[1] = 1; + paddingPtr[2] = 1; + paddingPtr[3] = 1; fillVar(x); auto y = Variable::create(Expr::create(padding.get(), {x, pad})); { auto size = y->getInfo()->dim; - auto ptr = y->readMap(); - for (int i=0; ireadMap(); + for (int i = 0; i < size[0]; ++i) { + for (int j = 0; j < size[1]; ++j) { + auto compute = ptr[i * 8 + j]; + auto expect = i * 6 + (j - 1) + 1; if (i >= 4 || j < 1 || j >= 7) { expect = 0; } @@ -62,39 +104,39 @@ class PaddingTest : public MNNTestCase { } } { - auto x = _Input({1, 3, 4, 6}, NCHW, halide_type_of()); - auto convert = _Convert(x, NC4HW4); - auto pad = _Input({8}, NCHW, halide_type_of()); + auto x = _Input({1, 3, 4, 6}, NCHW, halide_type_of()); + auto convert = _Convert(x, NC4HW4); + auto pad = _Input({8}, NCHW, halide_type_of()); auto paddingPtr = pad->writeMap(); - paddingPtr[0] = 0; - paddingPtr[1] = 1; - paddingPtr[2] = 0; - paddingPtr[3] = 0; - paddingPtr[4] = 1; - paddingPtr[5] = 1; - paddingPtr[6] = 1; - paddingPtr[7] = 1; + paddingPtr[0] = 0; + paddingPtr[1] = 1; + paddingPtr[2] = 0; + paddingPtr[3] = 0; + paddingPtr[4] = 1; + paddingPtr[5] = 1; + paddingPtr[6] = 1; + paddingPtr[7] = 1; fillVar(x); - auto y = Variable::create(Expr::create(padding.get(), {x, pad})); + auto y = Variable::create(Expr::create(padding.get(), {x, pad})); auto yC4 = _Convert(Variable::create(Expr::create(padding.get(), {convert, pad})), NCHW); { - auto info = y->getInfo(); + auto info = y->getInfo(); auto info2 = yC4->getInfo(); - if(info->size != info2->size) { + if (info->size != info2->size) { FUNC_PRINT(1); return false; } auto ptr0 = 
y->readMap(); auto ptr1 = yC4->readMap(); - for (int i=0; isize; ++i) { + for (int i = 0; i < info->size; ++i) { if (ptr0[i] != ptr1[i]) { FUNC_PRINT(1); return false; } } } - - paddingPtr = pad->writeMap(); + + paddingPtr = pad->writeMap(); paddingPtr[0] = 0; paddingPtr[1] = 1; paddingPtr[2] = 0; @@ -104,15 +146,15 @@ class PaddingTest : public MNNTestCase { paddingPtr[6] = 1; paddingPtr[7] = 1; { - auto info = y->getInfo(); + auto info = y->getInfo(); auto info2 = yC4->getInfo(); - if(info->size != info2->size) { + if (info->size != info2->size) { FUNC_PRINT(1); return false; } auto ptr0 = y->readMap(); auto ptr1 = yC4->readMap(); - for (int i=0; isize; ++i) { + for (int i = 0; i < info->size; ++i) { if (ptr0[i] != ptr1[i]) { FUNC_PRINT(1); return false; @@ -120,6 +162,24 @@ class PaddingTest : public MNNTestCase { } } } + + { + if (!CreateCaseSymmetric()) { + return false; + } + if (!CreateCaseSymmetric()) { + return false; + } + } + { + if (!CreateCaseReflect()) { + return false; + } + if (!CreateCaseReflect()) { + return false; + } + } + return true; } }; diff --git a/test/expr/ReplaceTest.cpp b/test/expr/ReplaceTest.cpp index f92ba8436..76b7d8b04 100644 --- a/test/expr/ReplaceTest.cpp +++ b/test/expr/ReplaceTest.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" using namespace MNN::Express; @@ -20,14 +20,14 @@ class ReplaceTest : public MNNTestCase { auto c4 = MNN::Express::_Const(4.f, {1, 1, 1, 1}, MNN::Express::NHWC); auto c5 = MNN::Express::_Const(5.f, {1, 1, 1, 1}, MNN::Express::NHWC); auto b1 = MNN::Express::_Add(c1, c2); - auto b2 = MNN::Express::_Mul(c3, c4); - + auto b2 = MNN::Express::_Multiply(c3, c4); + auto r1 = b1->readMap(); if (3.0f != r1[0]) { MNN_PRINT("1 + 2 = %f\n", r1[0]); return false; } - + MNN::Express::Variable::replace(c2, b2); auto r2 = b1->readMap(); if (13.0f != r2[0]) { diff --git a/test/expr/ReverseSequenceTest.cpp 
b/test/expr/ReverseSequenceTest.cpp index 4a6b68fbd..f13cfbf1e 100644 --- a/test/expr/ReverseSequenceTest.cpp +++ b/test/expr/ReverseSequenceTest.cpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "ExprCreator.hpp" +#include #include "MNNTestSuite.h" using namespace MNN::Express; diff --git a/test/model/MobileNetTest.cpp b/test/model/MobileNetTest.cpp index e0519c017..3c90f2bf4 100644 --- a/test/model/MobileNetTest.cpp +++ b/test/model/MobileNetTest.cpp @@ -11,10 +11,10 @@ #endif #include -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/model/SqueezeNetTest.cpp b/test/model/SqueezeNetTest.cpp index 7ef2bb82c..258f6650a 100644 --- a/test/model/SqueezeNetTest.cpp +++ b/test/model/SqueezeNetTest.cpp @@ -11,10 +11,10 @@ #endif #include -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ArgMaxTest.cpp b/test/op/ArgMaxTest.cpp new file mode 100644 index 000000000..a42c660c0 --- /dev/null +++ b/test/op/ArgMaxTest.cpp @@ -0,0 +1,47 @@ +// +// UnaryTest.cpp +// MNNTests +// +// Created by MNN on 2019/01/15. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +using namespace MNN::Express; +class ArgMaxTest : public MNNTestCase { +public: + virtual ~ArgMaxTest() = default; + virtual bool run() { + auto input = _Input({4,4}, NHWC); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, 2.0, -3.0, 4.0, + 5.0, -6.0, 7.0, -8.0, + -9.0, -10.0, 11.0, 12.0, + 13.0, 14.0, -15.0, -16.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 16 * sizeof(float)); + input->unMap(); + auto output_0 = _ArgMax(input, 0); + auto output_1 = _ArgMax(input, 1); + const std::vector expectedOutput_0 = {3, 3, 2, 2}; + const std::vector expectedOutput_1 = {3, 2, 3, 1}; + auto gotOutput_0 = output_0->readMap(); + auto gotOutput_1 = output_1->readMap(); + if (!checkVector(gotOutput_0, expectedOutput_0.data(), 4, 0)) { + MNN_ERROR("ArgMaxTest test axis_0 failed!\n"); + return false; + } + if (!checkVector(gotOutput_1, expectedOutput_1.data(), 4, 0)) { + MNN_ERROR("ArgMaxTest test axis_1 failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(ArgMaxTest, "op/argmax"); + diff --git a/test/op/BatchToSpaceNDTest.cpp b/test/op/BatchToSpaceNDTest.cpp index de05a885e..3b5077380 100644 --- a/test/op/BatchToSpaceNDTest.cpp +++ b/test/op/BatchToSpaceNDTest.cpp @@ -5,147 +5,41 @@ // Created by MNN on 2019/01/15. 
// Copyright © 2018, Alibaba Group Holding Limited // - -#include "Interpreter.hpp" +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(std::vector s, std::vector pad, std::vector dims) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto idims = fbb.CreateVector(std::vector(dims)); - InputBuilder ib(fbb); - ib.add_dims(idims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto svals = fbb.CreateVector(s); - auto sdims = fbb.CreateVector(std::vector({2})); - BlobBuilder sbb(fbb); - sbb.add_dims(sdims); - sbb.add_dataType(DataType_DT_INT32); - sbb.add_dataFormat(MNN_DATA_FORMAT_NCHW); - sbb.add_int32s(svals); - auto shape = sbb.Finish(); - - auto pvals = fbb.CreateVector(pad); - auto pdims = fbb.CreateVector(std::vector({4})); - BlobBuilder pbb(fbb); - pbb.add_dims(pdims); - pbb.add_dataType(DataType_DT_INT32); - pbb.add_dataFormat(MNN_DATA_FORMAT_NCHW); - pbb.add_int32s(pvals); - auto padding = pbb.Finish(); - - SpaceBatchBuilder builder(fbb); - builder.add_padding(flatbuffers::Offset(padding.o)); - builder.add_blockShape(flatbuffers::Offset(shape.o)); - auto sb = builder.Finish(); - - auto name = fbb.CreateString("space_to_batch"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - OpBuilder op(fbb); - op.add_type(OpType_BatchToSpaceND); - op.add_name(name); - op.add_inputIndexes(iv); - op.add_outputIndexes(ov); - 
op.add_main_type(OpParameter_SpaceBatch); - op.add_main(flatbuffers::Offset(sb.o)); - vec.push_back(op.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - +using namespace MNN::Express; class BatchToSpaceNDTest : public MNNTestCase { public: virtual ~BatchToSpaceNDTest() = default; virtual bool run() { - for (int ob = 1; ob <= 2; ob++) { - for (int c = 1; c <= 4; c *= 2) { - for (int h = 1; h <= 4; h *= 2) { - for (int w = 1; w <= 4; w *= 2) { - for (int pw = 0; pw <= 1; pw++) { - for (int ph = 0; ph <= 1; ph++) { - for (int sw = 1; sw <= 2; sw *= 2) { - for (int sh = 1; sh <= 2; sh *= 2) { - if (h * sh <= 2 * ph || w * sw <= 2 * pw) - continue; - - int b = ob * sw * sh; - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create({sh, sw}, {ph, ph, pw, pw}, {b, c, h, w}); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < w * h * c * b; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, 
device)->onCopyBuffer(input, device); - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - } - } - } + auto input = _Input({4,1,1,3}, NHWC); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 12 * sizeof(float)); + input->unMap(); + const int blockshapedata[] = {2,2}; + const int cropsdata[] = {0,0,0,0}; + auto block_shape = _Const(blockshapedata,{2,},NCHW,halide_type_of()); + auto crops = _Const(cropsdata,{2,2},NCHW,halide_type_of()); + input = _Convert(input, NC4HW4); + auto tmp = _BatchToSpaceND(input, block_shape, crops); + auto output = _Convert(tmp, NHWC); + const std::vector expectedOutput = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 12, 0.01)) { + MNN_ERROR("BatchToSpaceNDTest test failed!\n"); + return false; + } + const std::vector expectedDims = {1,2,2,3}; + auto gotDims = output->getInfo()->dim; + if (!checkVector(gotDims.data(), expectedDims.data(), 4, 0)) { + MNN_ERROR("BatchToSpaceNDTest test failed!\n"); + return false; } return true; } diff --git a/test/op/BinaryOPTest.cpp b/test/op/BinaryOPTest.cpp index 8bda10e48..b6d75df4c 100644 --- a/test/op/BinaryOPTest.cpp +++ b/test/op/BinaryOPTest.cpp @@ -5,173 +5,457 @@ // Created by MNN on 2019/01/15. 
// Copyright © 2018, Alibaba Group Holding Limited // - -#include "Interpreter.hpp" +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(int opType, int b0, int c0, int h0, int w0, int b1, int c1, int h1, int w1) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(std::vector({b0, c0, h0, w0})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input0"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto dims = fbb.CreateVector(std::vector({b1, c1, h1, w1})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input1"); - auto iv = fbb.CreateVector(std::vector({1})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - BinaryOpBuilder bob(fbb); - bob.add_opType(opType); - auto binary = bob.Finish(); - auto name = fbb.CreateString("binaryop"); - auto iv = fbb.CreateVector(std::vector({0, 1})); - auto ov = fbb.CreateVector(std::vector({2})); - - OpBuilder builder(fbb); - builder.add_type(OpType_BinaryOp); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - 
builder.add_main_type(OpParameter_BinaryOp); - builder.add_main(flatbuffers::Offset(binary.o)); - vec.push_back(builder.Finish()); +using namespace MNN::Express; +class AddTest : public MNNTestCase { +public: + virtual ~AddTest() = default; + virtual bool run() { + auto input_x = _Input({4,}, NCHW); + auto input_y = _Input({4,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0}; + const float data_y[] = {1.0, 2.0, 3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 4 * sizeof(float)); + memcpy(ptr_y, data_y, 4 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Add(input_x, input_y); + const std::vector expectedOutput = {0.0, 0.0, 0.0, 0.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("AddTest test failed!\n"); + return false; + } + return true; } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input0", "input1", "output"}); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class BinaryOPTest : public MNNTestCase { -public: - virtual ~BinaryOPTest() = default; - virtual bool run() { - for (int b = 1; b <= 2; b *= 2) { - for (int c = 1; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int optype = 0; - int b0, c0, h0, w0, b1, c1, h1, w1; - int b_1[] = {b, 1}; - int c_1[] = {c, 1}; - int h_1[] = {h, 1}; - int w_1[] = {w, 1}; - b0 = b_1[rand() % 2]; - c0 = 
c_1[rand() % 2]; - h0 = h_1[rand() % 2]; - w0 = w_1[rand() % 2]; - b1 = b_1[rand() % 2]; - c1 = c_1[rand() % 2]; - h1 = h_1[rand() % 2]; - w1 = w_1[rand() % 2]; - - auto net = create(optype, b0, c0, h0, w0, b1, c1, h1, w1); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input0 = new Tensor(4); - { - input0->buffer().dim[0].extent = b0; - input0->buffer().dim[1].extent = c0; - input0->buffer().dim[2].extent = h0; - input0->buffer().dim[3].extent = w0; - TensorUtils::setLinearLayout(input0); - input0->buffer().host = (uint8_t *)malloc(input0->size()); - for (int i = 0; i < b0 * c0 * h0 * w0; i++) { - input0->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input0, host); - net->getBackend(GPU, device)->onCopyBuffer(input0, device); - } - - auto input1 = new Tensor(4); - { - input1->buffer().dim[0].extent = b1; - input1->buffer().dim[1].extent = c1; - input1->buffer().dim[2].extent = h1; - input1->buffer().dim[3].extent = w1; - TensorUtils::setLinearLayout(input1); - input1->buffer().host = (uint8_t *)malloc(input1->size()); - for (int i = 0; i < b1 * c1 * h1 * w1; i++) { - input1->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, "input1"); - auto device = net->getSessionInput(GPU, "input1"); - net->getBackend(CPU, host)->onCopyBuffer(input1, host); - net->getBackend(GPU, device)->onCopyBuffer(input1, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input0->buffer().host); - free(input1->buffer().host); - delete input0; - delete input1; - delete net; - }); - } - } - } +}; +class SubtractTest : public MNNTestCase { +public: + virtual ~SubtractTest() = default; + virtual bool run() { + auto input_x = _Input({4,}, NCHW); + 
auto input_y = _Input({4,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0}; + const float data_y[] = {1.0, 2.0, 3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 4 * sizeof(float)); + memcpy(ptr_y, data_y, 4 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Subtract(input_x, input_y); + const std::vector expectedOutput = {-2.0, -4.0, -6.0, -8.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("SubtractTest test failed!\n"); + return false; + } + return true; + } +}; +class MultiplyTest : public MNNTestCase { +public: + virtual ~MultiplyTest() = default; + virtual bool run() { + auto input_x = _Input({4,}, NCHW); + auto input_y = _Input({4,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0}; + const float data_y[] = {1.0, 2.0, 3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 4 * sizeof(float)); + memcpy(ptr_y, data_y, 4 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Multiply(input_x, input_y); + const std::vector expectedOutput = {-1.0, -4.0, -9.0, -16.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("MultiplyTest test failed!\n"); + return false; + } + return true; + } +}; +class DivideTest : public MNNTestCase { +public: + virtual ~DivideTest() = default; + virtual bool run() { + auto input_x = _Input({4,}, NCHW); + auto input_y = _Input({4,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0}; + const float data_y[] = {2.0, 4.0, 6.0, 8.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = 
input_y->writeMap(); + memcpy(ptr_x, data_x, 4 * sizeof(float)); + memcpy(ptr_y, data_y, 4 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Divide(input_x, input_y); + const std::vector expectedOutput = {-0.5, -0.5, -0.5, -0.5}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("DivideTest test failed!\n"); + return false; + } + return true; + } +}; +class PowTest : public MNNTestCase { +public: + virtual ~PowTest() = default; + virtual bool run() { + auto input_x = _Input({4,}, NCHW); + auto input_y = _Input({4,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0}; + const float data_y[] = {2.0, 4.0, 6.0, 8.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 4 * sizeof(float)); + memcpy(ptr_y, data_y, 4 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Pow(input_x, input_y); + const std::vector expectedOutput = {1.0, 16.0, 729.0, 65536.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("PowTest test failed!\n"); + return false; + } + return true; + } +}; +class MinimumTest : public MNNTestCase { +public: + virtual ~MinimumTest() = default; + virtual bool run() { + auto input_x = _Input({4,}, NCHW); + auto input_y = _Input({4,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0}; + const float data_y[] = {2.0, 4.0, 6.0, 8.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 4 * sizeof(float)); + memcpy(ptr_y, data_y, 4 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Minimum(input_x, input_y); + const std::vector expectedOutput = {-1.0, -2.0, -3.0, -4.0}; + auto gotOutput = output->readMap(); 
+ if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("MinimumTest test failed!\n"); + return false; + } + return true; + } +}; +class MaximumTest : public MNNTestCase { +public: + virtual ~MaximumTest() = default; + virtual bool run() { + auto input_x = _Input({4,}, NCHW); + auto input_y = _Input({4,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0}; + const float data_y[] = {2.0, 4.0, 6.0, 8.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 4 * sizeof(float)); + memcpy(ptr_y, data_y, 4 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Maximum(input_x, input_y); + const std::vector expectedOutput = {2.0, 4.0, 6.0, 8.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("MaximumTest test failed!\n"); + return false; + } + return true; + } +}; +class BiasAddTest : public MNNTestCase { +public: + virtual ~BiasAddTest() = default; + virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0, -8.0}; + const float data_y[] = {1.0, 2.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _BiasAdd(input_x, input_y); + const std::vector expectedOutput = {0.0, 0.0, -2.0, -2.0, -4.0, -4.0, -6.0, -6.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0.01)) { + MNN_ERROR("BiasAddTest test failed!\n"); + return false; + } + return true; + } +}; +class GreaterTest : public MNNTestCase { +public: + virtual ~GreaterTest() = default; + 
virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; + const float data_y[] = {3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Greater(input_x, input_y); + const std::vector expectedOutput = {0, 0, 0, 0, 1, 1, 1, 1}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0)) { + MNN_ERROR("GreaterTest test failed!\n"); + return false; + } + return true; + } +}; +class GreaterEqualTest : public MNNTestCase { +public: + virtual ~GreaterEqualTest() = default; + virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; + const float data_y[] = {3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _GreaterEqual(input_x, input_y); + const std::vector expectedOutput = {0, 0, 1, 1, 1, 1, 1, 1}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0)) { + MNN_ERROR("GreaterEqualTest test failed!\n"); + return false; + } + return true; + } +}; +class LessTest : public MNNTestCase { +public: + virtual ~LessTest() = default; + virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; + const 
float data_y[] = {3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Less(input_x, input_y); + const std::vector expectedOutput = {1, 1, 0, 0, 0, 0, 0, 0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0)) { + MNN_ERROR("LessTest test failed!\n"); + return false; + } + return true; + } +}; +class FloorDivTest : public MNNTestCase { +public: + virtual ~FloorDivTest() = default; + virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0, 5.0, 6.0, 7.0, 8.0}; + const float data_y[] = {3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _FloorDiv(input_x, input_y); + const std::vector expectedOutput = {-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 2.0, 2.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0.01)) { + MNN_ERROR("FloorDivTest test failed!\n"); + return false; + } + return true; + } +}; +class SquaredDifferenceTest : public MNNTestCase { +public: + virtual ~SquaredDifferenceTest() = default; + virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0, 5.0, 6.0, 7.0, 8.0}; + const float data_y[] = {3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + 
input_y->unMap(); + auto output = _SquaredDifference(input_x, input_y); + const std::vector expectedOutput = {16.0, 36.0, 36.0, 64.0, 4.0, 4.0, 16.0, 16.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0.01)) { + MNN_ERROR("SquaredDifferenceTest test failed!\n"); + return false; + } + return true; + } +}; +class EqualTest : public MNNTestCase { +public: + virtual ~EqualTest() = default; + virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; + const float data_y[] = {3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _Equal(input_x, input_y); + const std::vector expectedOutput = {0, 0, 1, 1, 0, 0, 0, 0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0)) { + MNN_ERROR("EqualTest test failed!\n"); + return false; + } + return true; + } +}; +class LessEqualTest : public MNNTestCase { +public: + virtual ~LessEqualTest() = default; + virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; + const float data_y[] = {3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _LessEqual(input_x, input_y); + const std::vector expectedOutput = {1, 1, 1, 1, 0, 0, 0, 0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0)) { + 
MNN_ERROR("LessEqualTest test failed!\n"); + return false; + } + return true; + } +}; +class FloorModTest : public MNNTestCase { +public: + virtual ~FloorModTest() = default; + virtual bool run() { + auto input_x = _Input({4,2}, NCHW); + auto input_y = _Input({2,}, NCHW); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const float data_x[] = {-1.0, -2.0, -3.0, -4.0, 5.0, 6.0, 7.0, 8.0}; + const float data_y[] = {3.0, 4.0}; + auto ptr_x = input_x->writeMap(); + auto ptr_y = input_y->writeMap(); + memcpy(ptr_x, data_x, 8 * sizeof(float)); + memcpy(ptr_y, data_y, 2 * sizeof(float)); + input_x->unMap(); + input_y->unMap(); + auto output = _FloorMod(input_x, input_y); + const std::vector expectedOutput = {2.0, 2.0, 0.0, 0.0, 2.0, 2.0, 1.0, 0.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0.01)) { + MNN_ERROR("FloorMod test failed!\n"); + return false; } return true; } }; -MNNTestSuiteRegister(BinaryOPTest, "op/binary"); +MNNTestSuiteRegister(AddTest, "op/binary/add"); +MNNTestSuiteRegister(SubtractTest, "op/binary/subtract"); +MNNTestSuiteRegister(MultiplyTest, "op/binary/multiply"); +MNNTestSuiteRegister(DivideTest, "op/binary/divide"); +MNNTestSuiteRegister(PowTest, "op/binary/pow"); +MNNTestSuiteRegister(MinimumTest, "op/binary/minimum"); +MNNTestSuiteRegister(MaximumTest, "op/binary/maximum"); +MNNTestSuiteRegister(BiasAddTest, "op/binary/biasadd"); +MNNTestSuiteRegister(GreaterTest, "op/binary/greater"); +MNNTestSuiteRegister(GreaterEqualTest, "op/binary/greaterequal"); +MNNTestSuiteRegister(LessTest, "op/binary/less"); +MNNTestSuiteRegister(FloorDivTest, "op/binary/floordiv"); +MNNTestSuiteRegister(SquaredDifferenceTest, "op/binary/squareddifference"); +MNNTestSuiteRegister(EqualTest, "op/binary/equal"); +MNNTestSuiteRegister(LessEqualTest, "op/binary/lessequal"); +MNNTestSuiteRegister(FloorModTest, "op/binary/floormod"); diff --git a/test/op/BroadcastToTest.cpp 
b/test/op/BroadcastToTest.cpp new file mode 100644 index 000000000..009fc2e8c --- /dev/null +++ b/test/op/BroadcastToTest.cpp @@ -0,0 +1,92 @@ +// +// BroadcastToTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/3. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; + +class BroadcastToTest : public MNNTestCase { + virtual ~BroadcastToTest() = default; + + virtual bool run() { + { + const float tensorData[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; + const int shapeData[] = {2, 3, 2, 2}; + const float expectedData[] = { + 1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, + 1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 6.0, 5.0, 6.0, + }; + + auto tensor = _Const(tensorData, {1, 3, 1, 2}, NHWC, halide_type_of()); + auto shape = _Const(shapeData, {4}, NHWC, halide_type_of()); + auto result = _BroadcastTo(tensor, shape); + + const int size = result->getInfo()->size; + auto resultData = result->readMap(); + if (!checkVector(resultData, expectedData, size, 0.0)) { + return false; + } + } + + { + const float tensorData[] = {1.0, 2.0, 3.0}; + const int shapeData[] = {3, 3}; + const float expectedData[] = {1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0}; + + auto tensor = _Const(tensorData, {1, 3}, NHWC, halide_type_of()); + auto shape = _Const(shapeData, {2}, NHWC, halide_type_of()); + auto result = _BroadcastTo(tensor, shape); + + const int size = result->getInfo()->size; + auto resultData = result->readMap(); + if (!checkVector(resultData, expectedData, size, 0.0)) { + return false; + } + } + + { + const float tensorData[] = {1.0, 2.0, 3.0}; + const int shapeData[] = {3, 3}; + const float expectedData[] = {1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0}; + + auto tensor = _Const(tensorData, {3, 1}, NHWC, halide_type_of()); + auto shape = _Const(shapeData, {2}, NHWC, halide_type_of()); + auto result = _BroadcastTo(tensor, shape); + + const int size = 
result->getInfo()->size; + auto resultData = result->readMap(); + if (!checkVector(resultData, expectedData, size, 0.0)) { + return false; + } + } + + { + const float tensorData[] = {1.0, 2.0}; + const int shapeData[] = {2, 3, 2, 2}; + const float expectedData[] = {1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, + 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0}; + + auto tensor = _Const(tensorData, {1, 1, 1, 2}, NHWC, halide_type_of()); + auto shape = _Const(shapeData, {4}, NHWC, halide_type_of()); + auto result = _BroadcastTo(tensor, shape); + + const int size = result->getInfo()->size; + auto resultData = result->readMap(); + if (!checkVector(resultData, expectedData, size, 0.0)) { + return false; + } + } + + return true; + } +}; + +MNNTestSuiteRegister(BroadcastToTest, "op/BroadcastToTest"); diff --git a/test/op/CastTest.cpp b/test/op/CastTest.cpp index 4710747e4..fbc8dd06a 100644 --- a/test/op/CastTest.cpp +++ b/test/op/CastTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ConcatTest.cpp b/test/op/ConcatTest.cpp index 412a38af4..8fa60495c 100644 --- a/test/op/ConcatTest.cpp +++ b/test/op/ConcatTest.cpp @@ -6,13 +6,12 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" - using namespace MNN; static Interpreter *create(int axis, int n, int b, int c, int h, int w, bool tensorflow) { diff --git a/test/op/Conv2DBackPropFilterTest.cpp b/test/op/Conv2DBackPropFilterTest.cpp index 827da13be..3df21b546 100644 --- 
a/test/op/Conv2DBackPropFilterTest.cpp +++ b/test/op/Conv2DBackPropFilterTest.cpp @@ -6,80 +6,144 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Expr.hpp" -#include "ExprCreator.hpp" +#include +#include + #include "MNNTestSuite.h" #include "TestUtils.h" +#include +#include "MNN_generated.h" +#include +#include +#include using namespace MNN::Express; class Conv2DBackPropFilterTest : public MNNTestCase { public: virtual ~Conv2DBackPropFilterTest() = default; - virtual bool run() { - auto input = _Input({1, 3, 5, 5}, NCHW); - - input->setName("input_tensor"); - // set input data - const float inpudata[] = { - 0.9329, 0.8632, 0.4275, 0.6670, 0.1923, - 0.6141, 0.8261, 0.0899, 0.1442, 0.7056, - 0.5515, 0.0435, 0.5664, 0.3330, 0.8119, - 0.8131, 0.2928, 0.5145, 0.2485, 0.2596, - 0.3923, 0.8260, 0.7251, 0.7897, 0.9686, // the first channel - 0.5073, 0.2214, 0.2474, 0.3628, 0.0242, - 0.1869, 0.4747, 0.3383, 0.6147, 0.8212, - 0.0944, 0.4912, 0.2376, 0.2423, 0.6194, - 0.4229, 0.2750, 0.2160, 0.6690, 0.4680, - 0.6593, 0.6406, 0.7864, 0.0265, 0.3638, // the second channel - 0.6708, 0.3008, 0.4975, 0.8371, 0.4141, - 0.4837, 0.9709, 0.9418, 0.5752, 0.7287, - 0.4387, 0.4936, 0.5065, 0.1497, 0.3947, - 0.4060, 0.3319, 0.9262, 0.9229, 0.7986, - 0.8909, 0.5558, 0.7642, 0.5227, 0.9615}; // the last channel - auto inputPtr = input->writeMap(); - memcpy(inputPtr, inpudata, 75 * sizeof(float)); - input->unMap(); +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } - input = _Convert(input, NC4HW4); - auto weight = _Const(1.0, {1, 3, 3, 3}, NCHW); - auto bias = _Const(0.0, {1}, NCHW); + const int inputHeight = 5, inputWidth = 5, inputChannel = 2, outputChannel = 3; + const int kernelSize = 3, stride = 2, pad = 1, batch = 1; + const int height = (inputHeight + 2 * pad - kernelSize) / stride 
+ 1; // height = 3 + const int width = (inputWidth + 2 * pad - kernelSize) / stride + 1; // width = 3 + const std::vector inputData = { + // channel 0 + 0.6345, 0.1219, 0.0424, 0.0501, 0.3934, + 0.4311, 0.5961, 0.6642, 0.734 , 0.062 , + 0.88 , 0.503 , 0.1638, 0.6367, 0.2151, + 0.0795, 0.7693, 0.134 , 0.4963, 0.7571, + 0.5428, 0.3663, 0.2823, 0.7478, 0.579 , + // channel 1 + 0.6917, 0.4047, 0.9673, 0.9111, 0.608 , + 0.4621, 0.6567, 0.3192, 0.726 , 0.9066, + 0.885 , 0.3491, 0.7938, 0.2593, 0.3146, + 0.6901, 0.2126, 0.649 , 0.7919, 0.9838, + 0.0672, 0.0357, 0.383 , 0.5043, 0.2803 + }; + const std::vector gradData = { + // channel 0 + 0.0229, 0.6325, 0.8646, + 0.7819, 0.6056, 0.0749, + 0.2162, 0.4768, 0.5742, + // channel 1 + 0.0241, 0.8462, 0.7895, + 0.4366, 0.1978, 0.6466, + 0.7126, 0.9574, 0.2927, + // channel 2 + 0.0020, 0.3654, 0.3904, + 0.3954, 0.5271, 0.2788, + 0.9785, 0.2899, 0.5009 + }; + const std::vector filterData(outputChannel * inputChannel * kernelSize * kernelSize, 0.0); + const std::vector outputData = { + // outputChannel = 0, inputChannel = 0 + 1.067752, 1.259766, 1.313559, + 1.076762, 1.769278, 1.249106, + 1.514711, 0.683602, 1.379981, + // outputChannel = 0, inputChannel = 1 + 1.008152, 1.646069, 1.376681, + 1.581137, 2.707695, 1.263700, + 1.231126, 2.002633, 1.120040, + // outputChannel = 1, inputChannel = 0 + 1.474308, 0.766233, 1.428803, + 1.223466, 1.743998, 1.367851, + 1.556988, 1.172140, 1.069521, + // outputChannel = 1, inputChannel = 1 + 1.034659, 2.252174, 1.339982, + 1.480274, 2.558655, 1.492689, + 1.682971, 2.062799, 0.879627, + // outputChannel = 2, inputChannel = 0 + 0.990460, 1.033711, 1.519227, + 0.987508, 1.567596, 1.128253, + 1.048235, 0.580911, 0.835177, + // outputChannel = 2, inputChannel = 1 + 1.006851, 1.959918, 1.079935, + 1.022828, 1.765439, 0.789565, + 0.856232, 1.360733, 0.768066 + }; - auto convOut = _Conv(weight, bias, input); - auto convOutDims = convOut->getInfo()->dim; + auto input = _Input({batch, inputChannel, 
inputHeight, inputWidth}, NCHW, halide_type_of()); + auto grad = _Input({batch, outputChannel, height, width}, NCHW, halide_type_of()); + auto filter = _Input({outputChannel, inputChannel, kernelSize, kernelSize}, NCHW, halide_type_of()); + auto output = _Conv2DBackPropFilter(filter, _Convert(input, NC4HW4), _Convert(grad, NC4HW4), + CAFFE, {stride, stride}, {1, 1}, 1, {pad, pad}); + output = _Convert(output, NCHW); - auto grad = _Const(1.0, convOutDims, NCHW); - grad = _Convert(grad, NC4HW4); - auto weightGrad = _Conv2DBackPropFilter(weight, input, grad); - weightGrad->setName("Conv2DBackPropFilter"); - weightGrad = _Convert(weightGrad, NCHW); - weightGrad->setName("nc4hw4_to_nchw"); + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({output}); + } - auto weightGradDims = weightGrad->getInfo()->dim; - const std::vector expectedDims = {1, 3, 3, 3}; - if (!checkVector(weightGradDims.data(), expectedDims.data(), 4, 0)) { - MNN_ERROR("Conv2DBackPropFilter's output shape compute ERROR!\n"); + const std::vector outDim = {outputChannel, inputChannel, kernelSize, kernelSize}; + if (!checkVector(output->getInfo()->dim.data(), outDim.data(), 4, 0)) { + MNN_ERROR("Conv2DBackPropFilter(%s) shape test failed!\n", deviceName.c_str()); return false; } - const std::vector expectedWeightGrad = { - 4.9151, 3.9609, 3.9378, - 4.3119, 3.0589, 3.6735, - 4.7253, 4.3395, 5.2173, - 2.7992, 3.2303, 3.5078, - 2.7370, 3.5589, 4.2264, - 3.8235, 3.5845, 3.6288, - 5.3044, 5.2732, 5.0453, - 5.4994, 5.8188, 5.9443, - 5.3138, 5.1735, 5.9469}; - auto weightGradPtr = weightGrad->readMap(); - if (!checkVector(weightGradPtr, expectedWeightGrad.data(), 27, 0.01)) { - MNN_ERROR("Conv2DBackPropFilter test failed!\n"); + + ::memcpy(input->writeMap(), inputData.data(), inputData.size() * 
sizeof(float)); + ::memcpy(grad->writeMap(), gradData.data(), gradData.size() * sizeof(float)); + ::memcpy(filter->writeMap(), filterData.data(), filterData.size() * sizeof(float)); + if(!checkVectorByRelativeError(output->readMap(), outputData.data(), outputData.size(), 0.005)) { + MNN_ERROR("Conv2DBackPropFilter(%s) test failed!\n", deviceName.c_str()); return false; } return true; } }; +class Conv2DBackPropFilterTestOnCPU : public Conv2DBackPropFilterTest { +public: + virtual ~Conv2DBackPropFilterTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; + +class Conv2DBackPropFilterTestOnOpencl : public Conv2DBackPropFilterTest { +public: + virtual ~Conv2DBackPropFilterTestOnOpencl() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +MNNTestSuiteRegister(Conv2DBackPropFilterTestOnCPU, "op/Conv2DBackPropFilter/cpu"); +MNNTestSuiteRegister(Conv2DBackPropFilterTestOnOpencl, "op/Conv2DBackPropFilter/opencl"); + class Conv2DDWBackPropFilterTest : public MNNTestCase { public: virtual ~Conv2DDWBackPropFilterTest() = default; @@ -146,5 +210,4 @@ class Conv2DDWBackPropFilterTest : public MNNTestCase { } }; -MNNTestSuiteRegister(Conv2DBackPropFilterTest, "op/Conv2DBackPropFilter"); MNNTestSuiteRegister(Conv2DDWBackPropFilterTest, "op/Conv2DBackPropFilterDW"); diff --git a/test/op/Conv2DBackPropTest.cpp b/test/op/Conv2DBackPropTest.cpp index 2ec329002..c08316536 100644 --- a/test/op/Conv2DBackPropTest.cpp +++ b/test/op/Conv2DBackPropTest.cpp @@ -6,27 +6,41 @@ // Copyright © 2018, Alibaba Group Holding Limited // +#include +#include +#include +#include #include "MNNTestSuite.h" -#include "Expr.hpp" -#include "ExprCreator.hpp" #include "TestUtils.h" +#include +#include "MNN_generated.h" +#include +#include +#include +#include using namespace MNN::Express; -class Conv2DBackPropTest : public MNNTestCase{ +class Conv2DBackPropTest : public MNNTestCase { +public: virtual 
~Conv2DBackPropTest() = default; - - virtual bool run(){ - +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + const float inputGradData[] = { 1., 1., 1., 1., 1., 1., 1., 1., 1 }; // 1x1x3x3 - + auto inputGrad = _Const(inputGradData, {1, 1, 3, 3}, NCHW); inputGrad = _Convert(inputGrad, NC4HW4); - + const float weightData[] = { 1., 1., 1., 1., 1., 1., @@ -39,22 +53,33 @@ class Conv2DBackPropTest : public MNNTestCase{ 1., 1., 1.}; // 1x3x3x3 auto weight = _Const(weightData, {1,3,3,3}, NCHW); auto bias = _Const(0., {1}, NCHW); - + auto outputGrad = _Deconv(weight, bias, inputGrad); outputGrad = _Convert(outputGrad, NCHW); auto outputGradDim = outputGrad->getInfo()->dim; - + const int outSize = outputGrad->getInfo()->size; if(outputGrad->getInfo()->size != outSize){ return false; } - + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({outputGrad}); + } + const std::vector expectedDim = {1,3,5,5}; if(!checkVector(outputGradDim.data(), expectedDim.data(), 4, 0)){ - MNN_ERROR("Conv2DBackProp shape test failed!\n"); + MNN_ERROR("Conv2DBackProp(%s) shape test failed!\n", deviceName.c_str()); return false; } - + const float expectedOutputGrad[] = { 1., 2., 3., 2., 1., 2., 4., 6., 4., 2., @@ -72,14 +97,98 @@ class Conv2DBackPropTest : public MNNTestCase{ 2., 4., 6., 4., 2., 1., 2., 3., 2., 1.}; auto outputGradData = outputGrad->readMap(); - - if(!checkVector(outputGradData, expectedOutputGrad, outSize, 0.01)){ - MNN_ERROR("Conv2DBackProp test failed!\n"); + + if(!checkVector(outputGradData, expectedOutputGrad, outSize, 0.005)){ + MNN_ERROR("Conv2DBackProp(%s) test failed!\n", 
deviceName.c_str()); return false; } - + return true; } }; -MNNTestSuiteRegister(Conv2DBackPropTest, "op/Conv2DBackPropTest"); +class Conv2DBackPropTestOnCPU : public Conv2DBackPropTest { + virtual ~Conv2DBackPropTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; + +class Conv2DBackPropTestOnOpencl : public Conv2DBackPropTest { + virtual ~Conv2DBackPropTestOnOpencl() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +MNNTestSuiteRegister(Conv2DBackPropTestOnCPU, "op/Conv2DBackPropTest/cpu"); +MNNTestSuiteRegister(Conv2DBackPropTestOnOpencl, "op/Conv2DBackPropTest/opencl"); + +class ConvBiasGradTest : public MNNTestCase { +public: + virtual ~ConvBiasGradTest() = default; +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + const int height = 32, width = 32, channel = 32, batch = 16; + std::vector gradData(height * width * channel * batch, 0); + for (unsigned int i = 0; i < gradData.size(); ++i) { + gradData[i] = (float)rand() / RAND_MAX; + } + + std::vector outputData(channel, 0); + for (unsigned int i = 0; i < gradData.size(); ++i) { + outputData[(i / (height * width)) % channel] += gradData[i]; + } + + auto grad = _Input({batch, channel, height, width}, NCHW, halide_type_of()); + auto output = _Convert(_ReduceSum(grad, {0, 2, 3}, false), NCHW);; + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({output}); + } + + const std::vector outDim = {channel}; + if (!checkVector(output->getInfo()->dim.data(), outDim.data(), 1, 0)) { + MNN_ERROR("ConvBiasGradTest(%s) shape 
test failed!\n", deviceName.c_str()); + return false; + } + + ::memcpy(grad->writeMap(), gradData.data(), gradData.size() * sizeof(float)); + // difference below 0.5% relative error is considered correct. + if(!checkVectorByRelativeError(output->readMap(), outputData.data(), outputData.size(), 0.005)) { + MNN_ERROR("ConvBiasGradTest(%s) test failed!\n", deviceName.c_str()); + return false; + } + return true; + } +}; + +class ConvBiasGradTestOnCPU : public ConvBiasGradTest { + virtual ~ConvBiasGradTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; + +class ConvBiasGradTestOnOpencl : public ConvBiasGradTest { + virtual ~ConvBiasGradTestOnOpencl() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +MNNTestSuiteRegister(ConvBiasGradTestOnCPU, "op/bias_grad/cpu"); +MNNTestSuiteRegister(ConvBiasGradTestOnOpencl, "op/bias_grad/opencl"); diff --git a/test/op/Convolution3DTest.cpp b/test/op/Convolution3DTest.cpp new file mode 100644 index 000000000..77a64bdc8 --- /dev/null +++ b/test/op/Convolution3DTest.cpp @@ -0,0 +1,206 @@ +// +// Convolution3DTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/03. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include +#include "TestUtils.h" +#include "MNNTestSuite.h" +#include "MNN_generated.h" + +#define TEST_RANDOM_SEED 100 + +using namespace MNN; +using namespace MNN::Express; + +static void reference_conv3d(const std::vector& input, const std::vector& weight, + const std::vector& bias, std::vector& output, + int batch, int ic, int oc, INTS inputShape, PadMode mode, INTS pads, + INTS kernels, INTS strides, INTS dilations, int group) { + INTS outputShape; + if (mode == PadMode_SAME) { + pads.clear(); + for (int i = 0; i < 3; ++i) { + outputShape.push_back((inputShape[i] + strides[i] - 1) / strides[i]); + pads.push_back(((outputShape[i] - 1) * strides[i] + (kernels[i] - 1) * dilations[i] + 1 - inputShape[i]) / 2); + } + } else { + if (mode == PadMode_VALID) { + pads = std::vector(3, 0); + } + for (int i = 0; i < 3; ++i) { + outputShape.push_back((inputShape[i] + 2 * pads[i] - (kernels[i] - 1) * dilations[i] - 1) / strides[i] + 1); + } + } + + MNN_ASSERT(oc % group == 0 && ic % group == 0); + output.resize(batch * oc * outputShape[0] * outputShape[1] * outputShape[2]); + int oc_step = oc / group, ic_step = ic / group; + for (int b = 0; b < batch; ++b) { + for (int o_c = 0; o_c < oc; ++o_c) { + for (int o_d = 0; o_d < outputShape[0]; ++o_d) { + for (int o_h = 0; o_h < outputShape[1]; ++o_h) { + for (int o_w = 0; o_w < outputShape[2]; ++o_w) { + float result_data = 0; + int g = o_c / oc_step; + for (int i_c = g * ic_step; i_c < (g + 1) * ic_step; ++i_c) { + for (int k_d = 0; k_d < kernels[0]; ++k_d) { + for (int k_h = 0; k_h < kernels[1]; ++k_h) { + for (int k_w = 0; k_w < kernels[2]; ++k_w) { + int i_d = o_d * strides[0] - pads[0] + k_d * dilations[0]; + int i_h = o_h * strides[1] - pads[1] + k_h * dilations[1]; + int i_w = o_w * strides[2] - pads[2] + k_w * dilations[2]; + if (i_d < 0 || i_d >= inputShape[0] || + i_h < 0 || i_h >= inputShape[1] || + i_w < 0 || i_w >= 
inputShape[2]) { + continue; + } + float input_data = input[(((b * ic + i_c) * inputShape[0] + i_d) * inputShape[1] + i_h) * inputShape[2] + i_w]; + float weight_data = weight[((((g * oc_step + o_c % oc_step) * ic_step + i_c % ic_step) * kernels[0] + k_d) * kernels[1] + k_h) * kernels[2] + k_w]; + result_data += input_data * weight_data; + } + } + } + } + output[(((b * oc + o_c) * outputShape[0] + o_d) * outputShape[1] + o_h) * outputShape[2] + o_w] = result_data + bias[o_c]; + } + } + } + } + } +} + +static VARP _Conv3D(VARP input, const std::vector& weight, const std::vector& bias, + INTS channel, INTS kernelSize, PadMode mode, INTS pads, INTS strides, INTS dilates, int group) { + MNN_ASSERT(group == 1); + MNN_ASSERT(dilates.size() == 3 && strides.size() == 3 && kernelSize.size() == 3 && channel.size() == 2); + MNN_ASSERT(mode != PadMode_CAFFE || pads.size()); + + std::unique_ptr conv3d(new Convolution3DT); + conv3d->weight = weight; + conv3d->bias = bias; + conv3d->common.reset(new Convolution3DCommonT); + auto common = conv3d->common.get(); + common->dilates = dilates; + common->strides = strides; + common->kernels = kernelSize; + common->padMode = mode; + common->pads = std::vector({0, 0, 0}); + if (mode == PadMode_CAFFE) { + common->pads = pads; + } + common->inputCount = channel[0]; + common->outputCount = channel[1]; + common->relu = common->relu6 = false; + + std::unique_ptr convOp(new OpT); + convOp->type = OpType_Convolution3D; + convOp->main.type = OpParameter_Convolution3D; + convOp->main.value = conv3d.release(); + + return (Variable::create(Expr::create(convOp.get(), {input}))); +} + +class Convolution3DCommonTest : public MNNTestCase { +public: + virtual ~Convolution3DCommonTest() = default; +protected: + static bool test(MNNForwardType type, const std::string& device_name, const std::string& test_op_name, + int batch, int ic, int oc, INTS inputShape, PadMode mode, INTS pads, + INTS kernels, INTS strides, INTS dilations, int group) { + using 
namespace MNN::Express; + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + std::vector weightData, biasData; + for (int i = 0; i < group * (oc / group) * (ic / group) * kernels[0] * kernels[1] * kernels[2]; i++) { + weightData.push_back(rand() % 255 / 255.f); + } + for (int i = 0; i < oc; i++) { + biasData.push_back(rand() % 255 / 255.f); + } + std::vector inputData, outputData; + for (int i = 0; i < batch * ic * inputShape[0] * inputShape[1] * inputShape[2]; ++i) { + inputData.push_back(rand() % 255 / 255.f); + } + reference_conv3d(inputData, weightData, biasData, outputData, batch, ic, oc, inputShape, + mode, pads, kernels, strides, dilations, group); + auto input = _Input({batch, ic, inputShape[0], inputShape[1], inputShape[2]}, NCHW, halide_type_of()); + auto output = _Conv3D(_Convert(input, NC4HW4), weightData, biasData, {ic, oc}, kernels, + mode, pads, strides, dilations, group); + output = _Convert(output, NCHW); + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", device_name.c_str()); + return false; + } + optimizer->onExecute({output}); + } + + ::memcpy(input->writeMap(), inputData.data(), inputData.size() * sizeof(float)); + // difference below 0.5% relative error is considered correct. 
+ if (!checkVectorByRelativeError(output->readMap(), outputData.data(), outputData.size(), 0.005)) { + MNN_ERROR("%s(%s) test failed!\n", test_op_name.c_str(), device_name.c_str()); + return false; + } + return true; + } +}; + +class Convolution3DTest : public Convolution3DCommonTest { +public: + virtual ~Convolution3DTest() = default; +protected: + static bool test(MNNForwardType type, const std::string& device_name) { + srand(TEST_RANDOM_SEED); + for (int b = 1; b <= 2; b++) { + for (int oc = 1; oc <= 8; oc *= 2) { + for (int ic = 1; ic <= 8; ic *= 2) { + for (int is = 1; is <= 8; is *= 2) { + for (int id = 1; id <= 4; ++id) { + for (int kd = 1; kd <= 3 && kd <= id; ++kd) { + for (int kw = 1; kw <= 3 && kw <= is; ++kw) { + for (int kh = 1; kh <= 3 && kh <= is; ++kh) { + for (int p = 0; p <= 1; p++) { + bool succ = Convolution3DCommonTest::test(type, device_name, "Conv3D", + b, ic, oc, {id, is, is}, PadMode_CAFFE, + {p, p, p}, {kd, kh, kw}, {1, 1, 1}, + {1, 1, 1}, 1); + if (!succ) { + return false; + } + } + } + } + } + } + } + } + } + } + return true; + } +}; + +class Convolution3DTestOnCPU : public Convolution3DTest { +public: + virtual ~Convolution3DTestOnCPU() = default; + virtual bool run() { + return Convolution3DTest::test(MNN_FORWARD_CPU, "CPU"); + } +}; + +MNNTestSuiteRegister(Convolution3DTestOnCPU, "op/convolution/conv3d/cpu"); diff --git a/test/op/ConvolutionTest.cpp b/test/op/ConvolutionTest.cpp index aac84ac59..90b65e3f5 100644 --- a/test/op/ConvolutionTest.cpp +++ b/test/op/ConvolutionTest.cpp @@ -6,96 +6,290 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include +#include +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" +#include +#include +#include +#include "TestUtils.h" #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" -#include "TestUtils.h" + +#define TEST_RANDOM_SEED 100 using namespace MNN; -static 
Interpreter *create(int oc, // output channel - int w, // input width - int h, // input height - int c, // input channel - int b, // batch - int d, // dilation - int kw, // kenrel width - int kh, // kenrel height - int s, // stride - int p, // padding - int g, // group - std::vector wt, std::vector bias, bool depthwise) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; +static void reference_conv2d(const std::vector& input, const std::vector& weight, + const std::vector& bias, std::vector& output, + int batch, int ic, int oc, int ih, int iw, PadMode mode, int pad_h, int pad_w, + int kh, int kw, int stride, int dilation, int group) { + int oh, ow; + if (mode == PadMode_SAME) { + oh = (ih + stride - 1) / stride; // oh = ceil(ih / stride) + ow = (iw + stride - 1) / stride; // ow = ceil(iw / stride) + pad_h = ((oh - 1) * stride + (kh - 1) * dilation + 1 - ih) / 2; + pad_w = ((ow - 1) * stride + (kw - 1) * dilation + 1 - iw) / 2; + } else { + if (mode == PadMode_VALID) { + pad_h = pad_w = 0; + } + oh = (ih + 2 * pad_h - (kh - 1) * dilation - 1) / stride + 1; + ow = (iw + 2 * pad_w - (kw - 1) * dilation - 1) / stride + 1; + } - { - auto dims = fbb.CreateVector(std::vector({b, c, h, w})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); + MNN_ASSERT(oc % group == 0 && ic % group == 0); + output.resize(batch * oh * ow * oc); + int oc_step = oc / group, ic_step = ic / group; + for (int b = 0; b < batch; ++b) { + for (int o_c = 0; o_c < oc; ++o_c) { + for (int o_h = 0; o_h < oh; ++o_h) { + for (int o_w = 0; o_w < ow; ++o_w) { + float result_data = 0; + int g = o_c / oc_step; + for (int i_c = g * ic_step; i_c < (g + 1) * ic_step; ++i_c) { + for (int k_h = 0; k_h < kh; ++k_h) { + for (int k_w = 0; k_w < kw; ++k_w) { + int i_h = o_h * stride - pad_h + k_h * dilation; + int i_w = o_w * stride - pad_w + k_w * 
dilation; + if (i_h < 0 || i_h >= ih || i_w < 0 || i_w >= iw) { + continue; + } + float input_data = input[((b * ic + i_c) * ih + i_h) * iw + i_w]; + float weight_data = weight[(((g * oc_step + o_c % oc_step) * ic_step + i_c % ic_step) * kh + k_h) * kw + k_w]; + result_data += input_data * weight_data; + } + } + } + output[((b * oc + o_c) * oh + o_h) * ow + o_w] = result_data + bias[o_c]; + } + } + } + } +} - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); +class ConvolutionCommonTest : public MNNTestCase { +public: + virtual ~ConvolutionCommonTest() = default; +protected: + static bool test(MNNForwardType type, const std::string& device_name, const std::string& test_op_name, + int batch, int ic, int oc, int ih, int iw, PadMode mode, + int pad_h, int pad_w, int kh, int kw, int stride, int dilation, int group) { + using namespace MNN::Express; + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + std::map padMap = { + {PadMode_CAFFE, CAFFE}, + {PadMode_VALID, VALID}, + {PadMode_SAME, SAME} + }; + std::vector weightData, biasData; + for (int i = 0; i < group * (oc / group) * (ic / group) * kw * kh; i++) { + weightData.push_back(rand() % 255 / 255.f); + } + for (int i = 0; i < oc; i++) { + biasData.push_back(rand() % 255 / 255.f); + } + std::vector inputData, outputData; + for (int i = 0; i < ih * iw * ic * batch; ++i) { + inputData.push_back(rand() % 255 / 255.f); + } + reference_conv2d(inputData, weightData, biasData, outputData, batch, ic, oc, ih, iw, + mode, pad_h, pad_w, kh, kw, stride, dilation, group); + auto input = _Input({batch, ic, ih, iw}, NCHW, halide_type_of()); + auto output = _Conv(std::move(weightData), std::move(biasData), 
_Convert(input, NC4HW4), {ic, oc}, {kw, kh}, + padMap[mode], {stride, stride}, {dilation, dilation}, group, {pad_w, pad_h}); + output = _Convert(output, NCHW); + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", device_name.c_str()); + return false; + } + optimizer->onExecute({output}); + } + + ::memcpy(input->writeMap(), inputData.data(), inputData.size() * sizeof(float)); + // difference below 0.5% relative error is considered correct. + if (!checkVectorByRelativeError(output->readMap(), outputData.data(), outputData.size(), 0.005)) { + MNN_ERROR("%s(%s) test failed!\n", test_op_name.c_str(), device_name.c_str()); + return false; + } + return true; } - { - int activation = rand() % 0b11; - auto ccb = Convolution2DCommonBuilder(fbb); - ccb.add_dilateX(d); - ccb.add_dilateY(d); - ccb.add_strideX(s); - ccb.add_strideY(s); - ccb.add_kernelX(kw); - ccb.add_kernelY(kh); - ccb.add_padX(p); - ccb.add_padY(p); - ccb.add_padMode(PadMode_CAFFE); - ccb.add_group(g); - ccb.add_outputCount(oc); - ccb.add_relu(activation & 0b01); - ccb.add_relu6(activation & 0b10); - auto common = ccb.Finish(); +}; - auto weights = fbb.CreateVector(wt); - auto biases = fbb.CreateVector(bias); - auto cb = Convolution2DBuilder(fbb); - cb.add_common(common); - cb.add_weight(weights); - cb.add_bias(biases); - auto conv = cb.Finish(); - auto name = fbb.CreateString("conv"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); +class ConvolutionTest : public ConvolutionCommonTest { +public: + virtual ~ConvolutionTest() = default; +protected: + static bool test(MNNForwardType type, const std::string& device_name) { + srand(TEST_RANDOM_SEED); + for (int b = 1; b <= 2; b++) { + for (int oc = 1; oc <= 8; oc *= 2) { + for (int ic = 1; ic <= 8; ic *= 2) { + for (int is = 1; is <= 8; is *= 2) { + for (int kw = 1; 
kw <= 3 && kw <= is; kw++) { + for (int kh = 1; kh <= 3 && kh <= is; kh++) { + for (int d = 1; d <= 2; d++) { + if (d > std::min(kw, kh) || d * (std::max(kw, kh) - 1) + 1 > is) + continue; + for (int s = 1; s <= 2; s++) { + for (int p = 0; p <= 1; p++) { + bool succ = ConvolutionCommonTest::test(type, device_name, "Conv2D", + b, ic, oc, is, is, PadMode_CAFFE, + p, p, kh, kw, s, d, 1); + if (!succ) { + return false; + } + } + } + } + } + } + } + } + } + } + return true; + } +}; - OpBuilder builder(fbb); - builder.add_type(depthwise ? OpType_ConvolutionDepthwise : OpType_Convolution); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Convolution2D); - builder.add_main(flatbuffers::Offset(conv.o)); - vec.push_back(builder.Finish()); +class ConvolutionTestOnCPU : public ConvolutionTest { +public: + ~ConvolutionTestOnCPU() = default; + virtual bool run() { + return ConvolutionTest::test(MNN_FORWARD_CPU, "CPU"); } +}; - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} +class ConvolutionTestOnOpencl : public ConvolutionTest { +public: + ~ConvolutionTestOnOpencl() = default; + virtual bool run() { + return ConvolutionTest::test(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +class DepthwiseConvolutionTest : public ConvolutionCommonTest { +public: + virtual ~DepthwiseConvolutionTest() = default; +protected: + static bool test(MNNForwardType type, const std::string& device_name) { + srand(TEST_RANDOM_SEED); + for (int b = 1; b <= 2; b++) { + for (int oc = 4; oc <= 8; oc *= 2) { + for (int ic = oc; ic <= oc; ic++) { + for (int is = 1; is <= 8; is *= 2) { + for (int kw = 1; kw <= 3 && kw <= is; kw++) { + for (int kh = 1; kh <= 3 && kh <= is; kh++) { + for (int d = 
1; d <= 2; d++) { + if (d > std::min(kw, kh) || d * (std::max(kw, kh) - 1) + 1 > is) + continue; + for (int s = 1; s <= 2; s++) { + for (int p = 0; p <= 1; p++) { + // depthwise <==> group == outputChannel + bool succ = ConvolutionCommonTest::test(type, device_name, "DepthwiseConv2D", + b, ic, oc, is, is, PadMode_CAFFE, + p, p, kh, kw, s, d, oc); + if (!succ) { + return false; + } + } + } + } + } + } + } + } + } + } + return true; + } +}; + +class DepthwiseConvolutionTestOnCPU : public DepthwiseConvolutionTest { +public: + ~DepthwiseConvolutionTestOnCPU() = default; + virtual bool run() { + return DepthwiseConvolutionTest::test(MNN_FORWARD_CPU, "CPU"); + } +}; + +class DepthwiseConvolutionTestOnOpencl : public DepthwiseConvolutionTest { +public: + ~DepthwiseConvolutionTestOnOpencl() = default; + virtual bool run() { + return DepthwiseConvolutionTest::test(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +class GroupConvolutionTest : public ConvolutionCommonTest { +public: + virtual ~GroupConvolutionTest() = default; +protected: + static bool test(MNNForwardType type, const std::string& device_name) { + srand(TEST_RANDOM_SEED); + for (int b = 1; b <= 2; b++) { + for (int g = 2; g <= 4; g *= 2) { + for (int oc = g * 4; oc <= 4 * g * 4; oc += g * 4) { + for (int ic = g * 4; ic <= 4 * g * 4; ic += g * 4) { + for (int is = 1; is <= 8; is *= 2) { + for (int kw = 1; kw <= 3 && kw <= is; kw++) { + for (int kh = 1; kh <= 3 && kh <= is; kh++) { + for (int d = 1; d <= 2; d++) { + if (d > std::min(kw, kh) || d * (std::max(kw, kh) - 1) + 1 > is) + continue; + for (int s = 1; s <= 2; s++) { + for (int p = 0; p <= 1; p++) { + bool succ = ConvolutionCommonTest::test(type, device_name, "GroupConv2D", + b, ic, oc, is, is, PadMode_CAFFE, + p, p, kh, kw, s, d, g); + if (!succ) { + return false; + } + } + } + } + } + } + } + } + } + } + } + return true; + } +}; + +class GroupConvolutionTestOnCPU : public GroupConvolutionTest { +public: + virtual ~GroupConvolutionTestOnCPU() = default; + 
virtual bool run() { + return GroupConvolutionTest::test(MNN_FORWARD_CPU, "CPU"); + } +}; + +class GroupConvolutionTestOnOpencl : public GroupConvolutionTest { +public: + virtual ~GroupConvolutionTestOnOpencl() = default; + virtual bool run() { + return GroupConvolutionTest::test(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +MNNTestSuiteRegister(ConvolutionTestOnCPU, "op/convolution/conv/cpu"); +MNNTestSuiteRegister(ConvolutionTestOnOpencl, "op/convolution/conv/opencl"); +MNNTestSuiteRegister(DepthwiseConvolutionTestOnCPU, "op/convolution/depthwise_conv/cpu"); +MNNTestSuiteRegister(DepthwiseConvolutionTestOnOpencl, "op/convolution/depthwise_conv/opencl"); +MNNTestSuiteRegister(GroupConvolutionTestOnCPU, "op/convolution/conv_group/cpu"); +MNNTestSuiteRegister(GroupConvolutionTestOnOpencl, "op/convolution/conv_group/opencl"); static Interpreter *create(int oc, // output channel int w, // input width @@ -226,86 +420,6 @@ static Tensor *infer(const Interpreter *net, Session *session) { return net->getSessionOutputAll(session).begin()->second; } -class ConvolutionTest : public MNNTestCase { -public: - virtual ~ConvolutionTest() = default; - virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int g = 1; g <= 1; g++) { // 1 - for (int o = 1; o <= 8; o *= 2) { - for (int c = 1; c <= 8; c *= 2) { - for (int is = 1; is <= 8; is *= 2) { - for (int kw = 1; kw <= 3 && kw <= is; kw++) { - for (int kh = 1; kh <= 3 && kh <= is; kh++) { - for (int d = 1; d <= 2; d++) { - if (d > std::min(kw, kh) || d * (std::max(kw, kh) - 1) + 1 > is) - continue; - - for (int s = 1; s <= 2; s++) { - for (int p = 0; p <= 1; p++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - std::vector wt, bias; - for (int i = 0; i < g * (o / g) * (c / g) * kw * kh; i++) { - wt.push_back(rand() % 255 / 255.f); - } - for (int i = 0; i < o; i++) { - bias.push_back(rand() % 255 / 255.f); - } - - // nets - auto net = - create(o, is, is, c, b, d, kw, kh, s, p, g, 
wt, bias, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = is; - input->buffer().dim[3].extent = is; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < is * is * c * b; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), - 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - } - } - } - } - } - } - return true; - } -}; - class QuantizedConvolutionTest : public MNNTestCase { public: virtual ~QuantizedConvolutionTest() = default; @@ -390,86 +504,6 @@ class QuantizedConvolutionTest : public MNNTestCase { } }; -class DepthwiseConvolutionTest : public MNNTestCase { -public: - virtual ~DepthwiseConvolutionTest() = default; - virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int o = 4; o <= 8; o *= 2) { - for (int g = o; g <= o; g++) { - for (int c = o; c <= o; c++) { - for (int is = 1; is <= 8; is *= 2) { - for (int kw = 1; kw <= 3 && kw <= is; kw++) { - for (int kh = 1; kh <= 3 && kh <= is; kh++) { - for (int d = 1; d <= 2; d++) { - if (d > std::min(kw, kh) || d * (std::max(kw, kh) - 1) + 1 > is) - continue; - - for (int s = 1; s <= 2; s++) { - for (int p = 0; p <= 1; p++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - std::vector wt, bias; - for (int i = 0; i < g * o / g * c / g * kw * kh; i++) { - wt.push_back(rand() % 255 / 
255.f); - } - for (int i = 0; i < o; i++) { - bias.push_back(rand() % 255 / 255.f); - } - - // nets - auto net = - create(o, is, is, c, b, d, kw, kh, s, p, g, wt, bias, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = is; - input->buffer().dim[3].extent = is; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < is * is * c * b; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - } - - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), - 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - } - } - } - } - } - } - return true; - } -}; - class QuantizedDepthwiseConvolutionTest : public MNNTestCase { public: virtual ~QuantizedDepthwiseConvolutionTest() = default; @@ -552,88 +586,5 @@ class QuantizedDepthwiseConvolutionTest : public MNNTestCase { } }; -class GroupConvolutionTest : public MNNTestCase { -public: - virtual ~GroupConvolutionTest() = default; - virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int g = 2; g <= 4; g *= 2) { - for (int o = g * 4; o <= 4 * g * 4; o += g * 4) { - for (int c = g * 4; c <= 4 * g * 4; c += g * 4) { - for (int is = 1; is <= 8; is *= 2) { - for (int kw = 1; kw <= 3 && kw <= is; kw++) { - for (int kh = 1; kh <= 3 && kh <= is; kh++) { - for (int d = 1; d <= 2; d++) { - if (d > std::min(kw, kh) || d * (std::max(kw, kh) - 1) + 1 > is) - continue; - - for (int s = 1; s <= 2; s++) { - for (int p = 0; p <= 1; 
p++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - std::vector wt, bias; - for (int i = 0; i < g * o / g * c / g * kw * kh; i++) { - wt.push_back(rand() % 255 / 255.f); - } - for (int i = 0; i < o; i++) { - bias.push_back(rand() % 255 / 255.f); - } - - // nets - auto net = - create(o, is, is, c, b, d, kw, kh, s, p, g, wt, bias, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = is; - input->buffer().dim[3].extent = is; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < is * is * c * b; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - } - - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), - 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - } - } - } - } - } - } - return true; - } -}; - -MNNTestSuiteRegister(ConvolutionTest, "op/convolution/conv"); MNNTestSuiteRegister(QuantizedConvolutionTest, "op/convolution/qnt_conv"); -MNNTestSuiteRegister(DepthwiseConvolutionTest, "op/convolution/depthwise_conv"); MNNTestSuiteRegister(QuantizedDepthwiseConvolutionTest, "op/convolution/qnt_depthwise_conv"); -MNNTestSuiteRegister(GroupConvolutionTest, "op/convolution/conv_group"); diff --git a/test/op/CropAndResizeTest.cpp b/test/op/CropAndResizeTest.cpp index 99498f54a..4dff8e9d9 100644 --- a/test/op/CropAndResizeTest.cpp +++ b/test/op/CropAndResizeTest.cpp @@ -6,12 +6,11 @@ // Copyright © 2018, 
Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/CropTest.cpp b/test/op/CropTest.cpp index ca6f69bda..b0551b190 100644 --- a/test/op/CropTest.cpp +++ b/test/op/CropTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/DeconvolutionTest.cpp b/test/op/DeconvolutionTest.cpp index 1f925d7e9..73e971076 100644 --- a/test/op/DeconvolutionTest.cpp +++ b/test/op/DeconvolutionTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/DequantizeTest.cpp b/test/op/DequantizeTest.cpp index 767e7cd0a..eb031e6de 100644 --- a/test/op/DequantizeTest.cpp +++ b/test/op/DequantizeTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" -#include "Session.hpp" +#include "core/Session.hpp" #include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/Dilation2DTest.cpp b/test/op/Dilation2DTest.cpp new file mode 100644 index 000000000..c5c98cc76 --- /dev/null +++ b/test/op/Dilation2DTest.cpp @@ -0,0 +1,143 @@ +// +// Dilation2DTest.cpp +// MNNTests 
+// +// Created by MNN on 2019/12/02. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN; +using namespace MNN::Express; + +static VARP _Dilation2D(VARP input, const std::vector& filterData, int depth, INTS kernels, INTS strides, INTS dilations, + PadMode mode) { + std::unique_ptr common(new Convolution2DCommonT); + common->dilateX = dilations[0]; + common->dilateY = dilations[1]; + common->strideX = strides[0]; + common->strideY = strides[1]; + common->kernelX = kernels[0]; + common->kernelY = kernels[1]; + common->outputCount = depth; + common->padMode = mode; + + std::unique_ptr conv(new Convolution2DT); + conv->weight = filterData; + conv->common.reset(common.release()); + + std::unique_ptr dilation2d(new OpT); + dilation2d->type = OpType_Dilation2D; + dilation2d->main.type = OpParameter_Convolution2D; + dilation2d->main.value = conv.release(); + + return Variable::create(Expr::create(std::move(dilation2d), {input})); +} + +class Dilation2DTest : public MNNTestCase { +public: + virtual ~Dilation2DTest() = default; +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + const int batch = 1, hInput = 8, wInput = 8, depth = 2; + const int kernel = 3, stride = 2, dilation = 2; + const int hOutput = 4, wOutput = 4; + PadMode mode = PadMode_SAME; + + const std::vector inputData = { + // depth 0 + 0.2442, 0.1112, 0.0019, 0.7956, 0.5193, 0.9469, 0.0734, 0.868 , + 0.938 , 0.0044, 0.7733, 0.9659, 0.7684, 0.4761, 0.0532, 0.1346, + 0.5177, 0.8166, 0.3568, 0.0978, 0.031 , 0.4323, 0.6097, 0.4109, + 0.1825, 0.0286, 0.7931, 0.0961, 0.3956, 0.5455, 0.9764, 0.0147, + 0.7976, 0.6743, 0.4025, 0.0064, 0.9415, 0.0588, 0.3576, 0.1793, + 0.9301, 0.1929, 0.2146, 0.75 , 0.5445, 
0.1015, 0.9295, 0.8509, + 0.7781, 0.4447, 0.7502, 0.938 , 0.0157, 0.2437, 0.0735, 0.1505, + 0.2763, 0.5841, 0.6464, 0.0575, 0.2624, 0.3593, 0.5915, 0.5977, + // depth 1 + 0.2905, 0.765 , 0.6162, 0.7862, 0.6024, 0.4286, 0.6094, 0.1839, + 0.2038, 0.9094, 0.1573, 0.8314, 0.8618, 0.1735, 0.9426, 0.2599, + 0.3691, 0.0707, 0.4993, 0.9102, 0.6286, 0.3101, 0.0336, 0.0018, + 0.4176, 0.9939, 0.5555, 0.8251, 0.6085, 0.0912, 0.002 , 0.1107, + 0.4421, 0.0648, 0.298 , 0.3073, 0.1005, 0.0732, 0.6128, 0.5606, + 0.5251, 0.004 , 0.0443, 0.9015, 0.641 , 0.2778, 0.3342, 0.5899, + 0.3267, 0.8305, 0.4335, 0.5785, 0.7227, 0.9369, 0.1777, 0.8986, + 0.5972, 0.3452, 0.7728, 0.331 , 0.5725, 0.7188, 0.1314, 0.8734 + }; + const std::vector filterData = { + // depth 0 + 0.0707, 0.8473, 0.2599, + 0.111 , 0.0394, 0.8792, + 0.3143, 0.5409, 0.527 , + // depth 1 + 0.124 , 0.4437, 0.5337, + 0.057 , 0.8509, 0.312 , + 0.2286, 0.419 , 0.0331 + }; + const std::vector outputData = { + // depth 0 + 1.8451, 1.3553, 1.0864, 0.8598, + 1.277 , 1.8132, 1.3779, 1.3918, + 1.6292, 0.9807, 1.7301, 1.1386, + 1.0402, 1.5973, 1.4769, 1.6982, + // depth 1 + 1.7603, 1.6823, 1.0537, 1.1108, + 1.8448, 1.676 , 1.1301, 1.0089, + 1.4376, 1.7524, 1.1378, 1.4408, + 1.4352, 1.3452, 1.5697, 1.7243 + }; + + auto input = _Input({batch, depth, hInput, wInput}, NCHW, halide_type_of()); + auto output = _Dilation2D(_Convert(input, NC4HW4), filterData, depth, + {kernel, kernel}, {stride, stride}, {dilation, dilation}, mode); + output = _Convert(output, NCHW); + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({output}); + } + + const std::vector outDim = {batch, depth, hOutput, wOutput}; + if (!checkVector(output->getInfo()->dim.data(), outDim.data(), 4, 0)) { + MNN_ERROR("Dilation2D(%s) shape test failed!\n", 
deviceName.c_str()); + return false; + } + + ::memcpy(input->writeMap(), inputData.data(), inputData.size() * sizeof(float)); + if(!checkVectorByRelativeError(output->readMap(), outputData.data(), outputData.size(), 0.005)) { + MNN_ERROR("Dilation2D(%s) test failed!\n", deviceName.c_str()); + return false; + } + + return true; + } +}; + +class Dilation2DTestOnCPU : public Dilation2DTest { +public: + virtual ~Dilation2DTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; + +MNNTestSuiteRegister(Dilation2DTestOnCPU, "op/Dilation2D/cpu"); diff --git a/test/op/EltwiseTest.cpp b/test/op/EltwiseTest.cpp index 9f631a61d..584ff1145 100644 --- a/test/op/EltwiseTest.cpp +++ b/test/op/EltwiseTest.cpp @@ -6,12 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/EluTest.cpp b/test/op/EluTest.cpp new file mode 100644 index 000000000..6cb06539a --- /dev/null +++ b/test/op/EluTest.cpp @@ -0,0 +1,36 @@ +// +// EluTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/13. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class EluTest : public MNNTestCase { +public: + virtual ~EluTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Elu(input,2.0); + const std::vector expectedOutput = {-1.26, -1.73, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("EluTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(EluTest, "op/elu"); diff --git a/test/op/FillTest.cpp b/test/op/FillTest.cpp index 5e193af32..3145ec0ad 100644 --- a/test/op/FillTest.cpp +++ b/test/op/FillTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/GatherTest.cpp b/test/op/GatherTest.cpp index 38ce17fc8..e147aed7b 100644 --- a/test/op/GatherTest.cpp +++ b/test/op/GatherTest.cpp @@ -5,197 +5,36 @@ // Created by MNN on 2019/01/15. 
// Copyright © 2018, Alibaba Group Holding Limited // - -#include "Interpreter.hpp" +#include +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(int o, int s, int b, int c, int h, int w) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(std::vector({o, s})); - InputBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dtype(DataType_DT_FLOAT); - ib.add_dformat(MNN_DATA_FORMAT_NHWC); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto dims = fbb.CreateVector(std::vector({b, h, w, c})); - InputBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dtype(DataType_DT_INT32); - ib.add_dformat(MNN_DATA_FORMAT_NHWC); - auto input = ib.Finish(); - auto name = fbb.CreateString("indices"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto gather = GatherBuilder(fbb); - gather.add_Tindices(DataType_DT_INT32); - gather.add_Tparams(DataType_DT_FLOAT); - auto name = fbb.CreateString("gather"); - auto iv = fbb.CreateVector(std::vector({0, 1})); - auto ov = fbb.CreateVector(std::vector({2})); - - OpBuilder 
builder(fbb); - builder.add_type(OpType_Gather); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Gather); - builder.add_main(flatbuffers::Offset(gather.Finish().o)); - vec.push_back(builder.Finish()); - } - - BlobBuilder fb(fbb), qb(fbb); - fb.add_dataType(DataType_DT_INT32); - fb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - qb.add_dataType(DataType_DT_INT32); - qb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto flt = fb.Finish(), qnt = qb.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(flt.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(qnt.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(2); - tdb.add_blob(flatbuffers::Offset(flt.o)); - desc.push_back(tdb.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "indices", "output"}); - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class GatherTest : public MNNTestCase { +using namespace MNN::Express; +class GatherNDTest : public MNNTestCase { public: - virtual ~GatherTest() = default; + virtual ~GatherNDTest() = default; virtual bool run() { - for (int o = 1; o <= 4; o *= 2) { - for (int s = 1; s <= 4; s *= 2) { - for (int b = 1; b <= 2; b *= 2) { - for (int h = 1; h <= 4; h *= 2) { - for (int w = 1; w <= 4; w *= 2) { - for (int c = 1; c <= 4; c *= 2) { - 
dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(o, s, b, c, h, w); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input = new Tensor(2, Tensor::TENSORFLOW); - { - input->setType(DataType_DT_FLOAT); - input->buffer().dim[0].extent = o; - input->buffer().dim[1].extent = s; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < o * s; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, "input"); - auto device = net->getSessionInput(GPU, "input"); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // indices - auto indices = new Tensor(4, Tensor::TENSORFLOW); - { - indices->setType(DataType_DT_INT32); - indices->buffer().dim[0].extent = b; - indices->buffer().dim[1].extent = h; - indices->buffer().dim[2].extent = w; - indices->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(indices); - indices->buffer().host = (uint8_t *)malloc(indices->size()); - for (int i = 0; i < b * c * h * w; i++) { - indices->host()[i] = rand() % o; - } - auto host = net->getSessionInput(CPU, "indices"); - auto device = net->getSessionInput(GPU, "indices"); - net->getBackend(CPU, host)->onCopyBuffer(indices, host); - net->getBackend(GPU, device)->onCopyBuffer(indices, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - free(indices->buffer().host); - delete input; - delete indices; - delete net; - }); - } - } - } - } - } + auto params = _Input({2,2}, NCHW); + params->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = params->writeMap(); + memcpy(inputPtr, 
inpudata, 4 * sizeof(float)); + params->unMap(); + const int indices_data[] = {0, 0, 1, 1}; + auto indices = _Const(indices_data, {2, 2}, NCHW, halide_type_of()); + auto output = _GatherND(params, indices); + const std::vector expectedOutput = {-1.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 2, 0.01)) { + MNN_ERROR("GatherNDTest test failed!\n"); + return false; } return true; } }; -MNNTestSuiteRegister(GatherTest, "op/gather"); +MNNTestSuiteRegister(GatherNDTest, "op/gather_nd"); diff --git a/test/op/GatherV2Test.cpp b/test/op/GatherV2Test.cpp index 421633c0f..fc5d80282 100644 --- a/test/op/GatherV2Test.cpp +++ b/test/op/GatherV2Test.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/InterpTest.cpp b/test/op/InterpTest.cpp index b166121eb..6b1555251 100644 --- a/test/op/InterpTest.cpp +++ b/test/op/InterpTest.cpp @@ -6,12 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/LRNTest.cpp b/test/op/LRNTest.cpp index a72982506..fa9c4993e 100644 --- a/test/op/LRNTest.cpp +++ b/test/op/LRNTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git 
a/test/op/LSTMTest.cpp b/test/op/LSTMTest.cpp index c998c1d5a..ca7193e5e 100644 --- a/test/op/LSTMTest.cpp +++ b/test/op/LSTMTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "Tensor_generated.h" #include "TestUtils.h" diff --git a/test/op/MatMulTest.cpp b/test/op/MatMulTest.cpp index e3651039f..bdee1a763 100644 --- a/test/op/MatMulTest.cpp +++ b/test/op/MatMulTest.cpp @@ -5,179 +5,123 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include "Interpreter.hpp" +#include +#include +#include +#include +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(int iw0, int ih0, int iw1, int ih1, int ow, int oh) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(std::vector({iw0, ih0})); - InputBuilder ib(fbb); - auto input = ib.Finish(); - ib.add_dims(dims); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - auto name = fbb.CreateString("input0"); - builder.add_name(name); - auto iv = fbb.CreateVector(std::vector({0})); - builder.add_inputIndexes(iv); - auto ov = fbb.CreateVector(std::vector({0})); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto dims = fbb.CreateVector(std::vector({iw1, ih1})); - InputBuilder ib(fbb); - auto input = ib.Finish(); - ib.add_dims(dims); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - auto name = fbb.CreateString("input1"); - builder.add_name(name); - auto iv = 
fbb.CreateVector(std::vector({1})); - builder.add_inputIndexes(iv); - auto ov = fbb.CreateVector(std::vector({1})); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - OpBuilder builder(fbb); - builder.add_type(OpType_MatMul); - auto name = fbb.CreateString("matMul"); - builder.add_name(name); - auto iv = fbb.CreateVector(std::vector({0, 1})); - builder.add_inputIndexes(iv); - auto ov = fbb.CreateVector(std::vector({2})); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_MatMul); - builder.add_main(flatbuffers::Offset(MatMulBuilder(fbb).Finish().o)); - vec.push_back(builder.Finish()); - } - - BlobBuilder builder(fbb); - builder.add_dataType(DataType_DT_FLOAT); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = builder.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); +#define TEST_RANDOM_SEED 100 + +using std::vector; +// C = A * B +static void reference_matmul(const vector& matrix_a, const vector& matrix_b, vector& matrix_c, int width_a, int width_b, bool tranpose_a, bool tranpose_b) { + int height_c = matrix_a.size() / width_a, width_c = width_b, length = width_a; + int stride_a_h = width_a, stride_a_w = 1, stride_b_h = width_b, stride_b_w = 1; + if (tranpose_a) { + length = matrix_a.size() / width_a; + stride_a_w = height_c = width_a; + stride_a_h = 1; } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); + if (tranpose_b) { + width_c = matrix_b.size() / width_b; + length = stride_b_w = width_b; + stride_b_h = 1; } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(2); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); + matrix_c.resize(height_c * width_c); + for (int h = 0; h < height_c; 
++h) { + for (int w = 0; w < width_c; ++w) { + float result = 0; + for (int i = 0; i < length; ++i) { + result += matrix_a[h * stride_a_h + i * stride_a_w] * matrix_b[i * stride_b_h + w * stride_b_w]; + } + matrix_c[h * width_c + w] = result; + } } - - NetBuilder net(fbb); - auto ops = fbb.CreateVector(vec); - net.add_oplists(ops); - auto names = fbb.CreateVectorOfStrings({"input0", "input1", "output"}); - net.add_tensorName(names); - auto extras = fbb.CreateVector(desc); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); } -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} +using namespace MNN::Express; +class MatMulCommonTest : public MNNTestCase { +public: + virtual ~MatMulCommonTest() = default; +protected: + static bool test(MNNForwardType type, const std::string& device_name, const std::string& test_op_name, + int height_a, int width_a, int height_b, int width_b, bool tranpose_a, bool tranpose_b) { + auto input_a = _Input({height_a, width_a}, NCHW); + auto input_b = _Input({height_b, width_b}, NCHW); + auto output = _MatMul(input_a, input_b, tranpose_a, tranpose_b); + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", device_name.c_str()); + return false; + } + optimizer->onExecute({output}); + } + vector data_a, data_b, data_c; + for (int i = 0; i < height_a * width_a; ++i) { + data_a.push_back(rand() % 255 / 255.f); + } + for (int i = 0; i < height_b * width_b; ++i) { + data_b.push_back(rand() % 255 / 255.f); + } + reference_matmul(data_a, data_b, data_c, width_a, width_b, tranpose_a, tranpose_b); + ::memcpy(input_a->writeMap(), data_a.data(), 
data_a.size() * sizeof(float)); + ::memcpy(input_b->writeMap(), data_b.data(), data_b.size() * sizeof(float)); + if (!checkVectorByRelativeError(output->readMap(), data_c.data(), data_c.size(), 0.005)) { + MNN_ERROR("%s(%s) test failed!\n", test_op_name.c_str(), device_name.c_str()); + return false; + } + return true; + } +}; -class MatMulTest : public MNNTestCase { +class MatMulTest : public MatMulCommonTest { public: virtual ~MatMulTest() = default; - virtual bool run() { - for (int iw0 = 1; iw0 < 2; iw0++) { - for (int ih0 = 10; ih0 < 20; ih0++) { - int iw1 = ih0; - for (int ih1 = 10; ih1 < 20; ih1++) { - int ow = iw0; - int oh = ih1; - - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - auto net = create(iw0, ih0, iw1, ih1, ow, oh); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; +protected: + static bool test(MNNForwardType type, const std::string& device_name) { + srand(TEST_RANDOM_SEED); + for (int height_c = 1; height_c <= 20; ++height_c) { + for (int width_c = 1; width_c <= 20; ++width_c) { + for (int length = 1; length <= 20; ++length) { + int height_a = height_c, height_b = length, width_a = length, width_b = width_c; + for (int tranpose_a = 0; tranpose_a <= 1; ++tranpose_a) { + int height_a = height_c, width_a = length; + if (tranpose_a == 1) { + std::swap(height_a, width_a); } - - // input/output - auto input0 = new Tensor(2, Tensor::TENSORFLOW); - { - input0->buffer().dim[0].extent = iw0; - input0->buffer().dim[1].extent = ih0; - TensorUtils::setLinearLayout(input0); - input0->buffer().host = (uint8_t *)malloc(input0->size()); - for (int i = 0; i < iw0 * ih0; i++) { - input0->host()[i] = rand() % 255 / 255.f; + for (int tranpose_b = 0; tranpose_b <= 1; ++tranpose_b) { + int height_b = length, width_b = width_c; + if (tranpose_b == 1) { + std::swap(height_b, width_b); } - auto host = net->getSessionInput(CPU, "input0"); - 
auto device = net->getSessionInput(GPU, "input0"); - net->getBackend(CPU, host)->onCopyBuffer(input0, host); - net->getBackend(GPU, device)->onCopyBuffer(input0, device); - } - - auto input1 = new Tensor(2, Tensor::TENSORFLOW); - { - input1->buffer().dim[0].extent = iw1; - input1->buffer().dim[1].extent = ih1; - TensorUtils::setLinearLayout(input1); - input1->buffer().host = (uint8_t *)malloc(input1->size()); - for (int i = 0; i < iw1 * ih1; i++) { - input1->host()[i] = rand() % 255 / 255.f; + bool succ = MatMulCommonTest::test(type, device_name, "MatMul", height_a, width_a, height_b, width_b, tranpose_a != 0, tranpose_b != 0); + if (!succ) { + return false; } - auto host = net->getSessionInput(CPU, "input1"); - auto device = net->getSessionInput(GPU, "input1"); - net->getBackend(CPU, host)->onCopyBuffer(input1, host); - net->getBackend(GPU, device)->onCopyBuffer(input1, device); } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input0->buffer().host); - free(input1->buffer().host); - delete input0; - delete input1; - delete net; - }); + } } } } return true; } }; -MNNTestSuiteRegister(MatMulTest, "op/matmul"); + +class MatMulTestOnCPU : public MatMulTest { +public: + virtual ~MatMulTestOnCPU() = default; + virtual bool run() { + return MatMulTest::test(MNN_FORWARD_CPU, "CPU"); + } +}; + +MNNTestSuiteRegister(MatMulTestOnCPU, "op/matmul/cpu"); diff --git a/test/op/MatrixBandPart.cpp b/test/op/MatrixBandPart.cpp new file mode 100644 index 000000000..60e55aba7 --- /dev/null +++ b/test/op/MatrixBandPart.cpp @@ -0,0 +1,47 @@ +// +// MatrixBandPart.cpp +// MNNTests +// +// Created by MNN on 2019/12/17. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +using namespace MNN::Express; +class MatrixBandPartTest : public MNNTestCase { +public: + virtual ~MatrixBandPartTest() = default; + virtual bool run() { + auto input = _Input({4,4}, NHWC); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {0.0, 1.0, 2.0, 3.0, + -1.0, 0.0, 1.0, 2.0, + -2.0, -1.0, 0.0, 1.0, + -3.0, -2.0, -1.0, 0.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 16 * sizeof(float)); + input->unMap(); + int lower_data = 1; + int higher_data = -1; + auto lower = _Const(&lower_data, {}, NCHW, halide_type_of()); + auto higher = _Const(&higher_data, {}, NCHW, halide_type_of()); + auto output = _MatrixBandPart(input, lower, higher); + const std::vector expectedOutput = {0.0, 1.0, 2.0, 3.0, + -1.0, 0.0, 1.0, 2.0, + 0.0, -1.0, 0.0, 1.0, + 0.0, 0.0, -1.0, 0.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 16, 0.01)) { + MNN_ERROR("MatrixBandPartTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(MatrixBandPartTest, "op/matrixbandpart"); + diff --git a/test/op/MomentsTest.cpp b/test/op/MomentsTest.cpp new file mode 100644 index 000000000..5ff0afbc7 --- /dev/null +++ b/test/op/MomentsTest.cpp @@ -0,0 +1,47 @@ +// +// MomentsTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/17. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class MomentsTest : public MNNTestCase { +public: + virtual ~MomentsTest() = default; + virtual bool run() { + auto input = _Input({1,4,4,1}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[16] = {0.0, 1.0, 2.0, 3.0, + -1.0, 0.0, 1.0, 2.0, + -2.0, -1.0, 0.0, 1.0, + -3.0, -2.0, -1.0, 0.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 16 * sizeof(float)); + input->unMap(); + input = _Convert(input, NC4HW4); + auto notused_var = _Const(1.0); + auto outputs = _Moments(input, {2,3}, notused_var, true); + const std::vector expectedMean = {1.5, 0.5, -0.5, -1.5}; + const std::vector expectedVar = {1.25, 1.25, 1.25, 1.25}; + auto gotOutputMean = outputs[0]->readMap(); + auto gotOutputVar = outputs[1]->readMap(); + if (!checkVector(gotOutputMean, expectedMean.data(), 4, 0.01)) { + MNN_ERROR("MomentsTest test failed!\n"); + return false; + } + if (!checkVector(gotOutputVar, expectedVar.data(), 4, 0.01)) { + MNN_ERROR("MomentsTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(MomentsTest, "op/moments"); diff --git a/test/op/MultiConvolutionTest.cpp b/test/op/MultiConvolutionTest.cpp new file mode 100644 index 000000000..6bb668283 --- /dev/null +++ b/test/op/MultiConvolutionTest.cpp @@ -0,0 +1,118 @@ +// +// MultiConvolutionTest.cpp +// MNNTests +// +// Created by MNN on 2019/10/24. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +#include +#include "MNN_generated.h" +#include +#include +#include + +using namespace MNN::Express; + +class MultiConvolutionTest : public MNNTestCase { +public: + virtual ~MultiConvolutionTest() = default; +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + const int inputHeight = 5, inputWidth = 5, inputChannel = 2, outputChannel = 1; + const int kernelSize = 3, stride = 2, pad = 1, batch = 1; + const int height = (inputHeight + 2 * pad - kernelSize) / stride + 1; // height = 3 + const int width = (inputWidth + 2 * pad - kernelSize) / stride + 1; // width = 3 + const std::vector inputData = { + // channel 0 + 0.6345, 0.1219, 0.0424, 0.0501, 0.3934, + 0.4311, 0.5961, 0.6642, 0.734 , 0.062 , + 0.88 , 0.503 , 0.1638, 0.6367, 0.2151, + 0.0795, 0.7693, 0.134 , 0.4963, 0.7571, + 0.5428, 0.3663, 0.2823, 0.7478, 0.579 , + // channel 1 + 0.6917, 0.4047, 0.9673, 0.9111, 0.608 , + 0.4621, 0.6567, 0.3192, 0.726 , 0.9066, + 0.885 , 0.3491, 0.7938, 0.2593, 0.3146, + 0.6901, 0.2126, 0.649 , 0.7919, 0.9838, + 0.0672, 0.0357, 0.383 , 0.5043, 0.2803 + }; + const std::vector filterData = { + // outputChannel = 0, inputChannel = 0 + 0.5567, 0.4559, 0.0203, + 0.9659, 0.2679, 0.4117, + 0.9696, 0.4567, 0.3787, + // outputChannel = 0, inputChannel = 1 + 0.3354, 0.2056, 0.0342, + 0.023 , 0.4683, 0.9966, + 0.6097, 0.0873, 0.7917 + }; + const std::vector biasData = {1.0}; + const std::vector outputData = { + 2.930293, 4.682340, 2.721255, + 3.087505, 5.198602, 4.088373, + 1.564287, 3.151330, 3.109602 + }; + + auto input = _Input({batch, inputChannel, inputHeight, inputWidth}, NCHW, halide_type_of()); + auto filter = _Input({outputChannel, inputChannel, kernelSize, kernelSize}, 
NCHW, halide_type_of()); + auto bias = _Input({outputChannel}, NCHW, halide_type_of()); + auto output = _Conv(filter, bias, _Convert(input, NC4HW4), CAFFE, {stride, stride}, {1, 1}, 1, {pad, pad}); + output = _Convert(output, NCHW); + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({output}); + } + + const std::vector outDim = {batch, outputChannel, height, width}; + if (!checkVector(output->getInfo()->dim.data(), outDim.data(), 4, 0)) { + MNN_ERROR("MultiConvolution(%s) shape test failed!\n", deviceName.c_str()); + return false; + } + + ::memcpy(input->writeMap(), inputData.data(), inputData.size() * sizeof(float)); + ::memcpy(filter->writeMap(), filterData.data(), filterData.size() * sizeof(float)); + ::memcpy(bias->writeMap(), biasData.data(), biasData.size() * sizeof(float)); + if(!checkVectorByRelativeError(output->readMap(), outputData.data(), outputData.size(), 0.001)) { + MNN_ERROR("MultiConvolution(%s) test failed!\n", deviceName.c_str()); + return false; + } + return true; + } +}; + +class MultiConvolutionTestOnCPU : public MultiConvolutionTest { +public: + virtual ~MultiConvolutionTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; + +class MultiConvolutionTestOnOpencl : public MultiConvolutionTest { +public: + virtual ~MultiConvolutionTestOnOpencl() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +MNNTestSuiteRegister(MultiConvolutionTestOnCPU, "op/MultiConv/cpu"); +MNNTestSuiteRegister(MultiConvolutionTestOnOpencl, "op/MultiConv/opencl"); diff --git a/test/op/MultiDeconvolutionTest.cpp b/test/op/MultiDeconvolutionTest.cpp new file mode 100644 index 000000000..7656b304d --- /dev/null +++ b/test/op/MultiDeconvolutionTest.cpp @@ -0,0 
+1,144 @@ +// +// MultiDeconvolutionTest.cpp +// MNNTests +// +// Created by MNN on 2019/10/24. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +#include +#include "MNN_generated.h" +#include +#include +#include + +using namespace MNN::Express; + +class MultiDeconvolutionTest : public MNNTestCase { +public: + virtual ~MultiDeconvolutionTest() = default; +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + const int inputHeight = 3, inputWidth = 3, inputChannel = 3, outputChannel = 2; + const int kernelSize = 3, stride = 2, pad = 1, batch = 1; + const int height = (inputHeight - 1) * stride + kernelSize - pad * 2; // height = 5 + const int width = (inputWidth - 1) * stride + kernelSize - pad * 2; // width = 5 + const std::vector inputData = { + // channel 0 + 0.0500, 0.2283, 0.9916, + 0.5502, 0.2731, 0.0964, + 0.5169, 0.3492, 0.0057, + // channel 1 + 0.5207, 0.2388, 0.2215, + 0.7307, 0.4999, 0.7638, + 0.3025, 0.7966, 0.7117, + // channel 2 + 0.3264, 0.1317, 0.9161, + 0.8626, 0.9634, 0.1032, + 0.4114, 0.7719, 0.1408 + }; + const std::vector filterData = { + // outputChannel = 0, inputChannel = 0 + 0.7648, 0.83 , 0.3509, + 0.8953, 0.7895, 0.4066, + 0.5893, 0.9506, 0.4081, + // outputChannel = 1, inputChannel = 0 + 0.1982, 0.2179, 0.2756, + 0.5602, 0.2062, 0.8441, + 0.6934, 0.5666, 0.765 , + // outputChannel = 0, inputChannel = 1 + 0.0375, 0.2276, 0.6908, + 0.2677, 0.2822, 0.9121, + 0.0821, 0.1406, 0.1126, + // outputChannel = 1, inputChannel = 1 + 0.3432, 0.4277, 0.6015, + 0.0909, 0.957 , 0.3732, + 0.4586, 0.2034, 0.5555, + // outputChannel = 0, inputChannel = 2 + 0.8036, 0.8453, 0.226 , + 0.6534, 0.7527, 0.9455, + 0.0295, 0.1798, 0.4561, + // outputChannel = 1, inputChannel = 2 + 0.3859, 0.1691, 0.7373, 
+ 0.246 , 0.7928, 0.4552, + 0.8937, 0.4109, 0.3926 + }; + const std::vector biasData = {1.0, 0.0}; + const std::vector outputData = { + // channel 0 + 1.432098, 2.158248, 1.346763, 2.980813, 2.534924, + 2.531556, 3.280517, 2.429089, 2.653877, 2.479560, + 2.289865, 3.713586, 2.081835, 2.836103, 1.369331, + 2.626485, 3.331208, 2.626743, 2.721178, 1.503316, + 1.803119, 2.905308, 2.081503, 2.886019, 1.311322, + // channel 1 + 0.767390, 0.567106, 0.380019, 1.142767, 1.142727, + 0.846633, 2.665777, 0.668269, 3.374221, 1.348453, + 1.496601, 1.565205, 1.298501, 1.004446, 0.832651, + 1.126390, 3.713293, 1.199604, 2.818435, 0.581827, + 0.722235, 1.194398, 1.446314, 1.045943, 0.793899 + + }; + + auto input = _Input({batch, inputChannel, inputHeight, inputWidth}, NCHW, halide_type_of()); + auto filter = _Input({inputChannel, outputChannel, kernelSize, kernelSize}, NCHW, halide_type_of()); + auto bias = _Input({outputChannel}, NCHW, halide_type_of()); + auto output = _Deconv(filter, bias, _Convert(input, NC4HW4), CAFFE, {stride, stride}, {1, 1}, 1, {pad, pad}); + output = _Convert(output, NCHW); + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({output}); + } + + const std::vector outDim = {batch, outputChannel, height, width}; + if (!checkVector(output->getInfo()->dim.data(), outDim.data(), 4, 0)) { + MNN_ERROR("MultiDeconvolution(%s) shape test failed!\n", deviceName.c_str()); + return false; + } + + ::memcpy(input->writeMap(), inputData.data(), inputData.size() * sizeof(float)); + ::memcpy(filter->writeMap(), filterData.data(), filterData.size() * sizeof(float)); + ::memcpy(bias->writeMap(), biasData.data(), biasData.size() * sizeof(float)); + if(!checkVectorByRelativeError(output->readMap(), outputData.data(), outputData.size(), 0.005)) { + 
MNN_ERROR("MultiDeconvolution(%s) test failed!\n", deviceName.c_str()); + return false; + } + return true; + } +}; + +class MultiDeconvolutionTestOnCPU : public MultiDeconvolutionTest { +public: + virtual ~MultiDeconvolutionTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; + +class MultiDeconvolutionTestOnOpencl : public MultiDeconvolutionTest { +public: + virtual ~MultiDeconvolutionTestOnOpencl() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +MNNTestSuiteRegister(MultiDeconvolutionTestOnCPU, "op/MultiDeconv/cpu"); +MNNTestSuiteRegister(MultiDeconvolutionTestOnOpencl, "op/MultiDeconv/opencl"); diff --git a/test/op/NormalizeTest.cpp b/test/op/NormalizeTest.cpp index d235f57ac..6d3e15c4c 100644 --- a/test/op/NormalizeTest.cpp +++ b/test/op/NormalizeTest.cpp @@ -6,12 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/OneHotTest.cpp b/test/op/OneHotTest.cpp new file mode 100644 index 000000000..4841add56 --- /dev/null +++ b/test/op/OneHotTest.cpp @@ -0,0 +1,94 @@ +// +// OneHotTest.cpp +// MNN +// +// Created by MNN on 2019/11/29. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; + +class OneHotTest : public MNNTestCase { + virtual ~OneHotTest() = default; + + virtual bool run() { + { + const int indicesData[] = {0, 1, 2}; + const int depthData[] = {3}; + const float onValueData[] = {1.0}; + const float offValueData[] = {0.0}; + const int axis = -1; + + const float expectedValue[] = { + 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, + }; + + auto indices = _Const(indicesData, {3}, NHWC, halide_type_of()); + auto depth = _Const(depthData, {1}, NHWC, halide_type_of()); + auto onValue = _Const(onValueData, {1}, NHWC, halide_type_of()); + auto offValue = _Const(offValueData, {1}, NHWC, halide_type_of()); + + auto result = _OneHot(indices, depth, onValue, offValue, axis); + auto resultdata = result->readMap(); + const int size = result->getInfo()->size; + if (!checkVector(resultdata, expectedValue, size, 0.0)) { + return false; + } + } + + { + const int indicesData[] = {0, 2, -1, 1}; + const int depthData[] = {3}; + const float onValueData[] = {5.0}; + const float offValueData[] = {0.0}; + const int axis = -1; + + const float expectedValue[] = { + 5.0, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, + }; + + auto indices = _Const(indicesData, {4}, NHWC, halide_type_of()); + auto depth = _Const(depthData, {1}, NHWC, halide_type_of()); + auto onValue = _Const(onValueData, {1}, NHWC, halide_type_of()); + auto offValue = _Const(offValueData, {1}, NHWC, halide_type_of()); + + auto result = _OneHot(indices, depth, onValue, offValue, axis); + auto resultdata = result->readMap(); + const int size = result->getInfo()->size; + if (!checkVector(resultdata, expectedValue, size, 0.0)) { + return false; + } + } + + { + const int indicesData[] = {0, 2, 1, -1}; + const int depthData[] = {3}; + const float onValueData[] = {1.0}; + const float offValueData[] = {0.0}; + const int axis = -1; + + const 
float expectedValue[] = {1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0}; + + auto indices = _Const(indicesData, {2, 2}, NHWC, halide_type_of()); + auto depth = _Const(depthData, {1}, NHWC, halide_type_of()); + auto onValue = _Const(onValueData, {1}, NHWC, halide_type_of()); + auto offValue = _Const(offValueData, {1}, NHWC, halide_type_of()); + + auto result = _OneHot(indices, depth, onValue, offValue, axis); + auto resultdata = result->readMap(); + const int size = result->getInfo()->size; + if (!checkVector(resultdata, expectedValue, size, 0.0)) { + return false; + } + } + + return true; + } +}; + +MNNTestSuiteRegister(OneHotTest, "op/OneHotTest"); diff --git a/test/op/PReLUTest.cpp b/test/op/PReLUTest.cpp index 5e1bdd732..aa0bb19e7 100644 --- a/test/op/PReLUTest.cpp +++ b/test/op/PReLUTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/PackTest.cpp b/test/op/PackTest.cpp deleted file mode 100644 index fd1311975..000000000 --- a/test/op/PackTest.cpp +++ /dev/null @@ -1,240 +0,0 @@ -// -// PackTest.cpp -// MNNTests -// -// Created by MNN on 2019/01/15. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "Interpreter.hpp" -#include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" -#include "TestUtils.h" - -using namespace MNN; - -static Interpreter *create(DataType type, int axis, int n, std::vector shape) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - std::vector> ns; - for (int i = 0; i < n; i++) { - auto dims = fbb.CreateVector(shape); - InputBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dtype(type); - auto input = ib.Finish(); - auto name = fbb.CreateString(std::to_string(i)); - auto iv = fbb.CreateVector(std::vector({i})); - auto ov = fbb.CreateVector(std::vector({i})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - ns.push_back(name); - } - { - auto ppb = PackParamBuilder(fbb); - ppb.add_dataType(type); - ppb.add_axis(axis); - auto pack = ppb.Finish(); - auto name = fbb.CreateString("pack"); - std::vector ips; - for (int i = 0; i < n; i++) { - ips.push_back(i); - } - auto iv = fbb.CreateVector(ips); - auto ov = fbb.CreateVector(std::vector({n})); - OpBuilder builder(fbb); - builder.add_type(OpType_Pack); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_PackParam); - builder.add_main(flatbuffers::Offset(pack.o)); - vec.push_back(builder.Finish()); - ns.push_back(name); - } - - BlobBuilder builder(fbb); - builder.add_dataType(type); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = builder.Finish(); - - std::vector> desc; - for (int i = 0; i < n; i++) { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(i); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - 
TensorDescribeBuilder tdb(fbb); - tdb.add_index(n); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVector(ns); - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class PackTensorTest : public MNNTestCase { -public: - virtual ~PackTensorTest() = default; - virtual bool run() { - DataType types[] = { - DataType_DT_INT32, DataType_DT_FLOAT, - }; - - for (int t = 0; t < sizeof(types) / sizeof(DataType); t++) { - DataType type = types[t]; - for (int axis = 0; axis <= 3; axis++) { - for (int n = 2; n <= 4; n++) { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(type, axis, n, {b, h, w, c}); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - for (int i = 0; i < n; i++) { - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->setType(type); - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - if (type == DataType_DT_INT32) { - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255; - } - } else if (type == 
DataType_DT_FLOAT) { - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } - } - } - - auto host = net->getSessionInput(CPU, std::to_string(i).c_str()); - auto device = net->getSessionInput(GPU, std::to_string(i).c_str()); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // clean up - free(input->buffer().host); - delete input; - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - delete net; - }); - } - } - } - } - } - } - } - return true; - } -}; - -class PackScalarTest : public MNNTestCase { -public: - virtual ~PackScalarTest() = default; - virtual bool run() { - DataType types[] = { - DataType_DT_INT32, DataType_DT_FLOAT, - }; - - for (int t = 0; t < sizeof(types) / sizeof(DataType); t++) { - DataType type = types[t]; - for (int n = 2; n <= 4; n++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(type, 0, n, {}); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - for (int i = 0; i < n; i++) { - auto input = new Tensor(1, Tensor::TENSORFLOW); - { - input->setType(type); - input->buffer().dim[0].extent = 1; - TensorUtils::setLinearLayout(input); - input->buffer().dimensions = 0; - input->buffer().host = (uint8_t *)malloc(input->size()); - if (type == DataType_DT_INT32) { - input->host()[0] = rand() % 255; - } else if (type == DataType_DT_FLOAT) { - input->host()[0] = rand() % 255 / 255.f; - } - } - - auto host = net->getSessionInput(CPU, std::to_string(i).c_str()); - auto device = net->getSessionInput(GPU, std::to_string(i).c_str()); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // clean up - free(input->buffer().host); - delete input; - } - - 
// infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - delete net; - }); - } - } - return true; - } -}; -MNNTestSuiteRegister(PackTensorTest, "op/pack/tensor"); -MNNTestSuiteRegister(PackScalarTest, "op/pack/scalar"); diff --git a/test/op/PermuteTest.cpp b/test/op/PermuteTest.cpp index 140e71e30..482fe0134 100644 --- a/test/op/PermuteTest.cpp +++ b/test/op/PermuteTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/Pool3DTest.cpp b/test/op/Pool3DTest.cpp new file mode 100644 index 000000000..23d69f27d --- /dev/null +++ b/test/op/Pool3DTest.cpp @@ -0,0 +1,145 @@ +// +// Pool3DTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/05. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN; +using namespace MNN::Express; + +// PoolType_MAXPOOL or PoolType_AVEPOOL +static VARP _Pool3D(VARP x, INTS kernels, INTS strides, PoolType type, PoolPadType padType, INTS pads) { + std::unique_ptr pool3d(new Pool3DT); + pool3d->strides = strides; + pool3d->kernels = kernels; + pool3d->pads = pads; + pool3d->type = type; + pool3d->padType = padType; + + std::unique_ptr op(new OpT); + op->type = OpType_Pooling3D; + op->main.type = OpParameter_Pool3D; + op->main.value = pool3d.release(); + + return (Variable::create(Expr::create(op.get(), {x}))); +} + +class Pool3DCommonTest : public MNNTestCase { +public: + virtual ~Pool3DCommonTest() = default; +protected: + static bool testOnBackend(MNNForwardType type, const std::string& deviceName, const std::string& test_op_name, PoolType poolType) { + auto creator = 
MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + const int h = 4, w = 4, depth = 3; + const int poolSize = 2, poolDepth = 3; + const int stride = 2, strideDepth = 1; + const int pad = 1, padDepth = 1; + + const std::vector inputData = { + // depth = 0 + 0.5488, 0.7152, 0.6028, 0.5449, + 0.4237, 0.6459, 0.4376, 0.8918, + 0.9637, 0.3834, 0.7917, 0.5289, + 0.568 , 0.9256, 0.071 , 0.0871, + // depth = 1 + 0.0202, 0.8326, 0.7782, 0.87 , + 0.9786, 0.7992, 0.4615, 0.7805, + 0.1183, 0.6399, 0.1434, 0.9447, + 0.5218, 0.4147, 0.2646, 0.7742, + // depth = 2 + 0.4562, 0.5684, 0.0188, 0.6176, + 0.6121, 0.6169, 0.9437, 0.6818, + 0.3595, 0.437 , 0.6976, 0.0602, + 0.6668, 0.6706, 0.2104, 0.1289 + }; + std::vector outputData; + + if (poolType == PoolType_MAXPOOL) { + outputData = std::vector({ + // depth = 0 + 0.5488, 0.8326, 0.87 , + 0.9786, 0.7992, 0.9447, + 0.568 , 0.9256, 0.7742, + // depth = 1 + 0.5488, 0.8326, 0.87 , + 0.9786, 0.9437, 0.9447, + 0.6668, 0.9256, 0.7742, + // depth = 2 + 0.4562, 0.8326, 0.87 , + 0.9786, 0.9437, 0.9447, + 0.6668, 0.6706, 0.7742 + }); + } else { + outputData = std::vector({ + // depth = 0 + 0.071125, 0.366100, 0.176863, + 0.310538, 0.537825, 0.393238, + 0.136225, 0.209487, 0.107662, + // depth = 1 + 0.085433, 0.293000, 0.169375, + 0.287992, 0.583150, 0.323992, + 0.146383, 0.213075, 0.082517, + // depth = 2 + 0.059550, 0.274750, 0.185950, + 0.258563, 0.592400, 0.308400, + 0.148575, 0.195037, 0.112888 + }); + } + + auto input = _Input({1, 1, depth, h, w}, NCHW, halide_type_of()); + auto output = _Pool3D(_Convert(input, NC4HW4), {poolDepth, poolSize, poolSize}, {strideDepth, stride, stride}, poolType, PoolPadType_CAFFE, {padDepth, pad, pad}); + output = _Convert(output, NCHW); + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s 
not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({output}); + } + + ::memcpy(input->writeMap(), inputData.data(), inputData.size() * sizeof(float)); + if(!checkVectorByRelativeError(output->readMap(), outputData.data(), outputData.size(), 0.001)) { + MNN_ERROR("%s(%s) test failed!\n", test_op_name.c_str(), deviceName.c_str()); + return false; + } + + return true; + } +}; + +class MaxPool3DTestOnCPU : public Pool3DCommonTest { +public: + virtual ~MaxPool3DTestOnCPU() = default; + virtual bool run() { + return Pool3DCommonTest::testOnBackend(MNN_FORWARD_CPU, "CPU", "MaxPool3D", PoolType_MAXPOOL); + } +}; + +class AvePool3DTestOnCPU : public Pool3DCommonTest { +public: + virtual ~AvePool3DTestOnCPU() = default; + virtual bool run() { + return Pool3DCommonTest::testOnBackend(MNN_FORWARD_CPU, "CPU", "AvePool3D", PoolType_AVEPOOL); + } +}; + +MNNTestSuiteRegister(MaxPool3DTestOnCPU, "op/MaxPool3d/cpu"); +MNNTestSuiteRegister(AvePool3DTestOnCPU, "op/AvePool3d/cpu"); diff --git a/test/op/PoolGradTest.cpp b/test/op/PoolGradTest.cpp index 7994bfde8..926371d5f 100644 --- a/test/op/PoolGradTest.cpp +++ b/test/op/PoolGradTest.cpp @@ -6,94 +6,130 @@ // Copyright © 2018, Alibaba Group Holding Limited // +#include #include "MNNTestSuite.h" -#include "Expr.hpp" -#include "ExprCreator.hpp" #include "TestUtils.h" +#include +#include +#include using namespace MNN::Express; -class PoolGradTest : public MNNTestCase{ +class PoolGradTest : public MNNTestCase { public: virtual ~PoolGradTest() = default; - virtual bool run(){ - +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + const int h = 7, w = 7, size = h * w; const float originInputData[] = { - 0.2025, 0.0156, 0.0765, 0.1872, 0.2949, -0.0325, 0.0052, 0.4046, - 0.0455, 0.3100, 0.0162, -0.1304, 0.2245, 0.1622, 
0.2437,0.1605, - -0.0330, 0.0641, 0.2964, 0.0452, -0.1621, 0.2534, 0.3948,0.3268, - 0.2203, -0.0665, 0.1727, 0.1119, -0.1570, 0.1260, 0.3523,-0.0115, - 0.2305, 0.1664, 0.1277, 0.4092, -0.1601, 0.0929, 0.1138,0.0624, - 0.2331, 0.3501, 0.3382, 0.2309, 0.2175, 0.0826, -0.1567,0.1327, - 0.0320, 0.1205, -0.0566, 0.1267, -0.0004, 0.2930, 0.2353,-0.1668, - 0.1653, 0.3441, -0.0312, 0.2422, 0.1814, 0.1478, 0.2195,-0.0848}; - auto poolInput = _Const(originInputData, {1, 1, 8, 8}, NCHW); - poolInput = _Convert(poolInput, NC4HW4); - auto poolOut = _MaxPool(poolInput, {2, 2}, {2, 2}); - auto poolOutDim = poolOut->getInfo()->dim; + 0.3100, 0.0156, 0.0765, 0.1872, 0.2949, 0.2949, 0.0052, + 0.0455, 0.3100, 0.1872, -0.1304, 0.2949, 0.2949, 0.2437, + -0.0330, 0.0641, 0.2934, 0.0452, -0.1621, 0.2534, 0.3948, + 0.2203, -0.0665, 0.1727, 0.1119, -0.1570, 0.1260, 0.3523, + 0.2305, 0.1664, 0.1277, 0.4092, -0.1601, 0.0929, 0.1138, + 0.2331, 0.3501, 0.3382, 0.2309, 0.2175, 0.0826, -0.1567, + 0.0320, 0.1205, -0.0566, 0.1267, -0.0004, 0.2930, 0.2353 + }; const float poolInputGradData[] = { - 1., 1., 1., 1., - 1., 1., 1., 1., - 1., 1., 1., 1., - 1., 1., 1., 1.}; - - auto poolInputGrad = _Const(poolInputGradData, poolOutDim, NCHW); - poolInputGrad = _Convert(poolInputGrad, NC4HW4); - - auto maxPoolOutputGrad = _PoolGrad(poolInput, poolOut, poolInputGrad, {2, 2}, {2, 2}, MAXPOOL); - auto avePoolOutputGrad = _PoolGrad(poolInput, poolOut, poolInputGrad, {2, 2}, {2, 2}, AVEPOOL); - - maxPoolOutputGrad = _Convert(maxPoolOutputGrad, NCHW); - avePoolOutputGrad = _Convert(avePoolOutputGrad, NCHW); + 1., 2., 3., + 2., 3., 1., + 3., 1., 2. 
+ }; const float maxExpectedGrad[] = { - 0., 0., 0., 1., 1., 0., 0., 1., - 0., 1., 0., 0., 0., 0., 0., 0., - 0., 0., 1., 0., 0., 1., 1., 0., - 1., 0., 0., 0., 0., 0., 0., 0., - 0., 0., 0., 1., 0., 0., 0., 0., - 0., 1., 0., 0., 1., 0., 0., 1., - 0., 0., 0., 0., 0., 1., 1., 0., - 0., 1., 0., 1., 0., 0., 0., 0.}; - + 1., 0., 0., 0., 2., 0., 0., + 0., 0., 0., 0., 0., 0., 0., + 0., 0., 2., 0., 0., 0., 4., + 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 4., 0., 0., 0., + 0., 3., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 2., 0. + }; const float aveExpectedGrad[] = { - 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, - 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, - 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, - 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, - 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, - 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, - 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, - 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500, 0.2500 + 0.111111, 0.111111, 0.333333, 0.222222, 0.555556, 0.333333, 0.333333, + 0.111111, 0.111111, 0.333333, 0.222222, 0.555556, 0.333333, 0.333333, + 0.333333, 0.333333, 0.888889, 0.555556, 1.000000, 0.444444, 0.444444, + 0.222222, 0.222222, 0.555556, 0.333333, 0.444444, 0.111111, 0.111111, + 0.555556, 0.555556, 1.000000, 0.444444, 0.777778, 0.333333, 0.333333, + 0.333333, 0.333333, 0.444444, 0.111111, 0.333333, 0.222222, 0.222222, + 0.333333, 0.333333, 0.444444, 0.111111, 0.333333, 0.222222, 0.222222 }; - - const std::vector outDim = {1, 1, 8, 8}; + + auto poolInput = _Input({1, 1, h, w}, NCHW, halide_type_of()); + auto poolInputConvert = _Convert(poolInput, NC4HW4); + auto maxPoolOut = _MaxPool(poolInputConvert, {3, 3}, {2, 2}); + auto avePoolOut = _AvePool(poolInputConvert, {3, 3}, {2, 2}); + auto poolOutDim = maxPoolOut->getInfo()->dim; + + int poolSize = 1; + for (auto length : poolOutDim) { + 
poolSize *= length; + } + + auto poolInputGrad = _Input(poolOutDim, NCHW, halide_type_of()); + auto poolInputGradConvert = _Convert(poolInputGrad, NC4HW4); + + auto maxPoolOutputGrad = _Convert(_PoolGrad(poolInputConvert, maxPoolOut, poolInputGradConvert, {3, 3}, {2, 2}, MAXPOOL), NCHW); + auto avePoolOutputGrad = _Convert(_PoolGrad(poolInputConvert, avePoolOut, poolInputGradConvert, {3, 3}, {2, 2}, AVEPOOL), NCHW); + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({maxPoolOutputGrad, avePoolOutputGrad}); + } + + const std::vector outDim = {1, 1, h, w}; auto maxpoolOutputGradDim = maxPoolOutputGrad->getInfo()->dim; auto avepoolOutputGradDim = avePoolOutputGrad->getInfo()->dim; if(!checkVector(maxpoolOutputGradDim.data(), outDim.data(), 4, 0)){ - MNN_ERROR("MaxpoolGrad shape test failed!\n"); + MNN_ERROR("MaxpoolGrad(%s) shape test failed!\n", deviceName.c_str()); return false; } if(!checkVector(avepoolOutputGradDim.data(), outDim.data(), 4, 0)){ - MNN_ERROR("AvepoolGrad shape test failed!\n"); + MNN_ERROR("AvepoolGrad(%s) shape test failed!\n", deviceName.c_str()); return false; } - auto maxpoolOutputGradData = maxPoolOutputGrad->readMap(); - auto avepoolOutputGradData = avePoolOutputGrad->readMap(); - - if(!checkVector(maxpoolOutputGradData, maxExpectedGrad, 64, 0.0)){ - MNN_ERROR("MaxpoolGrad test failed!\n"); + + ::memcpy(poolInput->writeMap(), (const float *)originInputData, size * sizeof(float)); + ::memcpy(poolInputGrad->writeMap(), (const float *)poolInputGradData, poolSize * sizeof(float)); + if(!checkVectorByRelativeError(maxPoolOutputGrad->readMap(), maxExpectedGrad, size, 0.001)) { + MNN_ERROR("MaxpoolGrad(%s) test failed!\n", deviceName.c_str()); return false; } - if(!checkVector(avepoolOutputGradData, aveExpectedGrad, 64, 0.0)){ - 
MNN_ERROR("AvepoolGrad test failed!\n"); + if(!checkVectorByRelativeError(avePoolOutputGrad->readMap(), aveExpectedGrad, size, 0.001)) { + MNN_ERROR("AvepoolGrad(%s) test failed!\n", deviceName.c_str()); return false; } - - + return true; } }; +class PoolGradTestOnCPU : public PoolGradTest { +public: + virtual ~PoolGradTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; +class PoolGradTestOnOpencl : public PoolGradTest { +public: + virtual ~PoolGradTestOnOpencl() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; -MNNTestSuiteRegister(PoolGradTest, "op/PoolGrad"); +MNNTestSuiteRegister(PoolGradTestOnCPU, "op/PoolGrad/cpu"); +MNNTestSuiteRegister(PoolGradTestOnOpencl, "op/PoolGrad/opencl"); diff --git a/test/op/PoolingTest.cpp b/test/op/PoolingTest.cpp index 575b81a95..c6c17f980 100644 --- a/test/op/PoolingTest.cpp +++ b/test/op/PoolingTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/QuantizedAddTest.cpp b/test/op/QuantizedAddTest.cpp index 358957d48..2b887efa6 100644 --- a/test/op/QuantizedAddTest.cpp +++ b/test/op/QuantizedAddTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" -#include "Session.hpp" +#include "core/Session.hpp" #include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/QuantizedAvgPoolTest.cpp b/test/op/QuantizedAvgPoolTest.cpp index b857d08a6..6441a0362 100644 --- a/test/op/QuantizedAvgPoolTest.cpp +++ b/test/op/QuantizedAvgPoolTest.cpp @@ -6,12 +6,12 @@ // 
Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" +#include "core/Session.hpp" #include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/QuantizedMaxPoolTest.cpp b/test/op/QuantizedMaxPoolTest.cpp index afb378367..73da9b2e9 100644 --- a/test/op/QuantizedMaxPoolTest.cpp +++ b/test/op/QuantizedMaxPoolTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" -#include "Session.hpp" +#include "core/Session.hpp" #include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/QuantizedReshapeTest.cpp b/test/op/QuantizedReshapeTest.cpp index 89b429f66..2444c5918 100644 --- a/test/op/QuantizedReshapeTest.cpp +++ b/test/op/QuantizedReshapeTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ROIPoolingTest.cpp b/test/op/ROIPoolingTest.cpp index fd994a96a..8ce7e2c38 100644 --- a/test/op/ROIPoolingTest.cpp +++ b/test/op/ROIPoolingTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/RangeTest.cpp b/test/op/RangeTest.cpp index c95b02f96..66b722416 100644 --- a/test/op/RangeTest.cpp +++ 
b/test/op/RangeTest.cpp @@ -5,254 +5,34 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include "Interpreter.hpp" +#include +#include #include "MNNTestSuite.h" -#include "Session.hpp" -#include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(DataType type) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - if (type == DataType_DT_INT32) { - std::vector start = {8}; - std::vector limit = {128}; - std::vector delta = {3}; - - { // start - auto dims = fbb.CreateVector(std::vector({1})); - auto data = fbb.CreateVector(start); - BlobBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dataType(type); - ib.add_dataFormat(MNN_DATA_FORMAT_NHWC); - ib.add_int32s(flatbuffers::Offset>(data.o)); - auto input = ib.Finish(); - auto name = fbb.CreateString("input0"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { // limit - auto dims = fbb.CreateVector(std::vector({1})); - auto data = fbb.CreateVector(limit); - BlobBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dataType(type); - ib.add_dataFormat(MNN_DATA_FORMAT_NHWC); - ib.add_int32s(flatbuffers::Offset>(data.o)); - auto input = ib.Finish(); - auto name = fbb.CreateString("input1"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { // 
delta - auto dims = fbb.CreateVector(std::vector({1})); - auto data = fbb.CreateVector(delta); - BlobBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dataType(type); - ib.add_dataFormat(MNN_DATA_FORMAT_NHWC); - ib.add_int32s(flatbuffers::Offset>(data.o)); - auto input = ib.Finish(); - auto name = fbb.CreateString("input2"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({2})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto rb = RangeBuilder(fbb); - rb.add_Tidx(type); - auto range = rb.Finish(); - auto name = fbb.CreateString("range"); - auto iv = fbb.CreateVector(std::vector({0, 1, 2})); - auto ov = fbb.CreateVector(std::vector({3})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Range); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Range); - builder.add_main(flatbuffers::Offset(range.o)); - vec.push_back(builder.Finish()); - } - } else if (type == DataType_DT_FLOAT) { - std::vector start = {8.f}; - std::vector limit = {128.f}; - std::vector delta = {3.f}; - - { // start - auto dims = fbb.CreateVector(std::vector({1})); - auto data = fbb.CreateVector(start); - BlobBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dataType(type); - ib.add_dataFormat(MNN_DATA_FORMAT_NHWC); - ib.add_float32s(flatbuffers::Offset>(data.o)); - auto input = ib.Finish(); - auto name = fbb.CreateString("input0"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - 
builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { // limit - auto dims = fbb.CreateVector(std::vector({1})); - auto data = fbb.CreateVector(limit); - BlobBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dataType(type); - ib.add_dataFormat(MNN_DATA_FORMAT_NHWC); - ib.add_float32s(flatbuffers::Offset>(data.o)); - auto input = ib.Finish(); - auto name = fbb.CreateString("input1"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({1})); - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { // delta - auto dims = fbb.CreateVector(std::vector({1})); - auto data = fbb.CreateVector(delta); - BlobBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dataType(type); - ib.add_dataFormat(MNN_DATA_FORMAT_NHWC); - ib.add_float32s(flatbuffers::Offset>(data.o)); - auto input = ib.Finish(); - auto name = fbb.CreateString("input2"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({2})); - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto rb = RangeBuilder(fbb); - rb.add_Tidx(type); - auto range = rb.Finish(); - auto name = fbb.CreateString("range"); - auto iv = fbb.CreateVector(std::vector({0, 1, 2})); - auto ov = fbb.CreateVector(std::vector({3})); - OpBuilder builder(fbb); - builder.add_type(OpType_Range); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Range); - builder.add_main(flatbuffers::Offset(range.o)); - 
vec.push_back(builder.Finish()); - } - } - - BlobBuilder builder(fbb); - builder.add_dataType(type); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = builder.Finish(); - - std::vector> desc; - for (int i = 0; i <= 3; i++) { - TensorDescribeBuilder tdb(fbb); - tdb.add_blob(flatbuffers::Offset(blob.o)); - tdb.add_index(i); - desc.push_back(tdb.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input0", "input1", "input2", "output"}); - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - +using namespace MNN::Express; class RangeTest : public MNNTestCase { public: virtual ~RangeTest() = default; virtual bool run() { - DataType types[] = { - DataType_DT_INT32, DataType_DT_FLOAT, - }; - - for (int t = 0; t < sizeof(types) / sizeof(DataType); t++) { - DataType type = (DataType)types[t]; - - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(type); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - delete net; - }); + auto start = _Const(0.0); + auto limit = _Const(1.0); + auto delta = _Const(0.3); + auto output = _Range(start, limit, delta); + const std::vector expectedOutput = {0.0, 0.3, 0.6, 0.9}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("RangeTest 
test failed!\n"); + return false; + } + auto dims = output->getInfo()->dim; + if (dims.size() !=1) { + MNN_ERROR("RangeTest test failed!\n"); + return false; + } + if (dims[0]!=4) { + MNN_ERROR("RangeTest test failed!\n"); + return false; } return true; } diff --git a/test/op/RankTest.cpp b/test/op/RankTest.cpp index 56229f2e0..6c0650707 100644 --- a/test/op/RankTest.cpp +++ b/test/op/RankTest.cpp @@ -5,139 +5,34 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include "Interpreter.hpp" +#include +#include #include "MNNTestSuite.h" -#include "Session.hpp" -#include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(int b, int c, int h, int w) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { // input - auto dims = fbb.CreateVector(std::vector({b, h, w, c})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto rank = RankBuilder(fbb).Finish(); - auto name = fbb.CreateString("rank"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - OpBuilder builder(fbb); - builder.add_type(OpType_Rank); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Rank); - builder.add_main(flatbuffers::Offset(rank.o)); - vec.push_back(builder.Finish()); - } - - BlobBuilder fb(fbb); - fb.add_dataType(DataType_DT_FLOAT); - fb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto flt = fb.Finish(); 
- BlobBuilder qb(fbb); - qb.add_dataType(DataType_DT_INT32); - qb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto qnt = qb.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(flt.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(qnt.o)); - desc.push_back(tdb.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - +using namespace MNN::Express; class RankTest : public MNNTestCase { public: virtual ~RankTest() = default; virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - for (int c = 1; c <= 8; c *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(b, c, h, w); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, 
host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } + auto input = _Input({2,2}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Rank(input); + const std::vector expectedOutput = {2}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 1, 0)) { + MNN_ERROR("RankTest test failed!\n"); + return false; + } + auto dims = output->getInfo()->dim; + if (dims.size() !=0) { + MNN_ERROR("RankTest test failed!\n"); + return false; } return true; } diff --git a/test/op/ReLU6Test.cpp b/test/op/ReLU6Test.cpp index 14358f21b..82e4d46ec 100644 --- a/test/op/ReLU6Test.cpp +++ b/test/op/ReLU6Test.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ReLUTest.cpp b/test/op/ReLUTest.cpp index e442c9ec9..1a278888e 100644 --- a/test/op/ReLUTest.cpp +++ b/test/op/ReLUTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ReductionTest.cpp b/test/op/ReductionTest.cpp index 005a71084..6ef542f57 100644 --- a/test/op/ReductionTest.cpp +++ b/test/op/ReductionTest.cpp 
@@ -6,12 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ReluGradTest.cpp b/test/op/ReluGradTest.cpp new file mode 100644 index 000000000..3d5e0d30f --- /dev/null +++ b/test/op/ReluGradTest.cpp @@ -0,0 +1,134 @@ +// +// ReluGradTest.cpp +// MNNTests +// +// Created by MNN on 2019/10/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +#include +#include "MNN_generated.h" +#include +#include +#include + +using namespace MNN::Express; + +static VARP _ReluGrad(VARP originInput, VARP inputGrad) { + using namespace MNN; + std::unique_ptr relu(new OpT); + relu->type = OpType_ReluGrad; + relu->main.type = OpParameter_Relu; + relu->main.value = new ReluT; + relu->main.AsRelu()->slope = 0.0f; + return Variable::create(Expr::create(std::move(relu), {originInput, inputGrad})); +} + +static VARP _Relu6Grad(VARP originInput, VARP inputGrad) { + using namespace MNN; + std::unique_ptr relu6(new OpT); + relu6->type = OpType_Relu6Grad; + relu6->main.type = OpParameter_Relu6; + relu6->main.value = new Relu6T; + relu6->main.AsRelu6()->slope = 0.0f; + return Variable::create(Expr::create(std::move(relu6), {originInput, inputGrad})); +} + +class ReluGradTest : public MNNTestCase { +public: + virtual ~ReluGradTest() = default; +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + const int h = 4, w = 4, size = h * w; + const std::vector originInputData = { + 6.2025, -0.0156, 0.0765, 6.1872, + 0.0455, 6.3100, 0.0162, 
-0.1304, + -0.0330, 0.0641, 6.2964, 0.0452, + 0.2203, -0.0665, 0.1727, 0.1119 + }; + const std::vector inputGradData = { + 1., 2., 3., 4., + 2., 3., 4., 1., + 3., 4., 1., 2., + 4., 1., 2., 3. + }; + std::vector reluExpectedGrad(size), relu6ExpectedGrad(size); + for (int i = 0; i < size; ++i) { + bool positive = (originInputData[i] > 0); + bool under6 = (originInputData[i] < 6); + reluExpectedGrad[i] = (positive ? inputGradData[i] : 0); + relu6ExpectedGrad[i] = ((positive && under6) ? inputGradData[i] : 0); + } + + auto input = _Input({1, 1, h, w}, NCHW, halide_type_of()); + auto inputGrad = _Input({1, 1, h, w}, NCHW, halide_type_of()); + auto inputConvert = _Convert(input, NC4HW4); + auto inputGradConvert = _Convert(inputGrad, NC4HW4); + auto reluGrad = _Convert(_ReluGrad(inputConvert, inputGradConvert), NCHW); + auto relu6Grad = _Convert(_Relu6Grad(inputConvert, inputGradConvert), NCHW); + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({reluGrad, relu6Grad}); + } + + const std::vector outDim = {1, 1, h, w}; + auto reluGradDim = reluGrad->getInfo()->dim; + auto relu6GradDim = relu6Grad->getInfo()->dim; + if (!checkVector(reluGradDim.data(), outDim.data(), 4, 0)) { + MNN_ERROR("ReluGrad(%s) shape test failed!\n", deviceName.c_str()); + return false; + } + if (!checkVector(relu6GradDim.data(), outDim.data(), 4, 0)) { + MNN_ERROR("Relu6Grad(%s) shape test failed!\n", deviceName.c_str()); + return false; + } + + ::memcpy(input->writeMap(), originInputData.data(), size * sizeof(float)); + ::memcpy(inputGrad->writeMap(), inputGradData.data(), size * sizeof(float)); + if(!checkVector(reluGrad->readMap(), reluExpectedGrad.data(), size, 1e-6)){ + MNN_ERROR("ReluGrad(%s) test failed!\n", deviceName.c_str()); + return false; + } + 
if(!checkVector(relu6Grad->readMap(), relu6ExpectedGrad.data(), size, 1e-6)){ + MNN_ERROR("Relu6Grad(%s) test failed!\n", deviceName.c_str()); + return false; + } + return true; + } +}; + +class ReluGradTestOnCPU : public ReluGradTest { +public: + virtual ~ReluGradTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; + +class ReluGradTestOnOpencl : public ReluGradTest { +public: + virtual ~ReluGradTestOnOpencl() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + +MNNTestSuiteRegister(ReluGradTestOnCPU, "op/ReluGrad/cpu"); +MNNTestSuiteRegister(ReluGradTestOnOpencl, "op/ReluGrad/opencl"); diff --git a/test/op/ReshapeTest.cpp b/test/op/ReshapeTest.cpp index 9dd521a98..588b6dc0c 100644 --- a/test/op/ReshapeTest.cpp +++ b/test/op/ReshapeTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ResizeTest.cpp b/test/op/ResizeTest.cpp index 917c97abe..3b951ed87 100644 --- a/test/op/ResizeTest.cpp +++ b/test/op/ResizeTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ScaleTest.cpp b/test/op/ScaleTest.cpp index e2569e851..cdd6c7c34 100644 --- a/test/op/ScaleTest.cpp +++ b/test/op/ScaleTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include 
"TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/ScatterNdTest.cpp b/test/op/ScatterNdTest.cpp new file mode 100644 index 000000000..0cbb0254f --- /dev/null +++ b/test/op/ScatterNdTest.cpp @@ -0,0 +1,61 @@ +// +// ScatterNdTest.cpp +// MNNTests +// +// Created by MNN on 2019/11/28. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; + +class ScatterNdTest : public MNNTestCase { + virtual ~ScatterNdTest() = default; + + virtual bool run() { + { + const int indicesData[] = {4, 3, 1, 7}; + const int updatesData[] = {9, 10, 11, 12}; + const int shapeData[] = {8}; + const int expectedResult[] = {0, 11, 0, 10, 9, 0, 0, 12}; + + auto indices = _Const(indicesData, {4, 1}, NHWC, halide_type_of()); + auto updates = _Const(updatesData, {4}, NHWC, halide_type_of()); + auto shape = _Const(shapeData, {1}, NHWC, halide_type_of()); + auto result = _ScatterNd(indices, updates, shape); + + auto resultData = result->readMap(); + const int size = result->getInfo()->size; + if (!checkVector(resultData, expectedResult, size, 0)) { + return false; + } + } + { + const int indicesData[] = {0, 2}; + const int updatesData[] = {5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, + 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8}; + const int shapeData[] = {4, 4, 4}; + auto indices = _Const(indicesData, {2, 1}, NHWC, halide_type_of()); + auto updates = _Const(updatesData, {2, 4, 4}, NHWC, halide_type_of()); + auto shape = _Const(shapeData, {3}, NHWC, halide_type_of()); + const int expectedResult[] = {5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, + 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + auto result = _ScatterNd(indices, updates, shape); + + auto resultData = result->readMap(); + const 
int size = result->getInfo()->size; + if (!checkVector(resultData, expectedResult, size, 0)) { + return false; + } + } + + return true; + } +}; + +MNNTestSuiteRegister(ScatterNdTest, "op/ScatterNdTest"); diff --git a/test/op/SeLUTest.cpp b/test/op/SeLUTest.cpp index 011bedd9f..44d3b9417 100644 --- a/test/op/SeLUTest.cpp +++ b/test/op/SeLUTest.cpp @@ -6,209 +6,31 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(float scale, float alpha, int b, int c, int h, int w, bool tensorflow) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(tensorflow ? std::vector({b, h, w, c}) : std::vector({b, c, h, w})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto sb = SeluBuilder(fbb); - sb.add_scale(scale); - sb.add_alpha(alpha); - auto selu = sb.Finish(); - auto name = fbb.CreateString("selu"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Selu); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Selu); - builder.add_main(flatbuffers::Offset(selu.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", 
"output"}); - if (tensorflow) { - BlobBuilder builder(fbb); - builder.add_dataType(DataType_DT_FLOAT); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = builder.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - } else { - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - } - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class SeLUCaffeTest : public MNNTestCase { +using namespace MNN::Express; +class SeluTest : public MNNTestCase { public: - virtual ~SeLUCaffeTest() = default; + virtual ~SeluTest() = default; virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 16; c *= 2) { - for (int h = 1; h <= 16; h *= 2) { - for (int w = 1; w <= 16; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - float scale = rand() % 255 / 255.f; - float alpha = rand() % 255 / 255.f; - auto net = create(scale, alpha, b, c, h, w, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - 
input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = (rand() % 255 - 512) / 256.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - return true; - } -}; - -class SeLUTensorflowTest : public MNNTestCase { -public: - virtual ~SeLUTensorflowTest() = default; - virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 16; c *= 2) { - for (int h = 1; h <= 4; h++) { - for (int w = 1; w <= 4; w++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - float scale = rand() % 255 / 255.f; - float alpha = rand() % 255 / 255.f; - auto net = create(scale, alpha, b, c, h, w, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = (rand() % 255 - 512) / 256.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, 
GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Selu(input,2.0, 0.5); + const std::vector expectedOutput = {-0.63, -0.86, 6.0, 8.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("SeluTest test failed!\n"); + return false; } return true; } }; -MNNTestSuiteRegister(SeLUCaffeTest, "op/selu/caffe"); -MNNTestSuiteRegister(SeLUTensorflowTest, "op/selu/tensorflow"); +MNNTestSuiteRegister(SeluTest, "op/selu"); diff --git a/test/op/SetDiff1DTest.cpp b/test/op/SetDiff1DTest.cpp new file mode 100644 index 000000000..16baa4d65 --- /dev/null +++ b/test/op/SetDiff1DTest.cpp @@ -0,0 +1,47 @@ +// +// SetDiff1DTest.cpp +// MNNTests +// +// Created by MNN on 2019/01/15. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +using namespace MNN::Express; +class SetDiff1DTest : public MNNTestCase { +public: + virtual ~SetDiff1DTest() = default; + virtual bool run() { + auto input_x = _Input({16}, NHWC, halide_type_of()); + auto input_y = _Input({8}, NHWC, halide_type_of()); + input_x->setName("input_x"); + input_y->setName("input_y"); + // set input data + const int x_data[] = {-1, 2, -3, 4, + 5, -6, 7, -8, + -9, -10, 11, 12, + 13, 14, -15, -16}; + const int y_data[] = {-1, 2, -3, 4, + 5, -6, 7, -8}; + auto xPtr = input_x->writeMap(); + auto yPtr = input_y->writeMap(); + memcpy(xPtr, x_data, 16 * sizeof(int)); + memcpy(yPtr, y_data, 8 * sizeof(int)); + input_x->unMap(); + input_y->unMap(); + auto output = _SetDiff1D(input_x, input_y); + const std::vector expectedOutput = {-9, -10, 11, 12, 13, 14, -15, -16}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0)) { + MNN_ERROR("SetDiff1DTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(SetDiff1DTest, "op/setdiff1d"); + diff --git a/test/op/SigmoidTest.cpp b/test/op/SigmoidTest.cpp index 83e202261..d308538b9 100644 --- a/test/op/SigmoidTest.cpp +++ b/test/op/SigmoidTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/SizeTest.cpp b/test/op/SizeTest.cpp new file mode 100644 index 000000000..eda381f46 --- /dev/null +++ b/test/op/SizeTest.cpp @@ -0,0 +1,41 @@ +// +// SizeTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/13. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class SizeTest : public MNNTestCase { +public: + virtual ~SizeTest() = default; + virtual bool run() { + auto input = _Input({2,2}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Size(input); + const std::vector expectedOutput = {4}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 1, 0)) { + MNN_ERROR("SizeTest test failed!\n"); + return false; + } + auto dims = output->getInfo()->dim; + if (dims.size() !=0) { + MNN_ERROR("SizeTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(SizeTest, "op/size"); diff --git a/test/op/SliceTFTest.cpp b/test/op/SliceTFTest.cpp index 7f972b436..d224fa0f2 100644 --- a/test/op/SliceTFTest.cpp +++ b/test/op/SliceTFTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/SliceTest.cpp b/test/op/SliceTest.cpp index fb074d75e..7ea3b79bc 100644 --- a/test/op/SliceTest.cpp +++ b/test/op/SliceTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/SoftmaxGradTest.cpp b/test/op/SoftmaxGradTest.cpp new file mode 100644 index 000000000..ac391c936 
--- /dev/null +++ b/test/op/SoftmaxGradTest.cpp @@ -0,0 +1,117 @@ +// +// ReluGradTest.cpp +// MNNTests +// +// Created by MNN on 2019/10/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +#include +#include "MNN_generated.h" +#include +#include +#include + +using namespace MNN::Express; + +static VARP _SoftmaxGrad(VARP originOutput, VARP outputGrad, int axis) { + using namespace MNN; + std::unique_ptr softmax(new OpT); + softmax->type = OpType_SoftmaxGrad; + softmax->main.type = OpParameter_Axis; + softmax->main.value = new AxisT; + softmax->main.AsAxis()->axis = axis; + return Variable::create(Expr::create(std::move(softmax), {originOutput, outputGrad})); +} + +class SoftmaxGradTest : public MNNTestCase { +public: + virtual ~SoftmaxGradTest() = default; +protected: + bool testOnBackend(MNNForwardType type, const std::string& deviceName) { + auto creator = MNN::MNNGetExtraBackendCreator(type); + if (creator == nullptr) { + MNN_ERROR("backend %d not found!\n", type); + return false; + } + + const int batch = 4, channel = 4, size = batch * channel; + float originOutputData[batch][channel] = { + 0.2, 0.23, 0.3, 0.27, + 0.18, 0.33, 0.16, 0.33, + 0.15, 0.18, 0.35, 0.32, + 0.29, 0.18, 0.22, 0.31 + }; + float outputGradData[batch][channel] = { + 1., 2., 3., 4., + 2., 3., 4., 1., + 3., 4., 1., 2., + 4., 1., 2., 3. 
+ }; + float expectGrad[batch][channel]; + for (int b = 0; b < batch; ++b) { + float sum = 0; + for (int c = 0; c < channel; ++c) { + sum += originOutputData[b][c] * outputGradData[b][c]; + } + for (int c = 0; c < channel; ++c) { + expectGrad[b][c] = originOutputData[b][c] * (outputGradData[b][c] - sum); + } + } + + auto output = _Input({batch, channel}, NCHW, halide_type_of()); + auto outputGrad = _Input({batch, channel}, NCHW, halide_type_of()); + auto outputConvert = _Convert(output, NC4HW4); + auto outputGradConvert = _Convert(outputGrad, NC4HW4); + auto softmaxGrad = _Convert(_SoftmaxGrad(outputConvert, outputGradConvert, 1), NCHW); + + if (type != MNN_FORWARD_CPU) { + Optimizer::Config config; + config.forwardType = type; + auto optimizer = Optimizer::create(config); + if (optimizer == nullptr) { + MNN_ERROR("backend %s not support\n", deviceName.c_str()); + return false; + } + optimizer->onExecute({softmaxGrad}); + } + + const std::vector outDim = {batch, channel}; + auto softmaxGradDim = softmaxGrad->getInfo()->dim; + if (!checkVector(softmaxGradDim.data(), outDim.data(), 2, 0)) { + MNN_ERROR("SoftmaxGrad(%s) shape test failed!\n", deviceName.c_str()); + return false; + } + + ::memcpy(output->writeMap(), (const float *)originOutputData, size * sizeof(float)); + ::memcpy(outputGrad->writeMap(), (const float *)outputGradData, size * sizeof(float)); + if(!checkVectorByRelativeError(softmaxGrad->readMap(), (const float *)expectGrad, size, 0.005)){ + MNN_ERROR("SoftmaxGrad(%s) test failed!\n", deviceName.c_str()); + return false; + } + return true; + } +}; + +class SoftmaxGradTestOnCPU : public SoftmaxGradTest { +public: + virtual ~SoftmaxGradTestOnCPU() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_CPU, "CPU"); + } +}; + +class SoftmaxGradTestOnOpencl : public SoftmaxGradTest { +public: + virtual ~SoftmaxGradTestOnOpencl() = default; + virtual bool run() { + return testOnBackend(MNN_FORWARD_OPENCL, "OPENCL"); + } +}; + 
+MNNTestSuiteRegister(SoftmaxGradTestOnCPU, "op/SoftmaxGrad/cpu"); +MNNTestSuiteRegister(SoftmaxGradTestOnOpencl, "op/SoftmaxGrad/opencl"); diff --git a/test/op/SoftmaxTest.cpp b/test/op/SoftmaxTest.cpp index 949de89bf..fa995bf1a 100644 --- a/test/op/SoftmaxTest.cpp +++ b/test/op/SoftmaxTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/SpaceToBatchNDTest.cpp b/test/op/SpaceToBatchNDTest.cpp index 82e0be831..87d419988 100644 --- a/test/op/SpaceToBatchNDTest.cpp +++ b/test/op/SpaceToBatchNDTest.cpp @@ -5,149 +5,41 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include "Interpreter.hpp" +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(std::vector s, std::vector pad, std::vector dims) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto idims = fbb.CreateVector(std::vector(dims)); - InputBuilder ib(fbb); - ib.add_dims(idims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto sdims = fbb.CreateVector(std::vector({2})); - auto shapes = fbb.CreateVector(s); - BlobBuilder bb(fbb); - bb.add_dims(sdims); - bb.add_dataType(DataType_DT_INT32); - 
bb.add_dataFormat(MNN_DATA_FORMAT_NCHW); - bb.add_int32s(shapes); - auto shape = bb.Finish(); - - auto pdims = fbb.CreateVector(std::vector({4})); - auto pads = fbb.CreateVector(pad); - BlobBuilder pb(fbb); - pb.add_dims(pdims); - pb.add_dataType(DataType_DT_INT32); - pb.add_dataFormat(MNN_DATA_FORMAT_NCHW); - pb.add_int32s(pads); - auto padding = pb.Finish(); - - auto sbb = SpaceBatchBuilder(fbb); - sbb.add_blockShape(flatbuffers::Offset(shape.o)); - sbb.add_padding(flatbuffers::Offset(padding.o)); - auto sb = sbb.Finish(); - - auto name = fbb.CreateString("space_to_batch"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - OpBuilder builder(fbb); - builder.add_type(OpType_SpaceToBatchND); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_SpaceBatch); - builder.add_main(flatbuffers::Offset(sb.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - NetBuilder builder(fbb); - builder.add_oplists(ops); - builder.add_tensorName(names); - fbb.Finish(builder.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - +using namespace MNN::Express; class SpaceToBatchNDTest : public MNNTestCase { public: virtual ~SpaceToBatchNDTest() = default; virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 4; c *= 2) { - for (int h = 1; h <= 4; h *= 2) { - for (int w = 1; w <= 4; w *= 2) { - for (int pw = 0; pw <= 1; pw++) { - for (int ph = 0; ph <= 1; ph++) { - for (int sw = 1; sw <= 2; sw *= 2) { - for (int sh = 1; sh <= 2; sh *= 2) { - if (sh > h + 2 * ph || sw > w + 2 * pw) - continue; - - dispatch([&](MNNForwardType backend) -> void { - 
if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create({sh, sw}, {ph, ph, pw, pw}, {b, c, h, w}); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < w * h * c * b; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - } - } - } + auto input = _Input({3,1,2,2}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 12 * sizeof(float)); + input->unMap(); + const int blockshapedata[] = {2,2}; + const int paddingdata[] = {0,0,0,0}; + auto block_shape = _Const(blockshapedata,{2,},NCHW,halide_type_of()); + auto paddings = _Const(paddingdata,{2,2},NCHW,halide_type_of()); + input = _Convert(input, NC4HW4); + auto tmp = _SpaceToBatchND(input, block_shape, paddings); + auto output = _Convert(tmp, NCHW); + const std::vector expectedOutput = {1.0, 5.0, 9.0, 2.0, 6.0, 10.0, 3.0, 7.0, 11.0, 4.0, 8.0, 12.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 12, 0.01)) { + MNN_ERROR("SpaceToBatchNDTest test failed!\n"); + return false; + } + const 
std::vector expectedDims = {12,1,1,1}; + auto gotDims = output->getInfo()->dim; + if (!checkVector(gotDims.data(), expectedDims.data(), 4, 0)) { + MNN_ERROR("SpaceToBatchNDTest test failed!\n"); + return false; } return true; } diff --git a/test/op/SpaceToDepthTest.cpp b/test/op/SpaceToDepthTest.cpp new file mode 100644 index 000000000..4deb0383e --- /dev/null +++ b/test/op/SpaceToDepthTest.cpp @@ -0,0 +1,49 @@ +// +// SpaceToDepthTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/18. +// Copyright © 2018, Alibaba Group Holding Limited +// + + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +using namespace MNN::Express; +class SpaceToDepthTest : public MNNTestCase { +public: + virtual ~SpaceToDepthTest() = default; + virtual bool run() { + auto input = _Input({1, 4, 4, 1}, NHWC); + input->setName("input"); + // set input data + const float input_data[] = {-1.0, 2.0, -3.0, 4.0, + 5.0, 6.0, 7.0, -8.0, + -9.0, -10.0, 11.0, 12.0, + 13.0, 14.0, -15.0, -16.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, input_data, 16 * sizeof(float)); + input->unMap(); + auto output = _SpaceToDepth(input, 2); + const std::vector expectedOutput = {-1.0, 2.0, 5.0, 6.0, + -3.0, 4.0, 7.0, -8.0, + -9.0, -10.0, 13.0, 14.0, + 11.0, 12.0, -15.0, -16.0}; + const std::vector expectedDim = {1, 2, 2, 4}; + auto gotOutput = output->readMap(); + auto gotDim = output->getInfo()->dim; + if (!checkVector(gotOutput, expectedOutput.data(), 16, 0)) { + MNN_ERROR("SpaceToDepthTest test failed!\n"); + return false; + } + if (!checkVector(gotDim.data(), expectedDim.data(), 4, 0)) { + MNN_ERROR("SpaceToDepthTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(SpaceToDepthTest, "op/spacetodepth"); + diff --git a/test/op/SpatialProductTest.cpp b/test/op/SpatialProductTest.cpp index 3f61ec311..9f0a0ab5a 100644 --- a/test/op/SpatialProductTest.cpp +++ b/test/op/SpatialProductTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group 
Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/SqueezeTest.cpp b/test/op/SqueezeTest.cpp index 05e0c9bf7..3db727830 100644 --- a/test/op/SqueezeTest.cpp +++ b/test/op/SqueezeTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" -#include "Session.hpp" +#include "core/Session.hpp" #include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/StackTest.cpp b/test/op/StackTest.cpp new file mode 100644 index 000000000..6e7cfb0a3 --- /dev/null +++ b/test/op/StackTest.cpp @@ -0,0 +1,41 @@ +// +// StackTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/19. +// Copyright © 2018, Alibaba Group Holding Limited +// +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class StackTest : public MNNTestCase { +public: + virtual ~StackTest() = default; + virtual bool run() { + auto input0 = _Input({3,1,2}, NCHW); + input0->setName("input0"); + const float input0_data[] = {1.0, 2.0, 5.0, 6.0, 9.0, 10.0}; + auto input0Ptr = input0->writeMap(); + memcpy(input0Ptr, input0_data, 6 * sizeof(float)); + input0->unMap(); + auto input1 = _Input({3,1,2}, NCHW); + input1->setName("input1"); + const float input1_data[] = {3.0, 4.0, 7.0, 8.0, 11.0, 12.0}; + auto input1Ptr = input1->writeMap(); + memcpy(input1Ptr, input1_data, 6 * sizeof(float)); + input1->unMap(); + auto output = _Stack({input0,input1}, 2); + const std::vector expectedOutput = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 12, 0.01)) { + 
MNN_ERROR("StackTest test failed!\n"); + return false; + } + return true; + } +}; + +MNNTestSuiteRegister(StackTest, "op/stack"); diff --git a/test/op/StridedSliceTest.cpp b/test/op/StridedSliceTest.cpp index f41d7b584..e2fadf1c3 100644 --- a/test/op/StridedSliceTest.cpp +++ b/test/op/StridedSliceTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/TFQuantizedConv2DTest.cpp b/test/op/TFQuantizedConv2DTest.cpp index c970b84cc..a173d6ecc 100644 --- a/test/op/TFQuantizedConv2DTest.cpp +++ b/test/op/TFQuantizedConv2DTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" -#include "Session.hpp" +#include "core/Session.hpp" #include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/TanHTest.cpp b/test/op/TanHTest.cpp index bd6e70ac4..694e204d5 100644 --- a/test/op/TanHTest.cpp +++ b/test/op/TanHTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/TensorConverterTest.cpp b/test/op/TensorConverterTest.cpp index d524975d3..b2c2c24b5 100644 --- a/test/op/TensorConverterTest.cpp +++ b/test/op/TensorConverterTest.cpp @@ -6,12 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include 
"MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/TileTest.cpp b/test/op/TileTest.cpp index 0811bdf1d..95a4f2932 100644 --- a/test/op/TileTest.cpp +++ b/test/op/TileTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" #include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" +#include "core/Session.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/TransposeTest.cpp b/test/op/TransposeTest.cpp index a82cae23e..26181815e 100644 --- a/test/op/TransposeTest.cpp +++ b/test/op/TransposeTest.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" +#include #include "MNNTestSuite.h" -#include "Session.hpp" +#include "core/Session.hpp" #include "TFQuantizeOp_generated.h" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" #include "TestUtils.h" using namespace MNN; diff --git a/test/op/UnaryTest.cpp b/test/op/UnaryTest.cpp index 329e9a39e..f321c5d76 100644 --- a/test/op/UnaryTest.cpp +++ b/test/op/UnaryTest.cpp @@ -6,211 +6,428 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Interpreter.hpp" + +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "Session.hpp" -#include "TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(UnaryOpOperation type, int b, int c, int h, int w, bool tensorflow) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - { - auto dims = fbb.CreateVector(tensorflow ? 
std::vector({b, h, w, c}) : std::vector({b, c, h, w})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto ub = UnaryOpBuilder(fbb); - ub.add_opType(type); - ub.add_T(DataType_DT_FLOAT); - auto unary = ub.Finish(); - - auto name = fbb.CreateString("unary"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - OpBuilder builder(fbb); - builder.add_type(OpType_UnaryOp); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_UnaryOp); - builder.add_main(flatbuffers::Offset(unary.o)); - vec.push_back(builder.Finish()); +using namespace MNN::Express; +class AbsTest : public MNNTestCase { +public: + virtual ~AbsTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Abs(input); + const std::vector expectedOutput = {1.0, 2.0, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("AbsTest test failed!\n"); + return false; + } + return true; } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - if (tensorflow) { - BlobBuilder builder(fbb); - builder.add_dataType(DataType_DT_FLOAT); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = 
builder.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - } else { - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - } - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class UnaryCaffeTest : public MNNTestCase { -public: - virtual ~UnaryCaffeTest() = default; - virtual bool run() { - int valids[] = {UnaryOpOperation_SQUARE, UnaryOpOperation_RSQRT, UnaryOpOperation_NEG, UnaryOpOperation_EXP, - UnaryOpOperation_SQRT, UnaryOpOperation_ABS, UnaryOpOperation_CEIL}; - for (int i = 0; i < sizeof(valids) / sizeof(int); i++) { - UnaryOpOperation optype = (UnaryOpOperation)valids[i]; - for (int b = 1; b <= 2; b++) { - for (int c = 4; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - auto net = create(optype, b, c, h, w, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4, Tensor::CAFFE); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - 
TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 100 / 100.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } +}; +class NegativeTest : public MNNTestCase { +public: + virtual ~NegativeTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Negative(input); + const std::vector expectedOutput = {1.0, 2.0, -3.0, -4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("NegativeTest test failed!\n"); + return false; } return true; } }; - -class UnaryTensorflowTest : public MNNTestCase { -public: - virtual ~UnaryTensorflowTest() = default; - virtual bool run() { - int valids[] = {UnaryOpOperation_SQUARE, UnaryOpOperation_RSQRT, UnaryOpOperation_NEG, UnaryOpOperation_EXP, - UnaryOpOperation_SQRT, UnaryOpOperation_ABS, UnaryOpOperation_CEIL}; - for (int i = 0; i < sizeof(valids) / sizeof(int); i++) { - UnaryOpOperation optype = (UnaryOpOperation)valids[i]; - for (int b = 1; b <= 2; b++) { - for (int c = 4; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - auto net = create(optype, b, c, h, w, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto 
GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 100 / 100.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } +class FloorTest : public MNNTestCase { +public: + virtual ~FloorTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.3, -2.6, 3.2, 4.6}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Floor(input); + const std::vector expectedOutput = {-2.0, -3.0, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("FloorTest test failed!\n"); + return false; + } + return true; + } +}; +class CeilTest : public MNNTestCase { +public: + virtual ~CeilTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.3, -2.6, 3.2, 4.6}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Ceil(input); + const std::vector expectedOutput = {-1.0, -2.0, 4.0, 5.0}; + auto gotOutput = 
output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("CeilTest test failed!\n"); + return false; + } + return true; + } +}; +class SquareTest : public MNNTestCase { +public: + virtual ~SquareTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Square(input); + const std::vector expectedOutput = {1.0, 4.0, 9.0, 16.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("SquareTest test failed!\n"); + return false; + } + return true; + } +}; +class SqrtTest : public MNNTestCase { +public: + virtual ~SqrtTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 4.0, 9.0, 16.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Sqrt(input); + const std::vector expectedOutput = {1.0, 2.0, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("SqrtTest test failed!\n"); + return false; + } + return true; + } +}; +class RsqrtTest : public MNNTestCase { +public: + virtual ~RsqrtTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 4.0, 9.0, 16.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Rsqrt(input); + const std::vector expectedOutput = {1.0, 1.0/2.0, 1.0/3.0, 1.0/4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("RsqrtTest 
test failed!\n"); + return false; + } + return true; + } +}; +class ExpTest : public MNNTestCase { +public: + virtual ~ExpTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Exp(input); + const std::vector expectedOutput = {2.718, 7.389, 20.086, 54.598}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("ExpTest test failed!\n"); + return false; + } + return true; + } +}; +class LogTest : public MNNTestCase { +public: + virtual ~LogTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {2.718, 7.389, 20.086, 54.598}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Log(input); + const std::vector expectedOutput = {1.0, 2.0, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("LogTest test failed!\n"); + return false; + } + return true; + } +}; +class SinTest : public MNNTestCase { +public: + virtual ~SinTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {0.0, 3.14/2.0, 3.14, 3.14*3.0/2.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Sin(input); + const std::vector expectedOutput = {0.0, 1.0, 0.0, -1.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("SinTest test failed!\n"); + return false; + } + return true; + } +}; +class CosTest : public MNNTestCase { +public: + 
virtual ~CosTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {0.0, 3.14/2.0, 3.14, 3.14*3.0/2.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Cos(input); + const std::vector expectedOutput = {1.0, 0.0, -1.0, 0.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("CosTest test failed!\n"); + return false; + } + return true; + } +}; +class TanTest : public MNNTestCase { +public: + virtual ~TanTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {100.0, 200.0, 300.0, 400.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Tan(input); + const std::vector expectedOutput = {-0.59, -1.79, 45.24, 1.62}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("TanTest test failed!\n"); + return false; + } + return true; + } +}; +class AsinTest : public MNNTestCase { +public: + virtual ~AsinTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, 0.0, 1.0, 0.707}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Asin(input); + const std::vector expectedOutput = {-3.14/2.0, 0.0, 3.14/2.0, 3.14/4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("AsinTest test failed!\n"); + return false; + } + return true; + } +}; +class AcosTest : public MNNTestCase { +public: + virtual ~AcosTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + 
input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, 0.0, 1.0, 0.707}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Acos(input); + const std::vector expectedOutput = {3.14, 1.57, 0.0, 3.14/4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("AcosTest test failed!\n"); + return false; + } + return true; + } +}; +class AtanTest : public MNNTestCase { +public: + virtual ~AtanTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-2.0, -1.0, 0.0, 1.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Atan(input); + const std::vector expectedOutput = {-1.11, -3.14/4.0, 0.0, 3.14/4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("AtanTest test failed!\n"); + return false; + } + return true; + } +}; +class ReciprocalTest : public MNNTestCase { +public: + virtual ~ReciprocalTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-2.0, -4.0, 2.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Reciprocal(input); + const std::vector expectedOutput = {-0.5, -0.25, 0.50, 0.25}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("ReciprocalTest test failed!\n"); + return false; + } + return true; + } +}; +class Log1PTest : public MNNTestCase { +public: + virtual ~Log1PTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = 
{0.0, 1.0, 2.0, 3.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Log1p(input); + const std::vector expectedOutput = {0.0, 0.69, 1.10, 1.39}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("Log1PTest test failed!\n"); + return false; + } + return true; + } +}; +class TanhTest : public MNNTestCase { +public: + virtual ~TanhTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, 0.0, 1.0, 2.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Tanh(input); + const std::vector expectedOutput = {-0.76, 0.0, 0.76, 0.96}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("TanhTest test failed!\n"); + return false; + } + return true; + } +}; +class SigmoidTest : public MNNTestCase { +public: + virtual ~SigmoidTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, 0.0, 1.0, 2.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Sigmoid(input); + const std::vector expectedOutput = {0.27, 0.50, 0.73, 0.88}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("SigmoidTest test failed!\n"); + return false; } return true; } }; -MNNTestSuiteRegister(UnaryCaffeTest, "op/unary/caffe"); -MNNTestSuiteRegister(UnaryTensorflowTest, "op/unary/tensorflow"); +MNNTestSuiteRegister(AbsTest, "op/unary/abs"); +MNNTestSuiteRegister(NegativeTest, "op/unary/negative"); +MNNTestSuiteRegister(FloorTest, "op/unary/floor"); +MNNTestSuiteRegister(CeilTest, "op/unary/ceil"); 
+MNNTestSuiteRegister(SquareTest, "op/unary/square"); +MNNTestSuiteRegister(SqrtTest, "op/unary/sqrt"); +MNNTestSuiteRegister(RsqrtTest, "op/unary/rsqrt"); +MNNTestSuiteRegister(ExpTest, "op/unary/exp"); +MNNTestSuiteRegister(LogTest, "op/unary/log"); +MNNTestSuiteRegister(SinTest, "op/unary/sin"); +MNNTestSuiteRegister(CosTest, "op/unary/cos"); +MNNTestSuiteRegister(TanTest, "op/unary/tan"); +MNNTestSuiteRegister(AsinTest, "op/unary/asin"); +MNNTestSuiteRegister(AcosTest, "op/unary/acos"); +MNNTestSuiteRegister(AtanTest, "op/unary/atan"); +MNNTestSuiteRegister(ReciprocalTest, "op/unary/reciprocal"); +MNNTestSuiteRegister(Log1PTest, "op/unary/log1p"); +MNNTestSuiteRegister(TanhTest, "op/unary/tanh"); +MNNTestSuiteRegister(SigmoidTest, "op/unary/sigmoid"); diff --git a/test/op/UnravelIndexTest.cpp b/test/op/UnravelIndexTest.cpp new file mode 100644 index 000000000..8bd64a79d --- /dev/null +++ b/test/op/UnravelIndexTest.cpp @@ -0,0 +1,55 @@ +// +// UnravelIndexTest.cpp +// MNNTests +// +// Created by MNN on 2019/11/26. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; + +class UnravelIndexTest : public MNNTestCase { + virtual ~UnravelIndexTest() = default; + + virtual bool run() { + { + const int indicesData[] = {22, 41, 37}; + const int shapeData[] = {7, 6}; + auto indices = _Const(indicesData, {3}, NHWC, halide_type_of()); + auto dims = _Const(shapeData, {2}, NHWC, halide_type_of()); + auto result = _UnravelIndex(indices, dims); + + const int expectedData[] = {3, 6, 6, 4, 5, 1}; + + auto resultData = result->readMap(); + const int size = result->getInfo()->size; + if (!checkVector(resultData, expectedData, size, 0)) { + return false; + } + } + { + const int indicesData[] = {1621}; + const int shapeData[] = {6, 7, 8, 9}; + auto indices = _Const(indicesData, {1}, NHWC, halide_type_of()); + auto dims = _Const(shapeData, {4}, NHWC, halide_type_of()); + auto result = _UnravelIndex(indices, dims); + + const int expectedData[] = {3, 1, 4, 1}; + + auto resultData = result->readMap(); + const int size = result->getInfo()->size; + if (!checkVector(resultData, expectedData, size, 0)) { + return false; + } + } + + return true; + } +}; + +MNNTestSuiteRegister(UnravelIndexTest, "op/UnravelIndexTest"); diff --git a/test/op/UnstackTest.cpp b/test/op/UnstackTest.cpp new file mode 100644 index 000000000..dcbbc6663 --- /dev/null +++ b/test/op/UnstackTest.cpp @@ -0,0 +1,41 @@ +// +// UnstackTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/18. 
+// Copyright © 2018, Alibaba Group Holding Limited +// +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class UnstackTest : public MNNTestCase { +public: + virtual ~UnstackTest() = default; + virtual bool run() { + auto input = _Input({3,1,2,2}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 12 * sizeof(float)); + input->unMap(); + auto outputs = _Unstack(input, -2); + const std::vector expectedOutput_0 = {1.0, 2.0, 5.0, 6.0, 9.0, 10.0}; + const std::vector expectedOutput_1 = {3.0, 4.0, 7.0, 8.0, 11.0, 12.0}; + auto gotOutput_0 = outputs[0]->readMap(); + auto gotOutput_1 = outputs[1]->readMap(); + if (!checkVector(gotOutput_0, expectedOutput_0.data(), 6, 0.01)) { + MNN_ERROR("UnstackTest test failed!\n"); + return false; + } + if (!checkVector(gotOutput_1, expectedOutput_1.data(), 6, 0.01)) { + MNN_ERROR("UnstackTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(UnstackTest, "op/unstack"); diff --git a/test/op/ZerosLikeTest.cpp b/test/op/ZerosLikeTest.cpp new file mode 100644 index 000000000..d45b746f1 --- /dev/null +++ b/test/op/ZerosLikeTest.cpp @@ -0,0 +1,43 @@ +// +// ZerosLikeTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/18. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + + +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +using namespace MNN::Express; +class ZerosLikeTest : public MNNTestCase { +public: + virtual ~ZerosLikeTest() = default; + virtual bool run() { + auto input = _Input({1, 4, 4, 1}, NHWC); + input->setName("input"); + // set input data + const float input_data[] = {-1.0, 2.0, -3.0, 4.0, + 5.0, 6.0, 7.0, -8.0, + -9.0, -10.0, 11.0, 12.0, + 13.0, 14.0, -15.0, -16.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, input_data, 16 * sizeof(float)); + input->unMap(); + auto output = _ZerosLike(input); + const std::vector expectedOutput = {0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 16, 0.01)) { + MNN_ERROR("ZerosLikeTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(ZerosLikeTest, "op/zeroslike"); + diff --git a/test/speed/BinarySpeedTest.cpp b/test/speed/BinarySpeedTest.cpp index f4188fa39..428baa6c4 100644 --- a/test/speed/BinarySpeedTest.cpp +++ b/test/speed/BinarySpeedTest.cpp @@ -8,9 +8,11 @@ #include #include -#include "ExprCreator.hpp" +#include +#include +#include #define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include #include "MNNTestSuite.h" using namespace MNN::Express; #define WIDTH 5001 @@ -21,7 +23,7 @@ class BinarySpeedTest : public MNNTestCase { void SubTest() { auto input0 = _Input({WIDTH, HEIGHT}); auto input1 = _Input({WIDTH, HEIGHT}); - auto output = _Sub(input0, input1); + auto output = input0 - input1; { AUTOTIME; for (int i=0; i -#include "ExprCreator.hpp" +#include +#include +#include #include "MNNTestSuite.h" #include "MNN_generated.h" #define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include using namespace MNN::Express; static void fillFloat(float* dst, int h, int w, float offset = 0.0f) { @@ -57,7 +59,7 @@ class MatMulSpeedTest 
: public MNNTestCase { auto matmulParam = op->main.AsMatMul(); matmulParam->transposeA = false; matmulParam->transposeB = false; - + auto x0 = _Input({}, NHWC, halide_type_of()); auto x1 = _Input({}, NHWC, halide_type_of()); auto y = Variable::create(Expr::create(op.get(), {x0, x1})); @@ -65,7 +67,7 @@ class MatMulSpeedTest : public MNNTestCase { x1->resize({l, e}); fillFloat(x0->writeMap(), h, l); fillFloat(x1->writeMap(), l, e); - + auto res = checkMatMul(y->readMap(), x0->readMap(), x1->readMap(), e, l, h); if (!res) { FUNC_PRINT(1); diff --git a/tools/converter/CMakeLists.txt b/tools/converter/CMakeLists.txt index bd70a756d..329cf6343 100644 --- a/tools/converter/CMakeLists.txt +++ b/tools/converter/CMakeLists.txt @@ -1,134 +1,56 @@ -cmake_minimum_required(VERSION 2.8) - -project(MNNConverter) -SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) - -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -set(PROJECT_VERSION "0.0.0.1") - -configure_file( - ./CONFIGURECONVERT.h.in - ./CONFIGURECONVERT.h -) -include(cmake/macros.cmake) -# -----------debug or no----------- -option(MNN_CONVERT_DEBUG "Enable MNN CONVERT DEBUG" OFF) -option(MNN_BUILD_SHARED_LIBS "MNN build shared or static lib" ON) -if (WIN32) - foreach(flag_var - CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE - CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO - CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) - if (MNN_BUILD_SHARED_LIBS) - if(${flag_var} MATCHES "/MT") - string(REGEX REPLACE "/MT" "/MD" ${flag_var} "${${flag_var}}") - endif() - else () - if(${flag_var} MATCHES "/MD") - string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") - endif() - endif () - endforeach() -endif() - -if(MNN_CONVERT_DEBUG) - set(CMAKE_BUILD_TYPE "Debug") - if(MSVC) - set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /DEBUG") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /DEBUG") - else() - 
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -g") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g") - endif() -else() - set(CMAKE_BUILD_TYPE "Release") - add_definitions(-DNDEBUG) -endif() -if(SYSTEM.Linux) - add_definitions(-fPIC) -endif() -option(TF_CONVERT_ORIGIN "Fall Back to Origin Model Converter" OFF) -if (TF_CONVERT_ORIGIN) - add_definitions(-DTF_CONVERT_ORIGIN) -endif() - -# -----------Enable tensorflow model optimizer or no----------- -option(TFMODEL_OPTIMIZE "Enable tensorflow model optimizer" OFF) -if(TFMODEL_OPTIMIZE) - add_definitions(-DTFMODEL_OPTIMIZE) -endif() - -# -----------find protobuf lib----------- -if (MSVC OR WIN32) - set(Protobuf_SRC_ROOT_FOLDER $ENV{Protobuf_SRC_ROOT_FOLDER}) -endif() -find_package(Protobuf REQUIRED) -if (${CMAKE_VERSION} VERSION_LESS "3.6.0") - set(Protobuf_LIBRARIES ${PROTOBUF_LIBRARIES}) - set(Protobuf_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS}) -endif() - -# -----------set path----------- -set(SRC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/source) -set(IR_PATH ${MNN_SOURCE_DIR}/schema/current ${MNN_SOURCE_DIR}/3rd_party/flatbuffers/include) -set(COMMON_PATH ${SRC_PATH}/common) -set(CAFFE_PATH ${SRC_PATH}/caffe) -set(TENSORFLOW_PATH ${SRC_PATH}/tensorflow) -set(ONNX_PATH ${SRC_PATH}/onnx) -set(TFLITE_PATH ${SRC_PATH}/tflite) -set(MNN_PATH ${SRC_PATH}/MNN) -set(OPTIMIZER_PATH ${SRC_PATH}/optimizer) -set(INCLUDE_PATH ${SRC_PATH}/include) - -# -----------set include path----------- -include_directories(${CMAKE_CURRENT_BINARY_DIR}) -include_directories(${INCLUDE_PATH}) -include_directories(${IR_PATH}) -include_directories(${TFLITE_PATH}/schema) -include_directories(${MNN_SOURCE_DIR}/include) -include_directories(${MNN_SOURCE_DIR}/express/include) -include_directories(${MNN_SOURCE_DIR}/3rd_party/half) - -add_subdirectory(${TENSORFLOW_PATH}) -add_subdirectory(${ONNX_PATH}) -add_subdirectory(${CAFFE_PATH}) -add_subdirectory(${MNN_PATH}) -add_subdirectory(${OPTIMIZER_PATH}) -add_subdirectory(${TFLITE_PATH}) - -file(GLOB 
COMMON_SRC ${COMMON_PATH}/*) - - -add_library(COMMON_LIB STATIC ${COMMON_SRC}) - -add_executable(MNNDump2Json.out ${SRC_PATH}/MNNDump2Json.cpp) - -set(COMMAND_SRC - ${SRC_PATH}/cli.cpp - ${SRC_PATH}/config.cpp +IF(MNN_BUILD_CONVERTER) + option(TF_CONVERT_ORIGIN "Fall Back to Origin Model Converter" OFF) + option(TFMODEL_OPTIMIZE "Enable tensorflow model optimizer" OFF) + IF(MNN_PORTABLE_BUILD) + SET(Protobuf_USE_STATIC_LIBS ON) + ENDIF() + find_package(Protobuf REQUIRED) + if (${CMAKE_VERSION} VERSION_LESS "3.6.0") + set(Protobuf_LIBRARIES ${PROTOBUF_LIBRARIES}) + set(Protobuf_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS}) + endif() + SET(MNN_CONVERTER_INC ${CMAKE_CURRENT_LIST_DIR}/include/ ${CMAKE_CURRENT_LIST_DIR}/source/tflite/schema/ ${Protobuf_INCLUDE_DIRS}) + SET(MNN_CONVERTER_BACKENDS_TARGETS "") + SET(MNN_CONVERTER_BACKENDS_OBJECTS "") + include(${CMAKE_CURRENT_LIST_DIR}/source/tensorflow/CMakeLists.txt) + include(${CMAKE_CURRENT_LIST_DIR}/source/onnx/CMakeLists.txt) + include(${CMAKE_CURRENT_LIST_DIR}/source/caffe/CMakeLists.txt) + include(${CMAKE_CURRENT_LIST_DIR}/source/MNN/CMakeLists.txt) + include(${CMAKE_CURRENT_LIST_DIR}/source/optimizer/CMakeLists.txt) + include(${CMAKE_CURRENT_LIST_DIR}/source/tflite/CMakeLists.txt) + + file(GLOB COMMON_SRC ${CMAKE_CURRENT_LIST_DIR}/source/common/*.cpp ${CMAKE_SOURCE_DIR}/3rd_party/flatbuffers/src/util.cpp) + add_executable(MNNDump2Json ${CMAKE_CURRENT_LIST_DIR}/source/MNNDump2Json.cpp) + add_dependencies(MNNDump2Json MNN_SCHEMA_GEN) + add_executable(MNNConvert + ${CMAKE_CURRENT_LIST_DIR}/source/MNNConverter.cpp + ${CMAKE_CURRENT_LIST_DIR}/source/cli.cpp + ${CMAKE_CURRENT_LIST_DIR}/source/config.cpp ${MNN_SOURCE_DIR}/3rd_party/flatbuffers/src/util.cpp -) - -add_executable(MNNConvert ${SRC_PATH}/MNNConverter.cpp ${COMMAND_SRC}) -target_include_directories(MNNConvert PRIVATE ${SRC_PATH}) - -if (MSVC OR WIN32) - target_compile_options(MNNConvert PRIVATE "/wd4267") - target_link_options(MNNConvert PRIVATE "/ignore:4099") - 
foreach(DEPEND tensorflow caffe onnx tflite optimizer) - target_link_options(MNNConvert PRIVATE /WHOLEARCHIVE:$) - endforeach() -endif() - -set(CONVERTER_DEPEND tensorflow caffe onnx mnn_bizcode tflite optimizer COMMON_LIB ${Protobuf_LIBRARIES}) -if(NOT MNN_BUILD_SHARED_LIBS) - if (BUILD_IOS OR APPLE) - set(CONVERTER_DEPEND -Wl,-all_load ${CONVERTER_DEPEND} -Wl,-noall_load) + ${SCHEMA_TARGETS} + ) + IF(MNN_BUILD_SHARED_LIBS) + add_library(MNNConvertDeps SHARED ${COMMON_SRC} ${MNN_CONVERTER_BACKENDS_OBJECTS} ${PROJECT_SOURCE_DIR}/cmake/dummy.cpp) + target_link_libraries(MNNConvertDeps PRIVATE ${MNN_DEPS} ${Protobuf_LIBRARIES}) + ELSE() + add_library(MNNConvertDeps STATIC ${COMMON_SRC} ${MNN_CONVERTER_BACKENDS_OBJECTS} ${PROJECT_SOURCE_DIR}/cmake/dummy.cpp) + target_link_libraries(MNNConvertDeps INTERFACE ${MNN_DEPS} ${Protobuf_LIBRARIES}) + ENDIF() + add_dependencies(MNNConvertDeps ${MNN_DEPS}) + target_link_libraries(MNNConvert ${MNN_DEPS} ${Protobuf_LIBRARIES}) + + IF(NOT MNN_BUILD_SHARED_LIBS) + if(APPLE) + target_link_libraries(MNNConvert -Wl,-all_load MNNConvertDeps -Wl,-noall_load) elseif (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - set(CONVERTER_DEPEND -Wl,--whole-archive ${CONVERTER_DEPEND} -Wl,--no-whole-archive) + target_link_libraries(MNNConvert -Wl,--whole-archive MNNConvertDeps -Wl,--no-whole-archive) endif() -endif() -target_link_libraries(MNNConvert ${CONVERTER_DEPEND} ${END_EXTRA_LINK_FLAG}) + ELSE() + target_link_libraries(MNNConvert MNNConvertDeps) + ENDIF() + add_dependencies(MNNConvert MNN_SCHEMA_GEN) + FOREACH(CONVERTER_TARGET ${MNN_CONVERTER_BACKENDS_TARGETS} MNNConvert MNNDump2Json MNNConvertDeps) + target_include_directories(${CONVERTER_TARGET} PRIVATE ${MNN_CONVERTER_INC}) + add_dependencies(${CONVERTER_TARGET} MNN_SCHEMA_GEN) + ENDFOREACH() +ENDIF() diff --git a/tools/converter/CONFIGURECONVERT.h.in b/tools/converter/CONFIGURECONVERT.h.in deleted file mode 100644 index 4f67b1490..000000000 --- 
a/tools/converter/CONFIGURECONVERT.h.in +++ /dev/null @@ -1 +0,0 @@ -#define PROJECT_VERSION "@PROJECT_VERSION@" \ No newline at end of file diff --git a/tools/converter/cmake/macros.cmake b/tools/converter/cmake/macros.cmake deleted file mode 100644 index 08287e4d6..000000000 --- a/tools/converter/cmake/macros.cmake +++ /dev/null @@ -1,37 +0,0 @@ -# The Lib Prefix -if (UNIX) - set(LIB_PFX "lib") - if (APPLE) - set(LIB_EXT ".dylib") - else () - set(LIB_EXT ".so") - endif () -else (UNIX) - set(LIB_PFX "") - set(LIB_EXT ".dll") -endif (UNIX) - -if(CMAKE_SYSTEM_NAME MATCHES "^Android") - set(SYSTEM.Android 1) -elseif(CMAKE_SYSTEM_NAME MATCHES "^Linux") - set(SYSTEM.Linux 1) -endif() - -if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") - set(PROCESSOR.arm 1) -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64") - set(PROCESSOR.aarch64 1) -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^x86") - set(PROCESSOR.x86 1) -endif() - -# The Compiler ID -if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") - set(COMPILER_NAME "CLANG") -elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - set(COMPILER_NAME "GCC") -elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") - # using Intel C++ -elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - set(COMPILER_NAME "MSVC") -endif() diff --git a/tools/converter/source/include/PostConverter.hpp b/tools/converter/include/PostConverter.hpp similarity index 95% rename from tools/converter/source/include/PostConverter.hpp rename to tools/converter/include/PostConverter.hpp index 7fb5b2523..eb972563f 100644 --- a/tools/converter/source/include/PostConverter.hpp +++ b/tools/converter/include/PostConverter.hpp @@ -24,6 +24,6 @@ /** *@brief optimize MNN net */ -std::unique_ptr optimizeNet(std::unique_ptr& netT); +std::unique_ptr optimizeNet(std::unique_ptr& netT, bool forTraining); #endif // OPTIMIZER_HPP diff --git a/tools/converter/source/include/addBizCode.hpp b/tools/converter/include/addBizCode.hpp similarity index 100% rename from 
tools/converter/source/include/addBizCode.hpp rename to tools/converter/include/addBizCode.hpp diff --git a/tools/converter/source/include/caffeConverter.hpp b/tools/converter/include/caffeConverter.hpp similarity index 100% rename from tools/converter/source/include/caffeConverter.hpp rename to tools/converter/include/caffeConverter.hpp diff --git a/tools/converter/source/include/liteConverter.hpp b/tools/converter/include/liteConverter.hpp similarity index 100% rename from tools/converter/source/include/liteConverter.hpp rename to tools/converter/include/liteConverter.hpp diff --git a/tools/converter/source/include/logkit.h b/tools/converter/include/logkit.h similarity index 100% rename from tools/converter/source/include/logkit.h rename to tools/converter/include/logkit.h diff --git a/tools/converter/source/include/onnxConverter.hpp b/tools/converter/include/onnxConverter.hpp similarity index 100% rename from tools/converter/source/include/onnxConverter.hpp rename to tools/converter/include/onnxConverter.hpp diff --git a/tools/converter/source/include/tensorflowConverter.hpp b/tools/converter/include/tensorflowConverter.hpp similarity index 100% rename from tools/converter/source/include/tensorflowConverter.hpp rename to tools/converter/include/tensorflowConverter.hpp diff --git a/tools/converter/source/include/writeFb.hpp b/tools/converter/include/writeFb.hpp similarity index 100% rename from tools/converter/source/include/writeFb.hpp rename to tools/converter/include/writeFb.hpp diff --git a/tools/converter/source/MNN/CMakeLists.txt b/tools/converter/source/MNN/CMakeLists.txt index d34a570f1..f0ce289ea 100644 --- a/tools/converter/source/MNN/CMakeLists.txt +++ b/tools/converter/source/MNN/CMakeLists.txt @@ -1,14 +1,4 @@ - -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -include_directories(${SRC_PATH}/IR) -include_directories(${SRC_PATH}/include) - -file(GLOB MNN_SRC ${SRC_PATH}/MNN/*) - -if(MNN_BUILD_SHARED_LIBS) - add_library(mnn_bizcode SHARED 
${MNN_SRC}) -else() - add_library(mnn_bizcode STATIC ${MNN_SRC}) -endif() +file(GLOB MNN_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) +add_library(MNNConverterMNN OBJECT ${MNN_SRC}) +list(APPEND MNN_CONVERTER_BACKENDS_OBJECTS $) +list(APPEND MNN_CONVERTER_BACKENDS_TARGETS MNNConverterMNN) diff --git a/tools/converter/source/MNNConverter.cpp b/tools/converter/source/MNNConverter.cpp index 16051316e..0ea954a42 100644 --- a/tools/converter/source/MNNConverter.cpp +++ b/tools/converter/source/MNNConverter.cpp @@ -21,33 +21,38 @@ int main(int argc, char *argv[]) { modelConfig modelPath; // parser command line arg - Cli::initializeMNNConvertArgs(modelPath, argc, argv); - Cli::printProjectBanner(); - - std::cout << "Start to Convert Other Model Format To MNN Model..." << std::endl; - std::unique_ptr netT = std::unique_ptr(new MNN::NetT()); - if (modelPath.model == modelConfig::CAFFE) { - caffe2MNNNet(modelPath.prototxtFile, modelPath.modelFile, modelPath.bizCode, netT); - } else if (modelPath.model == modelConfig::TENSORFLOW) { - tensorflow2MNNNet(modelPath.modelFile, modelPath.bizCode, netT); - } else if (modelPath.model == modelConfig::MNN) { - addBizCode(modelPath.modelFile, modelPath.bizCode, netT); - } else if (modelPath.model == modelConfig::ONNX) { - onnx2MNNNet(modelPath.modelFile, modelPath.bizCode, netT); - } else if (modelPath.model == modelConfig::TFLITE) { - tflite2MNNNet(modelPath.modelFile, modelPath.bizCode, netT); - } else { - std::cout << "Not Support Model Type" << std::endl; - } - - if (modelPath.model != modelConfig::MNN) { - std::cout << "Start to Optimize the MNN Net..." 
<< std::endl; - std::unique_ptr newNet = optimizeNet(netT); - writeFb(newNet, modelPath.MNNModel, modelPath.benchmarkModel, modelPath.saveHalfFloat); - } else { - writeFb(netT, modelPath.MNNModel, modelPath.benchmarkModel, modelPath.saveHalfFloat); + try { + Cli::initializeMNNConvertArgs(modelPath, argc, argv); + Cli::printProjectBanner(); + + std::cout << "Start to Convert Other Model Format To MNN Model..." << std::endl; + std::unique_ptr netT = std::unique_ptr(new MNN::NetT()); + if (modelPath.model == modelConfig::CAFFE) { + caffe2MNNNet(modelPath.prototxtFile, modelPath.modelFile, modelPath.bizCode, netT); + } else if (modelPath.model == modelConfig::TENSORFLOW) { + tensorflow2MNNNet(modelPath.modelFile, modelPath.bizCode, netT); + } else if (modelPath.model == modelConfig::MNN) { + addBizCode(modelPath.modelFile, modelPath.bizCode, netT); + } else if (modelPath.model == modelConfig::ONNX) { + onnx2MNNNet(modelPath.modelFile, modelPath.bizCode, netT); + } else if (modelPath.model == modelConfig::TFLITE) { + tflite2MNNNet(modelPath.modelFile, modelPath.bizCode, netT); + } else { + std::cout << "Not Support Model Type" << std::endl; + } + + if (modelPath.model != modelConfig::MNN) { + std::cout << "Start to Optimize the MNN Net..." << std::endl; + std::unique_ptr newNet = optimizeNet(netT, modelPath.forTraining); + writeFb(newNet, modelPath.MNNModel, modelPath.benchmarkModel, modelPath.saveHalfFloat); + } else { + writeFb(netT, modelPath.MNNModel, modelPath.benchmarkModel, modelPath.saveHalfFloat); + } + } catch (const cxxopts::OptionException &e) { + std::cerr << "Error while parsing options! " << std::endl; + std::cerr << e.what() << std::endl; + exit(EXIT_FAILURE); } - std::cout << "Converted Done!" 
<< std::endl; return 0; diff --git a/tools/converter/source/MNNDump2Json.cpp b/tools/converter/source/MNNDump2Json.cpp index e15041176..cfb639adf 100644 --- a/tools/converter/source/MNNDump2Json.cpp +++ b/tools/converter/source/MNNDump2Json.cpp @@ -58,9 +58,15 @@ int main(int argc, const char** argv) { extra->info.clear(); } else if(type == MNN::OpParameter::OpParameter_LSTM){ auto param = opParam->main.AsLSTM(); - param->weightH->float32s.clear(); - param->weightI->float32s.clear(); - param->bias->float32s.clear(); + if (param->weightH) { + param->weightH->float32s.clear(); + } + if (param->weightI) { + param->weightI->float32s.clear(); + } + if (param->bias) { + param->bias->float32s.clear(); + } } } flatbuffers::FlatBufferBuilder newBuilder(1024); diff --git a/tools/converter/source/caffe/BNLL.cpp b/tools/converter/source/caffe/BNLL.cpp new file mode 100644 index 000000000..fedb05df0 --- /dev/null +++ b/tools/converter/source/caffe/BNLL.cpp @@ -0,0 +1,36 @@ +// +// BNLL.cpp +// MNNConverter +// +// Created by MNN on 2019/12/06. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpConverter.hpp" + +class BNLL : public OpConverter { +public: + virtual void run(MNN::OpT* dstOp, const caffe::LayerParameter& parameters, const caffe::LayerParameter& weight); + BNLL() { + } + virtual ~BNLL() { + } + virtual MNN::OpType opType() { + return MNN::OpType_UnaryOp; + } + virtual MNN::OpParameter type() { + return MNN::OpParameter_UnaryOp; + } +}; + +void BNLL::run(MNN::OpT* dstOp, const caffe::LayerParameter& parameters, const caffe::LayerParameter& weight) { + auto parameter = new MNN::UnaryOpT; + + parameter->T = MNN::DataType_DT_FLOAT; + + parameter->opType = MNN::UnaryOpOperation_BNLL; + + dstOp->main.value = parameter; +} + +static OpConverterRegister ____a("BNLL"); diff --git a/tools/converter/source/caffe/CMakeLists.txt b/tools/converter/source/caffe/CMakeLists.txt index 5d5a92935..91d06ce77 100644 --- a/tools/converter/source/caffe/CMakeLists.txt +++ b/tools/converter/source/caffe/CMakeLists.txt @@ -1,35 +1,10 @@ - -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -if (MSVC OR WIN32) - set(Protobuf_SRC_ROOT_FOLDER $ENV{Protobuf_SRC_ROOT_FOLDER}) -endif() -find_package(Protobuf REQUIRED) -if (${CMAKE_VERSION} VERSION_LESS "3.6.0") - set(Protobuf_LIBRARIES ${PROTOBUF_LIBRARIES}) - set(Protobuf_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS}) -endif() - protobuf_generate_cpp(CAFFE_PROTO_SRCS CAFFE_PROTO_HDRS - caffe.proto + ${CMAKE_CURRENT_LIST_DIR}/caffe.proto ) - -file(GLOB CAFFE_SRC ${SRC_PATH}/caffe/*) -if(MNN_BUILD_SHARED_LIBS) - add_library(caffe SHARED ${CAFFE_SRC} ${CAFFE_PROTO_SRCS}) -else() - add_library(caffe STATIC ${CAFFE_SRC} ${CAFFE_PROTO_SRCS}) -endif() - -if (MSVC OR WIN32) - target_compile_options(caffe PRIVATE "/wd4267" "/wd4244" "/wd4305") -endif() - -target_include_directories(caffe PRIVATE - ${Protobuf_INCLUDE_DIRS} +file(GLOB CAFFE_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) +add_library(MNNConverterCaffe OBJECT ${CAFFE_SRC} ${CAFFE_PROTO_SRCS}) 
+target_include_directories(MNNConverterCaffe PRIVATE ${CMAKE_CURRENT_BINARY_DIR} - ${SRC_PATH}/include ) - -target_link_libraries(caffe ${Protobuf_LIBRARIES}) +list(APPEND MNN_CONVERTER_BACKENDS_OBJECTS $) +list(APPEND MNN_CONVERTER_BACKENDS_TARGETS MNNConverterCaffe) diff --git a/tools/converter/source/caffe/Convolution.cpp b/tools/converter/source/caffe/Convolution.cpp index 57b83f772..a1236c1b2 100644 --- a/tools/converter/source/caffe/Convolution.cpp +++ b/tools/converter/source/caffe/Convolution.cpp @@ -27,24 +27,13 @@ class ConvolutionCommon : public OpConverter { common->outputCount = p.num_output(); auto& weightBlob = weight.blobs(0); - const auto& layerType = parameters.type(); - // get weight information from weight Blob shape(caffe proto v2) if (weightBlob.has_shape()) { + // get weight information from weight Blob shape(caffe proto v2) DCHECK(weightBlob.shape().dim_size() == 4) << "Conv Weight Dimension ERROR!"; - if (layerType == "Deconvolution") { - common->inputCount = weightBlob.shape().dim(0) * common->group; - } else { - common->inputCount = weightBlob.shape().dim(1) * common->group; - } + common->inputCount = weightBlob.shape().dim(0) * weightBlob.shape().dim(1) / p.num_output() * common->group; } else { // get shape information from Blob parameters(caffe proto v1) - if (layerType == "Deconvolution") { - DCHECK(weightBlob.has_num()) << "Caffemodel ERROR!"; - common->inputCount = weightBlob.num() * common->group; - } else { - DCHECK(weightBlob.has_channels()) << "Caffemodel ERROR!"; - common->inputCount = weightBlob.channels() * common->group; - } + common->inputCount = weightBlob.num() * weightBlob.channels() / p.num_output() * common->group; } // kernelsize int kernelSize[3]; diff --git a/tools/converter/source/caffe/OpConverter.cpp b/tools/converter/source/caffe/OpConverter.cpp index e4050fcd9..2bf36a3c9 100644 --- a/tools/converter/source/caffe/OpConverter.cpp +++ b/tools/converter/source/caffe/OpConverter.cpp @@ -7,7 +7,11 @@ // #include 
"OpConverter.hpp" +#include "Tensor_generated.h" +#include #include +#include +#include OpConverterSuit* OpConverterSuit::global = nullptr; class DefaultCaffeOpConverter : public OpConverter { @@ -16,6 +20,199 @@ class DefaultCaffeOpConverter : public OpConverter { dstOp->main.value = new MNN::ExtraT; dstOp->main.AsExtra()->engine = "Caffe"; dstOp->main.AsExtra()->type = parameters.type(); + + if (parameters.type() == "Power") { + std::unique_ptr attr1(new MNN::AttributeT); + attr1->key = "scale"; + attr1->f = parameters.power_param().scale(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr1)); + + std::unique_ptr attr2(new MNN::AttributeT); + attr2->key = "shift"; + attr2->f = parameters.power_param().shift(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr2)); + + std::unique_ptr attr3(new MNN::AttributeT); + attr3->key = "power"; + attr3->f = parameters.power_param().power(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr3)); + } + + if (parameters.type() == "Exp") { + std::unique_ptr attr1(new MNN::AttributeT); + attr1->key = "base"; + attr1->f = parameters.exp_param().base(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr1)); + + std::unique_ptr attr2(new MNN::AttributeT); + attr2->key = "scale"; + attr2->f = parameters.exp_param().scale(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr2)); + + std::unique_ptr attr3(new MNN::AttributeT); + attr3->key = "shift"; + attr3->f = parameters.exp_param().shift(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr3)); + } + + if (parameters.type() == "Log") { + std::unique_ptr attr1(new MNN::AttributeT); + attr1->key = "base"; + attr1->f = parameters.log_param().base(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr1)); + + std::unique_ptr attr2(new MNN::AttributeT); + attr2->key = "scale"; + attr2->f = parameters.log_param().scale(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr2)); + + std::unique_ptr attr3(new MNN::AttributeT); + 
attr3->key = "shift"; + attr3->f = parameters.log_param().shift(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr3)); + } + + if (parameters.type() == "Clip") { + std::unique_ptr attr1(new MNN::AttributeT); + attr1->key = "min"; + attr1->f = parameters.clip_param().min(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr1)); + + std::unique_ptr attr2(new MNN::AttributeT); + attr2->key = "max"; + attr2->f = parameters.clip_param().max(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr2)); + } + + if (parameters.type() == "MVN") { + std::unique_ptr attr1(new MNN::AttributeT); + attr1->key = "across_channels"; + attr1->b = parameters.mvn_param().across_channels(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr1)); + + std::unique_ptr attr2(new MNN::AttributeT); + attr2->key = "eps"; + attr2->f = parameters.mvn_param().eps(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr2)); + + std::unique_ptr attr3(new MNN::AttributeT); + attr3->key = "normalize_variance"; + attr3->b = parameters.mvn_param().normalize_variance(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr3)); + } + + if (parameters.type() == "Bias") { + std::unique_ptr attr1(new MNN::AttributeT); + attr1->key = "axis"; + attr1->i = parameters.bias_param().axis(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr1)); + + std::unique_ptr attr2(new MNN::AttributeT); + attr2->key = "num_axes"; + attr2->i = parameters.bias_param().num_axes(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr2)); + + if (parameters.blobs_size() != 0) { + MNN_ASSERT(parameters.blobs_size() == 1); + std::unique_ptr attr3(new MNN::AttributeT); + attr3->key = "bias"; + auto shapeSize = parameters.blobs(0).shape().dim_size(); + std::vector biasShape; + int biasSize = 1; + for (int i = 0; i < shapeSize; i++) { + biasShape.emplace_back(parameters.blobs(0).shape().dim(i)); + biasSize *= biasShape[i]; + } + attr3->tensor->dims = biasShape; + attr3->tensor->dataFormat 
= MNN::MNN_DATA_FORMAT::MNN_DATA_FORMAT_NCHW; + attr3->tensor->float32s.clear(); + for (int i = 0; i < biasSize; i++) { + attr3->tensor->float32s.emplace_back(parameters.blobs(0).data(i)); + } + attr3->i = biasSize; + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr3)); + } + } + + if (parameters.type() == "Embed") { + std::unique_ptr attr1(new MNN::AttributeT); + attr1->key = "num_output"; + attr1->i = parameters.embed_param().num_output(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr1)); + + std::unique_ptr attr2(new MNN::AttributeT); + attr2->key = "input_dim"; + attr2->i = parameters.embed_param().input_dim(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr2)); + + std::unique_ptr attr3(new MNN::AttributeT); + attr3->key = "bias_term"; + attr3->b = parameters.embed_param().bias_term(); + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr3)); + + std::unique_ptr attr4(new MNN::AttributeT); + attr4->key = "weights"; + auto shapeSize = parameters.blobs(0).shape().dim_size(); + std::vector weightsShape; + int weightsSize = 1; + for (int i = 0; i < shapeSize; i++) { + weightsShape.emplace_back(parameters.blobs(0).shape().dim(i)); + weightsSize *= weightsShape[i]; + } + attr4->tensor->dims = weightsShape; + attr4->tensor->dataFormat = MNN::MNN_DATA_FORMAT::MNN_DATA_FORMAT_NCHW; + attr4->tensor->float32s.clear(); + for (int i = 0; i < weightsSize; i++) { + attr4->tensor->float32s.emplace_back(parameters.blobs(0).data(i)); + } + attr4->i = weightsSize; + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr4)); + + if (parameters.embed_param().bias_term()) { + std::unique_ptr attr5(new MNN::AttributeT); + attr5->key = "bias"; + auto shapeSize = parameters.blobs(0).shape().dim_size(); + std::vector biasShape; + int biasSize = 1; + for (int i = 0; i < shapeSize; i++) { + biasShape.emplace_back(parameters.blobs(0).shape().dim(i)); + biasSize *= biasShape[i]; + } + attr5->tensor->dims = biasShape; + attr5->tensor->dataFormat = 
MNN::MNN_DATA_FORMAT::MNN_DATA_FORMAT_NCHW; + attr5->tensor->float32s.clear(); + for (int i = 0; i < biasSize; i++) { + attr5->tensor->float32s.emplace_back(parameters.blobs(0).data(i)); + } + attr5->i = biasSize; + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr5)); + } + } + + if (parameters.type() == "Reduction") { + std::string opType; + if (parameters.reduction_param().operation() == caffe::ReductionParameter_ReductionOp_SUM) { + opType = "SUM"; + } + if (parameters.reduction_param().operation() == caffe::ReductionParameter_ReductionOp_MEAN) { + opType = "MEAN"; + } + if (parameters.reduction_param().operation() == caffe::ReductionParameter_ReductionOp_ASUM) { + opType = "ASUM"; + } + if (parameters.reduction_param().operation() == caffe::ReductionParameter_ReductionOp_SUMSQ) { + opType = "SUMSQ"; + } + + std::unique_ptr attr1(new MNN::AttributeT); + attr1->key = opType; + auto reductionDim = parameters.reduction_param().axis(); + if (reductionDim < 0) { + reductionDim += 4; // only support at most 4 dimensions + } + attr1->i = reductionDim; + dstOp->main.AsExtra()->attr.emplace_back(std::move(attr1)); + } } virtual MNN::OpParameter type() override { return MNN::OpParameter_Extra; diff --git a/tools/converter/source/caffe/Threshold.cpp b/tools/converter/source/caffe/Threshold.cpp new file mode 100644 index 000000000..ee6b4639a --- /dev/null +++ b/tools/converter/source/caffe/Threshold.cpp @@ -0,0 +1,33 @@ +// +// Threshold.cpp +// MNNConverter +// +// Created by MNN on 2019/12/06. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpConverter.hpp" + +class Threshold : public OpConverter { +public: + virtual void run(MNN::OpT* dstOp, const caffe::LayerParameter& parameters, const caffe::LayerParameter& weight); + Threshold() { + } + virtual ~Threshold() { + } + virtual MNN::OpType opType() { + return MNN::OpType_Threshold; + } + virtual MNN::OpParameter type() { + return MNN::OpParameter_ELU; + } +}; + +void Threshold::run(MNN::OpT* dstOp, const caffe::LayerParameter& parameters, const caffe::LayerParameter& weight) { + auto threshold = parameters.threshold_param().threshold(); + auto parameter = new MNN::ELUT; + parameter->alpha = threshold; + dstOp->main.value = parameter; +} + +static OpConverterRegister ____a("Threshold"); diff --git a/tools/converter/source/caffe/caffe.proto b/tools/converter/source/caffe/caffe.proto index 11bdb1513..eb93bf1da 100644 --- a/tools/converter/source/caffe/caffe.proto +++ b/tools/converter/source/caffe/caffe.proto @@ -567,6 +567,13 @@ message LayerParameter { optional InterpParameter interp_param = 2210; optional ROIPoolingParameter roi_pooling_param = 2201; + optional ClipParameter clip_param = 2202; +} + +// Message that stores parameters used by ClipLayer +message ClipParameter { + required float min = 1; + required float max = 2; } // Message that stores parameters used by ROIPoolingLayer diff --git a/tools/converter/source/cli.cpp b/tools/converter/source/cli.cpp index 6b8ed6ceb..a8498fee3 100644 --- a/tools/converter/source/cli.cpp +++ b/tools/converter/source/cli.cpp @@ -22,7 +22,7 @@ */ void Cli::printProjectBanner() { // print project detail - //auto config = ProjectConfig::obtainSingletonInstance(); + // auto config = ProjectConfig::obtainSingletonInstance(); std::cout << "\nMNNConverter Version: " << ProjectConfig::version << " - MNN @ 2018\n\n" << std::endl; } @@ -32,120 +32,117 @@ cxxopts::Options Cli::initializeMNNConvertArgs(modelConfig &modelPath, int argc, 
options.positional_help("[optional args]").show_positional_help(); - try { - options.allow_unrecognised_options().add_options()("h, help", "Convert Other Model Format To MNN Model\n")( - "v, version", "show current version")("f, framework", "model type, ex: [TF,CAFFE,ONNX,TFLITE,MNN]", - cxxopts::value())( - "modelFile", "tensorflow Pb or caffeModel, ex: *.pb,*caffemodel", cxxopts::value())( - "prototxt", "only used for caffe, ex: *.prototxt", cxxopts::value())( - "MNNModel", "MNN model, ex: *.mnn", cxxopts::value())( - "fp16", "save Conv's weight/bias in half_float data type")( - "benchmarkModel", - "Do NOT save big size data, such as Conv's weight,BN's gamma,beta,mean and variance etc. Only used to test " - "the cost of the model")("bizCode", "MNN Model Flag, ex: MNN", cxxopts::value())( - "debug", "Enable debugging mode."); - - auto result = options.parse(argc, argv); - - if (result.count("help")) { - std::cout << options.help({""}) << std::endl; - exit(EXIT_SUCCESS); - } + options.allow_unrecognised_options().add_options()("h, help", "Convert Other Model Format To MNN Model\n")( + "v, version", "show current version")("f, framework", "model type, ex: [TF,CAFFE,ONNX,TFLITE,MNN]", + cxxopts::value())( + "modelFile", "tensorflow Pb or caffeModel, ex: *.pb,*caffemodel", cxxopts::value())( + "prototxt", "only used for caffe, ex: *.prototxt", cxxopts::value())( + "MNNModel", "MNN model, ex: *.mnn", cxxopts::value())( + "fp16", "save Conv's weight/bias in half_float data type")( + "benchmarkModel", + "Do NOT save big size data, such as Conv's weight,BN's gamma,beta,mean and variance etc. 
Only used to test " + "the cost of the model")("bizCode", "MNN Model Flag, ex: MNN", cxxopts::value())( + "debug", "Enable debugging mode.")( + "forTraining", "whether or not to save training ops BN and Dropout, default: false", cxxopts::value()); + + auto result = options.parse(argc, argv); + + if (result.count("help")) { + std::cout << options.help({""}) << std::endl; + exit(EXIT_SUCCESS); + } - if (result.count("version")) { - std::cout << PROJECT_VERSION << std::endl; - exit(EXIT_SUCCESS); - } + if (result.count("version")) { + std::cout << "\tVersion:" << ProjectConfig::version << std::endl; + exit(EXIT_SUCCESS); + } - modelPath.model = modelPath.MAX_SOURCE; - // model source - if (result.count("framework")) { - const std::string frameWork = result["framework"].as(); - if ("TF" == frameWork) { - modelPath.model = modelConfig::TENSORFLOW; - } else if ("CAFFE" == frameWork) { - modelPath.model = modelConfig::CAFFE; - } else if ("ONNX" == frameWork) { - modelPath.model = modelConfig::ONNX; - } else if ("MNN" == frameWork) { - modelPath.model = modelConfig::MNN; - } else if ("TFLITE" == frameWork) { - modelPath.model = modelConfig::TFLITE; - } else { - std::cout << "Framework Input ERROR or Not Support This Model Type Now!" << std::endl; - std::cout << options.help({""}) << std::endl; - exit(EXIT_FAILURE); - } + modelPath.model = modelPath.MAX_SOURCE; + // model source + if (result.count("framework")) { + const std::string frameWork = result["framework"].as(); + if ("TF" == frameWork) { + modelPath.model = modelConfig::TENSORFLOW; + } else if ("CAFFE" == frameWork) { + modelPath.model = modelConfig::CAFFE; + } else if ("ONNX" == frameWork) { + modelPath.model = modelConfig::ONNX; + } else if ("MNN" == frameWork) { + modelPath.model = modelConfig::MNN; + } else if ("TFLITE" == frameWork) { + modelPath.model = modelConfig::TFLITE; } else { + std::cout << "Framework Input ERROR or Not Support This Model Type Now!" 
<< std::endl; std::cout << options.help({""}) << std::endl; exit(EXIT_FAILURE); } + } else { + std::cout << options.help({""}) << std::endl; + exit(EXIT_FAILURE); + } - // model file path - if (result.count("modelFile")) { - const std::string modelFile = result["modelFile"].as(); - if (CommonKit::FileIsExist(modelFile)) { - modelPath.modelFile = modelFile; - } else { - DLOG(INFO) << "Model File Does Not Exist! ==> " << modelFile; - exit(EXIT_FAILURE); - } + // model file path + if (result.count("modelFile")) { + const std::string modelFile = result["modelFile"].as(); + if (CommonKit::FileIsExist(modelFile)) { + modelPath.modelFile = modelFile; } else { - std::cout << options.help({""}) << std::endl; + DLOG(INFO) << "Model File Does Not Exist! ==> " << modelFile; exit(EXIT_FAILURE); } + } else { + std::cout << options.help({""}) << std::endl; + exit(EXIT_FAILURE); + } - // prototxt file path - if (result.count("prototxt")) { - const std::string prototxt = result["prototxt"].as(); - if (CommonKit::FileIsExist(prototxt)) { - modelPath.prototxtFile = prototxt; - } else { - DLOG(INFO) << "Model File Does Not Exist!"; - exit(EXIT_FAILURE); - } - } else { - // caffe model must have this option - if (modelPath.model == modelPath.CAFFE) { - std::cout << options.help({""}) << std::endl; - exit(EXIT_FAILURE); - } - } - - // MNN model output path - if (result.count("MNNModel")) { - const std::string MNNModelPath = result["MNNModel"].as(); - modelPath.MNNModel = MNNModelPath; + // prototxt file path + if (result.count("prototxt")) { + const std::string prototxt = result["prototxt"].as(); + if (CommonKit::FileIsExist(prototxt)) { + modelPath.prototxtFile = prototxt; } else { - std::cout << options.help({""}) << std::endl; + DLOG(INFO) << "Model File Does Not Exist!"; exit(EXIT_FAILURE); } - - // add MNN bizCode - if (result.count("bizCode")) { - const std::string bizCode = result["bizCode"].as(); - modelPath.bizCode = bizCode; - } else { + } else { + // caffe model must have 
this option + if (modelPath.model == modelPath.CAFFE) { std::cout << options.help({""}) << std::endl; exit(EXIT_FAILURE); } + } - // benchmarkModel - if (result.count("benchmarkModel")) { - modelPath.benchmarkModel = true; - modelPath.bizCode = "benchmark"; - } - // half float - if(result.count("fp16")){ - modelPath.saveHalfFloat = true; - } - - } catch (const cxxopts::OptionException &e) { - std::cerr << "Error while parsing options! " << std::endl; - std::cerr << e.what() << std::endl; + // MNN model output path + if (result.count("MNNModel")) { + const std::string MNNModelPath = result["MNNModel"].as(); + modelPath.MNNModel = MNNModelPath; + } else { + std::cout << options.help({""}) << std::endl; exit(EXIT_FAILURE); } + // add MNN bizCode + if (result.count("bizCode")) { + const std::string bizCode = result["bizCode"].as(); + modelPath.bizCode = bizCode; + } else { + std::cout << options.help({""}) << std::endl; + exit(EXIT_FAILURE); + } + + // benchmarkModel + if (result.count("benchmarkModel")) { + modelPath.benchmarkModel = true; + modelPath.bizCode = "benchmark"; + } + // half float + if (result.count("fp16")) { + modelPath.saveHalfFloat = true; + } + if (result.count("forTraining")) { + modelPath.forTraining = true; + } + return options; } diff --git a/tools/converter/source/config.cpp b/tools/converter/source/config.cpp index e036a4e74..e339de39d 100644 --- a/tools/converter/source/config.cpp +++ b/tools/converter/source/config.cpp @@ -7,8 +7,7 @@ // #include "config.hpp" - -const std::string ProjectConfig::version = PROJECT_VERSION; +const std::string ProjectConfig::version =MNN_VERSION; ProjectConfig *ProjectConfig::m_pConfig = nullptr; std::mutex ProjectConfig::m_mutex; diff --git a/tools/converter/source/config.hpp b/tools/converter/source/config.hpp index 08379d317..fd14ad535 100644 --- a/tools/converter/source/config.hpp +++ b/tools/converter/source/config.hpp @@ -11,8 +11,6 @@ #include #include -#include "CONFIGURECONVERT.h" - class ProjectConfig { 
public: static const std::string version; @@ -53,6 +51,7 @@ struct modelConfig { MODEL_SOURCE model; bool benchmarkModel; bool saveHalfFloat; + bool forTraining = false; }; #endif // CONFIG_HPP diff --git a/tools/converter/source/cxxopts.hpp b/tools/converter/source/cxxopts.hpp index 766964072..11bd88b0a 100644 --- a/tools/converter/source/cxxopts.hpp +++ b/tools/converter/source/cxxopts.hpp @@ -732,11 +732,7 @@ class OptionValue { template const T &as() const { -#ifdef CXXOPTS_NO_RTTI return static_cast &>(*m_value).get(); -#else - return dynamic_cast &>(*m_value).get(); -#endif } private: diff --git a/tools/converter/source/onnx/BinaryOpOnnx.cpp b/tools/converter/source/onnx/BinaryOpOnnx.cpp index 35a4ab2b7..b6be7d86d 100644 --- a/tools/converter/source/onnx/BinaryOpOnnx.cpp +++ b/tools/converter/source/onnx/BinaryOpOnnx.cpp @@ -26,6 +26,8 @@ void BinaryOpOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, {"Add", MNN::BinaryOpOperation_ADD}, {"Sum", MNN::BinaryOpOperation_ADD}, {"Sub", MNN::BinaryOpOperation_SUB}, {"Div", MNN::BinaryOpOperation_REALDIV}, {"Mul", MNN::BinaryOpOperation_MUL}, {"Pow", MNN::BinaryOpOperation_POW}, + {"Equal", MNN::BinaryOpOperation_EQUAL}, {"Less", MNN::BinaryOpOperation_LESS}, {"Greater", MNN::BinaryOpOperation_GREATER}, {"Max", MNN::BinaryOpOperation_MAXIMUM}, + {"Min", MNN::BinaryOpOperation_MINIMUM}, }; auto type = onnxNode->op_type(); @@ -40,3 +42,8 @@ REGISTER_CONVERTER(BinaryOpOnnx, Sub); REGISTER_CONVERTER(BinaryOpOnnx, Div); REGISTER_CONVERTER(BinaryOpOnnx, Mul); REGISTER_CONVERTER(BinaryOpOnnx, Pow); +REGISTER_CONVERTER(BinaryOpOnnx, Equal); +REGISTER_CONVERTER(BinaryOpOnnx, Less); +REGISTER_CONVERTER(BinaryOpOnnx, Greater); +REGISTER_CONVERTER(BinaryOpOnnx, Max); +REGISTER_CONVERTER(BinaryOpOnnx, Min); diff --git a/tools/converter/source/onnx/CMakeLists.txt b/tools/converter/source/onnx/CMakeLists.txt index f71fa91a5..8fb8382bf 100644 --- a/tools/converter/source/onnx/CMakeLists.txt +++ 
b/tools/converter/source/onnx/CMakeLists.txt @@ -1,38 +1,14 @@ - -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -if (MSVC OR WIN32) - set(Protobuf_SRC_ROOT_FOLDER $ENV{Protobuf_SRC_ROOT_FOLDER}) -endif() -find_package(Protobuf REQUIRED) -if (${CMAKE_VERSION} VERSION_LESS "3.6.0") - set(Protobuf_LIBRARIES ${PROTOBUF_LIBRARIES}) - set(Protobuf_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS}) -endif() - protobuf_generate_cpp(ONNX_PROTO_SRCS ONNX_PROTO_HDRS - onnx.proto - onnx-operators.proto + ${CMAKE_CURRENT_LIST_DIR}/onnx.proto + ${CMAKE_CURRENT_LIST_DIR}/onnx-operators.proto # onnx-ml.proto # onnx-operators-ml.proto ) -file(GLOB ONNX_SRC ${SRC_PATH}/onnx/*) - -if(MNN_BUILD_SHARED_LIBS) - add_library(onnx SHARED ${ONNX_SRC} ${ONNX_PROTO_SRCS}) -else() - add_library(onnx STATIC ${ONNX_SRC} ${ONNX_PROTO_SRCS}) -endif() -if (MSVC OR WIN32) - target_compile_options(onnx PRIVATE "/wd4267" "/wd4244" "/wd4305") -endif() - -target_include_directories(onnx PRIVATE - ${SRC_PATH}/include - ${Protobuf_INCLUDE_DIRS} +file(GLOB ONNX_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) +add_library(MNNConverterONNX OBJECT ${ONNX_SRC} ${ONNX_PROTO_SRCS}) +target_include_directories(MNNConverterONNX PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ) - -target_link_libraries(onnx ${Protobuf_LIBRARIES}) +list(APPEND MNN_CONVERTER_BACKENDS_OBJECTS $) +list(APPEND MNN_CONVERTER_BACKENDS_TARGETS MNNConverterONNX) diff --git a/tools/converter/source/onnx/DepthToSpaceOnnx.cpp b/tools/converter/source/onnx/DepthToSpaceOnnx.cpp new file mode 100644 index 000000000..1fc1195cb --- /dev/null +++ b/tools/converter/source/onnx/DepthToSpaceOnnx.cpp @@ -0,0 +1,39 @@ +// +// DepthToSpaceOnnx.cpp +// MNN +// +// Created by MNN on 2019/06/28. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "onnxOpConverter.hpp" + +DECLARE_OP_CONVERTER(DepthToSpaceOnnx); + +MNN::OpType DepthToSpaceOnnx::opType() { + return MNN::OpType_SpaceToDepth; +} + +MNN::OpParameter DepthToSpaceOnnx::type() { + return MNN::OpParameter_DepthSpaceParam; +} + +void DepthToSpaceOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, + std::vector initializers) { + + auto spaceToDepthParam = new MNN::DepthSpaceParamT; + + const auto attrSize = onnxNode->attribute_size(); + for (int i = 0; i < attrSize; ++i) { + const auto& attributeProto = onnxNode->attribute(i); + const auto& attributeName = attributeProto.name(); + if (attributeName == "blocksize") { + spaceToDepthParam->blockSize = (int)attributeProto.i(); + } + } + + dstOp->main.value = spaceToDepthParam; +} + +REGISTER_CONVERTER(DepthToSpaceOnnx, DepthToSpace); diff --git a/tools/converter/source/onnx/ExpandOnnx.cpp b/tools/converter/source/onnx/ExpandOnnx.cpp new file mode 100644 index 000000000..d0dd23c9f --- /dev/null +++ b/tools/converter/source/onnx/ExpandOnnx.cpp @@ -0,0 +1,27 @@ +// +// ExpandOnnx.cpp +// MNNConverter +// +// Created by MNN on 2019/12/12. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "onnxOpConverter.hpp" + +DECLARE_OP_CONVERTER(ExpandOnnx); + +MNN::OpType ExpandOnnx::opType(){ + return MNN::OpType_BroadcastTo; +} + +MNN::OpParameter ExpandOnnx::type(){ + return MNN::OpParameter_NONE; +} + +void ExpandOnnx::run(MNN::OpT *dstOp, const onnx::NodeProto *onnxNode, std::vector initializers){ + + DCHECK(2 == onnxNode->input_size()) << "ONNX Expand should have 2 inputs!"; + return; +} + +REGISTER_CONVERTER(ExpandOnnx, Expand); diff --git a/tools/converter/source/onnx/MatMulOnnx.cpp b/tools/converter/source/onnx/MatMulOnnx.cpp index 2bef9017b..dcc98d2cb 100644 --- a/tools/converter/source/onnx/MatMulOnnx.cpp +++ b/tools/converter/source/onnx/MatMulOnnx.cpp @@ -9,19 +9,19 @@ #include #include "onnxOpConverter.hpp" -DECLARE_OP_CONVERTER(MatMulOnnx); +DECLARE_OP_CONVERTER(GemmOnnx); -MNN::OpType MatMulOnnx::opType() { +MNN::OpType GemmOnnx::opType() { return MNN::OpType_InnerProduct; } -MNN::OpParameter MatMulOnnx::type() { +MNN::OpParameter GemmOnnx::type() { return MNN::OpParameter_InnerProduct; } -void MatMulOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, +void GemmOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, std::vector initializers) { const int size = initializers.size(); - DCHECK(size <= 2 && size >= 1) << "Gemm/MatMul Input ERROR!"; + DCHECK(size <= 2 && size >= 1) << "Gemm Input ERROR!"; auto gemmParam = new MNN::InnerProductT; bool transA = false; @@ -131,4 +131,26 @@ void MatMulOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, dstOp->main.value = gemmParam; } -REGISTER_CONVERTER(MatMulOnnx, Gemm); +REGISTER_CONVERTER(GemmOnnx, Gemm); + + +DECLARE_OP_CONVERTER(MatMulOnnx); + +MNN::OpType MatMulOnnx::opType(){ + return MNN::OpType_MatMul; +} + +MNN::OpParameter MatMulOnnx::type(){ + return MNN::OpParameter_MatMul; +} + +void MatMulOnnx::run(MNN::OpT *dstOp, const onnx::NodeProto *onnxNode, std::vector initializers){ + + CHECK(2 == 
onnxNode->input_size()) << "ONNX Matmul input error!"; + auto param = new MNN::MatMulT; + param->T = MNN::DataType_DT_FLOAT; + + dstOp->main.value = param; +} + +REGISTER_CONVERTER(MatMulOnnx, MatMul); diff --git a/tools/converter/source/onnx/NonMaxSuppression.cpp b/tools/converter/source/onnx/NonMaxSuppression.cpp new file mode 100644 index 000000000..d66574c20 --- /dev/null +++ b/tools/converter/source/onnx/NonMaxSuppression.cpp @@ -0,0 +1,27 @@ +// +// NonMaxSuppression.cpp +// MNN +// +// Created by MNN on 2019/07/23. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "onnxOpConverter.hpp" + +DECLARE_OP_CONVERTER(NonMaxSuppressionOnnx); + + +MNN::OpType NonMaxSuppressionOnnx::opType() { + return MNN::OpType_NonMaxSuppressionV2; +} +MNN::OpParameter NonMaxSuppressionOnnx::type() { + return MNN::OpParameter_NonMaxSuppressionV2; +} + +void NonMaxSuppressionOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, + std::vector initializers) { + dstOp->main.value = nullptr; +} + +REGISTER_CONVERTER(NonMaxSuppressionOnnx, NonMaxSuppression); diff --git a/tools/converter/source/onnx/PadOnnx.cpp b/tools/converter/source/onnx/PadOnnx.cpp index e699af550..cfdd5e1dd 100644 --- a/tools/converter/source/onnx/PadOnnx.cpp +++ b/tools/converter/source/onnx/PadOnnx.cpp @@ -27,10 +27,12 @@ void PadOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, const auto& attributeProto = onnxNode->attribute(i); const auto& attributeName = attributeProto.name(); if (attributeName == "pads") { - para->int32s.resize(attributeProto.ints_size()); - para->dims = {(int)para->int32s.size() / 2, 2}; - for (int i = 0; i < para->int32s.size(); ++i) { - para->int32s[i] = attributeProto.ints(i); + const int size = attributeProto.ints_size(); + para->int32s.resize(size); + para->dims = {size}; + for (int i = 0; i < size / 2; ++i) { + para->int32s[i * 2] = attributeProto.ints(i); + para->int32s[i * 2 + 1] = attributeProto.ints(i + size / 2); } } } diff --git 
a/tools/converter/source/onnx/PoolingOnnx.cpp b/tools/converter/source/onnx/PoolingOnnx.cpp index 7b25220d1..6cdcfac11 100644 --- a/tools/converter/source/onnx/PoolingOnnx.cpp +++ b/tools/converter/source/onnx/PoolingOnnx.cpp @@ -17,8 +17,56 @@ MNN::OpParameter PoolingOnnx::type() { return MNN::OpParameter_Pool; } +static int poolSpatialDim(const onnx::NodeProto* onnxNode) { + for (int i = 0; i < onnxNode->attribute_size(); ++i) { + const auto& attributeProto = onnxNode->attribute(i); + if (attributeProto.name() != "kernel_shape") { + continue; + } + return attributeProto.ints_size(); + } + return -1; +} + +static void runPooling3D(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, + std::vector initializers) { + std::unique_ptr pool(new MNN::Pool3DT); + const auto& type = onnxNode->op_type(); + if (type == "MaxPool") { + pool->type = MNN::PoolType_MAXPOOL; + } else if (type == "AveragePool") { + pool->type = MNN::PoolType_AVEPOOL; + } else { + DLOG(ERROR) << "TODO ==> " << type; + } + pool->padType = MNN::PoolPadType_CAFFE; + for (int i = 0; i < onnxNode->attribute_size(); ++i) { + const auto& attributeProto = onnxNode->attribute(i); + const auto& attributeName = attributeProto.name(); + auto vec = std::vector({ + static_cast(attributeProto.ints(0)), + static_cast(attributeProto.ints(1)), + static_cast(attributeProto.ints(2)) + }); + if (attributeName == "kernel_shape") { + pool->kernels = vec; + } else if (attributeName == "strides") { + pool->strides = vec; + } else if (attributeName == "pads") { + pool->pads = vec; + } + } + dstOp->type = MNN::OpType_Pooling3D; + dstOp->main.type = MNN::OpParameter_Pool3D; + dstOp->main.value = pool.release(); +} + void PoolingOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, std::vector initializers) { + if (poolSpatialDim(onnxNode) == 3) { + runPooling3D(dstOp, onnxNode, initializers); + return; + } auto poolParam = new MNN::PoolT; int kw = 1; int kh = 1; diff --git a/tools/converter/source/onnx/ReduceOnnx.cpp 
b/tools/converter/source/onnx/ReduceOnnx.cpp index 27dd80c7c..7e6ef0fd0 100644 --- a/tools/converter/source/onnx/ReduceOnnx.cpp +++ b/tools/converter/source/onnx/ReduceOnnx.cpp @@ -43,6 +43,16 @@ void ReduceOnnx::run(MNN::OpT *dstOp, const onnx::NodeProto *onnxNode, auto type = onnxNode->op_type(); if (type == "ReduceMean") { param->operation = MNN::ReductionType_MEAN; + } else if (type == "ReduceMax") { + param->operation = MNN::ReductionType_MAX; + } else if (type == "ReduceMin") { + param->operation = MNN::ReductionType_MIN; + } else if (type == "ReduceProd") { + param->operation = MNN::ReductionType_PROD; + } else if (type == "ReduceSum") { + param->operation = MNN::ReductionType_SUM; + } else if (type == "ReduceSumSquare") { + param->operation = MNN::ReductionType_SUMSQ; } else { DLOG(ERROR) << "TODO ==> " << type; } @@ -54,3 +64,8 @@ void ReduceOnnx::run(MNN::OpT *dstOp, const onnx::NodeProto *onnxNode, } REGISTER_CONVERTER(ReduceOnnx, ReduceMean); +REGISTER_CONVERTER(ReduceOnnx, ReduceMax); +REGISTER_CONVERTER(ReduceOnnx, ReduceMin); +REGISTER_CONVERTER(ReduceOnnx, ReduceProd); +REGISTER_CONVERTER(ReduceOnnx, ReduceSum); +REGISTER_CONVERTER(ReduceOnnx, ReduceSumSquare); diff --git a/tools/converter/source/onnx/ScatterNDOnnx.cpp b/tools/converter/source/onnx/ScatterNDOnnx.cpp new file mode 100644 index 000000000..acee95d7b --- /dev/null +++ b/tools/converter/source/onnx/ScatterNDOnnx.cpp @@ -0,0 +1,27 @@ +// +// ScatterNDOnnx.cpp +// MNN +// +// Created by MNN on 2019/06/28. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "onnxOpConverter.hpp" + +DECLARE_OP_CONVERTER(ScatterNDOnnx); + +MNN::OpType ScatterNDOnnx::opType() { + return MNN::OpType_ScatterNd; +} +MNN::OpParameter ScatterNDOnnx::type() { + return MNN::OpParameter_NONE; +} + +void ScatterNDOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, + std::vector initializers) { + dstOp->main.value = nullptr; +} + + +REGISTER_CONVERTER(ScatterNDOnnx, ScatterND); diff --git a/tools/converter/source/onnx/SpaceToDepthOnnx.cpp b/tools/converter/source/onnx/SpaceToDepthOnnx.cpp new file mode 100644 index 000000000..98dc191aa --- /dev/null +++ b/tools/converter/source/onnx/SpaceToDepthOnnx.cpp @@ -0,0 +1,36 @@ +// +// Created by MNN on 2019/06/28. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "onnxOpConverter.hpp" + +DECLARE_OP_CONVERTER(SpaceToDepthOnnx); + +MNN::OpType SpaceToDepthOnnx::opType() { + return MNN::OpType_SpaceToDepth; +} + +MNN::OpParameter SpaceToDepthOnnx::type() { + return MNN::OpParameter_DepthSpaceParam; +} + +void SpaceToDepthOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, +std::vector initializers) { + + auto spaceToDepthParam = new MNN::DepthSpaceParamT; + + const auto attrSize = onnxNode->attribute_size(); + for (int i = 0; i < attrSize; ++i) { + const auto& attributeProto = onnxNode->attribute(i); + const auto& attributeName = attributeProto.name(); + if (attributeName == "blocksize") { + spaceToDepthParam->blockSize = (int)attributeProto.i(); + } + } + + dstOp->main.value = spaceToDepthParam; +} + +REGISTER_CONVERTER(SpaceToDepthOnnx, SpaceToDepth); diff --git a/tools/converter/source/onnx/TanhOnnx.cpp b/tools/converter/source/onnx/TanhOnnx.cpp new file mode 100644 index 000000000..a3bf8d3ff --- /dev/null +++ b/tools/converter/source/onnx/TanhOnnx.cpp @@ -0,0 +1,26 @@ +// +// TanhOnnx.cpp +// MNN +// +// Created by MNN on 2019/06/28. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "onnxOpConverter.hpp" + +DECLARE_OP_CONVERTER(TanhOnnx); + +MNN::OpType TanhOnnx::opType() { + return MNN::OpType_TanH; +} +MNN::OpParameter TanhOnnx::type() { + return MNN::OpParameter_NONE; +} + +void TanhOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, + std::vector initializers) { + dstOp->main.value = nullptr; +} + +REGISTER_CONVERTER(TanhOnnx, Tanh); diff --git a/tools/converter/source/onnx/UnaryOnnx.cpp b/tools/converter/source/onnx/UnaryOnnx.cpp index d3e8e3b92..8af52f344 100644 --- a/tools/converter/source/onnx/UnaryOnnx.cpp +++ b/tools/converter/source/onnx/UnaryOnnx.cpp @@ -31,8 +31,30 @@ void UnaryOnnx::run(MNN::OpT *dstOp, const onnx::NodeProto *onnxNode, } TO_UNARY_OP("Floor", MNN::UnaryOpOperation_FLOOR); + TO_UNARY_OP("Neg", MNN::UnaryOpOperation_NEG); + TO_UNARY_OP("Abs", MNN::UnaryOpOperation_ABS); + TO_UNARY_OP("Exp", MNN::UnaryOpOperation_EXP); + TO_UNARY_OP("Cos", MNN::UnaryOpOperation_COS); + TO_UNARY_OP("Sin", MNN::UnaryOpOperation_SIN); + TO_UNARY_OP("Sqrt", MNN::UnaryOpOperation_SQRT); + TO_UNARY_OP("Ceil", MNN::UnaryOpOperation_CEIL); + TO_UNARY_OP("Log", MNN::UnaryOpOperation_LOG); + TO_UNARY_OP("Tan", MNN::UnaryOpOperation_TAN); + TO_UNARY_OP("ATan", MNN::UnaryOpOperation_ATAN); + TO_UNARY_OP("Asin", MNN::UnaryOpOperation_ASIN); dstOp->main.value = unaryOpParam.release(); } REGISTER_CONVERTER(UnaryOnnx, Floor); +REGISTER_CONVERTER(UnaryOnnx, Abs); +REGISTER_CONVERTER(UnaryOnnx, Neg); +REGISTER_CONVERTER(UnaryOnnx, Exp); +REGISTER_CONVERTER(UnaryOnnx, Cos); +REGISTER_CONVERTER(UnaryOnnx, Sin); +REGISTER_CONVERTER(UnaryOnnx, Sqrt); +REGISTER_CONVERTER(UnaryOnnx, Ceil); +REGISTER_CONVERTER(UnaryOnnx, Log); +REGISTER_CONVERTER(UnaryOnnx, Tan); +REGISTER_CONVERTER(UnaryOnnx, ATan); +REGISTER_CONVERTER(UnaryOnnx, Asin); diff --git a/tools/converter/source/onnx/UnaryOpOnnx.cpp b/tools/converter/source/onnx/UnaryOpOnnx.cpp deleted file mode 100644 index 
0208c71d9..000000000 --- a/tools/converter/source/onnx/UnaryOpOnnx.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// -// UnaryOpOnnx.cpp -// MNNConverter -// -// Created by MNN on 2019/06/28. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#include -#include "onnxOpConverter.hpp" - -DECLARE_OP_CONVERTER(UnaryOpOnnx); - -MNN::OpType UnaryOpOnnx::opType() { - return MNN::OpType_UnaryOp; -} - -MNN::OpParameter UnaryOpOnnx::type() { - return MNN::OpParameter_UnaryOp; -} - -void UnaryOpOnnx::run(MNN::OpT* dstOp, const onnx::NodeProto* onnxNode, - std::vector initializers) { - auto param = new MNN::UnaryOpT; - static std::map gMaps{ - {"Neg", MNN::UnaryOpOperation_NEG}, {"Abs", MNN::UnaryOpOperation_ABS}, {"Exp", MNN::UnaryOpOperation_EXP}, - {"Cos", MNN::UnaryOpOperation_COS}, {"Sin", MNN::UnaryOpOperation_SIN}, {"Sqrt", MNN::UnaryOpOperation_SQRT}, - }; - - auto type = onnxNode->op_type(); - param->opType = gMaps[type]; - param->T = MNN::DataType_DT_FLOAT; - dstOp->main.value = param; -} - -REGISTER_CONVERTER(UnaryOpOnnx, Abs); -REGISTER_CONVERTER(UnaryOpOnnx, Neg); -REGISTER_CONVERTER(UnaryOpOnnx, Exp); -REGISTER_CONVERTER(UnaryOpOnnx, Cos); -REGISTER_CONVERTER(UnaryOpOnnx, Sin); -REGISTER_CONVERTER(UnaryOpOnnx, Sqrt); diff --git a/tools/converter/source/optimizer/CMakeLists.txt b/tools/converter/source/optimizer/CMakeLists.txt index f72686180..123e439bf 100644 --- a/tools/converter/source/optimizer/CMakeLists.txt +++ b/tools/converter/source/optimizer/CMakeLists.txt @@ -1,19 +1,4 @@ - -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -include_directories(${SRC_PATH}/IR) -include_directories(${SRC_PATH}/include) - -file(GLOB_RECURSE OPTIMIZER_SRC ${SRC_PATH}/optimizer/*) - -if(MNN_BUILD_SHARED_LIBS) - add_library(optimizer SHARED ${OPTIMIZER_SRC}) -else() - add_library(optimizer STATIC ${OPTIMIZER_SRC}) -endif() -target_link_libraries(optimizer MNN_Express MNN) - -if (MSVC OR WIN32) - target_compile_options(optimizer PRIVATE "/wd4267") -endif() 
+file(GLOB_RECURSE OPTIMIZER_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) +add_library(MNNConverterOpt OBJECT ${OPTIMIZER_SRC}) +list(APPEND MNN_CONVERTER_BACKENDS_OBJECTS $) +list(APPEND MNN_CONVERTER_BACKENDS_TARGETS MNNConverterOpt) diff --git a/tools/converter/source/optimizer/PostConverter.cpp b/tools/converter/source/optimizer/PostConverter.cpp index 95558ccae..fc0fabeef 100644 --- a/tools/converter/source/optimizer/PostConverter.cpp +++ b/tools/converter/source/optimizer/PostConverter.cpp @@ -10,7 +10,7 @@ #include "PostTreatUtils.hpp" #include "Program.hpp" #include "TemplateMerge.hpp" -#include "Optimizer.hpp" +#include using namespace MNN::Express; static void _printInputOutputs(const MNN::NetT* newNet) { @@ -41,7 +41,10 @@ static void _printInputOutputs(const MNN::NetT* newNet) { } } -std::unique_ptr optimizeNet(std::unique_ptr& originNet) { +std::unique_ptr optimizeNet(std::unique_ptr& originNet, bool forTraining) { + if (forTraining) { + LOG(INFO) << "convert model for training, reserve BatchNorm and Dropout"; + } if (originNet->oplists.size() <= 0) { return nullptr; } @@ -50,9 +53,12 @@ std::unique_ptr optimizeNet(std::unique_ptr& originNet) { // Seperate Tensor for inplace op "RemoveInplace", - // Remove Unuseful Op such as NoOp, Identity, Dropout, Seq2Out, + // Remove Unuseful Op such as NoOp, Identity, Seq2Out, "RemoveUnusefulOp", + // Remove Dropout, if `forTraining` flag is set, Dropout will be reserved + "RemoveDropout", + // Turn InnerProduct from Caffe / Onnx to Convolution "TransformInnerProduct", @@ -65,6 +71,14 @@ std::unique_ptr optimizeNet(std::unique_ptr& originNet) { // Turn Onnx's Pad to Tensorflow's Pad "TransformOnnxPad", }; + if (forTraining) { + std::vector::iterator iter; + for (iter = postConvertPass.begin(); iter != postConvertPass.end(); iter++) { + if (*iter == "RemoveDropout") { + postConvertPass.erase(iter); + } + } + } for (auto pass : postConvertPass) { auto convert = PostConverter::get(pass); if (nullptr == convert) { @@ -76,7 
+90,7 @@ std::unique_ptr optimizeNet(std::unique_ptr& originNet) { LOG(INFO) << "Run " << pass << "Error\n"; } } - + auto program = MNN::Express::Program::create(originNet.get(), true); std::vector optimizePass = { "Merge", @@ -85,6 +99,9 @@ std::unique_ptr optimizeNet(std::unique_ptr& originNet) { case MNN::NetSource_TENSORFLOW: optimizePass.insert(optimizePass.begin(), "TFExtra"); break; + case MNN::NetSource_CAFFE: + optimizePass.insert(optimizePass.begin(), "CaffeExtra"); + break; case MNN::NetSource_ONNX: optimizePass.insert(optimizePass.begin(), "OnnxExtra"); break; @@ -115,16 +132,30 @@ std::unique_ptr optimizeNet(std::unique_ptr& originNet) { newNet->bizCode = originNet->bizCode; Variable::save(outputs, newNet.get()); } - + std::vector afterProgramConvert = { - // Turn BatchNormal to Scale When inference + // Turn BatchNormal to Scale When inference, if `forTraining` flag is set, BN will be reserved "TransformBatchNormal", - + // remove onnx lstm unuseful op(Squeeze, Transpose after LSTM) "ResolveOnnxLSTM", + + // expand ShapeN to N Shapes + "ResolveTfShapeN", + + // WARNNING: should merge BN and Scale before Relu and Relu6 + + // Merge BN info Convolution, if `forTraining` flag is set, BN will be reserved + "MergeBNToConvolution", // Merge Scale info Convolution - "MergeToConvolution", + "MergeScaleToConvolution", + + // Merge Relu Convolution + "MergeReluToConvolution", + + // Merge Relu6 Convolution + "MergeRelu6ToConvolution", // conert some binary op(add, mul, sub...) 
to element wise op(sum, sub) accroding to input condition "ConvertBinaryToElementwise", @@ -138,6 +169,14 @@ std::unique_ptr optimizeNet(std::unique_ptr& originNet) { // Remove unuseful tensor "ReIndexTensor", }; + if (forTraining) { + std::vector::iterator iter; + for (iter = afterProgramConvert.begin(); iter != afterProgramConvert.end(); iter++) { + if (*iter == "TransformBatchNormal" || *iter == "MergeBNToConvolution") { + afterProgramConvert.erase(iter); + } + } + } for (auto pass : afterProgramConvert) { auto convert = PostConverter::get(pass); if (nullptr == convert) { @@ -149,10 +188,10 @@ std::unique_ptr optimizeNet(std::unique_ptr& originNet) { LOG(INFO) << "Run " << pass << "Error\n"; } } - + if (!printedInputOutput) { _printInputOutputs(newNet.get()); } - + return newNet; } diff --git a/tools/converter/source/optimizer/PostTreatUtils.cpp b/tools/converter/source/optimizer/PostTreatUtils.cpp index 52b6914c5..aff2a8074 100644 --- a/tools/converter/source/optimizer/PostTreatUtils.cpp +++ b/tools/converter/source/optimizer/PostTreatUtils.cpp @@ -10,6 +10,7 @@ #include #include using namespace MNN; + template bool inVector(const std::vector& vec, const T& val) { return std::find(vec.begin(), vec.end(), val) != vec.end(); diff --git a/tools/converter/source/optimizer/PostTreatUtils.hpp b/tools/converter/source/optimizer/PostTreatUtils.hpp index c84436771..e070baf29 100644 --- a/tools/converter/source/optimizer/PostTreatUtils.hpp +++ b/tools/converter/source/optimizer/PostTreatUtils.hpp @@ -48,7 +48,7 @@ class PostTreatUtils { static int _getOpDecestorCount(MNN::OpT* op, const MNN::NetT* net); static bool _replace(std::vector& indexes, int freshIndex, int oldIndex); - + private: PostTreatUtils(); }; diff --git a/tools/converter/source/optimizer/Program.cpp b/tools/converter/source/optimizer/Program.cpp index b1cac5c2a..1d7367808 100644 --- a/tools/converter/source/optimizer/Program.cpp +++ b/tools/converter/source/optimizer/Program.cpp @@ -7,9 +7,9 @@ // 
#include "Program.hpp" -#include "ExprCreator.hpp" +#include #define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include using namespace MNN::Express; using namespace MNN; #define UP_DIV(x) (((x) + 3) / 4) @@ -233,6 +233,7 @@ static void _create(std::map& varMap, std::vector& inputIndexes, inputVars.emplace_back(varMap[input]); } auto expr = Expr::create(op, inputVars, outputIndexes.size()); + expr->setName(op->name); for (int j = 0; j < outputIndexes.size(); ++j) { if (op->type == OpType_Input) { inputIndexes.emplace_back(outputIndexes[j]); @@ -313,7 +314,7 @@ std::shared_ptr Program::create(const MNN::NetT* net, bool supportExtra } newFrame->whileName = frameName.substr(0, pos); //MNN_PRINT("%s\n", newFrame->whileName.c_str()); - + newFrame->parent = currentFrame; currentFrame->children.push_back(newFrame); currentFrame->body.emplace_back(nullptr); diff --git a/tools/converter/source/optimizer/Program.hpp b/tools/converter/source/optimizer/Program.hpp index 92e930630..7c86cf2e9 100644 --- a/tools/converter/source/optimizer/Program.hpp +++ b/tools/converter/source/optimizer/Program.hpp @@ -12,7 +12,7 @@ #include #include #include -#include "Expr.hpp" +#include namespace MNN { namespace Express { diff --git a/tools/converter/source/optimizer/TemplateMerge.cpp b/tools/converter/source/optimizer/TemplateMerge.cpp index b37423a1a..5d3549c29 100644 --- a/tools/converter/source/optimizer/TemplateMerge.cpp +++ b/tools/converter/source/optimizer/TemplateMerge.cpp @@ -15,9 +15,12 @@ bool TemplateMerge::onExecute(const std::vector& outputs, std::shared_ptr< do { hasChange = false; for (auto& iter : mTemplates) { - std::set invalidVARP; + std::set invalidVARP; auto execute = Variable::getExecuteOrder(outputs); for (auto var : execute) { + if (var->get() == nullptr) { + continue; + } if (invalidVARP.find(var) != invalidVARP.end()) { continue; } @@ -40,8 +43,8 @@ TemplateMerge& TemplateMerge::getInstance(const std::string& pass) { return iter->second; } -void 
TemplateMerge::insertTemplate(std::string key, std::function compare, - std::function transform) { +void TemplateMerge::insertTemplate(std::string key, std::function compare, + std::function transform) { mTemplates.insert(std::make_pair(key, std::make_pair(compare, transform))); } } // namespace Express diff --git a/tools/converter/source/optimizer/TemplateMerge.hpp b/tools/converter/source/optimizer/TemplateMerge.hpp index 912b5b88b..47e340901 100644 --- a/tools/converter/source/optimizer/TemplateMerge.hpp +++ b/tools/converter/source/optimizer/TemplateMerge.hpp @@ -6,7 +6,7 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "Optimizer.hpp" +#include namespace MNN { namespace Express { class TemplateMerge : public Optimizer { @@ -20,16 +20,16 @@ class TemplateMerge : public Optimizer { static TemplateMerge& getInstance(const std::string& pass); - void insertTemplate(std::string key, std::function compare, std::function transform); + void insertTemplate(std::string key, std::function compare, std::function transform); private: TemplateMerge() { } - std::map, std::function>> mTemplates; + std::map, std::function>> mTemplates; }; class TemplateMergeRegister { public: - TemplateMergeRegister(const std::string& pass, std::string key, std::function compare, std::function transform) { + TemplateMergeRegister(const std::string& pass, std::string key, std::function compare, std::function transform) { TemplateMerge::getInstance(pass).insertTemplate(key, compare, transform); } }; diff --git a/tools/converter/source/optimizer/caffeextra/BiasTransform.cpp b/tools/converter/source/optimizer/caffeextra/BiasTransform.cpp new file mode 100644 index 000000000..f72a0fc1e --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/BiasTransform.cpp @@ -0,0 +1,53 @@ +// +// BiasTransform.cpp +// MNNConverter +// +// Created by MNN on 2019/12/13. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include "MNN_generated.h" +#include + +namespace MNN { +namespace Express { + +class BiasTransform : public CaffeExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto axis = op->main_as_Extra()->attr()->GetAs(0)->i(); + auto numAxes = op->main_as_Extra()->attr()->GetAs(1)->i(); + + if (inputs.size() == 1) { + std::vector biasShape{1, 1, 1, 1}; + auto shape = op->main_as_Extra()->attr()->GetAs(2)->tensor()->dims(); + for (int i = 0; i < shape->size(); i++) { + biasShape[axis + i] = shape->data()[i]; + } + auto biasData = op->main_as_Extra()->attr()->GetAs(2)->tensor()->float32s()->data(); + auto newVar = _Add(inputs[0], _Const(biasData, biasShape, NCHW)); + return newVar->expr().first; + } else { + MNN_ASSERT(inputs.size() == 2); + + std::vector biasShape{1, 1, 1, 1}; + auto shape = inputs[1]->getInfo()->dim; + for (int i = 0; i < shape.size(); i++) { + biasShape[axis + i] = shape[i]; + } + auto newVar = _Add(inputs[0], _Const(inputs[1]->readMap(), biasShape, NCHW)); + return newVar->expr().first; + } + } +}; + +static auto gRegister = []() { + CaffeExtraManager::get()->insert("Bias", std::shared_ptr(new BiasTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/CaffeExtraManager.cpp b/tools/converter/source/optimizer/caffeextra/CaffeExtraManager.cpp new file mode 100644 index 000000000..c7c51898e --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/CaffeExtraManager.cpp @@ -0,0 +1,70 @@ +// +// CaffeExtraManager.cpp +// MNNConverter +// +// Created by MNN on 2019/12/12. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include +#include "MNN_generated.h" +namespace MNN { +namespace Express { +std::shared_ptr CaffeExtraManager::gInstance; +std::shared_ptr CaffeExtraManager::get() { + if (nullptr == gInstance) { + gInstance.reset(new CaffeExtraManager); + } + return gInstance; +} + +void CaffeExtraManager::insert(const std::string& name, std::shared_ptr transform) { + mTransform.insert(std::make_pair(name, transform)); +} +std::shared_ptr CaffeExtraManager::find(const std::string& name) const { + auto iter = mTransform.find(name); + if (iter == mTransform.end()) { + return nullptr; + } + return iter->second; +} + + +static auto gRegister = []() { + auto extra = CaffeExtraManager::get(); + auto judge = [extra](EXPRP expr) { + auto op = expr->get(); + if (op->type() != OpType_Extra) { + return false; + } + auto engine = op->main_as_Extra()->engine()->str(); + if (engine != "Caffe") { + return false; + } + auto type = op->main_as_Extra()->type()->str(); + if (extra->find(type) == nullptr) { + return false; + } + return true; + }; + auto modify = [extra](EXPRP expr) { + auto op = expr->get(); + MNN_ASSERT(op->type() == OpType_Extra); + auto type = op->main_as_Extra()->type()->str(); + auto transformer = extra->find(type); + MNN_ASSERT(nullptr != transformer); + auto newExpr = transformer->onExecute(expr); + if (nullptr == newExpr) { + MNN_ERROR("Converte Caffe's Op %s , type = %s, failed, may be some node is not const\n", expr->name().c_str(), type.c_str()); + return false; + } + newExpr->setName(expr->name()); + Expr::replace(expr, newExpr); + return true; + }; + TemplateMerge::getInstance("CaffeExtra").insertTemplate("CaffeExtraManager", judge, modify); + return true; +}(); +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/CaffeExtraManager.hpp b/tools/converter/source/optimizer/caffeextra/CaffeExtraManager.hpp new file mode 100644 index 
000000000..944e10fde --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/CaffeExtraManager.hpp @@ -0,0 +1,31 @@ +// +// CaffeExtraManager.hpp +// MNNConverter +// +// Created by MNN on 2019/12/12. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "../TemplateMerge.hpp" +#include +namespace MNN { +namespace Express { +class CaffeExtraManager { +public: + class Transform { + public: + virtual ~ Transform() = default; + Transform() = default; + + virtual EXPRP onExecute(EXPRP expr) const = 0; + }; + + void insert(const std::string& name, std::shared_ptr transform); + std::shared_ptr find(const std::string& name) const; + static std::shared_ptr get(); +private: + std::map> mTransform; + static std::shared_ptr gInstance; +}; +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/ClipTransform.cpp b/tools/converter/source/optimizer/caffeextra/ClipTransform.cpp new file mode 100644 index 000000000..e0104ffc1 --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/ClipTransform.cpp @@ -0,0 +1,34 @@ +// +// ClipTransform.cpp +// MNNConverter +// +// Created by MNN on 2019/12/12. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include "MNN_generated.h" + +namespace MNN { +namespace Express { + +class ClipTransform : public CaffeExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto min = op->main_as_Extra()->attr()->GetAs(0)->f(); + auto max = op->main_as_Extra()->attr()->GetAs(1)->f(); + + auto newVar = _Maximum(_Minimum(inputs[0], _Const(max)), _Const(min)); + return newVar->expr().first; + } +}; + +static auto gRegister = []() { + CaffeExtraManager::get()->insert("Clip", std::shared_ptr(new ClipTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/EmbedTransform.cpp b/tools/converter/source/optimizer/caffeextra/EmbedTransform.cpp new file mode 100644 index 000000000..843ca5c1a --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/EmbedTransform.cpp @@ -0,0 +1,53 @@ +// +// EmbedTransform.cpp +// MNNConverter +// +// Created by MNN on 2019/12/13. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include "MNN_generated.h" +#include + +namespace MNN { +namespace Express { + +class EmbedTransform : public CaffeExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto numOutput = op->main_as_Extra()->attr()->GetAs(0)->i(); + auto inputDim = op->main_as_Extra()->attr()->GetAs(1)->i(); + auto biasTerm = op->main_as_Extra()->attr()->GetAs(2)->b(); + auto weightsPtr = op->main_as_Extra()->attr()->GetAs(3)->tensor()->float32s()->data(); + auto shape = op->main_as_Extra()->attr()->GetAs(3)->tensor()->dims(); + MNN_ASSERT(shape->size() == 2); + + auto oneHot = _OneHot(_Cast(inputs[0], halide_type_of()), + _Cast(_Const(inputDim), halide_type_of()), _Const(1.0f), _Const(0.0f)); + auto weight = _Const(weightsPtr, {shape->data()[0], shape->data()[1]}, NCHW); + auto xW = _MatMul(oneHot, weight); + + if (!biasTerm) { + return xW->expr().first; + } else { + auto biasPtr = op->main_as_Extra()->attr()->GetAs(4)->tensor()->float32s()->data(); + auto biasShape = op->main_as_Extra()->attr()->GetAs(4)->tensor()->dims(); + MNN_ASSERT(biasShape->size() == 2); // in caffe source code embed_layer.cpp, bias_shape is 1 * num_output + auto bias = _Const(biasPtr, {1, biasShape->data()[1]}, NCHW); + auto output = _Add(xW, bias); + + return output->expr().first; + } + } +}; + +static auto gRegister = []() { + CaffeExtraManager::get()->insert("Embed", std::shared_ptr(new EmbedTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/ExpTransform.cpp b/tools/converter/source/optimizer/caffeextra/ExpTransform.cpp new file mode 100644 index 000000000..93309c53d --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/ExpTransform.cpp @@ -0,0 +1,40 @@ +// +// ExpTransform.cpp +// MNNConverter +// +// Created by MNN on 
2019/12/12. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include "MNN_generated.h" +#include + +namespace MNN { +namespace Express { + +class ExpTransform : public CaffeExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto base = op->main_as_Extra()->attr()->GetAs(0)->f(); + auto scale = op->main_as_Extra()->attr()->GetAs(1)->f(); + auto shift = op->main_as_Extra()->attr()->GetAs(2)->f(); + + auto exponent = _Add(_Multiply(inputs[0], _Const(scale)), _Const(shift)); + if (fabs(base - (-1)) < 1e-6) { + base = exp(1); + } + auto newVar = _Pow(_Const(base), exponent); + return newVar->expr().first; + } +}; + +static auto gRegister = []() { + CaffeExtraManager::get()->insert("Exp", std::shared_ptr(new ExpTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/LogTransform.cpp b/tools/converter/source/optimizer/caffeextra/LogTransform.cpp new file mode 100644 index 000000000..873f680db --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/LogTransform.cpp @@ -0,0 +1,40 @@ +// +// LogTransform.cpp +// MNNConverter +// +// Created by MNN on 2019/12/12. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include "MNN_generated.h" +#include + +namespace MNN { +namespace Express { + +class LogTransform : public CaffeExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto base = op->main_as_Extra()->attr()->GetAs(0)->f(); + auto scale = op->main_as_Extra()->attr()->GetAs(1)->f(); + auto shift = op->main_as_Extra()->attr()->GetAs(2)->f(); + + auto exponent = _Add(_Multiply(inputs[0], _Const(scale)), _Const(shift)); + if (fabs(base - (-1)) < 1e-6) { // base == -1, which means natural base + return exponent->expr().first; + } + auto newVar = _Divide(_Log(exponent), _Log(_Const(base))); + return newVar->expr().first; + } +}; + +static auto gRegister = []() { + CaffeExtraManager::get()->insert("Log", std::shared_ptr(new LogTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/MVNTransform.cpp b/tools/converter/source/optimizer/caffeextra/MVNTransform.cpp new file mode 100644 index 000000000..c3961a470 --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/MVNTransform.cpp @@ -0,0 +1,52 @@ +// +// MVNTransform.cpp +// MNNConverter +// +// Created by MNN on 2019/12/12. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include "MNN_generated.h" +#include +namespace MNN { +namespace Express { + +class MVNTransform : public CaffeExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto acrossChannels = op->main_as_Extra()->attr()->GetAs(0)->b(); + auto eps = op->main_as_Extra()->attr()->GetAs(1)->f(); + auto normalizeVariance = op->main_as_Extra()->attr()->GetAs(2)->b(); + + std::vector reduceDims; + if (acrossChannels) { + reduceDims = {1, 2, 3}; + } else { + reduceDims = {2, 3}; + } + + auto mean = _ReduceMean(inputs[0], reduceDims, true); + auto subMean = _Subtract(inputs[0], mean); // of input shape + + if (!normalizeVariance) { + return subMean->expr().first; + } else { + auto s2 = _Square(subMean); // element wise of input shape + auto variance = _ReduceMean(s2, reduceDims, true); + auto stdv = _Add(_Sqrt(variance), _Const(eps)); + auto normedData = _Divide(inputs[0], stdv); + return normedData->expr().first; + } + } +}; + +static auto gRegister = []() { + CaffeExtraManager::get()->insert("MVN", std::shared_ptr(new MVNTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/PowerTransform.cpp b/tools/converter/source/optimizer/caffeextra/PowerTransform.cpp new file mode 100644 index 000000000..5de580829 --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/PowerTransform.cpp @@ -0,0 +1,36 @@ +// +// PowerTransform.cpp +// MNNConverter +// +// Created by MNN on 2019/12/12. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include "MNN_generated.h" + +namespace MNN { +namespace Express { + +class PowerTransform : public CaffeExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto scale = op->main_as_Extra()->attr()->GetAs(0)->f(); + auto shift = op->main_as_Extra()->attr()->GetAs(1)->f(); + auto power = op->main_as_Extra()->attr()->GetAs(2)->f(); + + auto base = _Add(_Multiply(inputs[0], _Const(scale)), _Const(shift)); + auto newVar = _Pow(base, _Const(power)); + return newVar->expr().first; + } +}; + +static auto gRegister = []() { + CaffeExtraManager::get()->insert("Power", std::shared_ptr(new PowerTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/caffeextra/ReductionTransform.cpp b/tools/converter/source/optimizer/caffeextra/ReductionTransform.cpp new file mode 100644 index 000000000..c8f344dfa --- /dev/null +++ b/tools/converter/source/optimizer/caffeextra/ReductionTransform.cpp @@ -0,0 +1,59 @@ +// +// ReductionTransform.cpp +// MNNConverter +// +// Created by MNN on 2019/12/16. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "CaffeExtraManager.hpp" +#include "MNN_generated.h" +#include "logkit.h" +#include + +namespace MNN { +namespace Express { + +class ReductionTransform : public CaffeExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + + std::vector reductionDim; + auto beginAxis = op->main_as_Extra()->attr()->GetAs(0)->i(); + for (int i = beginAxis; i < 4; i++) { + reductionDim.emplace_back(i); + } + + auto opType = op->main_as_Extra()->attr()->GetAs(0)->key()->str(); + if (opType == "SUM") { + auto newVar = _ReduceSum(inputs[0], reductionDim, false); + return newVar->expr().first; + } + if (opType == "MEAN") { + auto newVar = _ReduceMean(inputs[0], reductionDim, false); + return newVar->expr().first; + } + if (opType == "ASUM") { + auto absVar = _Abs(inputs[0]); + auto newVar = _ReduceSum(absVar, reductionDim, false); + return newVar->expr().first; + } + if (opType == "SUMSQ") { + auto sqVar = _Square(inputs[0]); + auto newVar = _ReduceSum(sqVar, reductionDim, false); + return newVar->expr().first; + } + DLOG(FATAL) << "not supported caffe reduction type"; + return nullptr; + } +}; + +static auto gRegister = []() { + CaffeExtraManager::get()->insert("Reduction", std::shared_ptr(new ReductionTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/merge/ConvBiasAdd.cpp b/tools/converter/source/optimizer/merge/ConvBiasAdd.cpp index 26e1b6017..8472f5b29 100644 --- a/tools/converter/source/optimizer/merge/ConvBiasAdd.cpp +++ b/tools/converter/source/optimizer/merge/ConvBiasAdd.cpp @@ -13,8 +13,10 @@ namespace MNN { namespace Express { static auto gRegister = []() { - auto compare = [](VARP var) { - auto expr = var->expr().first; + auto compare = [](EXPRP expr) { + if (nullptr == expr->get()) { + return false; + } if (expr->get()->type() != OpType_BinaryOp) 
{ return false; } @@ -40,8 +42,7 @@ static auto gRegister = []() { } return true; }; - auto modify = [](VARP var) { - auto expr = var->expr().first; + auto modify = [](EXPRP expr) { auto inputs = expr->inputs(); auto inputExpr = inputs[0]->expr().first; auto biasVar = inputs[1]; @@ -54,16 +55,8 @@ static auto gRegister = []() { biasData[i] += biasPtr[i]; } auto newExpr = Expr::create(convOp.get(), inputExpr->inputs()); - newExpr->setName(var->expr().first->name()); - auto outputs = var->expr().first->outputs(); - for (auto weakVar : outputs) { - auto var = weakVar.lock(); - if (nullptr == var) { - continue; - } - auto index = var->expr().second; - Variable::setExpr(var, newExpr, index); - } + newExpr->setName(expr->name()); + Expr::replace(expr, newExpr); return true; }; TemplateMerge::getInstance("Merge").insertTemplate("ConvBiasAdd", compare, modify); diff --git a/tools/converter/source/optimizer/merge/SliceTFMerge.cpp b/tools/converter/source/optimizer/merge/SliceTFMerge.cpp index 88a957f0f..b71e3d81a 100644 --- a/tools/converter/source/optimizer/merge/SliceTFMerge.cpp +++ b/tools/converter/source/optimizer/merge/SliceTFMerge.cpp @@ -13,8 +13,10 @@ namespace MNN { namespace Express { static auto gRegister = []() { - auto compare = [](VARP var) { - auto expr = var->expr().first; + auto compare = [](EXPRP expr) { + if (nullptr == expr->get()) { + return false; + } if (expr->get()->type() != OpType_SliceTf) { return false; } @@ -38,17 +40,9 @@ static auto gRegister = []() { } return true; }; - auto modify = [](VARP var) { - auto expr = var->expr().first; + auto modify = [](EXPRP expr) { auto inputs = expr->inputs(); - auto outputs = var->expr().first->outputs(); - for (auto weakVar : outputs) { - auto var = weakVar.lock(); - if (nullptr == var) { - continue; - } - Variable::replace(var, inputs[0]); - } + Expr::replace(expr, inputs[0]->expr().first); return true; }; TemplateMerge::getInstance("Merge").insertTemplate("SliceTFMerge", compare, modify); diff --git 
a/tools/converter/source/optimizer/onnxextra/OnnxConvolutionMerge.cpp b/tools/converter/source/optimizer/onnxextra/OnnxConvolutionMerge.cpp index a185d8927..e63827d3e 100644 --- a/tools/converter/source/optimizer/onnxextra/OnnxConvolutionMerge.cpp +++ b/tools/converter/source/optimizer/onnxextra/OnnxConvolutionMerge.cpp @@ -11,9 +11,107 @@ namespace MNN { namespace Express { +static int convSpatialDim(EXPRP expr) { + auto attrs = expr->get()->main_as_Extra()->attr(); + for (int i = 0; i < attrs->size(); ++i) { + auto attr = attrs->GetAs(i); + if (attr->key()->str() == "kernel_shape") { + return attr->list()->i()->size(); + } + } + return -1; +} + +static EXPRP _transformConv3D(EXPRP expr) { + auto inputs = expr->inputs(); + const int inputSize = inputs.size(); + if (inputSize != 3 && inputSize != 2) { + MNN_ERROR("Convolution3D Input ERROR!\n"); + return nullptr; + } + auto weight = inputs[1]; + + auto weightInfo = weight->getInfo(); + if (nullptr == weightInfo) { + MNN_ERROR("Convolution3D should know weight shape infromation!\n"); + return nullptr; + } + auto& weightShape = weightInfo->dim; + + auto extraParam = expr->get()->main_as_Extra(); + + int co = weightShape[0]; + int ci = weightShape[1]; + int depth = weightShape[2]; + int kh = weightShape[3]; + int kw = weightShape[4]; + + std::unique_ptr conv3d(new MNN::Convolution3DT); + + auto weightDataPtr = weight->readMap(); + conv3d->weight.resize(weightInfo->size); + ::memcpy(conv3d->weight.data(), weightDataPtr, weightInfo->size * sizeof(float)); + conv3d->bias.resize(co); + std::fill(conv3d->bias.begin(), conv3d->bias.end(), 0.0f); + if (inputSize == 3) { + auto biasDataPtr = inputs[2]->readMap(); + ::memcpy(conv3d->bias.data(), biasDataPtr, co * sizeof(float)); + } + + conv3d->common.reset(new MNN::Convolution3DCommonT); + auto common = conv3d->common.get(); + + common->relu = common->relu6 = false; + common->outputCount = co; + common->inputCount = ci; + common->kernels = std::vector({depth, kh, kw}); + + 
const int attrSize = extraParam->attr()->size(); + for (int i = 0; i < attrSize; ++i) { + auto attr = extraParam->attr()->GetAs(i); + const auto& key = attr->key()->str(); + if (key == "dilations") { + auto values = attr->list()->i()->data(); + if (values[0] != 1 || values[1] != 1 || values[2] != 1) { + MNN_ERROR("conv3d not support dilation bigger than 1\n"); + return nullptr; + } + common->dilates = std::vector({values[0], values[1], values[2]}); + } else if (key == "group") { + if (attr->i() != 1) { + MNN_ERROR("group conv3d not support\n"); + return nullptr; + } + } else if (key == "strides") { + auto values = attr->list()->i()->data(); + if (values[0] != 1 || values[1] != 1 || values[2] != 1) { + MNN_ERROR("conv3d not support strides bigger than 1\n"); + return nullptr; + } + common->strides = std::vector({values[0], values[1], values[2]}); + } else if (key == "pads") { + auto values = attr->list()->i()->data(); + common->padMode = MNN::PadMode_CAFFE; + common->pads = std::vector({values[0], values[1], values[2]}); + } + } + + std::unique_ptr newOp(new OpT); + newOp->name = expr->name(); + newOp->type = OpType_Convolution3D; + newOp->main.type = OpParameter_Convolution3D; + newOp->main.value = conv3d.release(); + + auto newExpr = Expr::create(newOp.get(), {inputs[0]}, 1); + return newExpr; +} + class OnnxConvolutionTransform : public OnnxExtraManager::Transform { public: virtual EXPRP onExecute(EXPRP expr) const override { + if (convSpatialDim(expr) == 3) { + return _transformConv3D(expr); + } auto inputs = expr->inputs(); const int inputSize = inputs.size(); if (inputSize != 3 && inputSize != 2) { diff --git a/tools/converter/source/optimizer/onnxextra/OnnxExtraManager.cpp b/tools/converter/source/optimizer/onnxextra/OnnxExtraManager.cpp index 5922617de..92c018ba4 100644 --- a/tools/converter/source/optimizer/onnxextra/OnnxExtraManager.cpp +++ b/tools/converter/source/optimizer/onnxextra/OnnxExtraManager.cpp @@ -35,8 +35,11 @@ std::shared_ptr 
OnnxExtraManager::find(const std::s static auto gRegister = []() { auto extra = OnnxExtraManager::get(); - auto judge = [extra](VARP var) { - auto op = var->expr().first->get(); + auto judge = [extra](EXPRP expr) { + auto op = expr->get(); + if (nullptr == op) { + return false; + } if (op->type() != OpType_Extra) { return false; } @@ -50,27 +53,19 @@ static auto gRegister = []() { } return true; }; - auto modify = [extra](VARP var) { - auto op = var->expr().first->get(); + auto modify = [extra](EXPRP expr) { + auto op = expr->get(); MNN_ASSERT(op->type() == OpType_Extra); auto type = op->main_as_Extra()->type()->str(); auto transformer = extra->find(type); MNN_ASSERT(nullptr != transformer); - auto newExpr = transformer->onExecute(var->expr().first); + auto newExpr = transformer->onExecute(expr); if (nullptr == newExpr) { - MNN_ERROR("Convert Onnx's Op %s , type = %s, failed, may be some node is not const\n", var->expr().first->name().c_str(), type.c_str()); + MNN_ERROR("Convert Onnx's Op %s , type = %s, failed, may be some node is not const\n", expr->name().c_str(), type.c_str()); return false; } - newExpr->setName(var->expr().first->name()); - auto outputs = var->expr().first->outputs(); - for (auto weakVar : outputs) { - auto var = weakVar.lock(); - if (nullptr == var) { - continue; - } - auto index = var->expr().second; - Variable::setExpr(var, newExpr, index); - } + newExpr->setName(expr->name()); + Expr::replace(expr, newExpr); return true; }; TemplateMerge::getInstance("OnnxExtra").insertTemplate("OnnxExtraManager", judge, modify); diff --git a/tools/converter/source/optimizer/onnxextra/OnnxExtraManager.hpp b/tools/converter/source/optimizer/onnxextra/OnnxExtraManager.hpp index c44bb0176..e49732753 100644 --- a/tools/converter/source/optimizer/onnxextra/OnnxExtraManager.hpp +++ b/tools/converter/source/optimizer/onnxextra/OnnxExtraManager.hpp @@ -7,7 +7,7 @@ // #include "../TemplateMerge.hpp" -#include "ExprCreator.hpp" +#include namespace MNN { namespace 
Express { class OnnxExtraManager { @@ -16,10 +16,10 @@ class OnnxExtraManager { public: virtual ~ Transform() = default; Transform() = default; - + virtual EXPRP onExecute(EXPRP expr) const = 0; }; - + void insert(const std::string& name, std::shared_ptr transform); std::shared_ptr find(const std::string& name) const; static std::shared_ptr get(); diff --git a/tools/converter/source/optimizer/onnxextra/OnnxSlice.cpp b/tools/converter/source/optimizer/onnxextra/OnnxSlice.cpp index 2b9b7c69d..b2f1f3569 100644 --- a/tools/converter/source/optimizer/onnxextra/OnnxSlice.cpp +++ b/tools/converter/source/optimizer/onnxextra/OnnxSlice.cpp @@ -19,43 +19,60 @@ class OnnxSliceTransform : public OnnxExtraManager::Transform { MNN_ASSERT(op->type() == OpType_Extra); auto type = op->main_as_Extra()->type()->str(); auto inputs = expr->inputs(); - MNN_ASSERT(inputs.size() == 1); auto input = inputs[0]; auto inputInfo = input->getInfo(); if (nullptr == inputInfo) { - MNN_ERROR("Onnx slice must use the same dimensition"); + MNN_ERROR("Onnx slice must use the same dimensition\n"); return nullptr; } std::unique_ptr sliceOp(new OpT); sliceOp->type = OpType_SliceTf; sliceOp->name = op->name()->str(); auto attrs = op->main_as_Extra()->attr(); - if (nullptr == attrs) { + if (inputs.size() == 1 && nullptr == attrs) { + MNN_PRINT("Attrs of Slice in ONNX must not be null when inputs.size == 1\n"); return nullptr; } std::vector starts; std::vector ends; std::vector axes; - auto copyFunction = [](std::vector& dst, const MNN::Attribute* attr) { - MNN_ASSERT(nullptr != attr->list()); - MNN_ASSERT(nullptr != attr->list()->i()); - dst.resize(attr->list()->i()->size()); - ::memcpy(dst.data(), attr->list()->i()->data(), dst.size() * sizeof(int)); - }; - for (int i=0; isize(); ++i) { - auto attr = attrs->GetAs(i); - if (nullptr == attr->list()) { - return nullptr; - } - if (attr->key()->str() == "axes") { - copyFunction(axes, attr); - } - else if (attr->key()->str() == "ends") { - copyFunction(ends, 
attr); - } - else if (attr->key()->str() == "starts") { - copyFunction(starts, attr); + if (inputs.size() == 1) { + auto copyFunction = [](std::vector& dst, const MNN::Attribute* attr) { + MNN_ASSERT(nullptr != attr->list()); + MNN_ASSERT(nullptr != attr->list()->i()); + dst.resize(attr->list()->i()->size()); + ::memcpy(dst.data(), attr->list()->i()->data(), dst.size() * sizeof(int)); + }; + for (int i=0; isize(); ++i) { + auto attr = attrs->GetAs(i); + if (nullptr == attr->list()) { + return nullptr; + } + if (attr->key()->str() == "axes") { + copyFunction(axes, attr); + } + else if (attr->key()->str() == "ends") { + copyFunction(ends, attr); + } + else if (attr->key()->str() == "starts") { + copyFunction(starts, attr); + } } + } else if (inputs.size() == 4) { + auto copyFunction = [](std::vector& dst, const VARP& var) { + MNN_ASSERT(nullptr != var); + auto varInfo = var->getInfo(); + auto varData = var->readMap(); + MNN_ASSERT(nullptr != varInfo && nullptr != varData); + dst.resize(varInfo->size); + ::memcpy(dst.data(), varData, dst.size() * sizeof(int)); + }; + copyFunction(starts, inputs[1]); + copyFunction(ends, inputs[2]); + copyFunction(axes, inputs[3]); + } else { + MNN_ERROR("Slice onnx must have 1 or 4 input\n"); + return nullptr; } if (starts.size() != ends.size() || ends.size() != axes.size() || starts.size() != axes.size()) { return nullptr; diff --git a/tools/converter/source/optimizer/onnxextra/OnnxSoftplus.cpp b/tools/converter/source/optimizer/onnxextra/OnnxSoftplus.cpp new file mode 100644 index 000000000..cbe006f63 --- /dev/null +++ b/tools/converter/source/optimizer/onnxextra/OnnxSoftplus.cpp @@ -0,0 +1,31 @@ +// +// OnnxSoftplus.cpp +// MNNConverter +// +// Created by MNN on 2019/12/05. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "OnnxExtraManager.hpp" +#include "MNN_generated.h" + +namespace MNN { +namespace Express { + +class OnnxSoftplusTransform : public OnnxExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto input = expr->inputs()[0]; + auto newExpr = _Softplus(input)->expr().first; + return newExpr; + } +}; + +static auto gRegister = []() { + OnnxExtraManager::get()->insert("Softplus", std::shared_ptr(new OnnxSoftplusTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/onnxextra/OnnxSoftsign.cpp b/tools/converter/source/optimizer/onnxextra/OnnxSoftsign.cpp new file mode 100644 index 000000000..5e8edef2c --- /dev/null +++ b/tools/converter/source/optimizer/onnxextra/OnnxSoftsign.cpp @@ -0,0 +1,31 @@ +// +// OnnxSoftsign.cpp +// MNNConverter +// +// Created by MNN on 2019/12/05. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "OnnxExtraManager.hpp" +#include "MNN_generated.h" + +namespace MNN { +namespace Express { + +class OnnxSoftsignTransform : public OnnxExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto input = expr->inputs()[0]; + auto newExpr = _Softsign(input)->expr().first; + return newExpr; + } +}; + +static auto gRegister = []() { + OnnxExtraManager::get()->insert("Softsign", std::shared_ptr(new OnnxSoftsignTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/onnxextra/OnnxUpsample.cpp b/tools/converter/source/optimizer/onnxextra/OnnxUpsample.cpp index 96197d969..9ea12f188 100644 --- a/tools/converter/source/optimizer/onnxextra/OnnxUpsample.cpp +++ b/tools/converter/source/optimizer/onnxextra/OnnxUpsample.cpp @@ -97,6 +97,7 @@ class OnnxReiszeTransform : public OnnxExtraManager::Transform { MNN_CHECK(inputs.size() == 4, "Onnx Resize 
should have 4 inputs!"); std::string resizeMode = ""; + std::string coordMode = ""; // detect align_corner attribute auto op = expr->get(); auto extraParam = op->main_as_Extra(); const int attrSize = extraParam->attr()->size(); @@ -105,6 +106,8 @@ class OnnxReiszeTransform : public OnnxExtraManager::Transform { const auto& key = attr->key()->str(); if (key == "mode") { resizeMode = attr->s()->str(); + } else if (key == "coordinate_transformation_mode") { + coordMode = attr->s()->str(); } } @@ -121,6 +124,7 @@ class OnnxReiszeTransform : public OnnxExtraManager::Transform { } else { MNN_ERROR("Unsupported Upsample mode! ==> %s\n", resizeMode.c_str()); } + resizeParam->alignCorners = (coordMode == "align_corners"); auto sizes = inputs[3]; diff --git a/tools/converter/source/optimizer/onnxextra/ResolveIdentityOnnx.cpp b/tools/converter/source/optimizer/onnxextra/ResolveIdentityOnnx.cpp index 42d6d0c84..39f185f97 100644 --- a/tools/converter/source/optimizer/onnxextra/ResolveIdentityOnnx.cpp +++ b/tools/converter/source/optimizer/onnxextra/ResolveIdentityOnnx.cpp @@ -22,7 +22,7 @@ class ResolveIdentityOnnx : public OnnxExtraManager::Transform { MNN_CHECK(outputs.size() == 1, "Identity Should have one output"); auto outputVaribale = outputs.front(); - auto outputExpr = outputVaribale.lock()->expr().first; + auto outputExpr = outputVaribale.lock(); auto outputExprOp = outputExpr->get(); std::unique_ptr newOp(new OpT); @@ -36,7 +36,7 @@ class ResolveIdentityOnnx : public OnnxExtraManager::Transform { // find the matched input, then replace it const int size = outputExprInputs.size(); for (int i = 0; i < size; ++i) { - if (outputExprInputs[i] == outputVaribale.lock()) { + if (outputExprInputs[i]->expr().first.get() == outputExpr.get()) { outputExprInputs[i] = inputs[0]; break; } diff --git a/tools/converter/source/optimizer/postconvert/AddTensorFormatConverter.cpp b/tools/converter/source/optimizer/postconvert/AddTensorFormatConverter.cpp index f642d34d2..ed95cd838 100644 
--- a/tools/converter/source/optimizer/postconvert/AddTensorFormatConverter.cpp +++ b/tools/converter/source/optimizer/postconvert/AddTensorFormatConverter.cpp @@ -32,11 +32,12 @@ const std::set NC4HW4_OPs = { MNN::OpType_QuantizedDepthwiseConv2D, MNN::OpType_BatchToSpaceND, MNN::OpType_SpaceToBatchND, - MNN::OpType_BatchNorm, + MNN::OpType_InstanceNorm, MNN::OpType_Moments, MNN::OpType_QuantizedAvgPool, MNN::OpType_QuantizedAdd, MNN::OpType_PReLU, + MNN::OpType_Dilation2D, }; const std::set COMPABILITY_OPs = {MNN::OpType_ReLU, MNN::OpType_ReLU6, MNN::OpType_Concat, MNN::OpType_Slice, MNN::OpType_Permute, MNN::OpType_Selu, @@ -285,10 +286,10 @@ class AddTensorFormatConverter : public PostConverter { reshape->dims[axisMap[i]] = originDim[i]; } } - if (MNN::OpType_ArgMax == op->type) { + if (MNN::OpType_ArgMax == op->type || MNN::OpType_ArgMin == op->type) { auto param = op->main.AsArgMax(); auto originAxis = param->axis; - DCHECK(originAxis >= 0 && originAxis <= 3) << "ArgMax axis ERROR!"; + DCHECK(originAxis >= 0 && originAxis <= 3) << "ArgMax / Argmin axis ERROR!"; param->axis = axisMap[originAxis]; } } diff --git a/tools/converter/source/optimizer/postconvert/MergeBNToConvolution.cpp b/tools/converter/source/optimizer/postconvert/MergeBNToConvolution.cpp new file mode 100644 index 000000000..ae4147da0 --- /dev/null +++ b/tools/converter/source/optimizer/postconvert/MergeBNToConvolution.cpp @@ -0,0 +1,121 @@ +// +// MergeBNToConvolution.cpp +// MNNConverter +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "../PostTreatUtils.hpp" +#include "MergeToConvolution.hpp" + +using namespace MNN; + +class MergeBNToConvolution : public MergeToConvolution { +public: + bool merge2Convolution(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const { + const auto& convCommon = convolutionOp->main.AsConvolution2D()->common; + if (convCommon->relu || convCommon->relu6) { + return false; + } + + if (inplaceOp->type == MNN::OpType_BatchNorm) { + std::vector alpha; + std::vector bias; + + auto l = inplaceOp->main.AsBatchNorm(); + alpha.resize(l->channels); + bias.resize(l->channels); + const float* slopePtr = l->slopeData.data(); + const float* meanDataPtr = l->meanData.data(); + const float* varDataPtr = l->varData.data(); + const float* biasDataPtr = l->biasData.data(); + + for (int i = 0; i < l->channels; i++) { + float sqrt_var = sqrt(varDataPtr[i]); + bias[i] = biasDataPtr[i] - slopePtr[i] * meanDataPtr[i] / sqrt_var; + alpha[i] = slopePtr[i] / sqrt_var; + } + + auto conv2D = convolutionOp->main.AsConvolution2D(); + int outputCount = conv2D->common->outputCount; + for (int i = 0; i < outputCount; ++i) { + conv2D->bias[i] = conv2D->bias[i] * alpha[i] + bias[i]; + } + + if (nullptr != conv2D->quanParameter.get()) { + for (int i = 0; i < outputCount; ++i) { + conv2D->quanParameter->alpha[i] *= alpha[i]; + } + } else { + int weightPartSize = conv2D->weight.size() / outputCount; + if (convolutionOp->type == OpType_Deconvolution) { + int inputCount = + conv2D->weight.size() / outputCount / conv2D->common->kernelX / conv2D->common->kernelY; + for (int i = 0; i < inputCount; ++i) { + auto dstPos = i * outputCount * conv2D->common->kernelY * conv2D->common->kernelX; + for (int j = 0; j < outputCount; ++j) { + auto dstPosJ = dstPos + j * conv2D->common->kernelY * conv2D->common->kernelX; + float a = alpha[j]; + for (int k = 0; k < conv2D->common->kernelY * conv2D->common->kernelX; ++k) { + conv2D->weight[dstPosJ + k] *= 
a; + } + } + } + } else { + for (int i = 0; i < outputCount; ++i) { + float a = alpha[i]; + for (int j = 0; j < weightPartSize; ++j) { + conv2D->weight[i * weightPartSize + j] *= a; + } + } + } + } + return true; + } + return false; + } + + bool merge2Convolution3D(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const { + const auto& convCommon = convolutionOp->main.AsConvolution3D()->common; + if (convCommon->relu || convCommon->relu6) { + return false; + } + + if (inplaceOp->type == MNN::OpType_BatchNorm) { + std::vector alpha; + std::vector bias; + + auto l = inplaceOp->main.AsBatchNorm(); + alpha.resize(l->channels); + bias.resize(l->channels); + const float* slopePtr = l->slopeData.data(); + const float* meanDataPtr = l->meanData.data(); + const float* varDataPtr = l->varData.data(); + const float* biasDataPtr = l->biasData.data(); + + for (int i = 0; i < l->channels; i++) { + float sqrt_var = sqrt(varDataPtr[i]); + bias[i] = biasDataPtr[i] - slopePtr[i] * meanDataPtr[i] / sqrt_var; + alpha[i] = slopePtr[i] / sqrt_var; + } + + auto conv3D = convolutionOp->main.AsConvolution3D(); + int outputCount = conv3D->common->outputCount; + for (int i = 0; i < outputCount; ++i) { + conv3D->bias[i] = conv3D->bias[i] * alpha[i] + bias[i]; + } + + int weightPartSize = conv3D->weight.size() / outputCount; + for (int i = 0; i < outputCount; ++i) { + float a = alpha[i]; + for (int j = 0; j < weightPartSize; ++j) { + conv3D->weight[i * weightPartSize + j] *= a; + } + } + return true; + } + return false; + } +}; +static PostConverterRegister __l("MergeBNToConvolution"); diff --git a/tools/converter/source/optimizer/postconvert/MergeRelu6ToConvolution.cpp b/tools/converter/source/optimizer/postconvert/MergeRelu6ToConvolution.cpp new file mode 100644 index 000000000..e4ddf7288 --- /dev/null +++ b/tools/converter/source/optimizer/postconvert/MergeRelu6ToConvolution.cpp @@ -0,0 +1,32 @@ +// +// MergeRelu6ToConvolution.cpp +// MNNConverter +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "../PostTreatUtils.hpp" +#include "MergeToConvolution.hpp" + +using namespace MNN; + +class MergeRelu6ToConvolution : public MergeToConvolution { +public: + bool merge2Convolution(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const { + if (inplaceOp->type == MNN::OpType_ReLU6) { + convolutionOp->main.AsConvolution2D()->common->relu6 = true; + return true; + } + return false; + } + + bool merge2Convolution3D(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const { + if (inplaceOp->type == MNN::OpType_ReLU6) { + convolutionOp->main.AsConvolution3D()->common->relu6 = true; + return true; + } + return false; + } +}; +static PostConverterRegister __l("MergeRelu6ToConvolution"); diff --git a/tools/converter/source/optimizer/postconvert/MergeReluToConvolution.cpp b/tools/converter/source/optimizer/postconvert/MergeReluToConvolution.cpp new file mode 100644 index 000000000..bfd40eeff --- /dev/null +++ b/tools/converter/source/optimizer/postconvert/MergeReluToConvolution.cpp @@ -0,0 +1,32 @@ +// +// MergeReluToConvolution.cpp +// MNNConverter +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "../PostTreatUtils.hpp" +#include "MergeToConvolution.hpp" + +using namespace MNN; + +class MergeReluToConvolution : public MergeToConvolution { +public: + bool merge2Convolution(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const { + if (inplaceOp->type == MNN::OpType_ReLU && inplaceOp->main.AsRelu()->slope == 0.0f) { + convolutionOp->main.AsConvolution2D()->common->relu = true; + return true; + } + return false; + } + + bool merge2Convolution3D(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const { + if (inplaceOp->type == MNN::OpType_ReLU && inplaceOp->main.AsRelu()->slope == 0.0f) { + convolutionOp->main.AsConvolution3D()->common->relu = true; + return true; + } + return false; + } +}; +static PostConverterRegister __l("MergeReluToConvolution"); diff --git a/tools/converter/source/optimizer/postconvert/MergeScaleToConvolution.cpp b/tools/converter/source/optimizer/postconvert/MergeScaleToConvolution.cpp new file mode 100644 index 000000000..bb06a606c --- /dev/null +++ b/tools/converter/source/optimizer/postconvert/MergeScaleToConvolution.cpp @@ -0,0 +1,99 @@ +// +// MergeScaleToConvolution.cpp +// MNNConverter +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "../PostTreatUtils.hpp" +#include "MergeToConvolution.hpp" + +using namespace MNN; + +class MergeScaleToConvolution : public MergeToConvolution { +public: + bool merge2Convolution(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const { + const auto& convCommon = convolutionOp->main.AsConvolution2D()->common; + if (convCommon->relu || convCommon->relu6) { + return false; + } + + if (inplaceOp->type == MNN::OpType_Scale) { + std::vector alpha; + std::vector bias; + + bias = inplaceOp->main.AsScale()->biasData; + alpha = inplaceOp->main.AsScale()->scaleData; + + auto conv2D = convolutionOp->main.AsConvolution2D(); + int outputCount = conv2D->common->outputCount; + for (int i = 0; i < outputCount; ++i) { + conv2D->bias[i] = conv2D->bias[i] * alpha[i] + bias[i]; + } + + if (nullptr != conv2D->quanParameter.get()) { + for (int i = 0; i < outputCount; ++i) { + conv2D->quanParameter->alpha[i] *= alpha[i]; + } + } else { + int weightPartSize = conv2D->weight.size() / outputCount; + if (convolutionOp->type == OpType_Deconvolution) { + int inputCount = + conv2D->weight.size() / outputCount / conv2D->common->kernelX / conv2D->common->kernelY; + for (int i = 0; i < inputCount; ++i) { + auto dstPos = i * outputCount * conv2D->common->kernelY * conv2D->common->kernelX; + for (int j = 0; j < outputCount; ++j) { + auto dstPosJ = dstPos + j * conv2D->common->kernelY * conv2D->common->kernelX; + float a = alpha[j]; + for (int k = 0; k < conv2D->common->kernelY * conv2D->common->kernelX; ++k) { + conv2D->weight[dstPosJ + k] *= a; + } + } + } + } else { + for (int i = 0; i < outputCount; ++i) { + float a = alpha[i]; + for (int j = 0; j < weightPartSize; ++j) { + conv2D->weight[i * weightPartSize + j] *= a; + } + } + } + } + return true; + } + return false; + } + + bool merge2Convolution3D(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const { + const auto& convCommon = 
convolutionOp->main.AsConvolution3D()->common; + if (convCommon->relu || convCommon->relu6) { + return false; + } + + if (inplaceOp->type == MNN::OpType_Scale) { + std::vector alpha; + std::vector bias; + + bias = inplaceOp->main.AsScale()->biasData; + alpha = inplaceOp->main.AsScale()->scaleData; + + auto conv3D = convolutionOp->main.AsConvolution3D(); + int outputCount = conv3D->common->outputCount; + for (int i = 0; i < outputCount; ++i) { + conv3D->bias[i] = conv3D->bias[i] * alpha[i] + bias[i]; + } + + int weightPartSize = conv3D->weight.size() / outputCount; + for (int i = 0; i < outputCount; ++i) { + float a = alpha[i]; + for (int j = 0; j < weightPartSize; ++j) { + conv3D->weight[i * weightPartSize + j] *= a; + } + } + return true; + } + return false; + } +}; +static PostConverterRegister __l("MergeScaleToConvolution"); diff --git a/tools/converter/source/optimizer/postconvert/MergeToConvolution.cpp b/tools/converter/source/optimizer/postconvert/MergeToConvolution.cpp deleted file mode 100644 index 6ea79bdbc..000000000 --- a/tools/converter/source/optimizer/postconvert/MergeToConvolution.cpp +++ /dev/null @@ -1,190 +0,0 @@ -// -// MergeToConvolution.cpp -// MNNConverter -// -// Created by MNN on 2019/09/05. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "../PostTreatUtils.hpp" -using namespace MNN; -static bool _merge2Convolution(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) { - if (inplaceOp->type == MNN::OpType_ReLU && inplaceOp->main.AsRelu()->slope == 0.0f) { - convolutionOp->main.AsConvolution2D()->common->relu = true; - return true; - } - if (inplaceOp->type == MNN::OpType_ReLU6) { - convolutionOp->main.AsConvolution2D()->common->relu6 = true; - return true; - } - - const auto& convCommon = convolutionOp->main.AsConvolution2D()->common; - if (convCommon->relu || convCommon->relu6) { - return false; - } - - if (inplaceOp->type == MNN::OpType_BatchNorm || inplaceOp->type == MNN::OpType_Scale) { - std::vector alpha; - std::vector bias; - if (inplaceOp->type == MNN::OpType_BatchNorm) { - auto l = inplaceOp->main.AsBatchNorm(); - alpha.resize(l->channels); - bias.resize(l->channels); - const float* slopePtr = l->slopeData.data(); - const float* meanDataPtr = l->meanData.data(); - const float* varDataPtr = l->varData.data(); - const float* biasDataPtr = l->biasData.data(); - - for (int i = 0; i < l->channels; i++) { - float sqrt_var = sqrt(varDataPtr[i]); - bias[i] = biasDataPtr[i] - slopePtr[i] * meanDataPtr[i] / sqrt_var; - alpha[i] = slopePtr[i] / sqrt_var; - } - } - if (inplaceOp->type == MNN::OpType_Scale) { - bias = inplaceOp->main.AsScale()->biasData; - alpha = inplaceOp->main.AsScale()->scaleData; - } - - auto conv2D = convolutionOp->main.AsConvolution2D(); - int outputCount = conv2D->common->outputCount; - for (int i = 0; i < outputCount; ++i) { - conv2D->bias[i] = conv2D->bias[i] * alpha[i] + bias[i]; - } - - if (nullptr != conv2D->quanParameter.get()) { - for (int i = 0; i < outputCount; ++i) { - conv2D->quanParameter->alpha[i] *= alpha[i]; - } - } else { - int weightPartSize = conv2D->weight.size() / outputCount; - if (convolutionOp->type == OpType_Deconvolution) { - int inputCount = - conv2D->weight.size() / outputCount / 
conv2D->common->kernelX / conv2D->common->kernelY; - for (int i = 0; i < inputCount; ++i) { - auto dstPos = i * outputCount * conv2D->common->kernelY * conv2D->common->kernelX; - for (int j = 0; j < outputCount; ++j) { - auto dstPosJ = dstPos + j * conv2D->common->kernelY * conv2D->common->kernelX; - float a = alpha[j]; - for (int k = 0; k < conv2D->common->kernelY * conv2D->common->kernelX; ++k) { - conv2D->weight[dstPosJ + k] *= a; - } - } - } - } else { - for (int i = 0; i < outputCount; ++i) { - float a = alpha[i]; - for (int j = 0; j < weightPartSize; ++j) { - conv2D->weight[i * weightPartSize + j] *= a; - } - } - } - } - return true; - } - return false; -} - -static bool _merge2Convolution3D(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) { - if (inplaceOp->type == MNN::OpType_ReLU && inplaceOp->main.AsRelu()->slope == 0.0f) { - convolutionOp->main.AsConvolution3D()->common->relu = true; - return true; - } - if (inplaceOp->type == MNN::OpType_ReLU6) { - convolutionOp->main.AsConvolution3D()->common->relu6 = true; - return true; - } - - const auto& convCommon = convolutionOp->main.AsConvolution3D()->common; - if (convCommon->relu || convCommon->relu6) { - return false; - } - - if (inplaceOp->type == MNN::OpType_BatchNorm || inplaceOp->type == MNN::OpType_Scale) { - std::vector alpha; - std::vector bias; - if (inplaceOp->type == MNN::OpType_BatchNorm) { - auto l = inplaceOp->main.AsBatchNorm(); - alpha.resize(l->channels); - bias.resize(l->channels); - const float* slopePtr = l->slopeData.data(); - const float* meanDataPtr = l->meanData.data(); - const float* varDataPtr = l->varData.data(); - const float* biasDataPtr = l->biasData.data(); - - for (int i = 0; i < l->channels; i++) { - float sqrt_var = sqrt(varDataPtr[i]); - bias[i] = biasDataPtr[i] - slopePtr[i] * meanDataPtr[i] / sqrt_var; - alpha[i] = slopePtr[i] / sqrt_var; - } - } - if (inplaceOp->type == MNN::OpType_Scale) { - bias = inplaceOp->main.AsScale()->biasData; - alpha = 
inplaceOp->main.AsScale()->scaleData; - } - - auto conv3D = convolutionOp->main.AsConvolution3D(); - int outputCount = conv3D->common->outputCount; - for (int i = 0; i < outputCount; ++i) { - conv3D->bias[i] = conv3D->bias[i] * alpha[i] + bias[i]; - } - - int weightPartSize = conv3D->weight.size() / outputCount; - for (int i = 0; i < outputCount; ++i) { - float a = alpha[i]; - for (int j = 0; j < weightPartSize; ++j) { - conv3D->weight[i * weightPartSize + j] *= a; - } - } - return true; - } - - return false; -} - -class MergeToConvolution : public PostConverter { -public: - virtual bool onExecute(std::unique_ptr& net) const override { - // Merge Layer - std::vector readyToDelete; - for (auto iter = net->oplists.begin(); iter != net->oplists.end(); iter++) { - MNN::OpT& currentOp = *(iter->get()); - if (currentOp.type != MNN::OpType_Convolution - && currentOp.type != MNN::OpType_Deconvolution - && currentOp.type != MNN::OpType_ConvolutionDepthwise - && currentOp.type != MNN::OpType_Convolution3D) { - continue; - } - DCHECK(currentOp.outputIndexes.size() == 1) << "Conv output ERROR!"; - - // merge Batchnorm/Relu/Relu6 to Convolution - std::vector nextOp = PostTreatUtils::_findOpByInputIndex(currentOp.outputIndexes[0], net.get()); - while (1) { - if (nextOp.size() != 1) { - break; - } - const int nextOutputIndex = nextOp[0]->outputIndexes[0]; - bool succ; - if (currentOp.type == MNN::OpType_Convolution3D) { - succ = _merge2Convolution3D(nextOp[0], ¤tOp); - } else { - succ = _merge2Convolution(nextOp[0], ¤tOp); - } - if (PostTreatUtils::_isSingleInputOutput(nextOp[0]) && succ) { - // LOG(INFO) << "Merge " << nextOp[0]->name.c_str()<< " into convolution: " << - // currentOp.name.c_str(); - currentOp.outputIndexes[0] = nextOp[0]->outputIndexes[0]; - readyToDelete.push_back(nextOp[0]); - nextOp = PostTreatUtils::_findOpByInputIndex(nextOutputIndex, net.get()); - } else { - break; - } - } - } - for (auto op : readyToDelete) { - PostTreatUtils::_removeOpInNet(op, 
net.get()); - } - return true; - } -}; -static PostConverterRegister __l("MergeToConvolution"); diff --git a/tools/converter/source/optimizer/postconvert/MergeToConvolution.hpp b/tools/converter/source/optimizer/postconvert/MergeToConvolution.hpp new file mode 100644 index 000000000..80f1b1139 --- /dev/null +++ b/tools/converter/source/optimizer/postconvert/MergeToConvolution.hpp @@ -0,0 +1,60 @@ +// +// MergeToConvolution.hpp +// MNNConverter +// +// Created by MNN on 2019/09/05. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "../PostTreatUtils.hpp" +using namespace MNN; + +class MergeToConvolution : public PostConverter { +public: + virtual bool merge2Convolution(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const = 0; + + virtual bool merge2Convolution3D(const MNN::OpT* inplaceOp, MNN::OpT* convolutionOp) const = 0; + + virtual bool onExecute(std::unique_ptr& net) const override { + // Merge Layer + std::vector readyToDelete; + for (auto iter = net->oplists.begin(); iter != net->oplists.end(); iter++) { + MNN::OpT& currentOp = *(iter->get()); + if (currentOp.type != MNN::OpType_Convolution + && currentOp.type != MNN::OpType_Deconvolution + && currentOp.type != MNN::OpType_ConvolutionDepthwise + && currentOp.type != MNN::OpType_Convolution3D) { + continue; + } + DCHECK(currentOp.outputIndexes.size() == 1) << "Conv output ERROR!"; + + // merge Batchnorm/Relu/Relu6 to Convolution + std::vector nextOp = PostTreatUtils::_findOpByInputIndex(currentOp.outputIndexes[0], net.get()); + while (1) { + if (nextOp.size() != 1) { + break; + } + const int nextOutputIndex = nextOp[0]->outputIndexes[0]; + bool succ; + if (currentOp.type == MNN::OpType_Convolution3D) { + succ = merge2Convolution3D(nextOp[0], ¤tOp); + } else { + succ = merge2Convolution(nextOp[0], ¤tOp); + } + if (PostTreatUtils::_isSingleInputOutput(nextOp[0]) && succ) { + // LOG(INFO) << "Merge " << nextOp[0]->name.c_str()<< " into convolution: " << + // currentOp.name.c_str(); + 
currentOp.outputIndexes[0] = nextOp[0]->outputIndexes[0]; + readyToDelete.push_back(nextOp[0]); + nextOp = PostTreatUtils::_findOpByInputIndex(nextOutputIndex, net.get()); + } else { + break; + } + } + } + for (auto op : readyToDelete) { + PostTreatUtils::_removeOpInNet(op, net.get()); + } + return true; + } +}; diff --git a/tools/converter/source/optimizer/postconvert/RemoveDropout.cpp b/tools/converter/source/optimizer/postconvert/RemoveDropout.cpp new file mode 100644 index 000000000..d1014909f --- /dev/null +++ b/tools/converter/source/optimizer/postconvert/RemoveDropout.cpp @@ -0,0 +1,54 @@ +// +// RemoveDropout.cpp +// MNNConverter +// +// Created by MNN on 2019/11/26. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include "../PostTreatUtils.hpp" +#include "RemoveTestNoUseOps.hpp" + +using namespace MNN; + +class RemoveDropout : public RemoveTestNoUseOps { +public: + /* The Op's output set as input */ + bool shouldDeleteJudge(const MNN::OpT* op, const MNN::NetT* const netPtr) const override { + static auto unuseOpType = std::vector({OpType_Dropout}); + static auto unuseExtraOpType = std::vector({"Dropout"}); + if (std::find(unuseOpType.begin(), unuseOpType.end(), op->type) != unuseOpType.end()) { + return true; + } + if (op->type == OpType_Extra) { + if (std::find(unuseExtraOpType.begin(), unuseExtraOpType.end(), op->main.AsExtra()->type) != unuseExtraOpType.end()) { + return true; + } + if (netPtr->sourceType == MNN::NetSource_CAFFE && op->main.AsExtra()->type == "Split") { + return true; + } + } + if (op->type == OpType_Cast) { + if (op->main.AsCastParam()->dstT == op->main.AsCastParam()->srcT) { + return true; + } + if (op->main.AsCastParam()->dstT == MNN::DataType_DT_INT32 && op->main.AsCastParam()->srcT == MNN::DataType_DT_INT64) { + return true; + } + if (op->main.AsCastParam()->srcT == MNN::DataType_DT_INT32 && op->main.AsCastParam()->dstT == MNN::DataType_DT_INT64) { + return true; + } + } + return false; + 
}; + bool shouldRemoveUnusefulInputs(const MNN::OpT* op) const override { + return false; + }; + bool shouldDeleteOutput(const MNN::OpT* op) const override { + return false; + }; +}; +static PostConverterRegister __l("RemoveDropout"); diff --git a/tools/converter/source/optimizer/postconvert/RemoveTestNoUseOps.hpp b/tools/converter/source/optimizer/postconvert/RemoveTestNoUseOps.hpp new file mode 100644 index 000000000..a1d768ef6 --- /dev/null +++ b/tools/converter/source/optimizer/postconvert/RemoveTestNoUseOps.hpp @@ -0,0 +1,97 @@ +// +// RemoveTestNoUseOps.hpp +// MNNConverter +// +// Created by MNN on 2019/11/26. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include "../PostTreatUtils.hpp" +using namespace MNN; + +class RemoveTestNoUseOps : public PostConverter { +public: + /* The Op's output set as input */ + virtual bool shouldDeleteJudge(const MNN::OpT* op, const MNN::NetT* const netPtr) const = 0; + + virtual bool shouldRemoveUnusefulInputs(const MNN::OpT* op) const = 0; + + virtual bool shouldDeleteOutput(const MNN::OpT* op) const = 0; + + virtual bool onExecute(std::unique_ptr& net) const override { + + const MNN::NetT* const netPtr = net.get(); + + std::set uselessIndex; + for (auto iter = net->oplists.begin(); iter != net->oplists.end();) { + auto& op = *iter; + bool shouldDelete = shouldDeleteJudge(op.get(), netPtr); + if (!shouldDelete) { + iter++; + continue; + } + bool deleteOutput = shouldDeleteOutput(op.get()); + // Find the next op + if (op->outputIndexes.empty() || op->inputIndexes.empty()) { + iter = net->oplists.erase(iter); + continue; + } + + auto originInput = op->inputIndexes[0]; + auto originOutputs = op->outputIndexes; + for (auto subIter = net->oplists.begin(); subIter != net->oplists.end(); subIter++) { + auto& subOp = *subIter; + if (deleteOutput) { + for (auto iter=subOp->inputIndexes.begin(); iter != subOp->inputIndexes.end();) { + if (std::find(originOutputs.begin(), originOutputs.end(), 
*iter) != originOutputs.end()) { + iter = subOp->inputIndexes.erase(iter); + continue; + } + iter++; + } + } else { + for (int v = 0; v < subOp->inputIndexes.size(); ++v) { + if (std::find(originOutputs.begin(), originOutputs.end(), subOp->inputIndexes[v]) != originOutputs.end()) { + subOp->inputIndexes[v] = originInput; + } + } + } + } + bool removeUselessInput = shouldRemoveUnusefulInputs(op.get()); + if (removeUselessInput) { + for (int index = 0; index < op->inputIndexes.size(); ++index) { + uselessIndex.insert(op->inputIndexes[index]); + } + } + iter = net->oplists.erase(iter); + } + for (auto iter = net->oplists.begin(); iter != net->oplists.end(); iter++) { + for (auto index : (*iter)->inputIndexes) { + if (uselessIndex.find(index) != uselessIndex.end()) { + uselessIndex.erase(index); + } + } + } + + for (auto iter = net->oplists.begin(); iter != net->oplists.end();) { + auto& op = *iter; + bool useless = true; + for (auto index : op->outputIndexes) { + if (uselessIndex.find(index) == uselessIndex.end()) { + useless = false; + break; + } + } + if (!useless) { + iter++; + continue; + } + iter = net->oplists.erase(iter); + } + + return true; + } +}; diff --git a/tools/converter/source/optimizer/postconvert/RemoveUnusefulOp.cpp b/tools/converter/source/optimizer/postconvert/RemoveUnusefulOp.cpp index 9b35298a8..c71a1fe9f 100644 --- a/tools/converter/source/optimizer/postconvert/RemoveUnusefulOp.cpp +++ b/tools/converter/source/optimizer/postconvert/RemoveUnusefulOp.cpp @@ -10,133 +10,62 @@ #include #include #include "../PostTreatUtils.hpp" +#include "RemoveTestNoUseOps.hpp" + using namespace MNN; -class RemoveUnusefulOp : public PostConverter { +class RemoveUnusefulOp : public RemoveTestNoUseOps { public: - virtual bool onExecute(std::unique_ptr& net) const override { - - const MNN::NetT* const netPtr = net.get(); - - /* The Op's output set as input */ - auto shouldDeleteJudge = [=](const MNN::OpT* op) { - static auto unuseOpType = std::vector({OpType_Seq2Out, 
OpType_Dropout}); - static auto unuseExtraOpType = std::vector({"Identity", "NoOp", "Dropout", "Print", "Assert", "StopGradient"}); - if (std::find(unuseOpType.begin(), unuseOpType.end(), op->type) != unuseOpType.end()) { + /* The Op's output set as input */ + bool shouldDeleteJudge(const MNN::OpT* op, const MNN::NetT* const netPtr) const override { + static auto unuseOpType = std::vector({OpType_Seq2Out}); + static auto unuseExtraOpType = std::vector({"Identity", "NoOp", "Print", "Assert", "StopGradient"}); + if (std::find(unuseOpType.begin(), unuseOpType.end(), op->type) != unuseOpType.end()) { + return true; + } + if (op->type == OpType_Extra) { + if (std::find(unuseExtraOpType.begin(), unuseExtraOpType.end(), op->main.AsExtra()->type) != unuseExtraOpType.end()) { return true; } - if (op->type == OpType_Extra) { - if (std::find(unuseExtraOpType.begin(), unuseExtraOpType.end(), op->main.AsExtra()->type) != unuseExtraOpType.end()) { - return true; - } - if (netPtr->sourceType == MNN::NetSource_CAFFE && op->main.AsExtra()->type == "Split") { - return true; - } - } - if (op->type == OpType_Cast) { - if (op->main.AsCastParam()->dstT == op->main.AsCastParam()->srcT) { - return true; - } - if (op->main.AsCastParam()->dstT == MNN::DataType_DT_INT32 && op->main.AsCastParam()->srcT == MNN::DataType_DT_INT64) { - return true; - } - if (op->main.AsCastParam()->srcT == MNN::DataType_DT_INT32 && op->main.AsCastParam()->dstT == MNN::DataType_DT_INT64) { - return true; - } + if (netPtr->sourceType == MNN::NetSource_CAFFE && op->main.AsExtra()->type == "Split") { + return true; } - return false; - }; - auto shouldRemoveUnusefulInputs = [=](const MNN::OpT* op) { - if (op->type == OpType_Extra) { - if (op->main.AsExtra()->type == "Assert") { - return true; - } - if (op->main.AsExtra()->type == "NoOp") { - return true; - } - if (op->main.AsExtra()->type == "Print") { - return true; - } - if (op->main.AsExtra()->type == "StopGradient") { - return true; - } + } + if (op->type == 
OpType_Cast) { + if (op->main.AsCastParam()->dstT == op->main.AsCastParam()->srcT) { + return true; } - return false; - }; - auto shouldDeleteOutput = [=](const MNN::OpT* op) { - if (op->type == OpType_Extra) { - return op->main.AsExtra()->type == "Assert"; + if (op->main.AsCastParam()->dstT == MNN::DataType_DT_INT32 && op->main.AsCastParam()->srcT == MNN::DataType_DT_INT64) { + return true; } - return false; - }; - std::set uselessIndex; - for (auto iter = net->oplists.begin(); iter != net->oplists.end();) { - auto& op = *iter; - bool shouldDelete = shouldDeleteJudge(op.get()); - if (!shouldDelete) { - iter++; - continue; + if (op->main.AsCastParam()->srcT == MNN::DataType_DT_INT32 && op->main.AsCastParam()->dstT == MNN::DataType_DT_INT64) { + return true; } - bool deleteOutput = shouldDeleteOutput(op.get()); - // Find the next op - if (op->outputIndexes.empty() || op->inputIndexes.empty()) { - iter = net->oplists.erase(iter); - continue; + } + return false; + }; + bool shouldRemoveUnusefulInputs(const MNN::OpT* op) const override { + if (op->type == OpType_Extra) { + if (op->main.AsExtra()->type == "Assert") { + return true; } - - auto originInput = op->inputIndexes[0]; - auto originOutputs = op->outputIndexes; - for (auto subIter = net->oplists.begin(); subIter != net->oplists.end(); subIter++) { - auto& subOp = *subIter; - if (deleteOutput) { - for (auto iter=subOp->inputIndexes.begin(); iter != subOp->inputIndexes.end();) { - if (std::find(originOutputs.begin(), originOutputs.end(), *iter) != originOutputs.end()) { - iter = subOp->inputIndexes.erase(iter); - continue; - } - iter++; - } - } else { - for (int v = 0; v < subOp->inputIndexes.size(); ++v) { - if (std::find(originOutputs.begin(), originOutputs.end(), subOp->inputIndexes[v]) != originOutputs.end()) { - subOp->inputIndexes[v] = originInput; - } - } - } + if (op->main.AsExtra()->type == "NoOp") { + return true; } - bool removeUselessInput = shouldRemoveUnusefulInputs(op.get()); - if 
(removeUselessInput) { - for (int index = 0; index < op->inputIndexes.size(); ++index) { - uselessIndex.insert(op->inputIndexes[index]); - } + if (op->main.AsExtra()->type == "Print") { + return true; } - iter = net->oplists.erase(iter); - } - for (auto iter = net->oplists.begin(); iter != net->oplists.end(); iter++) { - for (auto index : (*iter)->inputIndexes) { - if (uselessIndex.find(index) != uselessIndex.end()) { - uselessIndex.erase(index); - } + if (op->main.AsExtra()->type == "StopGradient") { + return true; } } - - for (auto iter = net->oplists.begin(); iter != net->oplists.end();) { - auto& op = *iter; - bool useless = true; - for (auto index : op->outputIndexes) { - if (uselessIndex.find(index) == uselessIndex.end()) { - useless = false; - break; - } - } - if (!useless) { - iter++; - continue; - } - iter = net->oplists.erase(iter); + return false; + }; + bool shouldDeleteOutput(const MNN::OpT* op) const override { + if (op->type == OpType_Extra) { + return op->main.AsExtra()->type == "Assert"; } - - return true; - } + return false; + }; }; static PostConverterRegister __l("RemoveUnusefulOp"); diff --git a/tools/converter/source/optimizer/postconvert/ResolveTfShapeN.cpp b/tools/converter/source/optimizer/postconvert/ResolveTfShapeN.cpp new file mode 100644 index 000000000..5a938ad6b --- /dev/null +++ b/tools/converter/source/optimizer/postconvert/ResolveTfShapeN.cpp @@ -0,0 +1,66 @@ +// +// ResolveTfShapeN.cpp +// MNNConverter +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "../PostTreatUtils.hpp" +#include "flatbuffers/util.h" + +class ResolveTfShapeN : public PostConverter { +public: + virtual bool onExecute(std::unique_ptr& net) const override { + if (net->sourceType != MNN::NetSource_TENSORFLOW) { + return true; + } + + std::set readyToDelete; + + for (auto iter = net->oplists.begin(); iter != net->oplists.end();) { + const auto op = iter->get(); + if (op->type != MNN::OpType_Extra) { + iter++; + continue; + } + auto attr = op->main.AsExtra(); + auto& optype = attr->type; + if (optype != "ShapeN") { + iter++; + continue; + } + int shapeNumber = 1; + const int attrSize = attr->attr.size(); + for (int k = 0; k < attrSize; ++k) { + auto& key = attr->attr[k]->key; + if (key == "N") { + shapeNumber = attr->attr[k]->i; + } + } + readyToDelete.insert(op); + // expand ShapeN to N Shapes + // insert N Shape before ShapeN, then delete the shapeN + for (int i = 0; i < shapeNumber; ++i) { + std::unique_ptr curShape(new MNN::OpT); + curShape->name = op->name + flatbuffers::NumToString(i); + curShape->type = MNN::OpType_Shape; + curShape->main.value = nullptr; + curShape->inputIndexes.push_back(op->inputIndexes[i]); + curShape->outputIndexes.push_back(op->outputIndexes[i]); + iter = net->oplists.insert(iter, std::move(curShape)); + iter++; + } + + iter++; + } + + for (auto op : readyToDelete) { + PostTreatUtils::_removeOpInNet(op, net.get()); + } + + return true; + } +}; + +static PostConverterRegister __shapen("ResolveTfShapeN"); diff --git a/tools/converter/source/optimizer/postconvert/TransformInnerProduct.cpp b/tools/converter/source/optimizer/postconvert/TransformInnerProduct.cpp index 46fa85ac2..0e51adbf2 100644 --- a/tools/converter/source/optimizer/postconvert/TransformInnerProduct.cpp +++ b/tools/converter/source/optimizer/postconvert/TransformInnerProduct.cpp @@ -112,6 +112,7 @@ class TransformInnerProduct : public PostConverter { convP->common->strideY = 1; 
convP->common->group = 1; convP->common->outputCount = originInner->outputCount; + convP->common->inputCount = originInner->weight.size() / originInner->outputCount; convP->common->padX = 0; convP->common->padY = 0; convP->common->padMode = MNN::PadMode_CAFFE; @@ -152,7 +153,7 @@ class TransformInnerProduct : public PostConverter { if (axis + 1 != 4) { MNN::OpT* afterReshapeT = new MNN::OpT; - reshapeT->name = "____reshape2____" + op->name; + afterReshapeT->name = "____reshape2____" + op->name; auto reshapeP = new MNN::ReshapeT; reshapeP->dims.resize(axis + 1); for (int i = 0; i < axis; ++i) { diff --git a/tools/converter/source/optimizer/tfextra/TFApproximateEqual.cpp b/tools/converter/source/optimizer/tfextra/TFApproximateEqual.cpp new file mode 100644 index 000000000..c16f4bd23 --- /dev/null +++ b/tools/converter/source/optimizer/tfextra/TFApproximateEqual.cpp @@ -0,0 +1,47 @@ +// +// TFApproximateEqual.cpp +// MNNConverter +// +// Created by MNN on 2019/12/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "TFExtraManager.hpp" +#include "MNN_generated.h" + +namespace MNN { +namespace Express { + +class ApproximateEqualTransform : public TFExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + + float tolerance = 1e-5; + auto extra = op->main_as_Extra(); + if (nullptr != extra->attr()) { + for (int i=0; iattr()->size(); ++i) { + auto attr = extra->attr()->GetAs(i); + if (attr->key()->str() == "tolerance") { + tolerance = attr->f(); + } + } + } + + auto diff = _Abs(_Subtract(inputs[0], inputs[1])); + auto output = _Less(diff, _Const(tolerance)); + + auto newExpr = output->expr().first; + return newExpr; + } +}; + +static auto gRegister = []() { + TFExtraManager::get()->insert("ApproximateEqual", std::shared_ptr(new ApproximateEqualTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git 
a/tools/converter/source/optimizer/tfextra/TFArgMaxMerge.cpp b/tools/converter/source/optimizer/tfextra/TFArgMaxMerge.cpp index ae12de258..665d55dfb 100644 --- a/tools/converter/source/optimizer/tfextra/TFArgMaxMerge.cpp +++ b/tools/converter/source/optimizer/tfextra/TFArgMaxMerge.cpp @@ -14,12 +14,21 @@ namespace MNN { namespace Express { class ArgMaxTransform : public TFExtraManager::Transform { public: + enum ArgMinOrMax { + ARGMIN, + ARGMAX + }; + ArgMaxTransform(ArgMinOrMax mode) : mMode(mode) {} virtual EXPRP onExecute(EXPRP expr) const override { auto inputs = expr->inputs(); auto op = expr->get(); std::vector subInputs = {inputs[0]}; std::unique_ptr ArgMaxOp(new OpT); - ArgMaxOp->type = OpType_ArgMax; + if (mMode == ARGMIN) { + ArgMaxOp->type = OpType_ArgMin; + } else { + ArgMaxOp->type = OpType_ArgMax; + } ArgMaxOp->name = op->name()->str(); ArgMaxOp->main.type = OpParameter_ArgMax; ArgMaxOp->main.value = new ArgMaxT; @@ -37,9 +46,15 @@ class ArgMaxTransform : public TFExtraManager::Transform { auto newExpr = Expr::create(ArgMaxOp.get(), subInputs, expr->outputSize()); return newExpr; } + +private: + ArgMinOrMax mMode; }; static auto gRegister = []() { - TFExtraManager::get()->insert("ArgMax", std::shared_ptr(new ArgMaxTransform)); + TFExtraManager::get()->insert("ArgMin", std::shared_ptr( + new ArgMaxTransform(ArgMaxTransform::ArgMinOrMax::ARGMIN))); + TFExtraManager::get()->insert("ArgMax", std::shared_ptr( + new ArgMaxTransform(ArgMaxTransform::ArgMinOrMax::ARGMAX))); return true; }(); } diff --git a/tools/converter/source/optimizer/tfextra/TFBatchNormalMerge.cpp b/tools/converter/source/optimizer/tfextra/TFBatchNormalMerge.cpp index 99b265272..41639d3cd 100644 --- a/tools/converter/source/optimizer/tfextra/TFBatchNormalMerge.cpp +++ b/tools/converter/source/optimizer/tfextra/TFBatchNormalMerge.cpp @@ -101,6 +101,7 @@ class BatchNormalTransform : public TFExtraManager::Transform { }; static auto gRegister = []() { 
TFExtraManager::get()->insert("FusedBatchNorm", std::shared_ptr(new BatchNormalTransform)); + TFExtraManager::get()->insert("FusedBatchNormV3", std::shared_ptr(new BatchNormalTransform)); return true; }(); } diff --git a/tools/converter/source/optimizer/tfextra/TFConcatMerge.cpp b/tools/converter/source/optimizer/tfextra/TFConcatMerge.cpp index f689bb7b7..852a49699 100644 --- a/tools/converter/source/optimizer/tfextra/TFConcatMerge.cpp +++ b/tools/converter/source/optimizer/tfextra/TFConcatMerge.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "TFExtraManager.hpp" #include "MNN_generated.h" +#include "TFExtraManager.hpp" namespace MNN { namespace Express { @@ -15,42 +15,55 @@ class ConcatTransform : public TFExtraManager::Transform { public: virtual EXPRP onExecute(EXPRP expr) const override { auto op = expr->get(); - MNN_ASSERT(op->type() == OpType_Extra); - auto type = op->main_as_Extra()->type()->str(); - auto inputs = expr->inputs(); - MNN_ASSERT(inputs.size() > 1); - auto axisNode = inputs[0]; - std::vector subInputs; - if (type == "ConcatV2") { - axisNode = inputs[inputs.size() - 1]; - for (int i = 0; i < inputs.size() - 1; ++i) { - subInputs.emplace_back(inputs[i]); - } - } else { - for (int i = 0; i < inputs.size() - 1; ++i) { - subInputs.emplace_back(inputs[i]); - } - } - auto axisPtr = axisNode->readMap(); - if (nullptr == axisPtr) { - MNN_ERROR("Don't Support Axis not const for concat\n"); - return nullptr; - } - std::unique_ptr newOp(new OpT); - newOp->name = op->name()->str(); - newOp->type = OpType_Concat; - newOp->main.type = OpParameter_Axis; - newOp->main.value = new AxisT; - newOp->main.AsAxis()->axis = axisPtr[0]; - auto newExpr = Expr::create(newOp.get(), subInputs, 1); - return newExpr; + MNN_ASSERT(op->type() == OpType_Extra); + auto type = op->main_as_Extra()->type()->str(); + auto inputs = expr->inputs(); + MNN_ASSERT(inputs.size() > 1); + auto axisNode = inputs[0]; + std::vector subInputs; + if (type == 
"ConcatV2") { + axisNode = inputs[inputs.size() - 1]; + for (int i = 0; i < inputs.size() - 1; ++i) { + subInputs.emplace_back(inputs[i]); + } + } else if (type == "Concat") { + for (int i = 0; i < inputs.size() - 1; ++i) { + subInputs.emplace_back(inputs[i]); + } + } else { + for (int i = 0; i < inputs.size(); ++i) { + subInputs.emplace_back(inputs[i]); + } + } + + const int* axisPtr = nullptr; + if (type != "ParallelConcat") { + axisPtr = axisNode->readMap(); + if (nullptr == axisPtr) { + MNN_ERROR("Don't Support Axis not const for concat\n"); + return nullptr; + } + } + + std::unique_ptr newOp(new OpT); + newOp->name = op->name()->str(); + newOp->type = OpType_Concat; + newOp->main.type = OpParameter_Axis; + newOp->main.value = new AxisT; + if (type == "ParallelConcat") { + newOp->main.AsAxis()->axis = 0; + } else { + newOp->main.AsAxis()->axis = axisPtr[0]; + } + auto newExpr = Expr::create(newOp.get(), subInputs, 1); + return newExpr; } }; static auto gRegister = []() { TFExtraManager::get()->insert("ConcatV2", std::shared_ptr(new ConcatTransform)); TFExtraManager::get()->insert("Concat", std::shared_ptr(new ConcatTransform)); + TFExtraManager::get()->insert("ParallelConcat", std::shared_ptr(new ConcatTransform)); return true; }(); -} +} // namespace Express } // namespace MNN - diff --git a/tools/converter/source/optimizer/tfextra/TFConvolution3DMerge.cpp b/tools/converter/source/optimizer/tfextra/TFConvolution3DMerge.cpp new file mode 100644 index 000000000..c051df65d --- /dev/null +++ b/tools/converter/source/optimizer/tfextra/TFConvolution3DMerge.cpp @@ -0,0 +1,99 @@ +// +// TFConvolution3DMerge.cpp +// MNNConverter +// +// Created by MNN on 2019/12/03. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "TFExtraManager.hpp" +#include "MNN_generated.h" + +namespace MNN { +namespace Express { + +class Convolution3DTransform : public TFExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto weight = inputs[1]; + auto weightInfo = weight->getInfo(); + auto weightTensorData = weight->readMap(); + if (nullptr == weightInfo || nullptr == weightTensorData) { + MNN_ERROR("For %s Convolution3D weight is not const\n", expr->name().c_str()); + return nullptr; + } + + std::unique_ptr conv3d(new MNN::Convolution3DT); + int depth = weightInfo->dim[0]; + int kh = weightInfo->dim[1]; + int kw = weightInfo->dim[2]; + int num_input = weightInfo->dim[3]; + int num_output = weightInfo->dim[4]; + weight = _Transpose(weight, {4, 3, 0, 1, 2}); + weightInfo = weight->getInfo(); + weightTensorData = weight->readMap(); + conv3d->bias.resize(num_output); + std::fill(conv3d->bias.begin(), conv3d->bias.end(), 0.0f); + + conv3d->weight.resize(weightInfo->size); + ::memcpy(conv3d->weight.data(), weightTensorData, weightInfo->size * sizeof(float)); + conv3d->common.reset(new MNN::Convolution3DCommonT); + auto common = conv3d->common.get(); + + common->relu = common->relu6 = false; + common->outputCount = num_output; + common->inputCount = num_input; + common->kernels = std::vector({depth, kh, kw}); + + auto extra = op->main_as_Extra(); + if (extra == nullptr || extra->attr() == nullptr) { + return nullptr; + } + for (int i = 0; i < extra->attr()->size(); ++i) { + auto attr = extra->attr()->GetAs(i); + const auto key = attr->key()->str(); + if (key == "dilations" || key == "rates") { + auto values = attr->list()->i()->data(); + if (values[1] != 1 || values[2] != 1 || values[3] != 1) { + MNN_ERROR("conv3d not support dilation bigger than 1\n"); + return nullptr; + } + common->dilates = std::vector({values[1], values[2], 
values[3]}); + } else if (key == "strides") { + auto values = attr->list()->i()->data(); + if (values[1] != 1 || values[2] != 1 || values[3] != 1) { + MNN_ERROR("conv3d not support strides bigger than 1\n"); + return nullptr; + } + common->strides = std::vector({values[1], values[2], values[3]}); + } else if (key == "padding") { + common->padMode = MNN::PadMode_SAME; + auto paddingType = attr->s()->str(); + if (paddingType == "VALID") { + common->padMode = MNN::PadMode_VALID; + common->pads = std::vector({0, 0, 0}); + } + } + } + + std::unique_ptr newOp(new OpT); + newOp->name = expr->name(); + newOp->type = OpType_Convolution3D; + newOp->main.type = OpParameter_Convolution3D; + newOp->main.value = conv3d.release(); + + auto newExpr = Expr::create(newOp.get(), {inputs[0]}, 1); + return newExpr; + } +}; + +static auto gRegister = []() { + TFExtraManager::get()->insert("Conv3D", std::shared_ptr(new Convolution3DTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/tfextra/TFConvolutionMerge.cpp b/tools/converter/source/optimizer/tfextra/TFConvolutionMerge.cpp index 89ad41225..1e35a7d31 100644 --- a/tools/converter/source/optimizer/tfextra/TFConvolutionMerge.cpp +++ b/tools/converter/source/optimizer/tfextra/TFConvolutionMerge.cpp @@ -21,7 +21,10 @@ static bool _writeCommonAttr(Convolution2DCommonT* common, const Extra* extra, c auto attr = extra->attr()->GetAs(v); const auto key = attr->key()->str(); auto list = attr->list(); - if (key == "rate") { + // "rates" for tf.nn.atrous_conv2d + // "dilations" for tf.nn.conv2d or tf.nn.dilation2d or tf.nn.conv2d_transpose + // "rate" has been here when I change the code, so I reserve it though I don't know where use it + if (key == "rate" || key == "rates" || key == "dilations") { common->dilateX = list->i()->data()[2]; common->dilateY = list->i()->data()[1]; } else if (key == "strides") { @@ -152,6 +155,13 @@ class DeconvolutionTransform : public 
TFExtraManager::Transform { public: virtual EXPRP onExecute(EXPRP expr) const override { auto op = expr->get(); + bool depthwise = false; + { + std::unique_ptr extraT(op->main_as_Extra()->UnPack()); + if(extraT->type == "DepthwiseConv2dNativeBackpropInput") { + depthwise = true; + } + } auto inputs = expr->inputs(); auto weight = inputs[1]; auto weightInfo = weight->getInfo(); @@ -164,7 +174,7 @@ class DeconvolutionTransform : public TFExtraManager::Transform { int kh = weightInfo->dim[0]; int kw = weightInfo->dim[1]; int num_input = weightInfo->dim[2]; - int num_output = weightInfo->dim[3];; + int num_output = weightInfo->dim[3]; weight = _Transpose(weight, {3, 2, 0, 1}); weightInfo = weight->getInfo(); weightTensorData = weight->readMap(); @@ -183,7 +193,6 @@ class DeconvolutionTransform : public TFExtraManager::Transform { common->kernelY = kh; common->padX = 0; common->padY = 0; - bool success = _writeCommonAttr(common, op->main_as_Extra(), op->name()->str()); if (!success) { return nullptr; @@ -192,6 +201,9 @@ class DeconvolutionTransform : public TFExtraManager::Transform { std::unique_ptr newOp(new OpT); newOp->name = expr->name(); newOp->type = OpType_Deconvolution; + if (depthwise) { + newOp->type = OpType_DeconvolutionDepthwise; + } newOp->main.type = OpParameter_Convolution2D; newOp->main.value = convolution2D.release(); if (inputs.size() == 2) { @@ -199,13 +211,71 @@ class DeconvolutionTransform : public TFExtraManager::Transform { } MNN_ASSERT(inputs.size() == 3); auto newExpr = Expr::create(newOp.get(), {inputs[2]}, 1); + /* check shape consistent between tf's output_shape attribute and MNN inferred output shape + * When stride > 1, one output-shape can be reached from (stride - 1) input-shapes + */ + auto output = Variable::create(newExpr); + auto outputInfo = output->getInfo(); + auto realOutputShape = inputs[0]->readMap(); + int inferHeight = outputInfo->dim[2], inferWidth = outputInfo->dim[3]; // MNN format NCHW + int realHeight = 
realOutputShape[1], realWidth = realOutputShape[2]; // tf format NHWC + if (realHeight != inferHeight || realWidth != inferWidth) { + MNN_ERROR("==== output_shape is not consistent with inferred output shape in MNN. ====\n"); + MNN_ERROR("====(height,width): (%d,%d) vs (%d,%d)\n ====", realHeight, realWidth, inferHeight, inferWidth); + return nullptr; + } return newExpr; } }; + +class Dilation2DTransform : public TFExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto op = expr->get(); + auto inputs = expr->inputs(); + auto weight = inputs[1]; + auto weightInfo = weight->getInfo(); + auto weightTensorData = weight->readMap(); + if (nullptr == weightInfo || nullptr == weightTensorData) { + MNN_ERROR("For %s convolution weight is not const\n", expr->name().c_str()); + return nullptr; + } + std::unique_ptr convolution2D(new MNN::Convolution2DT); + int kh = weightInfo->dim[0]; + int kw = weightInfo->dim[1]; + int depth = weightInfo->dim[2]; + weight = _Transpose(weight, {2, 0, 1}); + weightInfo = weight->getInfo(); + weightTensorData = weight->readMap(); + convolution2D->weight.resize(weightInfo->size); + ::memcpy(convolution2D->weight.data(), weightTensorData, weightInfo->size * sizeof(float)); + convolution2D->common.reset(new MNN::Convolution2DCommonT); + auto common = convolution2D->common.get(); + common->outputCount = depth; + common->kernelX = kw; + common->kernelY = kh; + + bool success = _writeCommonAttr(common, op->main_as_Extra(), op->name()->str()); + if (!success) { + return nullptr; + } + + std::unique_ptr newOp(new OpT); + newOp->name = expr->name(); + newOp->type = OpType_Dilation2D; + newOp->main.type = OpParameter_Convolution2D; + newOp->main.value = convolution2D.release(); + + return Expr::create(newOp.get(), {inputs[0]}, 1); + } +}; + static auto gRegister = []() { TFExtraManager::get()->insert("Conv2D", std::shared_ptr(new ConvolutionTransform)); TFExtraManager::get()->insert("Conv2DBackpropInput", 
std::shared_ptr(new DeconvolutionTransform)); TFExtraManager::get()->insert("DepthwiseConv2dNative", std::shared_ptr(new ConvolutionDepthwiseTransform)); + TFExtraManager::get()->insert("DepthwiseConv2dNativeBackpropInput", std::shared_ptr(new DeconvolutionTransform)); + TFExtraManager::get()->insert("Dilation2D", std::shared_ptr(new Dilation2DTransform)); return true; }(); } diff --git a/tools/converter/source/optimizer/tfextra/TFExtraManager.cpp b/tools/converter/source/optimizer/tfextra/TFExtraManager.cpp index 5635bfd3c..d21f4176f 100644 --- a/tools/converter/source/optimizer/tfextra/TFExtraManager.cpp +++ b/tools/converter/source/optimizer/tfextra/TFExtraManager.cpp @@ -35,9 +35,9 @@ std::shared_ptr TFExtraManager::find(const std::strin static auto gRegister = []() { auto extra = TFExtraManager::get(); - auto judge = [extra](VARP var) { - auto op = var->expr().first->get(); - if (op->type() != OpType_Extra) { + auto judge = [extra](EXPRP expr) { + auto op = expr->get(); + if (nullptr == op || op->type() != OpType_Extra) { return false; } auto engine = op->main_as_Extra()->engine()->str(); @@ -50,27 +50,19 @@ static auto gRegister = []() { } return true; }; - auto modify = [extra](VARP var) { - auto op = var->expr().first->get(); + auto modify = [extra](EXPRP expr) { + auto op = expr->get(); MNN_ASSERT(op->type() == OpType_Extra); auto type = op->main_as_Extra()->type()->str(); auto transformer = extra->find(type); MNN_ASSERT(nullptr != transformer); - auto newExpr = transformer->onExecute(var->expr().first); + auto newExpr = transformer->onExecute(expr); if (nullptr == newExpr) { - MNN_ERROR("Converte Tensorflow's Op %s , type = %s, failed, may be some node is not const\n", var->expr().first->name().c_str(), type.c_str()); + MNN_ERROR("Converte Tensorflow's Op %s , type = %s, failed, may be some node is not const\n", expr->name().c_str(), type.c_str()); return false; } - newExpr->setName(var->expr().first->name()); - auto outputs = 
var->expr().first->outputs(); - for (auto weakVar : outputs) { - auto var = weakVar.lock(); - if (nullptr == var) { - continue; - } - auto index = var->expr().second; - Variable::setExpr(var, newExpr, index); - } + newExpr->setName(expr->name()); + Expr::replace(expr, newExpr); return true; }; TemplateMerge::getInstance("TFExtra").insertTemplate("TFExtraManager", judge, modify); diff --git a/tools/converter/source/optimizer/tfextra/TFExtraManager.hpp b/tools/converter/source/optimizer/tfextra/TFExtraManager.hpp index 639f38a4e..6630f2158 100644 --- a/tools/converter/source/optimizer/tfextra/TFExtraManager.hpp +++ b/tools/converter/source/optimizer/tfextra/TFExtraManager.hpp @@ -7,7 +7,7 @@ // #include "../TemplateMerge.hpp" -#include "ExprCreator.hpp" +#include namespace MNN { namespace Express { class TFExtraManager { @@ -16,10 +16,10 @@ class TFExtraManager { public: virtual ~ Transform() = default; Transform() = default; - + virtual EXPRP onExecute(EXPRP expr) const = 0; }; - + void insert(const std::string& name, std::shared_ptr transform); std::shared_ptr find(const std::string& name) const; static std::shared_ptr get(); diff --git a/tools/converter/source/optimizer/tfextra/TFExtraOp.cpp b/tools/converter/source/optimizer/tfextra/TFExtraOp.cpp index e88f65e14..383f61047 100644 --- a/tools/converter/source/optimizer/tfextra/TFExtraOp.cpp +++ b/tools/converter/source/optimizer/tfextra/TFExtraOp.cpp @@ -10,7 +10,7 @@ #include "TFExtraManager.hpp" #include "MNN_generated.h" -#include "ExprCreator.hpp" +#include namespace MNN { namespace Express { @@ -51,7 +51,7 @@ class LogicalNotTransform : public TFExtraManager::Transform { auto inputs = expr->inputs(); auto one = _Const(-1.0f); auto floatCast = _Cast(inputs[0], DataType_DT_BOOL, DataType_DT_FLOAT); - auto floatCompute = _Neg(_Add(floatCast, one)); + auto floatCompute = _Negative(_Add(floatCast, one)); auto newVar = _Cast(floatCompute, DataType_DT_FLOAT, DataType_DT_BOOL); return newVar->expr().first; } diff 
--git a/tools/converter/source/optimizer/tfextra/TFSoftplus.cpp b/tools/converter/source/optimizer/tfextra/TFSoftplus.cpp new file mode 100644 index 000000000..0abefc456 --- /dev/null +++ b/tools/converter/source/optimizer/tfextra/TFSoftplus.cpp @@ -0,0 +1,31 @@ +// +// TFSoftplus.cpp +// MNNConverter +// +// Created by MNN on 2019/12/05. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "TFExtraManager.hpp" +#include "MNN_generated.h" + +namespace MNN { +namespace Express { + +class SoftplusTransform : public TFExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto input = expr->inputs()[0]; + auto newExpr = _Softplus(input)->expr().first; + return newExpr; + } +}; + +static auto gRegister = []() { + TFExtraManager::get()->insert("Softplus", std::shared_ptr(new SoftplusTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/optimizer/tfextra/TFSoftsign.cpp b/tools/converter/source/optimizer/tfextra/TFSoftsign.cpp new file mode 100644 index 000000000..c0df99762 --- /dev/null +++ b/tools/converter/source/optimizer/tfextra/TFSoftsign.cpp @@ -0,0 +1,31 @@ +// +// TFSoftsign.cpp +// MNNConverter +// +// Created by MNN on 2019/12/05. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "TFExtraManager.hpp" +#include "MNN_generated.h" + +namespace MNN { +namespace Express { + +class SoftsignTransform : public TFExtraManager::Transform { +public: + virtual EXPRP onExecute(EXPRP expr) const override { + auto input = expr->inputs()[0]; + auto newExpr = _Softsign(input)->expr().first; + return newExpr; + } +}; + +static auto gRegister = []() { + TFExtraManager::get()->insert("Softsign", std::shared_ptr(new SoftsignTransform)); + return true; +}(); + +} // namespace Express +} // namespace MNN diff --git a/tools/converter/source/tensorflow/AddNTf.cpp b/tools/converter/source/tensorflow/AddNTf.cpp new file mode 100644 index 000000000..65be19c28 --- /dev/null +++ b/tools/converter/source/tensorflow/AddNTf.cpp @@ -0,0 +1,34 @@ +// +// AddNTf.cpp +// MNNConverter +// +// Created by MNN on 2019/12/10. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "TfUtils.hpp" +#include "tfOpConverter.hpp" +#include +#include +#include "graph.pb.h" + +using namespace MNN; + +DECLARE_OP_CONVERTER(AddNTf); + +MNN::OpType AddNTf::opType() { + return MNN::OpType_Eltwise; +} + +MNN::OpParameter AddNTf::type() { + return MNN::OpParameter_Eltwise; +} + +void AddNTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { + auto elt = new MNN::EltwiseT; + dstOp->main.value = elt; + elt->type = MNN::EltwiseType_SUM; +} + +REGISTER_CONVERTER(AddNTf, AddN); +REGISTER_CONVERTER(AddNTf, AccumulateNV2); diff --git a/tools/converter/source/tensorflow/BatchMatMulTf.cpp b/tools/converter/source/tensorflow/BatchMatMulTf.cpp index fafab6e14..8b448999c 100644 --- a/tools/converter/source/tensorflow/BatchMatMulTf.cpp +++ b/tools/converter/source/tensorflow/BatchMatMulTf.cpp @@ -37,3 +37,4 @@ void BatchMatMulTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { } REGISTER_CONVERTER(BatchMatMulTf, BatchMatMul); +REGISTER_CONVERTER(BatchMatMulTf, BatchMatMulV2); diff --git a/tools/converter/source/tensorflow/BinaryOpTf.cpp 
b/tools/converter/source/tensorflow/BinaryOpTf.cpp index 4941027fb..33069285a 100644 --- a/tools/converter/source/tensorflow/BinaryOpTf.cpp +++ b/tools/converter/source/tensorflow/BinaryOpTf.cpp @@ -53,6 +53,18 @@ void BinartOpTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { parameter->opType = MNN::BinaryOpOperation_SquaredDifference; } else if (srcNode->opType == "Pow") { parameter->opType = MNN::BinaryOpOperation_POW; + } else if (srcNode->opType == "AddV2") { + parameter->opType = MNN::BinaryOpOperation_ADD; + } else if (srcNode->opType == "Atan2") { + parameter->opType = MNN::BinaryOpOperation_ATAN2; + } else if (srcNode->opType == "LogicalOr") { + parameter->opType = MNN::BinaryOpOperation_LOGICALOR; + } else if (srcNode->opType == "NotEqual") { + parameter->opType = MNN::BinaryOpOperation_NOTEQUAL; + } else if (srcNode->opType == "TruncateDiv") { + parameter->opType = MNN::BinaryOpOperation_REALDIV; + } else if (srcNode->opType == "Mod") { + parameter->opType = MNN::BinaryOpOperation_MOD; } else { DLOG(ERROR) << "MNN Converter Not " "Supported!!!"; @@ -82,3 +94,9 @@ REGISTER_CONVERTER(BinartOpTf, FloorDiv); REGISTER_CONVERTER(BinartOpTf, FloorMod); REGISTER_CONVERTER(BinartOpTf, SquaredDifference); REGISTER_CONVERTER(BinartOpTf, Pow); +REGISTER_CONVERTER(BinartOpTf, AddV2); +REGISTER_CONVERTER(BinartOpTf, Atan2); +REGISTER_CONVERTER(BinartOpTf, LogicalOr); +REGISTER_CONVERTER(BinartOpTf, NotEqual); +REGISTER_CONVERTER(BinartOpTf, TruncateDiv); +REGISTER_CONVERTER(BinartOpTf, Mod); diff --git a/tools/converter/source/tensorflow/BroadcastToTf.cpp b/tools/converter/source/tensorflow/BroadcastToTf.cpp new file mode 100644 index 000000000..a88bb578d --- /dev/null +++ b/tools/converter/source/tensorflow/BroadcastToTf.cpp @@ -0,0 +1,26 @@ +// +// BroadcastToTf.cpp +// MNNConverter +// +// Created by MNN on 2019/12/2. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "tfOpConverter.hpp" + +#include "graph.pb.h" + +DECLARE_OP_CONVERTER(BroadcastToTf); + +MNN::OpType BroadcastToTf::opType() { + return MNN::OpType_BroadcastTo; +} +MNN::OpParameter BroadcastToTf::type() { + return MNN::OpParameter_NONE; +} + +void BroadcastToTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { + dstOp->main.value = nullptr; +} + +REGISTER_CONVERTER(BroadcastToTf, BroadcastTo); diff --git a/tools/converter/source/tensorflow/CMakeLists.txt b/tools/converter/source/tensorflow/CMakeLists.txt index f6d702ff2..97d75181f 100644 --- a/tools/converter/source/tensorflow/CMakeLists.txt +++ b/tools/converter/source/tensorflow/CMakeLists.txt @@ -1,48 +1,18 @@ - -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -if (MSVC OR WIN32) - set(Protobuf_SRC_ROOT_FOLDER $ENV{Protobuf_SRC_ROOT_FOLDER}) -endif() -find_package(Protobuf REQUIRED) -if (${CMAKE_VERSION} VERSION_LESS "3.6.0") - set(Protobuf_LIBRARIES ${PROTOBUF_LIBRARIES}) - set(Protobuf_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS}) -endif() - -include_directories(${Protobuf_INCLUDE_DIRS}) - protobuf_generate_cpp(TENSORFLOW_PROTO_SRCS TENSORFLOW_PROTO_HDRS - attr_value.proto - function.proto - graph.proto - node_def.proto - op_def.proto - resource_handle.proto - tensor.proto - tensor_shape.proto - types.proto - versions.proto -) - -file(GLOB TENSORFLOW_SRC ${SRC_PATH}/tensorflow/*) - -if(MNN_BUILD_SHARED_LIBS) - add_library(tensorflow SHARED ${TENSORFLOW_SRC} ${TENSORFLOW_PROTO_SRCS}) -else() - add_library(tensorflow STATIC ${TENSORFLOW_SRC} ${TENSORFLOW_PROTO_SRCS}) -endif() - -if (MSVC OR WIN32) - target_compile_options(tensorflow PRIVATE "/wd4267" "/wd4244" "/wd4305") -endif() - -target_include_directories(tensorflow PRIVATE - ${Protobuf_INCLUDE_DIRS} - ${CMAKE_CURRENT_BINARY_DIR} - ${SRC_PATH}/IR - ${SRC_PATH}/include + ${CMAKE_CURRENT_LIST_DIR}/attr_value.proto + ${CMAKE_CURRENT_LIST_DIR}/function.proto + ${CMAKE_CURRENT_LIST_DIR}/graph.proto + 
${CMAKE_CURRENT_LIST_DIR}/node_def.proto + ${CMAKE_CURRENT_LIST_DIR}/op_def.proto + ${CMAKE_CURRENT_LIST_DIR}/resource_handle.proto + ${CMAKE_CURRENT_LIST_DIR}/tensor.proto + ${CMAKE_CURRENT_LIST_DIR}/tensor_shape.proto + ${CMAKE_CURRENT_LIST_DIR}/types.proto + ${CMAKE_CURRENT_LIST_DIR}/versions.proto ) -target_link_libraries(tensorflow ${Protobuf_LIBRARIES}) +file(GLOB TENSORFLOW_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) +add_library(MNNConverterTF OBJECT ${TENSORFLOW_SRC} ${TENSORFLOW_PROTO_SRCS}) +target_include_directories(MNNConverterTF PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/) +list(APPEND MNN_CONVERTER_BACKENDS_OBJECTS $) +list(APPEND MNN_CONVERTER_BACKENDS_TARGETS MNNConverterTF) diff --git a/tools/converter/source/tensorflow/ConstTf.cpp b/tools/converter/source/tensorflow/ConstTf.cpp index 5d1c62878..1f91b1f2c 100644 --- a/tools/converter/source/tensorflow/ConstTf.cpp +++ b/tools/converter/source/tensorflow/ConstTf.cpp @@ -34,3 +34,4 @@ void ConstTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { } REGISTER_CONVERTER(ConstTf, Const); +REGISTER_CONVERTER(ConstTf, HostConst); diff --git a/tools/converter/source/tensorflow/Detection_PostProcessTf.cpp b/tools/converter/source/tensorflow/Detection_PostProcessTf.cpp new file mode 100644 index 000000000..39bbf93a1 --- /dev/null +++ b/tools/converter/source/tensorflow/Detection_PostProcessTf.cpp @@ -0,0 +1,68 @@ +#include + +// +// Detection_PostProcessTf.cpp +// MNNConverter +// +// Created by MNN on 2019/11/21. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "TfUtils.hpp" +#include "tfOpConverter.hpp" + +#include "graph.pb.h" + +DECLARE_OP_CONVERTER(Detection_PostProcessTf); + +MNN::OpType Detection_PostProcessTf::opType(){ + return MNN::OpType_DetectionPostProcess; +} + +MNN::OpParameter Detection_PostProcessTf::type(){ + return MNN::OpParameter_DetectionPostProcessParam; +} + +void Detection_PostProcessTf::run(MNN::OpT *dstOp, TmpNode *srcNode){ + auto postProcessParam = new MNN::DetectionPostProcessParamT; + tensorflow::AttrValue value; + if(find_attr_value(srcNode->tfNode, "max_detections", value)){ + postProcessParam->maxDetections = value.i(); + } + if(find_attr_value(srcNode->tfNode, "max_classes_per_detection", value)){ + postProcessParam->maxClassesPerDetection = value.i(); + } + if(find_attr_value(srcNode->tfNode, "detections_per_class", value)){ + postProcessParam->detectionsPerClass = value.i(); + } + if(find_attr_value(srcNode->tfNode, "use_regular_nms", value)){ + postProcessParam->useRegularNMS = value.b(); + } + if(find_attr_value(srcNode->tfNode, "nms_score_threshold", value)){ + postProcessParam->nmsScoreThreshold = value.f(); + } + if(find_attr_value(srcNode->tfNode, "nms_iou_threshold", value)){ + postProcessParam->iouThreshold = value.f(); + } + if(find_attr_value(srcNode->tfNode, "num_classes", value)){ + postProcessParam->numClasses = value.i(); + } + if(find_attr_value(srcNode->tfNode, "y_scale", value)){ + postProcessParam->centerSizeEncoding.push_back(value.f()); + } + if(find_attr_value(srcNode->tfNode, "x_scale", value)){ + postProcessParam->centerSizeEncoding.push_back(value.f()); + } + if(find_attr_value(srcNode->tfNode, "h_scale", value)){ + postProcessParam->centerSizeEncoding.push_back(value.f()); + } + if(find_attr_value(srcNode->tfNode, "w_scale", value)){ + postProcessParam->centerSizeEncoding.push_back(value.f()); + } + dstOp->main.value = postProcessParam; + + // Detection_PostProcessTf output 4 tensors + 
dstOp->outputIndexes = {-1, -1, -1, -1}; +} + +REGISTER_CONVERTER(Detection_PostProcessTf, TFLite_Detection_PostProcess); diff --git a/tools/converter/source/tensorflow/LinSpaceTf.cpp b/tools/converter/source/tensorflow/LinSpaceTf.cpp new file mode 100644 index 000000000..540106ac8 --- /dev/null +++ b/tools/converter/source/tensorflow/LinSpaceTf.cpp @@ -0,0 +1,31 @@ +// +// LinSpaceTf.cpp +// MNNConverter +// +// Created by MNN on 2019/12/11. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "TfUtils.hpp" +#include "tfOpConverter.hpp" +#include +#include +#include "graph.pb.h" + +using namespace MNN; + +DECLARE_OP_CONVERTER(LinSpaceTf); + +MNN::OpType LinSpaceTf::opType() { + return MNN::OpType_LinSpace; +} + +MNN::OpParameter LinSpaceTf::type() { + return MNN::OpParameter_NONE; +} + +void LinSpaceTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { + dstOp->main.value = nullptr; +} + +REGISTER_CONVERTER(LinSpaceTf, LinSpace); diff --git a/tools/converter/source/tensorflow/MatMulTf.cpp b/tools/converter/source/tensorflow/MatMulTf.cpp index 6eee1839c..3bca68b2a 100644 --- a/tools/converter/source/tensorflow/MatMulTf.cpp +++ b/tools/converter/source/tensorflow/MatMulTf.cpp @@ -55,3 +55,4 @@ void MatBandPartTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { } REGISTER_CONVERTER(MatBandPartTf, MatrixBandPart); +REGISTER_CONVERTER(MatBandPartTf, BatchMatrixBandPart); diff --git a/tools/converter/source/tensorflow/OneHotTf.cpp b/tools/converter/source/tensorflow/OneHotTf.cpp new file mode 100644 index 000000000..74a02672a --- /dev/null +++ b/tools/converter/source/tensorflow/OneHotTf.cpp @@ -0,0 +1,39 @@ +// +// OneHotTf.cpp +// MNNConverter +// +// Created by MNN on 2019/11/29. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "TfUtils.hpp" +#include "tfOpConverter.hpp" + +#include "graph.pb.h" + +DECLARE_OP_CONVERTER(OneHotTf); + +MNN::OpType OneHotTf::opType() { + return MNN::OpType_OneHot; +} + +MNN::OpParameter OneHotTf::type() { + return MNN::OpParameter_OneHotParam; +} + +void OneHotTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { + auto param = new MNN::OneHotParamT; + + tensorflow::AttrValue value; + if (find_attr_value(srcNode->tfNode, "T", value)) { + param->dType = static_cast(value.type()); + } + + if (find_attr_value(srcNode->tfNode, "axis", value)) { + param->axis = value.i(); + } + + dstOp->main.value = param; +} + +REGISTER_CONVERTER(OneHotTf, OneHot); diff --git a/tools/converter/source/tensorflow/OpMapper.hpp b/tools/converter/source/tensorflow/OpMapper.hpp index a50a3a6b4..0b6430f89 100644 --- a/tools/converter/source/tensorflow/OpMapper.hpp +++ b/tools/converter/source/tensorflow/OpMapper.hpp @@ -107,7 +107,7 @@ const std::unordered_map tfOp2MNNOp{ {"SplitV", MNN::OpType_Slice}, {"FloorDiv", MNN::OpType_BinaryOp}, {"Moments", MNN::OpType_Moments}, - {"InstanceNorm", MNN::OpType_BatchNorm}, + {"InstanceNorm", MNN::OpType_InstanceNorm}, {"RNNSequenceGRU", MNN::OpType_RNNSequenceGRU}, {"BatchMatMul", MNN::OpType_BatchMatMul}, {"Pow", MNN::OpType_BinaryOp}, diff --git a/tools/converter/source/tensorflow/PadTf.cpp b/tools/converter/source/tensorflow/PadTf.cpp index e87a4e526..0f511bb51 100644 --- a/tools/converter/source/tensorflow/PadTf.cpp +++ b/tools/converter/source/tensorflow/PadTf.cpp @@ -15,11 +15,27 @@ MNN::OpType PadTf::opType() { return MNN::OpType_Padding; } MNN::OpParameter PadTf::type() { - return MNN::OpParameter_NONE; + return MNN::OpParameter_PadParam; } void PadTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { - // Do nothing + auto padparm = new MNN::PadParamT; + + padparm->mode = MNN::PadValueMode_CONSTANT; + if (srcNode->opType == "MirrorPad") { + tensorflow::AttrValue value; + if 
(find_attr_value(srcNode->tfNode, "mode", value)) { + if (value.s() == "SYMMETRIC") { + padparm->mode = MNN::PadValueMode_SYMMETRIC; + } else if (value.s() == "REFLECT") { + padparm->mode = MNN::PadValueMode_REFLECT; + } + } + } + + dstOp->main.value = padparm; } REGISTER_CONVERTER(PadTf, Pad); +REGISTER_CONVERTER(PadTf, PadV2); +REGISTER_CONVERTER(PadTf, MirrorPad); diff --git a/tools/converter/source/tensorflow/Pooling3DTf.cpp b/tools/converter/source/tensorflow/Pooling3DTf.cpp index bbc622b49..f42de9165 100644 --- a/tools/converter/source/tensorflow/Pooling3DTf.cpp +++ b/tools/converter/source/tensorflow/Pooling3DTf.cpp @@ -67,3 +67,4 @@ void Pooling3DTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { } REGISTER_CONVERTER(Pooling3DTf, MaxPool3D); +REGISTER_CONVERTER(Pooling3DTf, AvgPool3D); diff --git a/tools/converter/source/tensorflow/ScatterNdTf.cpp b/tools/converter/source/tensorflow/ScatterNdTf.cpp new file mode 100644 index 000000000..bc1b7546d --- /dev/null +++ b/tools/converter/source/tensorflow/ScatterNdTf.cpp @@ -0,0 +1,24 @@ +// +// ScatterNdTf.cpp +// MNNConverter +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "tfOpConverter.hpp" + +DECLARE_OP_CONVERTER(ScatterNdTf); + +MNN::OpType ScatterNdTf::opType() { + return MNN::OpType_ScatterNd; +} +MNN::OpParameter ScatterNdTf::type() { + return MNN::OpParameter_NONE; +} + +void ScatterNdTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { + dstOp->main.value = nullptr; +} + +REGISTER_CONVERTER(ScatterNdTf, ScatterNd); diff --git a/tools/converter/source/tensorflow/TmpGraph.cpp b/tools/converter/source/tensorflow/TmpGraph.cpp index e6761e485..6d6d3ea17 100644 --- a/tools/converter/source/tensorflow/TmpGraph.cpp +++ b/tools/converter/source/tensorflow/TmpGraph.cpp @@ -316,8 +316,8 @@ void TmpGraph::_genMinGraph() { DCHECK(inputNode->opType == "Const") << "FusedBatchNorm|SpaceToBatchND Lack Const Tensor"; inputNode->isCovered = true; } - } else if (typeOp == "Reshape" || typeOp == "ArgMax") { - DCHECK(curNode->inEdges.size() == 2) << "Reshape|ArgMax Should Have Two Input!!! ===> " << curNode->opName; + } else if (typeOp == "Reshape" || typeOp == "ArgMax" || typeOp == "ArgMin") { + DCHECK(curNode->inEdges.size() == 2) << "Reshape|ArgMax|ArgMin Should Have Two Input!!! ===> " << curNode->opName; TmpNode *shapeNode = this->_getTmpNode(curNode->inEdges[1]); // DCHECK(shapeNode->opType == "Const") << "Reshape Now Only Support // Const Shape Input!!! ===> " << curNode->opName; @@ -346,7 +346,7 @@ void TmpGraph::_genMinGraph() { DCHECK(dimInput->opType == "Const") << "Split Have no axis Input!!! 
=> " << curNode->opName; dimInput->isCovered = true; } else if (typeOp == "ResizeBilinear" || typeOp == "Mean" || typeOp == "Sum" || typeOp == "Max" || - typeOp == "Min" || typeOp == "Prod" || typeOp == "ArgMax" || typeOp == "Moments") { + typeOp == "Min" || typeOp == "Prod" || typeOp == "ArgMax" || typeOp == "ArgMin" || typeOp == "Moments") { // size input parentNode = this->_getTmpNode(curNode->inEdges[1]); // const op read diff --git a/tools/converter/source/tensorflow/UnaryOp.cpp b/tools/converter/source/tensorflow/UnaryOp.cpp index 5ddd2783a..5a8fc4f3c 100644 --- a/tools/converter/source/tensorflow/UnaryOp.cpp +++ b/tools/converter/source/tensorflow/UnaryOp.cpp @@ -57,10 +57,37 @@ void UnaryOpTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { parameter->opType = MNN::UnaryOpOperation_SIN; } else if (srcNode->opType == "ATan") { parameter->opType = MNN::UnaryOpOperation_ATAN; + } else if (srcNode->opType == "Acosh") { + parameter->opType = MNN::UnaryOpOperation_ACOSH; + } else if (srcNode->opType == "Sinh") { + parameter->opType = MNN::UnaryOpOperation_SINH; + } else if (srcNode->opType == "Asinh") { + parameter->opType = MNN::UnaryOpOperation_ASINH; + } else if (srcNode->opType == "Atanh") { + parameter->opType = MNN::UnaryOpOperation_ATANH; + } else if (srcNode->opType == "Sign") { + parameter->opType = MNN::UnaryOpOperation_SIGN; + } else if (srcNode->opType == "Round") { + parameter->opType = MNN::UnaryOpOperation_ROUND; + } else if (srcNode->opType == "Cosh") { + parameter->opType = MNN::UnaryOpOperation_COSH; + } else if (srcNode->opType == "Erf") { + parameter->opType = MNN::UnaryOpOperation_ERF; + } else if (srcNode->opType == "Erfc") { + parameter->opType = MNN::UnaryOpOperation_ERFC; + } else if (srcNode->opType == "Erfinv") { + parameter->opType = MNN::UnaryOpOperation_ERFINV; + } else if (srcNode->opType == "Expm1") { + parameter->opType = MNN::UnaryOpOperation_EXPM1; + } else if (srcNode->opType == "Inv") { + parameter->opType = 
MNN::UnaryOpOperation_RECIPROCAL; + // LogicalNot is handled in tfextra + // } else if (srcNode->opType == "LogicalNot") { + // parameter->opType = MNN::UnaryOpOperation_LOGICALNOT; } else { LOG(ERROR) << "MNN Converter Not " "Supported!!! UnaryOp: " - << srcNode->opType; + << srcNode->opType; } dstOp->main.value = parameter; @@ -80,3 +107,15 @@ REGISTER_CONVERTER(UnaryOpTf, Sin); REGISTER_CONVERTER(UnaryOpTf, ATan); REGISTER_CONVERTER(UnaryOpTf, Tan); REGISTER_CONVERTER(UnaryOpTf, Reciprocal); +REGISTER_CONVERTER(UnaryOpTf, Acosh); +REGISTER_CONVERTER(UnaryOpTf, Sinh); +REGISTER_CONVERTER(UnaryOpTf, Asinh); +REGISTER_CONVERTER(UnaryOpTf, Atanh); +REGISTER_CONVERTER(UnaryOpTf, Sign); +REGISTER_CONVERTER(UnaryOpTf, Round); +REGISTER_CONVERTER(UnaryOpTf, Cosh); +REGISTER_CONVERTER(UnaryOpTf, Erf); +REGISTER_CONVERTER(UnaryOpTf, Erfc); +REGISTER_CONVERTER(UnaryOpTf, Erfinv); +REGISTER_CONVERTER(UnaryOpTf, Expm1); +REGISTER_CONVERTER(UnaryOpTf, Inv); diff --git a/tools/converter/source/tensorflow/UnravelIndexTf.cpp b/tools/converter/source/tensorflow/UnravelIndexTf.cpp new file mode 100644 index 000000000..fbe229465 --- /dev/null +++ b/tools/converter/source/tensorflow/UnravelIndexTf.cpp @@ -0,0 +1,26 @@ +// +// UnravelIndexTf.cpp +// MNNConverter +// +// Created by MNN on 2019/11/26. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "TfUtils.hpp" +#include "tfOpConverter.hpp" + +DECLARE_OP_CONVERTER(UnravelIndexTf); + +MNN::OpType UnravelIndexTf::opType() { + return MNN::OpType_UnravelIndex; +} + +MNN::OpParameter UnravelIndexTf::type() { + return MNN::OpParameter_NONE; +} + +void UnravelIndexTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { + return; +} + +REGISTER_CONVERTER(UnravelIndexTf, UnravelIndex); diff --git a/tools/converter/source/tensorflow/tensorflowConverter.cpp b/tools/converter/source/tensorflow/tensorflowConverter.cpp index 4e6117e67..d3d41d8ec 100644 --- a/tools/converter/source/tensorflow/tensorflowConverter.cpp +++ b/tools/converter/source/tensorflow/tensorflowConverter.cpp @@ -46,10 +46,16 @@ int tensorflow2MNNNet(const std::string inputModel, const std::string bizCode, s op->type = creator->opType(); op->main.type = creator->type(); nodes.insert(std::make_pair(tfNode.name(), op)); - creator->run(op, tempNode.get()); + + // resize the inputIndexes and outputIndexes auto inputSize = tfNode.input_size(); op->inputIndexes.resize(inputSize); + // -1 is placeholder value, and the number of -1 is the number of output tensors + // defalut: every op output one tensor, if the number of the output tensors is bigger than 1, set the outputIndexes in the op converter(void run(MNN::OpT *dstOp, TmpNode *srcNode)) op->outputIndexes = {-1}; + + creator->run(op, tempNode.get()); + for (int j = 0; j < inputSize; j++) { std::string inputName = tfNode.input(j); // may be input or input:0 or input:1 // delete the name that has "^" diff --git a/tools/converter/source/tflite/BinaryTflite.cpp b/tools/converter/source/tflite/BinaryTflite.cpp new file mode 100644 index 000000000..4e4dd2feb --- /dev/null +++ b/tools/converter/source/tflite/BinaryTflite.cpp @@ -0,0 +1,108 @@ +// +// BinaryTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/11/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +using namespace tflite; + +DECLARE_OP_COVERTER(BinaryTflite); + +MNN::OpType BinaryTflite::opType(bool quantizedModel) { + return MNN::OpType_BinaryOp; +} +MNN::OpParameter BinaryTflite::type(bool quantizedModel) { + return MNN::OpParameter_BinaryOp; +} + +void BinaryTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel) { + auto param = new MNN::BinaryOpT; + switch (tfliteOp->opcode_index) { + case tflite::BuiltinOperator_POW: { + param->opType = MNN::BinaryOpOperation_POW; + break; + } + case tflite::BuiltinOperator_MAXIMUM: { + param->opType = MNN::BinaryOpOperation_MAXIMUM; + break; + } + case tflite::BuiltinOperator_MINIMUM: { + param->opType = MNN::BinaryOpOperation_MINIMUM; + break; + } + case tflite::BuiltinOperator_LESS: { + param->opType = MNN::BinaryOpOperation_LESS; + break; + } + case tflite::BuiltinOperator_GREATER_EQUAL: { + param->opType = MNN::BinaryOpOperation_GREATER_EQUAL; + break; + } + case tflite::BuiltinOperator_ADD: { + param->opType = MNN::BinaryOpOperation_ADD; + break; + } + case tflite::BuiltinOperator_SUB: { + param->opType = MNN::BinaryOpOperation_SUB; + break; + } + case tflite::BuiltinOperator_FLOOR_DIV: { + param->opType = MNN::BinaryOpOperation_FLOORDIV; + break; + } + case tflite::BuiltinOperator_FLOOR_MOD: { + param->opType = MNN::BinaryOpOperation_FLOORMOD; + break; + } + case tflite::BuiltinOperator_LESS_EQUAL: { + param->opType = MNN::BinaryOpOperation_LESS_EQUAL; + break; + } + case tflite::BuiltinOperator_GREATER: { + param->opType = MNN::BinaryOpOperation_GREATER; + break; + } + case tflite::BuiltinOperator_EQUAL: { + param->opType = MNN::BinaryOpOperation_EQUAL; + break; + } + case tflite::BuiltinOperator_SQUARED_DIFFERENCE: { + param->opType = 
MNN::BinaryOpOperation_SquaredDifference; + break; + } + case BuiltinOperator_MUL: + case BuiltinOperator_LOGICAL_AND: { + param->opType = MNN::BinaryOpOperation_MUL; + break; + } + default: { + LOG(ERROR) << "MNN Converter Not " + "Supported!!! BinaryOp: " + << tfliteOpSet[tfliteOp->opcode_index]->custom_code; + } + } + dstOp->main.value = param; +} +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_POW); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_MAXIMUM); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_MINIMUM); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_LESS); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_GREATER_EQUAL); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_ADD); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_SUB); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_FLOOR_DIV); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_FLOOR_MOD); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_LESS_EQUAL); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_GREATER); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_EQUAL); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_SQUARED_DIFFERENCE); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_MUL); +REGISTER_CONVERTER(BinaryTflite, BuiltinOperator_LOGICAL_AND); diff --git a/tools/converter/source/tflite/BroadCastAdd.cpp b/tools/converter/source/tflite/BroadCastAdd.cpp index ce9a33872..1ce3be3a9 100644 --- a/tools/converter/source/tflite/BroadCastAdd.cpp +++ b/tools/converter/source/tflite/BroadCastAdd.cpp @@ -37,21 +37,21 @@ void AddTflite::run(MNN::OpT* dstOp, const std::unique_ptr& t const int input1Index = tfliteOp->inputs[0]; const auto& input1Tensor = tfliteTensors[input1Index]; AddParam->input1QuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - AddParam->input1QuantizedParam->zeroPoint = input1Tensor->quantization->zeroPoint[0]; + AddParam->input1QuantizedParam->zeroPoint = input1Tensor->quantization->zero_point[0]; AddParam->input1QuantizedParam->scale = 
input1Tensor->quantization->scale[0]; // input1 const int input2Index = tfliteOp->inputs[1]; const auto& input2Tensor = tfliteTensors[input2Index]; AddParam->input2QuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - AddParam->input2QuantizedParam->zeroPoint = input2Tensor->quantization->zeroPoint[0]; + AddParam->input2QuantizedParam->zeroPoint = input2Tensor->quantization->zero_point[0]; AddParam->input2QuantizedParam->scale = input2Tensor->quantization->scale[0]; // output const int outputIndex = tfliteOp->outputs[0]; const auto& outputTensor = tfliteTensors[outputIndex]; AddParam->outputQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - AddParam->outputQuantizedParam->zeroPoint = outputTensor->quantization->zeroPoint[0]; + AddParam->outputQuantizedParam->zeroPoint = outputTensor->quantization->zero_point[0]; AddParam->outputQuantizedParam->scale = outputTensor->quantization->scale[0]; AddParam->activationType = static_cast(addOption->fused_activation_function); @@ -65,16 +65,6 @@ void AddTflite::run(MNN::OpT* dstOp, const std::unique_ptr& t binaryOpParam->opType = MNN::BinaryOpOperation_ADD; // defalut dstOp->main.value = binaryOpParam; } - - // set input output index - dstOp->inputIndexes.resize(tfliteOp->inputs.size()); - dstOp->outputIndexes.resize(tfliteOp->outputs.size()); - for (int i = 0; i < tfliteOp->inputs.size(); i++) { - dstOp->inputIndexes[i] = tfliteOp->inputs[i]; - } - for (int i = 0; i < tfliteOp->outputs.size(); i++) { - dstOp->outputIndexes[i] = tfliteOp->outputs[i]; - } } using namespace tflite; diff --git a/tools/converter/source/tflite/CMakeLists.txt b/tools/converter/source/tflite/CMakeLists.txt index 790cd5a5e..b52795348 100644 --- a/tools/converter/source/tflite/CMakeLists.txt +++ b/tools/converter/source/tflite/CMakeLists.txt @@ -1,21 +1,5 @@ - -set(CMAKE_CXX_STANDARD 11) - - -set(TFLITE_SRC_PATH ${SRC_PATH}/tflite) - -include_directories(${SRC_PATH}/include) -include_directories(${TFLITE_SRC_PATH}/schema) 
-include_directories(${TFLITE_SRC_PATH}) - -file(GLOB TFLITE_SRC ${TFLITE_SRC_PATH}/*) - -if(MNN_BUILD_SHARED_LIBS) - add_library(tflite SHARED ${TFLITE_SRC}) -else() - add_library(tflite STATIC ${TFLITE_SRC}) -endif() - -if (MSVC OR WIN32) - target_compile_options(tflite PRIVATE "/wd4267" "/wd4244") -endif() +file(GLOB TFLITE_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) +add_library(MNNConverterTFL OBJECT ${TFLITE_SRC}) +target_include_directories(MNNConverterTFL PRIVATE ${CMAKE_CURRENT_LIST_DIR}/schema/) +list(APPEND MNN_CONVERTER_BACKENDS_OBJECTS $) +list(APPEND MNN_CONVERTER_BACKENDS_TARGETS MNNConverterTFL) diff --git a/tools/converter/source/tflite/ConcatTflite.cpp b/tools/converter/source/tflite/ConcatTflite.cpp index d66e0cce5..25b3ff05a 100644 --- a/tools/converter/source/tflite/ConcatTflite.cpp +++ b/tools/converter/source/tflite/ConcatTflite.cpp @@ -34,14 +34,14 @@ void ConcatTflite::run(MNN::OpT* dstOp, const std::unique_ptr const int inputIndex = tfliteOp->inputs[i]; const auto& inputTensor = tfliteTensors[inputIndex]; auto quantized_param_ptr = std::unique_ptr(new MNN::QuantizedParamT); - concatParamQuan->inputZeroPoint.push_back(inputTensor->quantization->zeroPoint[0]); + concatParamQuan->inputZeroPoint.push_back(inputTensor->quantization->zero_point[0]); concatParamQuan->inputScale.push_back(inputTensor->quantization->scale[0]); } const int outputIndex = tfliteOp->outputs[0]; const auto& outputTensor = tfliteTensors[outputIndex]; concatParamQuan->outputQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - concatParamQuan->outputQuantizedParam->zeroPoint = outputTensor->quantization->zeroPoint[0]; + concatParamQuan->outputQuantizedParam->zeroPoint = outputTensor->quantization->zero_point[0]; concatParamQuan->outputQuantizedParam->scale = outputTensor->quantization->scale[0]; concatParamQuan->activationType = static_cast(tfliteConcatOption->fused_activation_function); @@ -52,16 +52,6 @@ void ConcatTflite::run(MNN::OpT* dstOp, const std::unique_ptr 
concatParamFloat->axis = tfliteConcatOption->axis; dstOp->main.value = concatParamFloat; } - - // set input output index - dstOp->inputIndexes.resize(tfliteOp->inputs.size()); - dstOp->outputIndexes.resize(tfliteOp->outputs.size()); - for (int i = 0; i < tfliteOp->inputs.size(); i++) { - dstOp->inputIndexes[i] = tfliteOp->inputs[i]; - } - for (int i = 0; i < tfliteOp->outputs.size(); i++) { - dstOp->outputIndexes[i] = tfliteOp->outputs[i]; - } } using namespace tflite; diff --git a/tools/converter/source/tflite/ConvolutionTflite.cpp b/tools/converter/source/tflite/ConvolutionTflite.cpp index 8685c4a10..25cc0615b 100644 --- a/tools/converter/source/tflite/ConvolutionTflite.cpp +++ b/tools/converter/source/tflite/ConvolutionTflite.cpp @@ -50,8 +50,8 @@ void Conv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptr conv2dParamQuan->common = std::unique_ptr(new MNN::Convolution2DCommonT); // filterOffset conv2dParamQuan->filterQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - if (weightTensor->quantization->zeroPoint.size() > 0) { - conv2dParamQuan->filterQuantizedParam->zeroPoint = weightTensor->quantization->zeroPoint[0]; + if (weightTensor->quantization->zero_point.size() > 0) { + conv2dParamQuan->filterQuantizedParam->zeroPoint = weightTensor->quantization->zero_point[0]; } else { conv2dParamQuan->filterQuantizedParam->zeroPoint = 0; } @@ -65,8 +65,8 @@ void Conv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptr const int inputIndex = tfliteOp->inputs[0]; const auto& inputTensor = tfliteTensors[inputIndex]; conv2dParamQuan->inputQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - if (inputTensor->quantization->zeroPoint.size() > 0) { - conv2dParamQuan->inputQuantizedParam->zeroPoint = inputTensor->quantization->zeroPoint[0]; + if (inputTensor->quantization->zero_point.size() > 0) { + conv2dParamQuan->inputQuantizedParam->zeroPoint = inputTensor->quantization->zero_point[0]; } else { conv2dParamQuan->inputQuantizedParam->zeroPoint = 0; } @@ 
-82,7 +82,7 @@ void Conv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptr conv2dParamQuan->outputQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); if (outputTensor->quantization->scale.size() > 0) { - conv2dParamQuan->outputQuantizedParam->zeroPoint = outputTensor->quantization->zeroPoint[0]; + conv2dParamQuan->outputQuantizedParam->zeroPoint = outputTensor->quantization->zero_point[0]; } else { conv2dParamQuan->outputQuantizedParam->zeroPoint = 0; } @@ -137,7 +137,7 @@ void Conv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptr DCHECK(biasTensor->type == tflite::TensorType_INT32) << "Bias Type ERROR"; const auto& biasData = tfliteModelBuffer[biasTensor->buffer]->data; conv2dParamQuan->biasQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - conv2dParamQuan->biasQuantizedParam->zeroPoint = biasTensor->quantization->zeroPoint[0]; + conv2dParamQuan->biasQuantizedParam->zeroPoint = biasTensor->quantization->zero_point[0]; conv2dParamQuan->biasQuantizedParam->scale = biasTensor->quantization->scale[0]; DCHECK(biasData.size() / 4 == co) << "Bias Data ERROR"; auto biasDataPtr = biasData.data(); @@ -195,7 +195,7 @@ void Conv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptr dstOp->main.value = convolution2DFloat; } - + // set input output index dstOp->inputIndexes.resize(1); dstOp->outputIndexes.resize(1); diff --git a/tools/converter/source/tflite/CustomTflite.cpp b/tools/converter/source/tflite/CustomTflite.cpp index df89a0d19..f2e8bc21d 100644 --- a/tools/converter/source/tflite/CustomTflite.cpp +++ b/tools/converter/source/tflite/CustomTflite.cpp @@ -58,18 +58,8 @@ void CustomTflite::run(MNN::OpT *dstOp, const std::unique_ptr dstOp->main.value = postProcessParam; - const int inputNumbers = tfliteOp->inputs.size(); - DCHECK(inputNumbers == 3) << "TFLite_Detection_PostProcess should have 3 inputs!"; - dstOp->inputIndexes.resize(inputNumbers); - for (int i = 0; i < inputNumbers; ++i) { - dstOp->inputIndexes[i] = tfliteOp->inputs[i]; - } - const 
int outputNumbers = tfliteOp->outputs.size(); - DCHECK(outputNumbers == 4) << "TFLite_Detection_PostProcess should have 4 outputs!"; - dstOp->outputIndexes.resize(outputNumbers); - for (int i = 0; i < outputNumbers; ++i) { - dstOp->outputIndexes[i] = tfliteOp->outputs[i]; - } + DCHECK(tfliteOp->inputs.size() == 3) << "TFLite_Detection_PostProcess should have 3 inputs!"; + DCHECK(tfliteOp->outputs.size() == 4) << "TFLite_Detection_PostProcess should have 4 outputs!"; } using namespace tflite; diff --git a/tools/converter/source/tflite/DepthwiseConv2DTflite.cpp b/tools/converter/source/tflite/DepthwiseConv2DTflite.cpp index f3fb8d6aa..5e568a9d5 100644 --- a/tools/converter/source/tflite/DepthwiseConv2DTflite.cpp +++ b/tools/converter/source/tflite/DepthwiseConv2DTflite.cpp @@ -53,14 +53,14 @@ void DepthwiseConv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptrfilterQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - depthwiseConv2dParamQuan->filterQuantizedParam->zeroPoint = weightTensor->quantization->zeroPoint[0]; + depthwiseConv2dParamQuan->filterQuantizedParam->zeroPoint = weightTensor->quantization->zero_point[0]; depthwiseConv2dParamQuan->filterQuantizedParam->scale = weightTensor->quantization->scale[0]; // input const int inputIndex = tfliteOp->inputs[0]; const auto& inputTensor = tfliteTensors[inputIndex]; depthwiseConv2dParamQuan->inputQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - depthwiseConv2dParamQuan->inputQuantizedParam->zeroPoint = inputTensor->quantization->zeroPoint[0]; + depthwiseConv2dParamQuan->inputQuantizedParam->zeroPoint = inputTensor->quantization->zero_point[0]; depthwiseConv2dParamQuan->inputQuantizedParam->scale = inputTensor->quantization->scale[0]; // output @@ -68,7 +68,7 @@ void DepthwiseConv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptroutputQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - depthwiseConv2dParamQuan->outputQuantizedParam->zeroPoint = 
outputTensor->quantization->zeroPoint[0]; + depthwiseConv2dParamQuan->outputQuantizedParam->zeroPoint = outputTensor->quantization->zero_point[0]; depthwiseConv2dParamQuan->outputQuantizedParam->scale = outputTensor->quantization->scale[0]; // kernel size @@ -105,7 +105,7 @@ void DepthwiseConv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptrbuffer]->data; depthwiseConv2dParamQuan->biasQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - depthwiseConv2dParamQuan->biasQuantizedParam->zeroPoint = biasTensor->quantization->zeroPoint[0]; + depthwiseConv2dParamQuan->biasQuantizedParam->zeroPoint = biasTensor->quantization->zero_point[0]; depthwiseConv2dParamQuan->biasQuantizedParam->scale = biasTensor->quantization->scale[0]; auto shape = biasTensor->shape; @@ -164,7 +164,7 @@ void DepthwiseConv2DTflite::run(MNN::OpT* dstOp, const std::unique_ptrmain.value = depthwiseConv2dParamFloat; } - + // set input output index dstOp->inputIndexes.resize(1); dstOp->outputIndexes.resize(1); diff --git a/tools/converter/source/tflite/FillTflite.cpp b/tools/converter/source/tflite/FillTflite.cpp new file mode 100644 index 000000000..5f9466f2b --- /dev/null +++ b/tools/converter/source/tflite/FillTflite.cpp @@ -0,0 +1,43 @@ +// +// FillTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/11/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(FillTflite); + +MNN::OpType FillTflite::opType(bool quantizedModel) { + return MNN::OpType_Fill; +} +MNN::OpParameter FillTflite::type(bool quantizedModel) { + return MNN::OpParameter_Fill; +} + +void FillTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel) { + dstOp->main.value = nullptr; +} +DECLARE_OP_COVERTER(ZerosLikeTflite); +MNN::OpType ZerosLikeTflite::opType(bool quantizedModel) { + return MNN::OpType_ZerosLike; +} +MNN::OpParameter ZerosLikeTflite::type(bool quantizedModel) { + return MNN::OpParameter_NONE; +} + +void ZerosLikeTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel) { + dstOp->main.value = nullptr; +} +using namespace tflite; +REGISTER_CONVERTER(FillTflite, BuiltinOperator_FILL); +REGISTER_CONVERTER(ZerosLikeTflite, BuiltinOperator_ZEROS_LIKE); diff --git a/tools/converter/source/tflite/GatherTflite.cpp b/tools/converter/source/tflite/GatherTflite.cpp new file mode 100644 index 000000000..147d8db34 --- /dev/null +++ b/tools/converter/source/tflite/GatherTflite.cpp @@ -0,0 +1,32 @@ +// +// GatherTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/12/5. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(GatherTflite); +MNN::OpType GatherTflite::opType(bool quantizedModel) { + return MNN::OpType_Gather; +} +MNN::OpParameter GatherTflite::type(bool quantizedModel) { + return MNN::OpParameter_Gather; +} + +void GatherTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + auto parameter = new MNN::GatherT; + auto opt=tfliteOp->builtin_options.AsGatherOptions(); + parameter->axis = opt->axis; + dstOp->main.value = parameter; +} + + +using namespace tflite; +REGISTER_CONVERTER(GatherTflite, BuiltinOperator_GATHER); diff --git a/tools/converter/source/tflite/LogisticTflite.cpp b/tools/converter/source/tflite/LogisticTflite.cpp index 3efb2bd11..83b8b3b52 100644 --- a/tools/converter/source/tflite/LogisticTflite.cpp +++ b/tools/converter/source/tflite/LogisticTflite.cpp @@ -32,29 +32,19 @@ void LogisticTflite::run(MNN::OpT* dstOp, const std::unique_ptrinputs[0]; const auto& inputTensor = tfliteTensors[inputIndex]; LogisticParam->inputQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - LogisticParam->inputQuantizedParam->zeroPoint = inputTensor->quantization->zeroPoint[0]; + LogisticParam->inputQuantizedParam->zeroPoint = inputTensor->quantization->zero_point[0]; LogisticParam->inputQuantizedParam->scale = inputTensor->quantization->scale[0]; const int outputIndex = tfliteOp->outputs[0]; const auto& outputTensor = tfliteTensors[outputIndex]; LogisticParam->outputQuantizedParam = std::unique_ptr(new MNN::QuantizedParamT); - LogisticParam->outputQuantizedParam->zeroPoint = outputTensor->quantization->zeroPoint[0]; + LogisticParam->outputQuantizedParam->zeroPoint = outputTensor->quantization->zero_point[0]; LogisticParam->outputQuantizedParam->scale = outputTensor->quantization->scale[0]; 
dstOp->main.value = LogisticParam; } else { dstOp->main.value = nullptr; } - - // set input output index - dstOp->inputIndexes.resize(tfliteOp->inputs.size()); - dstOp->outputIndexes.resize(tfliteOp->outputs.size()); - for (int i = 0; i < tfliteOp->inputs.size(); i++) { - dstOp->inputIndexes[i] = tfliteOp->inputs[i]; - } - for (int i = 0; i < tfliteOp->outputs.size(); i++) { - dstOp->outputIndexes[i] = tfliteOp->outputs[i]; - } } using namespace tflite; diff --git a/tools/converter/source/tflite/PackTflite.cpp b/tools/converter/source/tflite/PackTflite.cpp new file mode 100644 index 000000000..d47eedef5 --- /dev/null +++ b/tools/converter/source/tflite/PackTflite.cpp @@ -0,0 +1,32 @@ +// +// PackTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/11/25. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(PackTflite); +MNN::OpType PackTflite::opType(bool quantizedModel) { + return MNN::OpType_Pack; +} +MNN::OpParameter PackTflite::type(bool quantizedModel) { + return MNN::OpParameter_PackParam; +} + +void PackTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + auto param = new MNN::PackParamT; + auto opt=tfliteOp->builtin_options.AsPackOptions(); + param->axis=opt->axis; + dstOp->main.value = param; +} + + +using namespace tflite; +REGISTER_CONVERTER(PackTflite, BuiltinOperator_PACK); diff --git a/tools/converter/source/tflite/PadTflite.cpp b/tools/converter/source/tflite/PadTflite.cpp new file mode 100644 index 000000000..9b17722d0 --- /dev/null +++ b/tools/converter/source/tflite/PadTflite.cpp @@ -0,0 +1,28 @@ +// +// PadTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +using namespace tflite; +DECLARE_OP_COVERTER(PadTflite); + +MNN::OpType PadTflite::opType(bool quantizedModel) { + return MNN::OpType_Padding; +} +MNN::OpParameter PadTflite::type(bool quantizedModel) { + return MNN::OpParameter_NONE; +} +void PadTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel) { + +} + +REGISTER_CONVERTER(PadTflite, BuiltinOperator_PAD); diff --git a/tools/converter/source/tflite/PoolingTflite.cpp b/tools/converter/source/tflite/PoolingTflite.cpp index c90949974..62dfdc202 100644 --- a/tools/converter/source/tflite/PoolingTflite.cpp +++ b/tools/converter/source/tflite/PoolingTflite.cpp @@ -79,7 +79,7 @@ void PoolingTflite::run(MNN::OpT* dstOp, const std::unique_ptrinputs.size() == 1) << "Tflite pooling input ERROR"; - + // set input output index dstOp->inputIndexes.resize(1); dstOp->outputIndexes.resize(1); diff --git a/tools/converter/source/tflite/ReductionTflite.cpp b/tools/converter/source/tflite/ReductionTflite.cpp new file mode 100644 index 000000000..d8fe3ec75 --- /dev/null +++ b/tools/converter/source/tflite/ReductionTflite.cpp @@ -0,0 +1,69 @@ +// +// ReductionTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/12/09. 
+// Copyright © 2019, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(ReductionTflite); +MNN::OpType ReductionTflite::opType(bool quantizedModel) { + return MNN::OpType_Reduction; +} +MNN::OpParameter ReductionTflite::type(bool quantizedModel) { + return MNN::OpParameter_ReductionParam; +} + +void ReductionTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + auto param = new MNN::ReductionParamT; + auto opt = tfliteOp->builtin_options.AsReducerOptions(); + param->keepDims=opt->keep_dims; +#ifdef TF_CONVERT_ORIGIN + const int input1Idx = tfliteOp->inputs[1]; + const auto& input1Tensor = tfliteTensors[input1Idx]; + if(input1Tensor.is_variable == false){ + auto buffer1Idx=input1Tensor.buffer; + auto buffer1=tfliteModelBuffer[buffer1Idx]; + auto shape=input1Tensor.shape; + param->dim.resize(shape.size()); + for(decltype(shape.size()) x=0;xdim[x]=shape[x]; + } + } +#endif + switch(tfliteOp->opcode_index){ + case tflite::BuiltinOperator_REDUCE_MAX:{ + param->operation=MNN::ReductionType_MAXIMUM; + break; + } + case tflite::BuiltinOperator_REDUCE_MIN:{ + param->operation=MNN::ReductionType_MINIMUM; + break; + } + case tflite::BuiltinOperator_REDUCE_ANY:{ + param->operation=MNN::ReductionType_ANY; + break; + } + case tflite::BuiltinOperator_REDUCE_PROD:{ + param->operation=MNN::ReductionType_PROD; + break; + } + default:{ + LOG(ERROR) << "MNN Converter Not " + "Supported!!! 
Reduction Op: " + << tfliteOpSet[tfliteOp->opcode_index]->custom_code; + } + } + dstOp->main.value = param; +} +using namespace tflite; +REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_REDUCE_MAX); +REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_REDUCE_MIN); +REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_REDUCE_ANY); +REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_REDUCE_PROD); diff --git a/tools/converter/source/tflite/ReluTflite.cpp b/tools/converter/source/tflite/ReluTflite.cpp new file mode 100644 index 000000000..7c8fc7f7e --- /dev/null +++ b/tools/converter/source/tflite/ReluTflite.cpp @@ -0,0 +1,67 @@ +// +// ReluTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/11/25. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(ReluTflite); +MNN::OpType ReluTflite::opType(bool quantizedModel) { + return MNN::OpType_ReLU; +} +MNN::OpParameter ReluTflite::type(bool quantizedModel) { + return MNN::OpParameter_Relu; +} + +void ReluTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + auto Relu = new MNN::ReluT; + Relu->slope = 0.0f; + dstOp->main.value = Relu; +} + +DECLARE_OP_COVERTER(LeakyReluTflite); +MNN::OpType LeakyReluTflite::opType(bool quantizedModel) { + return MNN::OpType_ReLU; +} +MNN::OpParameter LeakyReluTflite::type(bool quantizedModel) { + return MNN::OpParameter_Relu; +} + +void LeakyReluTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + auto Relu = new MNN::ReluT; + auto opt = tfliteOp->builtin_options.AsLeakyReluOptions(); + Relu->slope = opt->alpha; + dstOp->main.value = Relu; +} + +DECLARE_OP_COVERTER(Relu6Tflite); +MNN::OpType Relu6Tflite::opType(bool quantizedModel) 
{ + return MNN::OpType_ReLU6; +} +MNN::OpParameter Relu6Tflite::type(bool quantizedModel) { + return MNN::OpParameter_Relu6; +} + +void Relu6Tflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + auto relu6 = new MNN::Relu6T; + relu6->slope = 0.0f; + dstOp->main.value = relu6; +} + +using namespace tflite; +REGISTER_CONVERTER(ReluTflite, BuiltinOperator_RELU); +REGISTER_CONVERTER(LeakyReluTflite, BuiltinOperator_LEAKY_RELU); +REGISTER_CONVERTER(Relu6Tflite, BuiltinOperator_RELU6); diff --git a/tools/converter/source/tflite/ReshapeTflite.cpp b/tools/converter/source/tflite/ReshapeTflite.cpp index 59df4e3e5..46a6b131f 100644 --- a/tools/converter/source/tflite/ReshapeTflite.cpp +++ b/tools/converter/source/tflite/ReshapeTflite.cpp @@ -58,7 +58,7 @@ void ReshapeTflite::run(MNN::OpT* dstOp, const std::unique_ptrmain.value = reshapeParam; } - + // set input output index dstOp->inputIndexes.resize(1); dstOp->outputIndexes.resize(1); diff --git a/tools/converter/source/tflite/ResizeBilinear.cpp b/tools/converter/source/tflite/ResizeBilinear.cpp index 1b65f75f2..c2a008983 100644 --- a/tools/converter/source/tflite/ResizeBilinear.cpp +++ b/tools/converter/source/tflite/ResizeBilinear.cpp @@ -35,7 +35,7 @@ void ResizeBilinear::run(MNN::OpT *dstOp, const std::unique_ptrinputs[1]]; auto scaleDataPtr = reinterpret_cast(tfliteModelBuffer[scaleTensor->buffer]->data.data()); - resizeParam->alignCorners = resizeOption->alignCorners; + resizeParam->alignCorners = resizeOption->align_corners; resizeParam->resizeType = 2; resizeParam->outputHeight = scaleDataPtr[1]; @@ -43,13 +43,13 @@ void ResizeBilinear::run(MNN::OpT *dstOp, const std::unique_ptrwidthScale = 1.0; resizeParam->heightScale = 1.0; - + // set input output index dstOp->inputIndexes.resize(1); dstOp->outputIndexes.resize(1); dstOp->inputIndexes[0] = tfliteOp->inputs[0]; 
dstOp->outputIndexes[0] = tfliteOp->outputs[0]; - + dstOp->main.value = resizeParam; } diff --git a/tools/converter/source/tflite/SqueezeTflite.cpp b/tools/converter/source/tflite/SqueezeTflite.cpp index ec36ff870..a85f17f1f 100644 --- a/tools/converter/source/tflite/SqueezeTflite.cpp +++ b/tools/converter/source/tflite/SqueezeTflite.cpp @@ -32,13 +32,13 @@ void SqueezeTflite::run(MNN::OpT *dstOp, const std::unique_ptrbuiltin_options.AsSqueezeOptions(); - squeezeParam->squeezeDims = squeezeOption->squeezeDims; + squeezeParam->squeezeDims = squeezeOption->squeeze_dims; + // set input output index dstOp->inputIndexes.resize(1); dstOp->outputIndexes.resize(1); dstOp->inputIndexes[0] = tfliteOp->inputs[0]; dstOp->outputIndexes[0] = tfliteOp->outputs[0]; - dstOp->main.value = squeezeParam; } diff --git a/tools/converter/source/tflite/TfliteUtils.cpp b/tools/converter/source/tflite/TfliteUtils.cpp index 5945b9cc6..fdd5c7c88 100644 --- a/tools/converter/source/tflite/TfliteUtils.cpp +++ b/tools/converter/source/tflite/TfliteUtils.cpp @@ -17,7 +17,7 @@ void CalculateActivationRangeQuantizedImpl(const MNN::FusedActivation activation const int32_t qmax, const tfliteQuanParam& outputQuan, int32_t* act_min, int32_t* act_max) { const auto scale = outputQuan->scale[0]; - const int32_t zeroPoint = static_cast(outputQuan->zeroPoint[0]); + const int32_t zeroPoint = static_cast(outputQuan->zero_point[0]); auto quantize = [scale, zeroPoint](float f) { return zeroPoint + static_cast(std::round(f / scale)); }; @@ -55,7 +55,7 @@ void CalculateActivationRangeUint8(const MNN::FusedActivation activation, const const int32_t qmin = std::numeric_limits::min(); const int32_t qmax = std::numeric_limits::max(); const auto scale = outputQuan->scale[0]; - const int32_t zeroPoint = static_cast(outputQuan->zeroPoint[0]); + const int32_t zeroPoint = static_cast(outputQuan->zero_point[0]); auto quantize = [scale, zeroPoint](float f) { return zeroPoint + static_cast(std::round(f / scale)); }; diff 
--git a/tools/converter/source/tflite/TileTflite.cpp b/tools/converter/source/tflite/TileTflite.cpp new file mode 100644 index 000000000..f03a46485 --- /dev/null +++ b/tools/converter/source/tflite/TileTflite.cpp @@ -0,0 +1,29 @@ +// +// TileTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/11/25. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(TileTflite); +MNN::OpType TileTflite::opType(bool quantizedModel) { + return MNN::OpType_Tile; +} +MNN::OpParameter TileTflite::type(bool quantizedModel) { + return MNN::OpParameter_NONE; +} + +void TileTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + dstOp->main.value = nullptr; +} + + +using namespace tflite; +REGISTER_CONVERTER(TileTflite, BuiltinOperator_TILE); diff --git a/tools/converter/source/tflite/TransposeTflite.cpp b/tools/converter/source/tflite/TransposeTflite.cpp new file mode 100644 index 000000000..46b638865 --- /dev/null +++ b/tools/converter/source/tflite/TransposeTflite.cpp @@ -0,0 +1,31 @@ +// +// TransposeTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/12/09. 
+// Copyright © 2019, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(TransposeTflite); +MNN::OpType TransposeTflite::opType(bool quantizedModel) { + return MNN::OpType_Transpose; +} +MNN::OpParameter TransposeTflite::type(bool quantizedModel) { + return MNN::OpParameter_Transpose; +} +void TransposeTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + auto param = new MNN::TransposeT; + auto tfliteSoftmaxOption = tfliteOp->builtin_options.AsTransposeOptions(); + + dstOp->main.value = param; +} + + +using namespace tflite; +REGISTER_CONVERTER(TransposeTflite, BuiltinOperator_TRANSPOSE); diff --git a/tools/converter/source/tflite/UnaryTflite.cpp b/tools/converter/source/tflite/UnaryTflite.cpp new file mode 100644 index 000000000..b9c597fe4 --- /dev/null +++ b/tools/converter/source/tflite/UnaryTflite.cpp @@ -0,0 +1,76 @@ +// +// UnaryTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/11/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(UnaryTflite); +MNN::OpType UnaryTflite::opType(bool quantizedModel) { + return MNN::OpType_UnaryOp; +} +MNN::OpParameter UnaryTflite::type(bool quantizedModel) { + return MNN::OpParameter_UnaryOp; +} + +void UnaryTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + auto param = new MNN::UnaryOpT; + switch(tfliteOp->opcode_index){ + case tflite::BuiltinOperator_FLOOR:{ + param->opType=MNN::UnaryOpOperation_FLOOR; + break; + } + case tflite::BuiltinOperator_SQUARE:{ + param->opType=MNN::UnaryOpOperation_SQUARE; + break; + } + case tflite::BuiltinOperator_RSQRT:{ + param->opType=MNN::UnaryOpOperation_RSQRT; + break; + } + case tflite::BuiltinOperator_EXP:{ + param->opType=MNN::UnaryOpOperation_EXP; + break; + } + case tflite::BuiltinOperator_NEG:{ + param->opType=MNN::UnaryOpOperation_NEG; + break; + } + case tflite::BuiltinOperator_SQRT:{ + param->opType=MNN::UnaryOpOperation_SQRT; + break; + } + case tflite::BuiltinOperator_LOG:{ + param->opType=MNN::UnaryOpOperation_LOG; + break; + } + case tflite::BuiltinOperator_SIN:{ + param->opType=MNN::UnaryOpOperation_SIN; + break; + } + default:{ + LOG(ERROR) << "MNN Converter Not " + "Supported!!! 
UnaryOp: " + << tfliteOpSet[tfliteOp->opcode_index]->custom_code; + } + } + dstOp->main.value = param; +} + + +using namespace tflite; +REGISTER_CONVERTER(UnaryTflite, BuiltinOperator_FLOOR); +REGISTER_CONVERTER(UnaryTflite, BuiltinOperator_SQUARE); +REGISTER_CONVERTER(UnaryTflite, BuiltinOperator_RSQRT); +REGISTER_CONVERTER(UnaryTflite, BuiltinOperator_EXP); +REGISTER_CONVERTER(UnaryTflite, BuiltinOperator_NEG); +REGISTER_CONVERTER(UnaryTflite, BuiltinOperator_SQRT); +REGISTER_CONVERTER(UnaryTflite, BuiltinOperator_LOG); +REGISTER_CONVERTER(UnaryTflite, BuiltinOperator_SIN); diff --git a/tools/converter/source/tflite/UnpackTflite.cpp b/tools/converter/source/tflite/UnpackTflite.cpp new file mode 100644 index 000000000..73633cf82 --- /dev/null +++ b/tools/converter/source/tflite/UnpackTflite.cpp @@ -0,0 +1,32 @@ +// +// UnpackTflite.cpp +// MNNConverter +// +// Created by MNN on 2019/11/26. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" +using namespace tflite; + +DECLARE_OP_COVERTER(UnpackTflite); + +MNN::OpType UnpackTflite::opType(bool quantizedModel) { + return MNN::OpType_Unpack; +} +MNN::OpParameter UnpackTflite::type(bool quantizedModel) { + return MNN::OpParameter_Axis; +} + +void UnpackTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel) { + auto axisT = new MNN::AxisT; + auto opt=tfliteOp->builtin_options.AsUnpackOptions(); + axisT->axis = opt->axis; + dstOp->main.value = axisT; +} + +REGISTER_CONVERTER(UnpackTflite, BuiltinOperator_UNPACK); diff --git a/tools/converter/source/tflite/liteConverter.cpp b/tools/converter/source/tflite/liteConverter.cpp index 482923894..214bb6510 100644 --- a/tools/converter/source/tflite/liteConverter.cpp +++ b/tools/converter/source/tflite/liteConverter.cpp @@ -157,6 +157,16 @@ int tflite2MNNNet(const std::string inputModel, const 
std::string bizCode, std:: op->name = tensors[ops[j]->outputs[0]]->name; op->type = creator->opType(quantizedModel); op->main.type = creator->type(quantizedModel); + // set default input output index + op->inputIndexes.resize(ops[j]->inputs.size()); + op->outputIndexes.resize(ops[j]->outputs.size()); + for (int i = 0; i < ops[j]->inputs.size(); i++) { + op->inputIndexes[i] = ops[j]->inputs[i]; + } + for (int i = 0; i < ops[j]->outputs.size(); i++) { + op->outputIndexes[i] = ops[j]->outputs[i]; + } + // Run actual conversion creator->run(op, ops[j], tensors, tfliteModelBuffer, tfliteOpSet, quantizedModel); MNNNetT->oplists.emplace_back(op); } diff --git a/tools/converter/source/tflite/schema/schema_generated.h b/tools/converter/source/tflite/schema/schema_generated.h index cf76632b2..73bbdd717 100644 --- a/tools/converter/source/tflite/schema/schema_generated.h +++ b/tools/converter/source/tflite/schema/schema_generated.h @@ -8,6 +8,9 @@ namespace tflite { +struct CustomQuantization; +struct CustomQuantizationT; + struct QuantizationParameters; struct QuantizationParametersT; @@ -65,9 +68,18 @@ struct LocalResponseNormalizationOptionsT; struct LSTMOptions; struct LSTMOptionsT; +struct UnidirectionalSequenceLSTMOptions; +struct UnidirectionalSequenceLSTMOptionsT; + +struct BidirectionalSequenceLSTMOptions; +struct BidirectionalSequenceLSTMOptionsT; + struct ResizeBilinearOptions; struct ResizeBilinearOptionsT; +struct ResizeNearestNeighborOptions; +struct ResizeNearestNeighborOptionsT; + struct CallOptions; struct CallOptionsT; @@ -92,6 +104,9 @@ struct SkipGramOptionsT; struct SpaceToDepthOptions; struct SpaceToDepthOptionsT; +struct DepthToSpaceOptions; +struct DepthToSpaceOptionsT; + struct SubOptions; struct SubOptionsT; @@ -113,6 +128,9 @@ struct TransposeOptionsT; struct ExpOptions; struct ExpOptionsT; +struct CosOptions; +struct CosOptionsT; + struct ReducerOptions; struct ReducerOptionsT; @@ -122,6 +140,9 @@ struct SqueezeOptionsT; struct SplitOptions; struct 
SplitOptionsT; +struct SplitVOptions; +struct SplitVOptionsT; + struct StridedSliceOptions; struct StridedSliceOptionsT; @@ -185,6 +206,9 @@ struct NotEqualOptionsT; struct ShapeOptions; struct ShapeOptionsT; +struct RankOptions; +struct RankOptionsT; + struct PowOptions; struct PowOptionsT; @@ -200,6 +224,12 @@ struct LogicalOrOptionsT; struct OneHotOptions; struct OneHotOptionsT; +struct AbsOptions; +struct AbsOptionsT; + +struct HardSwishOptions; +struct HardSwishOptionsT; + struct LogicalAndOptions; struct LogicalAndOptionsT; @@ -221,172 +251,80 @@ struct ZerosLikeOptionsT; struct FillOptions; struct FillOptionsT; -struct OperatorCode; -struct OperatorCodeT; - -struct Operator; -struct OperatorT; - -struct SubGraph; -struct SubGraphT; - -struct Buffer; -struct BufferT; - -struct Model; -struct ModelT; - -inline const flatbuffers::TypeTable *QuantizationParametersTypeTable(); - -inline const flatbuffers::TypeTable *TensorTypeTable(); - -inline const flatbuffers::TypeTable *Conv2DOptionsTypeTable(); - -inline const flatbuffers::TypeTable *Pool2DOptionsTypeTable(); - -inline const flatbuffers::TypeTable *DepthwiseConv2DOptionsTypeTable(); - -inline const flatbuffers::TypeTable *ConcatEmbeddingsOptionsTypeTable(); - -inline const flatbuffers::TypeTable *LSHProjectionOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SVDFOptionsTypeTable(); - -inline const flatbuffers::TypeTable *RNNOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SequenceRNNOptionsTypeTable(); - -inline const flatbuffers::TypeTable *BidirectionalSequenceRNNOptionsTypeTable(); - -inline const flatbuffers::TypeTable *FullyConnectedOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SoftmaxOptionsTypeTable(); - -inline const flatbuffers::TypeTable *ConcatenationOptionsTypeTable(); - -inline const flatbuffers::TypeTable *AddOptionsTypeTable(); - -inline const flatbuffers::TypeTable *MulOptionsTypeTable(); - -inline const flatbuffers::TypeTable *L2NormOptionsTypeTable(); - 
-inline const flatbuffers::TypeTable *LocalResponseNormalizationOptionsTypeTable(); - -inline const flatbuffers::TypeTable *LSTMOptionsTypeTable(); - -inline const flatbuffers::TypeTable *ResizeBilinearOptionsTypeTable(); - -inline const flatbuffers::TypeTable *CallOptionsTypeTable(); - -inline const flatbuffers::TypeTable *PadOptionsTypeTable(); - -inline const flatbuffers::TypeTable *PadV2OptionsTypeTable(); - -inline const flatbuffers::TypeTable *ReshapeOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SpaceToBatchNDOptionsTypeTable(); - -inline const flatbuffers::TypeTable *BatchToSpaceNDOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SkipGramOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SpaceToDepthOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SubOptionsTypeTable(); - -inline const flatbuffers::TypeTable *DivOptionsTypeTable(); - -inline const flatbuffers::TypeTable *TopKV2OptionsTypeTable(); - -inline const flatbuffers::TypeTable *EmbeddingLookupSparseOptionsTypeTable(); - -inline const flatbuffers::TypeTable *GatherOptionsTypeTable(); - -inline const flatbuffers::TypeTable *TransposeOptionsTypeTable(); - -inline const flatbuffers::TypeTable *ExpOptionsTypeTable(); - -inline const flatbuffers::TypeTable *ReducerOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SqueezeOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SplitOptionsTypeTable(); - -inline const flatbuffers::TypeTable *StridedSliceOptionsTypeTable(); - -inline const flatbuffers::TypeTable *LogSoftmaxOptionsTypeTable(); +struct FloorModOptions; +struct FloorModOptionsT; -inline const flatbuffers::TypeTable *CastOptionsTypeTable(); +struct RangeOptions; +struct RangeOptionsT; -inline const flatbuffers::TypeTable *DequantizeOptionsTypeTable(); +struct LeakyReluOptions; +struct LeakyReluOptionsT; -inline const flatbuffers::TypeTable *MaximumMinimumOptionsTypeTable(); +struct SquaredDifferenceOptions; +struct 
SquaredDifferenceOptionsT; -inline const flatbuffers::TypeTable *TileOptionsTypeTable(); +struct MirrorPadOptions; +struct MirrorPadOptionsT; -inline const flatbuffers::TypeTable *ArgMaxOptionsTypeTable(); +struct UniqueOptions; +struct UniqueOptionsT; -inline const flatbuffers::TypeTable *ArgMinOptionsTypeTable(); +struct ReverseV2Options; +struct ReverseV2OptionsT; -inline const flatbuffers::TypeTable *GreaterOptionsTypeTable(); +struct AddNOptions; +struct AddNOptionsT; -inline const flatbuffers::TypeTable *GreaterEqualOptionsTypeTable(); +struct GatherNdOptions; +struct GatherNdOptionsT; -inline const flatbuffers::TypeTable *LessOptionsTypeTable(); +struct WhereOptions; +struct WhereOptionsT; -inline const flatbuffers::TypeTable *LessEqualOptionsTypeTable(); +struct ReverseSequenceOptions; +struct ReverseSequenceOptionsT; -inline const flatbuffers::TypeTable *NegOptionsTypeTable(); +struct MatrixDiagOptions; +struct MatrixDiagOptionsT; -inline const flatbuffers::TypeTable *SelectOptionsTypeTable(); +struct QuantizeOptions; +struct QuantizeOptionsT; -inline const flatbuffers::TypeTable *SliceOptionsTypeTable(); +struct MatrixSetDiagOptions; +struct MatrixSetDiagOptionsT; -inline const flatbuffers::TypeTable *TransposeConvOptionsTypeTable(); +struct IfOptions; +struct IfOptionsT; -inline const flatbuffers::TypeTable *ExpandDimsOptionsTypeTable(); +struct WhileOptions; +struct WhileOptionsT; -inline const flatbuffers::TypeTable *SparseToDenseOptionsTypeTable(); +struct NonMaxSuppressionV4Options; +struct NonMaxSuppressionV4OptionsT; -inline const flatbuffers::TypeTable *EqualOptionsTypeTable(); +struct NonMaxSuppressionV5Options; +struct NonMaxSuppressionV5OptionsT; -inline const flatbuffers::TypeTable *NotEqualOptionsTypeTable(); +struct ScatterNdOptions; +struct ScatterNdOptionsT; -inline const flatbuffers::TypeTable *ShapeOptionsTypeTable(); - -inline const flatbuffers::TypeTable *PowOptionsTypeTable(); - -inline const flatbuffers::TypeTable 
*FakeQuantOptionsTypeTable(); - -inline const flatbuffers::TypeTable *PackOptionsTypeTable(); - -inline const flatbuffers::TypeTable *LogicalOrOptionsTypeTable(); - -inline const flatbuffers::TypeTable *OneHotOptionsTypeTable(); - -inline const flatbuffers::TypeTable *LogicalAndOptionsTypeTable(); - -inline const flatbuffers::TypeTable *LogicalNotOptionsTypeTable(); - -inline const flatbuffers::TypeTable *UnpackOptionsTypeTable(); - -inline const flatbuffers::TypeTable *FloorDivOptionsTypeTable(); - -inline const flatbuffers::TypeTable *SquareOptionsTypeTable(); - -inline const flatbuffers::TypeTable *ZerosLikeOptionsTypeTable(); - -inline const flatbuffers::TypeTable *FillOptionsTypeTable(); +struct OperatorCode; +struct OperatorCodeT; -inline const flatbuffers::TypeTable *OperatorCodeTypeTable(); +struct Operator; +struct OperatorT; -inline const flatbuffers::TypeTable *OperatorTypeTable(); +struct SubGraph; +struct SubGraphT; -inline const flatbuffers::TypeTable *SubGraphTypeTable(); +struct Buffer; +struct BufferT; -inline const flatbuffers::TypeTable *BufferTypeTable(); +struct Metadata; +struct MetadataT; -inline const flatbuffers::TypeTable *ModelTypeTable(); +struct Model; +struct ModelT; enum TensorType { TensorType_FLOAT32 = 0, @@ -398,11 +336,12 @@ enum TensorType { TensorType_BOOL = 6, TensorType_INT16 = 7, TensorType_COMPLEX64 = 8, + TensorType_INT8 = 9, TensorType_MIN = TensorType_FLOAT32, - TensorType_MAX = TensorType_COMPLEX64 + TensorType_MAX = TensorType_INT8 }; -inline const TensorType (&EnumValuesTensorType())[9] { +inline const TensorType (&EnumValuesTensorType())[10] { static const TensorType values[] = { TensorType_FLOAT32, TensorType_FLOAT16, @@ -412,7 +351,8 @@ inline const TensorType (&EnumValuesTensorType())[9] { TensorType_STRING, TensorType_BOOL, TensorType_INT16, - TensorType_COMPLEX64 + TensorType_COMPLEX64, + TensorType_INT8 }; return values; } @@ -428,23 +368,108 @@ inline const char * const *EnumNamesTensorType() { "BOOL", "INT16", 
"COMPLEX64", + "INT8", nullptr }; return names; } inline const char *EnumNameTensorType(TensorType e) { - if (e < TensorType_FLOAT32 || e > TensorType_COMPLEX64) return ""; - const size_t index = static_cast(e); + if (e < TensorType_FLOAT32 || e > TensorType_INT8) return ""; + const size_t index = static_cast(e); return EnumNamesTensorType()[index]; } +enum QuantizationDetails { + QuantizationDetails_NONE = 0, + QuantizationDetails_CustomQuantization = 1, + QuantizationDetails_MIN = QuantizationDetails_NONE, + QuantizationDetails_MAX = QuantizationDetails_CustomQuantization +}; + +inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2] { + static const QuantizationDetails values[] = { + QuantizationDetails_NONE, + QuantizationDetails_CustomQuantization + }; + return values; +} + +inline const char * const *EnumNamesQuantizationDetails() { + static const char * const names[] = { + "NONE", + "CustomQuantization", + nullptr + }; + return names; +} + +inline const char *EnumNameQuantizationDetails(QuantizationDetails e) { + if (e < QuantizationDetails_NONE || e > QuantizationDetails_CustomQuantization) return ""; + const size_t index = static_cast(e); + return EnumNamesQuantizationDetails()[index]; +} + +template struct QuantizationDetailsTraits { + static const QuantizationDetails enum_value = QuantizationDetails_NONE; +}; + +template<> struct QuantizationDetailsTraits { + static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; +}; + +struct QuantizationDetailsUnion { + QuantizationDetails type; + void *value; + + QuantizationDetailsUnion() : type(QuantizationDetails_NONE), value(nullptr) {} + QuantizationDetailsUnion(QuantizationDetailsUnion&& u) FLATBUFFERS_NOEXCEPT : + type(QuantizationDetails_NONE), value(nullptr) + { std::swap(type, u.type); std::swap(value, u.value); } + QuantizationDetailsUnion(const QuantizationDetailsUnion &) FLATBUFFERS_NOEXCEPT; + QuantizationDetailsUnion &operator=(const QuantizationDetailsUnion 
&u) FLATBUFFERS_NOEXCEPT + { QuantizationDetailsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } + QuantizationDetailsUnion &operator=(QuantizationDetailsUnion &&u) FLATBUFFERS_NOEXCEPT + { std::swap(type, u.type); std::swap(value, u.value); return *this; } + ~QuantizationDetailsUnion() { Reset(); } + + void Reset(); + +#ifndef FLATBUFFERS_CPP98_STL + template + void Set(T&& val) { + using RT = typename std::remove_reference::type; + Reset(); + type = QuantizationDetailsTraits::enum_value; + if (type != QuantizationDetails_NONE) { + value = new RT(std::forward(val)); + } + } +#endif // FLATBUFFERS_CPP98_STL + + static void *UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver); + flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + + CustomQuantizationT *AsCustomQuantization() { + return type == QuantizationDetails_CustomQuantization ? + reinterpret_cast(value) : nullptr; + } + const CustomQuantizationT *AsCustomQuantization() const { + return type == QuantizationDetails_CustomQuantization ? 
+ reinterpret_cast(value) : nullptr; + } +}; + +bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type); +bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); + enum BuiltinOperator { BuiltinOperator_ADD = 0, BuiltinOperator_AVERAGE_POOL_2D = 1, BuiltinOperator_CONCATENATION = 2, BuiltinOperator_CONV_2D = 3, BuiltinOperator_DEPTHWISE_CONV_2D = 4, + BuiltinOperator_DEPTH_TO_SPACE = 5, BuiltinOperator_DEQUANTIZE = 6, BuiltinOperator_EMBEDDING_LOOKUP = 7, BuiltinOperator_FLOOR = 8, @@ -534,17 +559,46 @@ enum BuiltinOperator { BuiltinOperator_SQUARE = 92, BuiltinOperator_ZEROS_LIKE = 93, BuiltinOperator_FILL = 94, + BuiltinOperator_FLOOR_MOD = 95, + BuiltinOperator_RANGE = 96, + BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97, + BuiltinOperator_LEAKY_RELU = 98, + BuiltinOperator_SQUARED_DIFFERENCE = 99, + BuiltinOperator_MIRROR_PAD = 100, + BuiltinOperator_ABS = 101, + BuiltinOperator_SPLIT_V = 102, + BuiltinOperator_UNIQUE = 103, + BuiltinOperator_CEIL = 104, + BuiltinOperator_REVERSE_V2 = 105, + BuiltinOperator_ADD_N = 106, + BuiltinOperator_GATHER_ND = 107, + BuiltinOperator_COS = 108, + BuiltinOperator_WHERE = 109, + BuiltinOperator_RANK = 110, + BuiltinOperator_ELU = 111, + BuiltinOperator_REVERSE_SEQUENCE = 112, + BuiltinOperator_MATRIX_DIAG = 113, + BuiltinOperator_QUANTIZE = 114, + BuiltinOperator_MATRIX_SET_DIAG = 115, + BuiltinOperator_ROUND = 116, + BuiltinOperator_HARD_SWISH = 117, + BuiltinOperator_IF = 118, + BuiltinOperator_WHILE = 119, + BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120, + BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121, + BuiltinOperator_SCATTER_ND = 122, BuiltinOperator_MIN = BuiltinOperator_ADD, - BuiltinOperator_MAX = BuiltinOperator_FILL + BuiltinOperator_MAX = BuiltinOperator_SCATTER_ND }; -inline const BuiltinOperator (&EnumValuesBuiltinOperator())[94] { +inline const BuiltinOperator 
(&EnumValuesBuiltinOperator())[123] { static const BuiltinOperator values[] = { BuiltinOperator_ADD, BuiltinOperator_AVERAGE_POOL_2D, BuiltinOperator_CONCATENATION, BuiltinOperator_CONV_2D, BuiltinOperator_DEPTHWISE_CONV_2D, + BuiltinOperator_DEPTH_TO_SPACE, BuiltinOperator_DEQUANTIZE, BuiltinOperator_EMBEDDING_LOOKUP, BuiltinOperator_FLOOR, @@ -633,7 +687,35 @@ inline const BuiltinOperator (&EnumValuesBuiltinOperator())[94] { BuiltinOperator_REDUCE_ANY, BuiltinOperator_SQUARE, BuiltinOperator_ZEROS_LIKE, - BuiltinOperator_FILL + BuiltinOperator_FILL, + BuiltinOperator_FLOOR_MOD, + BuiltinOperator_RANGE, + BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, + BuiltinOperator_LEAKY_RELU, + BuiltinOperator_SQUARED_DIFFERENCE, + BuiltinOperator_MIRROR_PAD, + BuiltinOperator_ABS, + BuiltinOperator_SPLIT_V, + BuiltinOperator_UNIQUE, + BuiltinOperator_CEIL, + BuiltinOperator_REVERSE_V2, + BuiltinOperator_ADD_N, + BuiltinOperator_GATHER_ND, + BuiltinOperator_COS, + BuiltinOperator_WHERE, + BuiltinOperator_RANK, + BuiltinOperator_ELU, + BuiltinOperator_REVERSE_SEQUENCE, + BuiltinOperator_MATRIX_DIAG, + BuiltinOperator_QUANTIZE, + BuiltinOperator_MATRIX_SET_DIAG, + BuiltinOperator_ROUND, + BuiltinOperator_HARD_SWISH, + BuiltinOperator_IF, + BuiltinOperator_WHILE, + BuiltinOperator_NON_MAX_SUPPRESSION_V4, + BuiltinOperator_NON_MAX_SUPPRESSION_V5, + BuiltinOperator_SCATTER_ND }; return values; } @@ -645,7 +727,7 @@ inline const char * const *EnumNamesBuiltinOperator() { "CONCATENATION", "CONV_2D", "DEPTHWISE_CONV_2D", - "", + "DEPTH_TO_SPACE", "DEQUANTIZE", "EMBEDDING_LOOKUP", "FLOOR", @@ -735,14 +817,42 @@ inline const char * const *EnumNamesBuiltinOperator() { "SQUARE", "ZEROS_LIKE", "FILL", + "FLOOR_MOD", + "RANGE", + "RESIZE_NEAREST_NEIGHBOR", + "LEAKY_RELU", + "SQUARED_DIFFERENCE", + "MIRROR_PAD", + "ABS", + "SPLIT_V", + "UNIQUE", + "CEIL", + "REVERSE_V2", + "ADD_N", + "GATHER_ND", + "COS", + "WHERE", + "RANK", + "ELU", + "REVERSE_SEQUENCE", + "MATRIX_DIAG", + "QUANTIZE", + 
"MATRIX_SET_DIAG", + "ROUND", + "HARD_SWISH", + "IF", + "WHILE", + "NON_MAX_SUPPRESSION_V4", + "NON_MAX_SUPPRESSION_V5", + "SCATTER_ND", nullptr }; return names; } inline const char *EnumNameBuiltinOperator(BuiltinOperator e) { - if (e < BuiltinOperator_ADD || e > BuiltinOperator_FILL) return ""; - const size_t index = static_cast(e); + if (e < BuiltinOperator_ADD || e > BuiltinOperator_SCATTER_ND) return ""; + const size_t index = static_cast(e); return EnumNamesBuiltinOperator()[index]; } @@ -816,11 +926,40 @@ enum BuiltinOptions { BuiltinOptions_SquareOptions = 66, BuiltinOptions_ZerosLikeOptions = 67, BuiltinOptions_FillOptions = 68, + BuiltinOptions_BidirectionalSequenceLSTMOptions = 69, + BuiltinOptions_BidirectionalSequenceRNNOptions = 70, + BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71, + BuiltinOptions_FloorModOptions = 72, + BuiltinOptions_RangeOptions = 73, + BuiltinOptions_ResizeNearestNeighborOptions = 74, + BuiltinOptions_LeakyReluOptions = 75, + BuiltinOptions_SquaredDifferenceOptions = 76, + BuiltinOptions_MirrorPadOptions = 77, + BuiltinOptions_AbsOptions = 78, + BuiltinOptions_SplitVOptions = 79, + BuiltinOptions_UniqueOptions = 80, + BuiltinOptions_ReverseV2Options = 81, + BuiltinOptions_AddNOptions = 82, + BuiltinOptions_GatherNdOptions = 83, + BuiltinOptions_CosOptions = 84, + BuiltinOptions_WhereOptions = 85, + BuiltinOptions_RankOptions = 86, + BuiltinOptions_ReverseSequenceOptions = 87, + BuiltinOptions_MatrixDiagOptions = 88, + BuiltinOptions_QuantizeOptions = 89, + BuiltinOptions_MatrixSetDiagOptions = 90, + BuiltinOptions_HardSwishOptions = 91, + BuiltinOptions_IfOptions = 92, + BuiltinOptions_WhileOptions = 93, + BuiltinOptions_DepthToSpaceOptions = 94, + BuiltinOptions_NonMaxSuppressionV4Options = 95, + BuiltinOptions_NonMaxSuppressionV5Options = 96, + BuiltinOptions_ScatterNdOptions = 97, BuiltinOptions_MIN = BuiltinOptions_NONE, - BuiltinOptions_MAX = BuiltinOptions_FillOptions + BuiltinOptions_MAX = 
BuiltinOptions_ScatterNdOptions }; -inline const BuiltinOptions (&EnumValuesBuiltinOptions())[69] { +inline const BuiltinOptions (&EnumValuesBuiltinOptions())[98] { static const BuiltinOptions values[] = { BuiltinOptions_NONE, BuiltinOptions_Conv2DOptions, @@ -890,7 +1029,36 @@ inline const BuiltinOptions (&EnumValuesBuiltinOptions())[69] { BuiltinOptions_FloorDivOptions, BuiltinOptions_SquareOptions, BuiltinOptions_ZerosLikeOptions, - BuiltinOptions_FillOptions + BuiltinOptions_FillOptions, + BuiltinOptions_BidirectionalSequenceLSTMOptions, + BuiltinOptions_BidirectionalSequenceRNNOptions, + BuiltinOptions_UnidirectionalSequenceLSTMOptions, + BuiltinOptions_FloorModOptions, + BuiltinOptions_RangeOptions, + BuiltinOptions_ResizeNearestNeighborOptions, + BuiltinOptions_LeakyReluOptions, + BuiltinOptions_SquaredDifferenceOptions, + BuiltinOptions_MirrorPadOptions, + BuiltinOptions_AbsOptions, + BuiltinOptions_SplitVOptions, + BuiltinOptions_UniqueOptions, + BuiltinOptions_ReverseV2Options, + BuiltinOptions_AddNOptions, + BuiltinOptions_GatherNdOptions, + BuiltinOptions_CosOptions, + BuiltinOptions_WhereOptions, + BuiltinOptions_RankOptions, + BuiltinOptions_ReverseSequenceOptions, + BuiltinOptions_MatrixDiagOptions, + BuiltinOptions_QuantizeOptions, + BuiltinOptions_MatrixSetDiagOptions, + BuiltinOptions_HardSwishOptions, + BuiltinOptions_IfOptions, + BuiltinOptions_WhileOptions, + BuiltinOptions_DepthToSpaceOptions, + BuiltinOptions_NonMaxSuppressionV4Options, + BuiltinOptions_NonMaxSuppressionV5Options, + BuiltinOptions_ScatterNdOptions }; return values; } @@ -966,14 +1134,43 @@ inline const char * const *EnumNamesBuiltinOptions() { "SquareOptions", "ZerosLikeOptions", "FillOptions", + "BidirectionalSequenceLSTMOptions", + "BidirectionalSequenceRNNOptions", + "UnidirectionalSequenceLSTMOptions", + "FloorModOptions", + "RangeOptions", + "ResizeNearestNeighborOptions", + "LeakyReluOptions", + "SquaredDifferenceOptions", + "MirrorPadOptions", + "AbsOptions", + 
"SplitVOptions", + "UniqueOptions", + "ReverseV2Options", + "AddNOptions", + "GatherNdOptions", + "CosOptions", + "WhereOptions", + "RankOptions", + "ReverseSequenceOptions", + "MatrixDiagOptions", + "QuantizeOptions", + "MatrixSetDiagOptions", + "HardSwishOptions", + "IfOptions", + "WhileOptions", + "DepthToSpaceOptions", + "NonMaxSuppressionV4Options", + "NonMaxSuppressionV5Options", + "ScatterNdOptions", nullptr }; return names; } inline const char *EnumNameBuiltinOptions(BuiltinOptions e) { - if (e < BuiltinOptions_NONE || e > BuiltinOptions_FillOptions) return ""; - const size_t index = static_cast(e); + if (e < BuiltinOptions_NONE || e > BuiltinOptions_ScatterNdOptions) return ""; + const size_t index = static_cast(e); return EnumNamesBuiltinOptions()[index]; } @@ -1253,6 +1450,122 @@ template<> struct BuiltinOptionsTraits { static const BuiltinOptions enum_value = BuiltinOptions_FillOptions; }; +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions; +}; + +template<> struct 
BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CosOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RankOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_IfOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const 
BuiltinOptions enum_value = BuiltinOptions_WhileOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DepthToSpaceOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ScatterNdOptions; +}; + struct BuiltinOptionsUnion { BuiltinOptions type; void *value; @@ -1273,10 +1586,11 @@ struct BuiltinOptionsUnion { #ifndef FLATBUFFERS_CPP98_STL template void Set(T&& val) { + using RT = typename std::remove_reference::type; Reset(); - type = BuiltinOptionsTraits::enum_value; + type = BuiltinOptionsTraits::enum_value; if (type != BuiltinOptions_NONE) { - value = new T(std::forward(val)); + value = new RT(std::forward(val)); } } #endif // FLATBUFFERS_CPP98_STL @@ -1828,6 +2142,238 @@ struct BuiltinOptionsUnion { return type == BuiltinOptions_FillOptions ? reinterpret_cast(value) : nullptr; } + BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() { + return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + const BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() const { + return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() { + return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + const BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() const { + return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? 
+ reinterpret_cast(value) : nullptr; + } + UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() { + return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + const UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() const { + return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + FloorModOptionsT *AsFloorModOptions() { + return type == BuiltinOptions_FloorModOptions ? + reinterpret_cast(value) : nullptr; + } + const FloorModOptionsT *AsFloorModOptions() const { + return type == BuiltinOptions_FloorModOptions ? + reinterpret_cast(value) : nullptr; + } + RangeOptionsT *AsRangeOptions() { + return type == BuiltinOptions_RangeOptions ? + reinterpret_cast(value) : nullptr; + } + const RangeOptionsT *AsRangeOptions() const { + return type == BuiltinOptions_RangeOptions ? + reinterpret_cast(value) : nullptr; + } + ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() { + return type == BuiltinOptions_ResizeNearestNeighborOptions ? + reinterpret_cast(value) : nullptr; + } + const ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() const { + return type == BuiltinOptions_ResizeNearestNeighborOptions ? + reinterpret_cast(value) : nullptr; + } + LeakyReluOptionsT *AsLeakyReluOptions() { + return type == BuiltinOptions_LeakyReluOptions ? + reinterpret_cast(value) : nullptr; + } + const LeakyReluOptionsT *AsLeakyReluOptions() const { + return type == BuiltinOptions_LeakyReluOptions ? + reinterpret_cast(value) : nullptr; + } + SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() { + return type == BuiltinOptions_SquaredDifferenceOptions ? + reinterpret_cast(value) : nullptr; + } + const SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() const { + return type == BuiltinOptions_SquaredDifferenceOptions ? 
+ reinterpret_cast(value) : nullptr; + } + MirrorPadOptionsT *AsMirrorPadOptions() { + return type == BuiltinOptions_MirrorPadOptions ? + reinterpret_cast(value) : nullptr; + } + const MirrorPadOptionsT *AsMirrorPadOptions() const { + return type == BuiltinOptions_MirrorPadOptions ? + reinterpret_cast(value) : nullptr; + } + AbsOptionsT *AsAbsOptions() { + return type == BuiltinOptions_AbsOptions ? + reinterpret_cast(value) : nullptr; + } + const AbsOptionsT *AsAbsOptions() const { + return type == BuiltinOptions_AbsOptions ? + reinterpret_cast(value) : nullptr; + } + SplitVOptionsT *AsSplitVOptions() { + return type == BuiltinOptions_SplitVOptions ? + reinterpret_cast(value) : nullptr; + } + const SplitVOptionsT *AsSplitVOptions() const { + return type == BuiltinOptions_SplitVOptions ? + reinterpret_cast(value) : nullptr; + } + UniqueOptionsT *AsUniqueOptions() { + return type == BuiltinOptions_UniqueOptions ? + reinterpret_cast(value) : nullptr; + } + const UniqueOptionsT *AsUniqueOptions() const { + return type == BuiltinOptions_UniqueOptions ? + reinterpret_cast(value) : nullptr; + } + ReverseV2OptionsT *AsReverseV2Options() { + return type == BuiltinOptions_ReverseV2Options ? + reinterpret_cast(value) : nullptr; + } + const ReverseV2OptionsT *AsReverseV2Options() const { + return type == BuiltinOptions_ReverseV2Options ? + reinterpret_cast(value) : nullptr; + } + AddNOptionsT *AsAddNOptions() { + return type == BuiltinOptions_AddNOptions ? + reinterpret_cast(value) : nullptr; + } + const AddNOptionsT *AsAddNOptions() const { + return type == BuiltinOptions_AddNOptions ? + reinterpret_cast(value) : nullptr; + } + GatherNdOptionsT *AsGatherNdOptions() { + return type == BuiltinOptions_GatherNdOptions ? + reinterpret_cast(value) : nullptr; + } + const GatherNdOptionsT *AsGatherNdOptions() const { + return type == BuiltinOptions_GatherNdOptions ? + reinterpret_cast(value) : nullptr; + } + CosOptionsT *AsCosOptions() { + return type == BuiltinOptions_CosOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const CosOptionsT *AsCosOptions() const { + return type == BuiltinOptions_CosOptions ? + reinterpret_cast(value) : nullptr; + } + WhereOptionsT *AsWhereOptions() { + return type == BuiltinOptions_WhereOptions ? + reinterpret_cast(value) : nullptr; + } + const WhereOptionsT *AsWhereOptions() const { + return type == BuiltinOptions_WhereOptions ? + reinterpret_cast(value) : nullptr; + } + RankOptionsT *AsRankOptions() { + return type == BuiltinOptions_RankOptions ? + reinterpret_cast(value) : nullptr; + } + const RankOptionsT *AsRankOptions() const { + return type == BuiltinOptions_RankOptions ? + reinterpret_cast(value) : nullptr; + } + ReverseSequenceOptionsT *AsReverseSequenceOptions() { + return type == BuiltinOptions_ReverseSequenceOptions ? + reinterpret_cast(value) : nullptr; + } + const ReverseSequenceOptionsT *AsReverseSequenceOptions() const { + return type == BuiltinOptions_ReverseSequenceOptions ? + reinterpret_cast(value) : nullptr; + } + MatrixDiagOptionsT *AsMatrixDiagOptions() { + return type == BuiltinOptions_MatrixDiagOptions ? + reinterpret_cast(value) : nullptr; + } + const MatrixDiagOptionsT *AsMatrixDiagOptions() const { + return type == BuiltinOptions_MatrixDiagOptions ? + reinterpret_cast(value) : nullptr; + } + QuantizeOptionsT *AsQuantizeOptions() { + return type == BuiltinOptions_QuantizeOptions ? + reinterpret_cast(value) : nullptr; + } + const QuantizeOptionsT *AsQuantizeOptions() const { + return type == BuiltinOptions_QuantizeOptions ? + reinterpret_cast(value) : nullptr; + } + MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() { + return type == BuiltinOptions_MatrixSetDiagOptions ? + reinterpret_cast(value) : nullptr; + } + const MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() const { + return type == BuiltinOptions_MatrixSetDiagOptions ? + reinterpret_cast(value) : nullptr; + } + HardSwishOptionsT *AsHardSwishOptions() { + return type == BuiltinOptions_HardSwishOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const HardSwishOptionsT *AsHardSwishOptions() const { + return type == BuiltinOptions_HardSwishOptions ? + reinterpret_cast(value) : nullptr; + } + IfOptionsT *AsIfOptions() { + return type == BuiltinOptions_IfOptions ? + reinterpret_cast(value) : nullptr; + } + const IfOptionsT *AsIfOptions() const { + return type == BuiltinOptions_IfOptions ? + reinterpret_cast(value) : nullptr; + } + WhileOptionsT *AsWhileOptions() { + return type == BuiltinOptions_WhileOptions ? + reinterpret_cast(value) : nullptr; + } + const WhileOptionsT *AsWhileOptions() const { + return type == BuiltinOptions_WhileOptions ? + reinterpret_cast(value) : nullptr; + } + DepthToSpaceOptionsT *AsDepthToSpaceOptions() { + return type == BuiltinOptions_DepthToSpaceOptions ? + reinterpret_cast(value) : nullptr; + } + const DepthToSpaceOptionsT *AsDepthToSpaceOptions() const { + return type == BuiltinOptions_DepthToSpaceOptions ? + reinterpret_cast(value) : nullptr; + } + NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() { + return type == BuiltinOptions_NonMaxSuppressionV4Options ? + reinterpret_cast(value) : nullptr; + } + const NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() const { + return type == BuiltinOptions_NonMaxSuppressionV4Options ? + reinterpret_cast(value) : nullptr; + } + NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() { + return type == BuiltinOptions_NonMaxSuppressionV5Options ? + reinterpret_cast(value) : nullptr; + } + const NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() const { + return type == BuiltinOptions_NonMaxSuppressionV5Options ? + reinterpret_cast(value) : nullptr; + } + ScatterNdOptionsT *AsScatterNdOptions() { + return type == BuiltinOptions_ScatterNdOptions ? + reinterpret_cast(value) : nullptr; + } + const ScatterNdOptionsT *AsScatterNdOptions() const { + return type == BuiltinOptions_ScatterNdOptions ? 
+ reinterpret_cast(value) : nullptr; + } }; bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); @@ -1859,7 +2405,7 @@ inline const char * const *EnumNamesPadding() { inline const char *EnumNamePadding(Padding e) { if (e < Padding_SAME || e > Padding_VALID) return ""; - const size_t index = static_cast(e); + const size_t index = static_cast(e); return EnumNamesPadding()[index]; } @@ -1901,7 +2447,7 @@ inline const char * const *EnumNamesActivationFunctionType() { inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) { if (e < ActivationFunctionType_NONE || e > ActivationFunctionType_SIGN_BIT) return ""; - const size_t index = static_cast(e); + const size_t index = static_cast(e); return EnumNamesActivationFunctionType()[index]; } @@ -1934,7 +2480,7 @@ inline const char * const *EnumNamesLSHProjectionType() { inline const char *EnumNameLSHProjectionType(LSHProjectionType e) { if (e < LSHProjectionType_UNKNOWN || e > LSHProjectionType_DENSE) return ""; - const size_t index = static_cast(e); + const size_t index = static_cast(e); return EnumNamesLSHProjectionType()[index]; } @@ -1964,7 +2510,7 @@ inline const char * const *EnumNamesFullyConnectedOptionsWeightsFormat() { inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) { if (e < FullyConnectedOptionsWeightsFormat_DEFAULT || e > FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8) return ""; - const size_t index = static_cast(e); + const size_t index = static_cast(e); return EnumNamesFullyConnectedOptionsWeightsFormat()[index]; } @@ -1994,7 +2540,7 @@ inline const char * const *EnumNamesLSTMKernelType() { inline const char *EnumNameLSTMKernelType(LSTMKernelType e) { if (e < LSTMKernelType_FULL || e > LSTMKernelType_BASIC) return ""; - const size_t index = static_cast(e); + const size_t index = static_cast(e); return EnumNamesLSTMKernelType()[index]; } @@ -2027,81 +2573,178 @@ inline const char * const 
*EnumNamesCombinerType() { inline const char *EnumNameCombinerType(CombinerType e) { if (e < CombinerType_SUM || e > CombinerType_SQRTN) return ""; - const size_t index = static_cast(e); + const size_t index = static_cast(e); return EnumNamesCombinerType()[index]; } -enum CustomOptionsFormat { - CustomOptionsFormat_FLEXBUFFERS = 0, - CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS, - CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS +enum MirrorPadMode { + MirrorPadMode_REFLECT = 0, + MirrorPadMode_SYMMETRIC = 1, + MirrorPadMode_MIN = MirrorPadMode_REFLECT, + MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC }; -inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] { - static const CustomOptionsFormat values[] = { - CustomOptionsFormat_FLEXBUFFERS +inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2] { + static const MirrorPadMode values[] = { + MirrorPadMode_REFLECT, + MirrorPadMode_SYMMETRIC }; return values; } -inline const char * const *EnumNamesCustomOptionsFormat() { +inline const char * const *EnumNamesMirrorPadMode() { static const char * const names[] = { - "FLEXBUFFERS", + "REFLECT", + "SYMMETRIC", nullptr }; return names; } -inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) { - if (e < CustomOptionsFormat_FLEXBUFFERS || e > CustomOptionsFormat_FLEXBUFFERS) return ""; - const size_t index = static_cast(e); - return EnumNamesCustomOptionsFormat()[index]; +inline const char *EnumNameMirrorPadMode(MirrorPadMode e) { + if (e < MirrorPadMode_REFLECT || e > MirrorPadMode_SYMMETRIC) return ""; + const size_t index = static_cast(e); + return EnumNamesMirrorPadMode()[index]; } -struct QuantizationParametersT : public flatbuffers::NativeTable { +enum CustomOptionsFormat { + CustomOptionsFormat_FLEXBUFFERS = 0, + CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS, + CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS +}; + +inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] { + 
static const CustomOptionsFormat values[] = { + CustomOptionsFormat_FLEXBUFFERS + }; + return values; +} + +inline const char * const *EnumNamesCustomOptionsFormat() { + static const char * const names[] = { + "FLEXBUFFERS", + nullptr + }; + return names; +} + +inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) { + if (e < CustomOptionsFormat_FLEXBUFFERS || e > CustomOptionsFormat_FLEXBUFFERS) return ""; + const size_t index = static_cast(e); + return EnumNamesCustomOptionsFormat()[index]; +} + +struct CustomQuantizationT : public flatbuffers::NativeTable { + typedef CustomQuantization TableType; + std::vector custom; + CustomQuantizationT() { + } +}; + +struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CustomQuantizationT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CUSTOM = 4 + }; + const flatbuffers::Vector *custom() const { + return GetPointer *>(VT_CUSTOM); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_CUSTOM) && + verifier.VerifyVector(custom()) && + verifier.EndTable(); + } + CustomQuantizationT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CustomQuantizationBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_custom(flatbuffers::Offset> custom) { + fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom); + } + explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &); + 
flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCustomQuantization( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> custom = 0) { + CustomQuantizationBuilder builder_(_fbb); + builder_.add_custom(custom); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateCustomQuantizationDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *custom = nullptr) { + auto custom__ = custom ? _fbb.CreateVector(*custom) : 0; + return tflite::CreateCustomQuantization( + _fbb, + custom__); +} + +flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizationParametersT : public flatbuffers::NativeTable { typedef QuantizationParameters TableType; std::vector min; std::vector max; std::vector scale; - std::vector zeroPoint; - QuantizationParametersT() { + std::vector zero_point; + QuantizationDetailsUnion details; + int32_t quantized_dimension; + QuantizationParametersT() + : quantized_dimension(0) { } }; struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef QuantizationParametersT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizationParametersTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_MIN = 4, VT_MAX = 6, VT_SCALE = 8, - VT_ZEROPOINT = 10 + VT_ZERO_POINT = 10, + VT_DETAILS_TYPE = 12, + VT_DETAILS = 14, + VT_QUANTIZED_DIMENSION = 16 }; const flatbuffers::Vector *min() const { return GetPointer *>(VT_MIN); } - flatbuffers::Vector *mutable_min() { - return GetPointer *>(VT_MIN); - } const flatbuffers::Vector *max() const { return GetPointer *>(VT_MAX); } - flatbuffers::Vector *mutable_max() { - return GetPointer *>(VT_MAX); - } const flatbuffers::Vector *scale() const { 
return GetPointer *>(VT_SCALE); } - flatbuffers::Vector *mutable_scale() { - return GetPointer *>(VT_SCALE); + const flatbuffers::Vector *zero_point() const { + return GetPointer *>(VT_ZERO_POINT); + } + QuantizationDetails details_type() const { + return static_cast(GetField(VT_DETAILS_TYPE, 0)); + } + const void *details() const { + return GetPointer(VT_DETAILS); } - const flatbuffers::Vector *zeroPoint() const { - return GetPointer *>(VT_ZEROPOINT); + template const T *details_as() const; + const CustomQuantization *details_as_CustomQuantization() const { + return details_type() == QuantizationDetails_CustomQuantization ? static_cast(details()) : nullptr; } - flatbuffers::Vector *mutable_zeroPoint() { - return GetPointer *>(VT_ZEROPOINT); + int32_t quantized_dimension() const { + return GetField(VT_QUANTIZED_DIMENSION, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && @@ -2111,8 +2754,12 @@ struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Tab verifier.VerifyVector(max()) && VerifyOffset(verifier, VT_SCALE) && verifier.VerifyVector(scale()) && - VerifyOffset(verifier, VT_ZEROPOINT) && - verifier.VerifyVector(zeroPoint()) && + VerifyOffset(verifier, VT_ZERO_POINT) && + verifier.VerifyVector(zero_point()) && + VerifyField(verifier, VT_DETAILS_TYPE) && + VerifyOffset(verifier, VT_DETAILS) && + VerifyQuantizationDetails(verifier, details(), details_type()) && + VerifyField(verifier, VT_QUANTIZED_DIMENSION) && verifier.EndTable(); } QuantizationParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -2120,6 +2767,10 @@ struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Tab static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; +template<> inline const CustomQuantization *QuantizationParameters::details_as() const { + 
return details_as_CustomQuantization(); +} + struct QuantizationParametersBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; @@ -2132,8 +2783,17 @@ struct QuantizationParametersBuilder { void add_scale(flatbuffers::Offset> scale) { fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale); } - void add_zeroPoint(flatbuffers::Offset> zeroPoint) { - fbb_.AddOffset(QuantizationParameters::VT_ZEROPOINT, zeroPoint); + void add_zero_point(flatbuffers::Offset> zero_point) { + fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point); + } + void add_details_type(QuantizationDetails details_type) { + fbb_.AddElement(QuantizationParameters::VT_DETAILS_TYPE, static_cast(details_type), 0); + } + void add_details(flatbuffers::Offset details) { + fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details); + } + void add_quantized_dimension(int32_t quantized_dimension) { + fbb_.AddElement(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, 0); } explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { @@ -2152,12 +2812,18 @@ inline flatbuffers::Offset CreateQuantizationParameters( flatbuffers::Offset> min = 0, flatbuffers::Offset> max = 0, flatbuffers::Offset> scale = 0, - flatbuffers::Offset> zeroPoint = 0) { + flatbuffers::Offset> zero_point = 0, + QuantizationDetails details_type = QuantizationDetails_NONE, + flatbuffers::Offset details = 0, + int32_t quantized_dimension = 0) { QuantizationParametersBuilder builder_(_fbb); - builder_.add_zeroPoint(zeroPoint); + builder_.add_quantized_dimension(quantized_dimension); + builder_.add_details(details); + builder_.add_zero_point(zero_point); builder_.add_scale(scale); builder_.add_max(max); builder_.add_min(min); + builder_.add_details_type(details_type); return builder_.Finish(); } @@ -2166,17 +2832,23 @@ inline flatbuffers::Offset CreateQuantizationParametersD const std::vector *min = nullptr, const std::vector *max = nullptr, const std::vector 
*scale = nullptr, - const std::vector *zeroPoint = nullptr) { + const std::vector *zero_point = nullptr, + QuantizationDetails details_type = QuantizationDetails_NONE, + flatbuffers::Offset details = 0, + int32_t quantized_dimension = 0) { auto min__ = min ? _fbb.CreateVector(*min) : 0; auto max__ = max ? _fbb.CreateVector(*max) : 0; auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; - auto zeroPoint__ = zeroPoint ? _fbb.CreateVector(*zeroPoint) : 0; + auto zero_point__ = zero_point ? _fbb.CreateVector(*zero_point) : 0; return tflite::CreateQuantizationParameters( _fbb, min__, max__, scale__, - zeroPoint__); + zero_point__, + details_type, + details, + quantized_dimension); } flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); @@ -2198,9 +2870,6 @@ struct TensorT : public flatbuffers::NativeTable { struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef TensorT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TensorTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_SHAPE = 4, VT_TYPE = 6, @@ -2212,39 +2881,21 @@ struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { const flatbuffers::Vector *shape() const { return GetPointer *>(VT_SHAPE); } - flatbuffers::Vector *mutable_shape() { - return GetPointer *>(VT_SHAPE); - } TensorType type() const { return static_cast(GetField(VT_TYPE, 0)); } - bool mutate_type(TensorType _type) { - return SetField(VT_TYPE, static_cast(_type), 0); - } uint32_t buffer() const { return GetField(VT_BUFFER, 0); } - bool mutate_buffer(uint32_t _buffer) { - return SetField(VT_BUFFER, _buffer, 0); - } const flatbuffers::String *name() const { return GetPointer(VT_NAME); } - flatbuffers::String *mutable_name() { - return GetPointer(VT_NAME); - } const QuantizationParameters *quantization() const { 
return GetPointer(VT_QUANTIZATION); } - QuantizationParameters *mutable_quantization() { - return GetPointer(VT_QUANTIZATION); - } bool is_variable() const { return GetField(VT_IS_VARIABLE, 0) != 0; } - bool mutate_is_variable(bool _is_variable) { - return SetField(VT_IS_VARIABLE, static_cast(_is_variable), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) && @@ -2356,9 +3007,6 @@ struct Conv2DOptionsT : public flatbuffers::NativeTable { struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef Conv2DOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return Conv2DOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_PADDING = 4, VT_STRIDE_W = 6, @@ -2370,39 +3018,21 @@ struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } - bool mutate_padding(Padding _padding) { - return SetField(VT_PADDING, static_cast(_padding), 0); - } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } - bool mutate_stride_w(int32_t _stride_w) { - return SetField(VT_STRIDE_W, _stride_w, 0); - } int32_t stride_h() const { return GetField(VT_STRIDE_H, 0); } - bool mutate_stride_h(int32_t _stride_h) { - return SetField(VT_STRIDE_H, _stride_h, 0); - } ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } int32_t dilation_w_factor() const { return GetField(VT_DILATION_W_FACTOR, 1); } - bool mutate_dilation_w_factor(int32_t _dilation_w_factor) { - return SetField(VT_DILATION_W_FACTOR, _dilation_w_factor, 1); - } int32_t dilation_h_factor() const { return 
GetField(VT_DILATION_H_FACTOR, 1); } - bool mutate_dilation_h_factor(int32_t _dilation_h_factor) { - return SetField(VT_DILATION_H_FACTOR, _dilation_h_factor, 1); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING) && @@ -2491,9 +3121,6 @@ struct Pool2DOptionsT : public flatbuffers::NativeTable { struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef Pool2DOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return Pool2DOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_PADDING = 4, VT_STRIDE_W = 6, @@ -2505,39 +3132,21 @@ struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } - bool mutate_padding(Padding _padding) { - return SetField(VT_PADDING, static_cast(_padding), 0); - } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } - bool mutate_stride_w(int32_t _stride_w) { - return SetField(VT_STRIDE_W, _stride_w, 0); - } int32_t stride_h() const { return GetField(VT_STRIDE_H, 0); } - bool mutate_stride_h(int32_t _stride_h) { - return SetField(VT_STRIDE_H, _stride_h, 0); - } int32_t filter_width() const { return GetField(VT_FILTER_WIDTH, 0); } - bool mutate_filter_width(int32_t _filter_width) { - return SetField(VT_FILTER_WIDTH, _filter_width, 0); - } int32_t filter_height() const { return GetField(VT_FILTER_HEIGHT, 0); } - bool mutate_filter_height(int32_t _filter_height) { - return SetField(VT_FILTER_HEIGHT, _filter_height, 0); - } ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier 
&verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING) && @@ -2628,9 +3237,6 @@ struct DepthwiseConv2DOptionsT : public flatbuffers::NativeTable { struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef DepthwiseConv2DOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return DepthwiseConv2DOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_PADDING = 4, VT_STRIDE_W = 6, @@ -2643,45 +3249,24 @@ struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Tab Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } - bool mutate_padding(Padding _padding) { - return SetField(VT_PADDING, static_cast(_padding), 0); - } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } - bool mutate_stride_w(int32_t _stride_w) { - return SetField(VT_STRIDE_W, _stride_w, 0); - } int32_t stride_h() const { return GetField(VT_STRIDE_H, 0); } - bool mutate_stride_h(int32_t _stride_h) { - return SetField(VT_STRIDE_H, _stride_h, 0); - } int32_t depth_multiplier() const { return GetField(VT_DEPTH_MULTIPLIER, 0); } - bool mutate_depth_multiplier(int32_t _depth_multiplier) { - return SetField(VT_DEPTH_MULTIPLIER, _depth_multiplier, 0); - } ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } int32_t dilation_w_factor() const { return GetField(VT_DILATION_W_FACTOR, 1); } - bool mutate_dilation_w_factor(int32_t _dilation_w_factor) { - return SetField(VT_DILATION_W_FACTOR, _dilation_w_factor, 1); - } int32_t dilation_h_factor() const { return GetField(VT_DILATION_H_FACTOR, 1); } - bool mutate_dilation_h_factor(int32_t _dilation_h_factor) { - return 
SetField(VT_DILATION_H_FACTOR, _dilation_h_factor, 1); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING) && @@ -2768,9 +3353,6 @@ struct ConcatEmbeddingsOptionsT : public flatbuffers::NativeTable { struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ConcatEmbeddingsOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ConcatEmbeddingsOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NUM_CHANNELS = 4, VT_NUM_COLUMNS_PER_CHANNEL = 6, @@ -2779,21 +3361,12 @@ struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Ta int32_t num_channels() const { return GetField(VT_NUM_CHANNELS, 0); } - bool mutate_num_channels(int32_t _num_channels) { - return SetField(VT_NUM_CHANNELS, _num_channels, 0); - } const flatbuffers::Vector *num_columns_per_channel() const { return GetPointer *>(VT_NUM_COLUMNS_PER_CHANNEL); } - flatbuffers::Vector *mutable_num_columns_per_channel() { - return GetPointer *>(VT_NUM_COLUMNS_PER_CHANNEL); - } const flatbuffers::Vector *embedding_dim_per_channel() const { return GetPointer *>(VT_EMBEDDING_DIM_PER_CHANNEL); } - flatbuffers::Vector *mutable_embedding_dim_per_channel() { - return GetPointer *>(VT_EMBEDDING_DIM_PER_CHANNEL); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM_CHANNELS) && @@ -2870,18 +3443,12 @@ struct LSHProjectionOptionsT : public flatbuffers::NativeTable { struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LSHProjectionOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LSHProjectionOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_TYPE = 4 }; LSHProjectionType type() const { return 
static_cast(GetField(VT_TYPE, 0)); } - bool mutate_type(LSHProjectionType _type) { - return SetField(VT_TYPE, static_cast(_type), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_TYPE) && @@ -2932,9 +3499,6 @@ struct SVDFOptionsT : public flatbuffers::NativeTable { struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SVDFOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SVDFOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_RANK = 4, VT_FUSED_ACTIVATION_FUNCTION = 6 @@ -2942,15 +3506,9 @@ struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { int32_t rank() const { return GetField(VT_RANK, 0); } - bool mutate_rank(int32_t _rank) { - return SetField(VT_RANK, _rank, 0); - } ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_RANK) && @@ -3005,18 +3563,12 @@ struct RNNOptionsT : public flatbuffers::NativeTable { struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef RNNOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return RNNOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FUSED_ACTIVATION_FUNCTION = 4 }; ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, 
static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && @@ -3067,9 +3619,6 @@ struct SequenceRNNOptionsT : public flatbuffers::NativeTable { struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SequenceRNNOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SequenceRNNOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_TIME_MAJOR = 4, VT_FUSED_ACTIVATION_FUNCTION = 6 @@ -3077,15 +3626,9 @@ struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { bool time_major() const { return GetField(VT_TIME_MAJOR, 0) != 0; } - bool mutate_time_major(bool _time_major) { - return SetField(VT_TIME_MAJOR, static_cast(_time_major), 0); - } ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_TIME_MAJOR) && @@ -3134,37 +3677,35 @@ struct BidirectionalSequenceRNNOptionsT : public flatbuffers::NativeTable { typedef BidirectionalSequenceRNNOptions TableType; bool time_major; ActivationFunctionType fused_activation_function; + bool merge_outputs; BidirectionalSequenceRNNOptionsT() : time_major(false), - fused_activation_function(ActivationFunctionType_NONE) { + fused_activation_function(ActivationFunctionType_NONE), + merge_outputs(false) { } }; struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BidirectionalSequenceRNNOptionsT NativeTableType; - static const flatbuffers::TypeTable 
*MiniReflectTypeTable() { - return BidirectionalSequenceRNNOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_TIME_MAJOR = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6 + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_MERGE_OUTPUTS = 8 }; bool time_major() const { return GetField(VT_TIME_MAJOR, 0) != 0; } - bool mutate_time_major(bool _time_major) { - return SetField(VT_TIME_MAJOR, static_cast(_time_major), 0); - } ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); + bool merge_outputs() const { + return GetField(VT_MERGE_OUTPUTS, 0) != 0; } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_TIME_MAJOR) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField(verifier, VT_MERGE_OUTPUTS) && verifier.EndTable(); } BidirectionalSequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -3181,6 +3722,9 @@ struct BidirectionalSequenceRNNOptionsBuilder { void add_fused_activation_function(ActivationFunctionType fused_activation_function) { fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } + void add_merge_outputs(bool merge_outputs) { + fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); + } explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -3196,8 +3740,10 @@ struct BidirectionalSequenceRNNOptionsBuilder { inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions( flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false, - ActivationFunctionType 
fused_activation_function = ActivationFunctionType_NONE) { + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool merge_outputs = false) { BidirectionalSequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_merge_outputs(merge_outputs); builder_.add_fused_activation_function(fused_activation_function); builder_.add_time_major(time_major); return builder_.Finish(); @@ -3209,37 +3755,35 @@ struct FullyConnectedOptionsT : public flatbuffers::NativeTable { typedef FullyConnectedOptions TableType; ActivationFunctionType fused_activation_function; FullyConnectedOptionsWeightsFormat weights_format; + bool keep_num_dims; FullyConnectedOptionsT() : fused_activation_function(ActivationFunctionType_NONE), - weights_format(FullyConnectedOptionsWeightsFormat_DEFAULT) { + weights_format(FullyConnectedOptionsWeightsFormat_DEFAULT), + keep_num_dims(false) { } }; struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef FullyConnectedOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return FullyConnectedOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_WEIGHTS_FORMAT = 6 + VT_WEIGHTS_FORMAT = 6, + VT_KEEP_NUM_DIMS = 8 }; ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } FullyConnectedOptionsWeightsFormat weights_format() const { return static_cast(GetField(VT_WEIGHTS_FORMAT, 0)); } - bool mutate_weights_format(FullyConnectedOptionsWeightsFormat _weights_format) { - return SetField(VT_WEIGHTS_FORMAT, static_cast(_weights_format), 0); + bool keep_num_dims() const { + return GetField(VT_KEEP_NUM_DIMS, 0) != 0; } bool 
Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && VerifyField(verifier, VT_WEIGHTS_FORMAT) && + VerifyField(verifier, VT_KEEP_NUM_DIMS) && verifier.EndTable(); } FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -3256,6 +3800,9 @@ struct FullyConnectedOptionsBuilder { void add_weights_format(FullyConnectedOptionsWeightsFormat weights_format) { fbb_.AddElement(FullyConnectedOptions::VT_WEIGHTS_FORMAT, static_cast(weights_format), 0); } + void add_keep_num_dims(bool keep_num_dims) { + fbb_.AddElement(FullyConnectedOptions::VT_KEEP_NUM_DIMS, static_cast(keep_num_dims), 0); + } explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -3271,8 +3818,10 @@ struct FullyConnectedOptionsBuilder { inline flatbuffers::Offset CreateFullyConnectedOptions( flatbuffers::FlatBufferBuilder &_fbb, ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, - FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT) { + FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT, + bool keep_num_dims = false) { FullyConnectedOptionsBuilder builder_(_fbb); + builder_.add_keep_num_dims(keep_num_dims); builder_.add_weights_format(weights_format); builder_.add_fused_activation_function(fused_activation_function); return builder_.Finish(); @@ -3290,18 +3839,12 @@ struct SoftmaxOptionsT : public flatbuffers::NativeTable { struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SoftmaxOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SoftmaxOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_BETA = 4 }; float beta() const { return GetField(VT_BETA, 0.0f); } - bool 
mutate_beta(float _beta) { - return SetField(VT_BETA, _beta, 0.0f); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_BETA) && @@ -3352,9 +3895,6 @@ struct ConcatenationOptionsT : public flatbuffers::NativeTable { struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ConcatenationOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ConcatenationOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_AXIS = 4, VT_FUSED_ACTIVATION_FUNCTION = 6 @@ -3362,15 +3902,9 @@ struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table int32_t axis() const { return GetField(VT_AXIS, 0); } - bool mutate_axis(int32_t _axis) { - return SetField(VT_AXIS, _axis, 0); - } ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS) && @@ -3425,18 +3959,12 @@ struct AddOptionsT : public flatbuffers::NativeTable { struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef AddOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return AddOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FUSED_ACTIVATION_FUNCTION = 4 }; ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, 
static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && @@ -3485,18 +4013,12 @@ struct MulOptionsT : public flatbuffers::NativeTable { struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef MulOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return MulOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FUSED_ACTIVATION_FUNCTION = 4 }; ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && @@ -3545,18 +4067,12 @@ struct L2NormOptionsT : public flatbuffers::NativeTable { struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef L2NormOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return L2NormOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FUSED_ACTIVATION_FUNCTION = 4 }; ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && @@ -3611,9 +4127,6 @@ struct LocalResponseNormalizationOptionsT : public 
flatbuffers::NativeTable { struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LocalResponseNormalizationOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LocalResponseNormalizationOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_RADIUS = 4, VT_BIAS = 6, @@ -3623,27 +4136,15 @@ struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatb int32_t radius() const { return GetField(VT_RADIUS, 0); } - bool mutate_radius(int32_t _radius) { - return SetField(VT_RADIUS, _radius, 0); - } float bias() const { return GetField(VT_BIAS, 0.0f); } - bool mutate_bias(float _bias) { - return SetField(VT_BIAS, _bias, 0.0f); - } float alpha() const { return GetField(VT_ALPHA, 0.0f); } - bool mutate_alpha(float _alpha) { - return SetField(VT_ALPHA, _alpha, 0.0f); - } float beta() const { return GetField(VT_BETA, 0.0f); } - bool mutate_beta(float _beta) { - return SetField(VT_BETA, _beta, 0.0f); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_RADIUS) && @@ -3716,9 +4217,6 @@ struct LSTMOptionsT : public flatbuffers::NativeTable { struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LSTMOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LSTMOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FUSED_ACTIVATION_FUNCTION = 4, VT_CELL_CLIP = 6, @@ -3728,27 +4226,15 @@ struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); 
- } float cell_clip() const { return GetField(VT_CELL_CLIP, 0.0f); } - bool mutate_cell_clip(float _cell_clip) { - return SetField(VT_CELL_CLIP, _cell_clip, 0.0f); - } float proj_clip() const { return GetField(VT_PROJ_CLIP, 0.0f); } - bool mutate_proj_clip(float _proj_clip) { - return SetField(VT_PROJ_CLIP, _proj_clip, 0.0f); - } LSTMKernelType kernel_type() const { return static_cast(GetField(VT_KERNEL_TYPE, 0)); } - bool mutate_kernel_type(LSTMKernelType _kernel_type) { - return SetField(VT_KERNEL_TYPE, static_cast(_kernel_type), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && @@ -3805,120 +4291,354 @@ inline flatbuffers::Offset CreateLSTMOptions( flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct ResizeBilinearOptionsT : public flatbuffers::NativeTable { - typedef ResizeBilinearOptions TableType; - bool alignCorners; - ResizeBilinearOptionsT() - : alignCorners(false) { +struct UnidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { + typedef UnidirectionalSequenceLSTMOptions TableType; + ActivationFunctionType fused_activation_function; + float cell_clip; + float proj_clip; + bool time_major; + UnidirectionalSequenceLSTMOptionsT() + : fused_activation_function(ActivationFunctionType_NONE), + cell_clip(0.0f), + proj_clip(0.0f), + time_major(false) { } }; -struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeBilinearOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ResizeBilinearOptionsTypeTable(); - } +struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnidirectionalSequenceLSTMOptionsT NativeTableType; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - 
VT_ALIGNCORNERS = 8 + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_TIME_MAJOR = 10 }; - bool alignCorners() const { - return GetField(VT_ALIGNCORNERS, 0) != 0; + ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { + return GetField(VT_CELL_CLIP, 0.0f); + } + float proj_clip() const { + return GetField(VT_PROJ_CLIP, 0.0f); } - bool mutate_alignCorners(bool _alignCorners) { - return SetField(VT_ALIGNCORNERS, static_cast(_alignCorners), 0); + bool time_major() const { + return GetField(VT_TIME_MAJOR, 0) != 0; } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALIGNCORNERS) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField(verifier, VT_CELL_CLIP) && + VerifyField(verifier, VT_PROJ_CLIP) && + VerifyField(verifier, VT_TIME_MAJOR) && verifier.EndTable(); } - ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + UnidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct ResizeBilinearOptionsBuilder { +struct UnidirectionalSequenceLSTMOptionsBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_alignCorners(bool alignCorners) { - 
fbb_.AddElement(ResizeBilinearOptions::VT_ALIGNCORNERS, static_cast(alignCorners), 0); + void add_fused_activation_function(ActivationFunctionType fused_activation_function) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } - explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + void add_cell_clip(float cell_clip) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_time_major(bool time_major) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 0); + } + explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &); - flatbuffers::Offset Finish() { + UnidirectionalSequenceLSTMOptionsBuilder &operator=(const UnidirectionalSequenceLSTMOptionsBuilder &); + flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateResizeBilinearOptions( +inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions( flatbuffers::FlatBufferBuilder &_fbb, - bool alignCorners = false) { - ResizeBilinearOptionsBuilder builder_(_fbb); - builder_.add_alignCorners(alignCorners); + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + float cell_clip = 0.0f, + float proj_clip = 0.0f, + bool time_major = false) { + UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_time_major(time_major); + builder_.add_fused_activation_function(fused_activation_function); return 
builder_.Finish(); } -flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct CallOptionsT : public flatbuffers::NativeTable { - typedef CallOptions TableType; - uint32_t subgraph; - CallOptionsT() - : subgraph(0) { +struct BidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { + typedef BidirectionalSequenceLSTMOptions TableType; + ActivationFunctionType fused_activation_function; + float cell_clip; + float proj_clip; + bool merge_outputs; + bool time_major; + BidirectionalSequenceLSTMOptionsT() + : fused_activation_function(ActivationFunctionType_NONE), + cell_clip(0.0f), + proj_clip(0.0f), + merge_outputs(false), + time_major(true) { } }; -struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CallOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return CallOptionsTypeTable(); - } +struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BidirectionalSequenceLSTMOptionsT NativeTableType; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SUBGRAPH = 4 + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_MERGE_OUTPUTS = 10, + VT_TIME_MAJOR = 12 }; - uint32_t subgraph() const { - return GetField(VT_SUBGRAPH, 0); + ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { + return GetField(VT_CELL_CLIP, 0.0f); + } + float proj_clip() const { + return GetField(VT_PROJ_CLIP, 0.0f); } - bool mutate_subgraph(uint32_t _subgraph) { - return SetField(VT_SUBGRAPH, 
_subgraph, 0); + bool merge_outputs() const { + return GetField(VT_MERGE_OUTPUTS, 0) != 0; + } + bool time_major() const { + return GetField(VT_TIME_MAJOR, 1) != 0; } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SUBGRAPH) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField(verifier, VT_CELL_CLIP) && + VerifyField(verifier, VT_PROJ_CLIP) && + VerifyField(verifier, VT_MERGE_OUTPUTS) && + VerifyField(verifier, VT_TIME_MAJOR) && verifier.EndTable(); } - CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + BidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct CallOptionsBuilder { +struct BidirectionalSequenceLSTMOptionsBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_subgraph(uint32_t subgraph) { - fbb_.AddElement(CallOptions::VT_SUBGRAPH, subgraph, 0); + void add_fused_activation_function(ActivationFunctionType fused_activation_function) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); } - explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + void add_cell_clip(float cell_clip) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void 
add_proj_clip(float proj_clip) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_merge_outputs(bool merge_outputs) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); + } + void add_time_major(bool time_major) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 1); + } + explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - CallOptionsBuilder &operator=(const CallOptionsBuilder &); - flatbuffers::Offset Finish() { + BidirectionalSequenceLSTMOptionsBuilder &operator=(const BidirectionalSequenceLSTMOptionsBuilder &); + flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateCallOptions( +inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions( flatbuffers::FlatBufferBuilder &_fbb, - uint32_t subgraph = 0) { - CallOptionsBuilder builder_(_fbb); + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + float cell_clip = 0.0f, + float proj_clip = 0.0f, + bool merge_outputs = false, + bool time_major = true) { + BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_time_major(time_major); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ResizeBilinearOptionsT : public flatbuffers::NativeTable { + typedef ResizeBilinearOptions TableType; + bool align_corners; + 
ResizeBilinearOptionsT() + : align_corners(false) { + } +}; + +struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ResizeBilinearOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALIGN_CORNERS = 8 + }; + bool align_corners() const { + return GetField(VT_ALIGN_CORNERS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALIGN_CORNERS) && + verifier.EndTable(); + } + ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ResizeBilinearOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) { + fbb_.AddElement(ResizeBilinearOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); + } + explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateResizeBilinearOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool align_corners = false) { + ResizeBilinearOptionsBuilder builder_(_fbb); + builder_.add_align_corners(align_corners); + return builder_.Finish(); +} + +flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct 
ResizeNearestNeighborOptionsT : public flatbuffers::NativeTable { + typedef ResizeNearestNeighborOptions TableType; + bool align_corners; + ResizeNearestNeighborOptionsT() + : align_corners(false) { + } +}; + +struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ResizeNearestNeighborOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALIGN_CORNERS = 4 + }; + bool align_corners() const { + return GetField(VT_ALIGN_CORNERS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALIGN_CORNERS) && + verifier.EndTable(); + } + ResizeNearestNeighborOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ResizeNearestNeighborOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) { + fbb_.AddElement(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); + } + explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateResizeNearestNeighborOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool align_corners = false) { + ResizeNearestNeighborOptionsBuilder builder_(_fbb); + builder_.add_align_corners(align_corners); + return 
builder_.Finish(); +} + +flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CallOptionsT : public flatbuffers::NativeTable { + typedef CallOptions TableType; + uint32_t subgraph; + CallOptionsT() + : subgraph(0) { + } +}; + +struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CallOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SUBGRAPH = 4 + }; + uint32_t subgraph() const { + return GetField(VT_SUBGRAPH, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SUBGRAPH) && + verifier.EndTable(); + } + CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CallOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_subgraph(uint32_t subgraph) { + fbb_.AddElement(CallOptions::VT_SUBGRAPH, subgraph, 0); + } + explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CallOptionsBuilder &operator=(const CallOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCallOptions( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t subgraph = 0) { + CallOptionsBuilder builder_(_fbb); builder_.add_subgraph(subgraph); return builder_.Finish(); } @@ -3933,9 +4653,6 @@ struct PadOptionsT : public flatbuffers::NativeTable { struct PadOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef PadOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PadOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -3976,9 +4693,6 @@ struct PadV2OptionsT : public flatbuffers::NativeTable { struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef PadV2OptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PadV2OptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -4020,18 +4734,12 @@ struct ReshapeOptionsT : public flatbuffers::NativeTable { struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ReshapeOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ReshapeOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NEW_SHAPE = 4 }; const flatbuffers::Vector *new_shape() const { return GetPointer *>(VT_NEW_SHAPE); } - flatbuffers::Vector *mutable_new_shape() { - return GetPointer *>(VT_NEW_SHAPE); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) && @@ -4088,9 +4796,6 @@ struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable { struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SpaceToBatchNDOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SpaceToBatchNDOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -4131,9 +4836,6 @@ struct BatchToSpaceNDOptionsT : public flatbuffers::NativeTable { struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table { typedef BatchToSpaceNDOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return BatchToSpaceNDOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -4180,9 +4882,6 @@ struct SkipGramOptionsT : public flatbuffers::NativeTable { struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SkipGramOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SkipGramOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NGRAM_SIZE = 4, VT_MAX_SKIP_SIZE = 6, @@ -4191,21 +4890,12 @@ struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { int32_t ngram_size() const { return GetField(VT_NGRAM_SIZE, 0); } - bool mutate_ngram_size(int32_t _ngram_size) { - return SetField(VT_NGRAM_SIZE, _ngram_size, 0); - } int32_t max_skip_size() const { return GetField(VT_MAX_SKIP_SIZE, 0); } - bool mutate_max_skip_size(int32_t _max_skip_size) { - return SetField(VT_MAX_SKIP_SIZE, _max_skip_size, 0); - } bool include_all_ngrams() const { return GetField(VT_INCLUDE_ALL_NGRAMS, 0) != 0; } - bool mutate_include_all_ngrams(bool _include_all_ngrams) { - return SetField(VT_INCLUDE_ALL_NGRAMS, static_cast(_include_all_ngrams), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NGRAM_SIZE) && @@ -4266,18 +4956,12 @@ struct SpaceToDepthOptionsT : public flatbuffers::NativeTable { struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SpaceToDepthOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SpaceToDepthOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_BLOCK_SIZE = 4 }; int32_t block_size() const { return GetField(VT_BLOCK_SIZE, 0); } - 
bool mutate_block_size(int32_t _block_size) { - return SetField(VT_BLOCK_SIZE, _block_size, 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_BLOCK_SIZE) && @@ -4316,6 +5000,60 @@ inline flatbuffers::Offset CreateSpaceToDepthOptions( flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct DepthToSpaceOptionsT : public flatbuffers::NativeTable { + typedef DepthToSpaceOptions TableType; + int32_t block_size; + DepthToSpaceOptionsT() + : block_size(0) { + } +}; + +struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DepthToSpaceOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { + return GetField(VT_BLOCK_SIZE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCK_SIZE) && + verifier.EndTable(); + } + DepthToSpaceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DepthToSpaceOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) { + fbb_.AddElement(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); + } + explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + DepthToSpaceOptionsBuilder &operator=(const DepthToSpaceOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDepthToSpaceOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t block_size = 0) { + DepthToSpaceOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + struct SubOptionsT : public flatbuffers::NativeTable { typedef SubOptions TableType; ActivationFunctionType fused_activation_function; @@ -4326,18 +5064,12 @@ struct SubOptionsT : public flatbuffers::NativeTable { struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SubOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SubOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FUSED_ACTIVATION_FUNCTION = 4 }; ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && @@ -4386,18 +5118,12 @@ struct DivOptionsT : public flatbuffers::NativeTable { struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef DivOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return DivOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FUSED_ACTIVATION_FUNCTION = 4 }; ActivationFunctionType fused_activation_function() const { return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 
0)); } - bool mutate_fused_activation_function(ActivationFunctionType _fused_activation_function) { - return SetField(VT_FUSED_ACTIVATION_FUNCTION, static_cast(_fused_activation_function), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && @@ -4444,9 +5170,6 @@ struct TopKV2OptionsT : public flatbuffers::NativeTable { struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef TopKV2OptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TopKV2OptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -4489,18 +5212,12 @@ struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable { struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef EmbeddingLookupSparseOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return EmbeddingLookupSparseOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_COMBINER = 4 }; CombinerType combiner() const { return static_cast(GetField(VT_COMBINER, 0)); } - bool mutate_combiner(CombinerType _combiner) { - return SetField(VT_COMBINER, static_cast(_combiner), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_COMBINER) && @@ -4549,18 +5266,12 @@ struct GatherOptionsT : public flatbuffers::NativeTable { struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef GatherOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GatherOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_AXIS = 4 }; int32_t axis() const { return GetField(VT_AXIS, 0); } - bool mutate_axis(int32_t _axis) { - return 
SetField(VT_AXIS, _axis, 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS) && @@ -4607,9 +5318,6 @@ struct TransposeOptionsT : public flatbuffers::NativeTable { struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef TransposeOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TransposeOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -4650,9 +5358,6 @@ struct ExpOptionsT : public flatbuffers::NativeTable { struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ExpOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ExpOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -4685,31 +5390,65 @@ inline flatbuffers::Offset CreateExpOptions( flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct CosOptionsT : public flatbuffers::NativeTable { + typedef CosOptions TableType; + CosOptionsT() { + } +}; + +struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CosOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + CosOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CosOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + 
flatbuffers::uoffset_t start_; + explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + CosOptionsBuilder &operator=(const CosOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCosOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + CosOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + struct ReducerOptionsT : public flatbuffers::NativeTable { typedef ReducerOptions TableType; - bool keepDims; + bool keep_dims; ReducerOptionsT() - : keepDims(false) { + : keep_dims(false) { } }; struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ReducerOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ReducerOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_KEEPDIMS = 4 + VT_KEEP_DIMS = 4 }; - bool keepDims() const { - return GetField(VT_KEEPDIMS, 0) != 0; - } - bool mutate_keepDims(bool _keepDims) { - return SetField(VT_KEEPDIMS, static_cast(_keepDims), 0); + bool keep_dims() const { + return GetField(VT_KEEP_DIMS, 0) != 0; } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_KEEPDIMS) && + VerifyField(verifier, VT_KEEP_DIMS) && verifier.EndTable(); } ReducerOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -4720,8 +5459,8 @@ struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { struct ReducerOptionsBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_keepDims(bool keepDims) { - fbb_.AddElement(ReducerOptions::VT_KEEPDIMS, 
static_cast(keepDims), 0); + void add_keep_dims(bool keep_dims) { + fbb_.AddElement(ReducerOptions::VT_KEEP_DIMS, static_cast(keep_dims), 0); } explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { @@ -4737,9 +5476,9 @@ struct ReducerOptionsBuilder { inline flatbuffers::Offset CreateReducerOptions( flatbuffers::FlatBufferBuilder &_fbb, - bool keepDims = false) { + bool keep_dims = false) { ReducerOptionsBuilder builder_(_fbb); - builder_.add_keepDims(keepDims); + builder_.add_keep_dims(keep_dims); return builder_.Finish(); } @@ -4747,29 +5486,23 @@ flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBuffer struct SqueezeOptionsT : public flatbuffers::NativeTable { typedef SqueezeOptions TableType; - std::vector squeezeDims; + std::vector squeeze_dims; SqueezeOptionsT() { } }; struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SqueezeOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SqueezeOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SQUEEZEDIMS = 4 + VT_SQUEEZE_DIMS = 4 }; - const flatbuffers::Vector *squeezeDims() const { - return GetPointer *>(VT_SQUEEZEDIMS); - } - flatbuffers::Vector *mutable_squeezeDims() { - return GetPointer *>(VT_SQUEEZEDIMS); + const flatbuffers::Vector *squeeze_dims() const { + return GetPointer *>(VT_SQUEEZE_DIMS); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_SQUEEZEDIMS) && - verifier.VerifyVector(squeezeDims()) && + VerifyOffset(verifier, VT_SQUEEZE_DIMS) && + verifier.VerifyVector(squeeze_dims()) && verifier.EndTable(); } SqueezeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -4780,8 +5513,8 @@ struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { struct SqueezeOptionsBuilder { flatbuffers::FlatBufferBuilder &fbb_; 
flatbuffers::uoffset_t start_; - void add_squeezeDims(flatbuffers::Offset> squeezeDims) { - fbb_.AddOffset(SqueezeOptions::VT_SQUEEZEDIMS, squeezeDims); + void add_squeeze_dims(flatbuffers::Offset> squeeze_dims) { + fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims); } explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { @@ -4797,19 +5530,19 @@ struct SqueezeOptionsBuilder { inline flatbuffers::Offset CreateSqueezeOptions( flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> squeezeDims = 0) { + flatbuffers::Offset> squeeze_dims = 0) { SqueezeOptionsBuilder builder_(_fbb); - builder_.add_squeezeDims(squeezeDims); + builder_.add_squeeze_dims(squeeze_dims); return builder_.Finish(); } inline flatbuffers::Offset CreateSqueezeOptionsDirect( flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *squeezeDims = nullptr) { - auto squeezeDims__ = squeezeDims ? _fbb.CreateVector(*squeezeDims) : 0; + const std::vector *squeeze_dims = nullptr) { + auto squeeze_dims__ = squeeze_dims ? 
_fbb.CreateVector(*squeeze_dims) : 0; return tflite::CreateSqueezeOptions( _fbb, - squeezeDims__); + squeeze_dims__); } flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); @@ -4824,18 +5557,12 @@ struct SplitOptionsT : public flatbuffers::NativeTable { struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SplitOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SplitOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NUM_SPLITS = 4 }; int32_t num_splits() const { return GetField(VT_NUM_SPLITS, 0); } - bool mutate_num_splits(int32_t _num_splits) { - return SetField(VT_NUM_SPLITS, _num_splits, 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM_SPLITS) && @@ -4874,121 +5601,157 @@ inline flatbuffers::Offset CreateSplitOptions( flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct StridedSliceOptionsT : public flatbuffers::NativeTable { - typedef StridedSliceOptions TableType; - int32_t beginMask; - int32_t endMask; - int32_t ellipsisMask; - int32_t newAxisMask; - int32_t shrinkAxisMask; - StridedSliceOptionsT() - : beginMask(0), - endMask(0), - ellipsisMask(0), - newAxisMask(0), - shrinkAxisMask(0) { +struct SplitVOptionsT : public flatbuffers::NativeTable { + typedef SplitVOptions TableType; + int32_t num_splits; + SplitVOptionsT() + : num_splits(0) { } }; -struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef StridedSliceOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return StridedSliceOptionsTypeTable(); - } +struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table { + typedef SplitVOptionsT NativeTableType; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BEGINMASK = 4, - VT_ENDMASK = 6, - VT_ELLIPSISMASK = 8, - VT_NEWAXISMASK = 10, - VT_SHRINKAXISMASK = 12 + VT_NUM_SPLITS = 4 }; - int32_t beginMask() const { - return GetField(VT_BEGINMASK, 0); - } - bool mutate_beginMask(int32_t _beginMask) { - return SetField(VT_BEGINMASK, _beginMask, 0); - } - int32_t endMask() const { - return GetField(VT_ENDMASK, 0); - } - bool mutate_endMask(int32_t _endMask) { - return SetField(VT_ENDMASK, _endMask, 0); - } - int32_t ellipsisMask() const { - return GetField(VT_ELLIPSISMASK, 0); - } - bool mutate_ellipsisMask(int32_t _ellipsisMask) { - return SetField(VT_ELLIPSISMASK, _ellipsisMask, 0); - } - int32_t newAxisMask() const { - return GetField(VT_NEWAXISMASK, 0); - } - bool mutate_newAxisMask(int32_t _newAxisMask) { - return SetField(VT_NEWAXISMASK, _newAxisMask, 0); - } - int32_t shrinkAxisMask() const { - return GetField(VT_SHRINKAXISMASK, 0); - } - bool mutate_shrinkAxisMask(int32_t _shrinkAxisMask) { - return SetField(VT_SHRINKAXISMASK, _shrinkAxisMask, 0); + int32_t num_splits() const { + return GetField(VT_NUM_SPLITS, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BEGINMASK) && - VerifyField(verifier, VT_ENDMASK) && - VerifyField(verifier, VT_ELLIPSISMASK) && - VerifyField(verifier, VT_NEWAXISMASK) && - VerifyField(verifier, VT_SHRINKAXISMASK) && + VerifyField(verifier, VT_NUM_SPLITS) && verifier.EndTable(); } - StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + SplitVOptionsT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct StridedSliceOptionsBuilder { +struct SplitVOptionsBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_beginMask(int32_t beginMask) { - fbb_.AddElement(StridedSliceOptions::VT_BEGINMASK, beginMask, 0); - } - void add_endMask(int32_t endMask) { - fbb_.AddElement(StridedSliceOptions::VT_ENDMASK, endMask, 0); - } - void add_ellipsisMask(int32_t ellipsisMask) { - fbb_.AddElement(StridedSliceOptions::VT_ELLIPSISMASK, ellipsisMask, 0); - } - void add_newAxisMask(int32_t newAxisMask) { - fbb_.AddElement(StridedSliceOptions::VT_NEWAXISMASK, newAxisMask, 0); - } - void add_shrinkAxisMask(int32_t shrinkAxisMask) { - fbb_.AddElement(StridedSliceOptions::VT_SHRINKAXISMASK, shrinkAxisMask, 0); + void add_num_splits(int32_t num_splits) { + fbb_.AddElement(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); } - explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &); - flatbuffers::Offset Finish() { + SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &); + flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateStridedSliceOptions( +inline flatbuffers::Offset CreateSplitVOptions( flatbuffers::FlatBufferBuilder &_fbb, - int32_t beginMask = 0, - int32_t endMask = 0, - int32_t ellipsisMask = 0, - int32_t newAxisMask = 0, - int32_t shrinkAxisMask = 0) { + int32_t num_splits 
= 0) { + SplitVOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StridedSliceOptionsT : public flatbuffers::NativeTable { + typedef StridedSliceOptions TableType; + int32_t begin_mask; + int32_t end_mask; + int32_t ellipsis_mask; + int32_t new_axis_mask; + int32_t shrink_axis_mask; + StridedSliceOptionsT() + : begin_mask(0), + end_mask(0), + ellipsis_mask(0), + new_axis_mask(0), + shrink_axis_mask(0) { + } +}; + +struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef StridedSliceOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BEGIN_MASK = 4, + VT_END_MASK = 6, + VT_ELLIPSIS_MASK = 8, + VT_NEW_AXIS_MASK = 10, + VT_SHRINK_AXIS_MASK = 12 + }; + int32_t begin_mask() const { + return GetField(VT_BEGIN_MASK, 0); + } + int32_t end_mask() const { + return GetField(VT_END_MASK, 0); + } + int32_t ellipsis_mask() const { + return GetField(VT_ELLIPSIS_MASK, 0); + } + int32_t new_axis_mask() const { + return GetField(VT_NEW_AXIS_MASK, 0); + } + int32_t shrink_axis_mask() const { + return GetField(VT_SHRINK_AXIS_MASK, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BEGIN_MASK) && + VerifyField(verifier, VT_END_MASK) && + VerifyField(verifier, VT_ELLIPSIS_MASK) && + VerifyField(verifier, VT_NEW_AXIS_MASK) && + VerifyField(verifier, VT_SHRINK_AXIS_MASK) && + verifier.EndTable(); + } + StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, 
const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StridedSliceOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_begin_mask(int32_t begin_mask) { + fbb_.AddElement(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); + } + void add_end_mask(int32_t end_mask) { + fbb_.AddElement(StridedSliceOptions::VT_END_MASK, end_mask, 0); + } + void add_ellipsis_mask(int32_t ellipsis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); + } + void add_new_axis_mask(int32_t new_axis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); + } + void add_shrink_axis_mask(int32_t shrink_axis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); + } + explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateStridedSliceOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t begin_mask = 0, + int32_t end_mask = 0, + int32_t ellipsis_mask = 0, + int32_t new_axis_mask = 0, + int32_t shrink_axis_mask = 0) { StridedSliceOptionsBuilder builder_(_fbb); - builder_.add_shrinkAxisMask(shrinkAxisMask); - builder_.add_newAxisMask(newAxisMask); - builder_.add_ellipsisMask(ellipsisMask); - builder_.add_endMask(endMask); - builder_.add_beginMask(beginMask); + builder_.add_shrink_axis_mask(shrink_axis_mask); + builder_.add_new_axis_mask(new_axis_mask); + builder_.add_ellipsis_mask(ellipsis_mask); + builder_.add_end_mask(end_mask); + builder_.add_begin_mask(begin_mask); return builder_.Finish(); } @@ -5002,9 +5765,6 @@ struct LogSoftmaxOptionsT : public flatbuffers::NativeTable { struct LogSoftmaxOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LogSoftmaxOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LogSoftmaxOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5049,9 +5809,6 @@ struct CastOptionsT : public flatbuffers::NativeTable { struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef CastOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return CastOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_IN_DATA_TYPE = 4, VT_OUT_DATA_TYPE = 6 @@ -5059,15 +5816,9 @@ struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { TensorType in_data_type() const { return static_cast(GetField(VT_IN_DATA_TYPE, 0)); } - bool mutate_in_data_type(TensorType _in_data_type) { - return SetField(VT_IN_DATA_TYPE, static_cast(_in_data_type), 0); - } TensorType out_data_type() const { return static_cast(GetField(VT_OUT_DATA_TYPE, 0)); } - bool mutate_out_data_type(TensorType _out_data_type) { - return SetField(VT_OUT_DATA_TYPE, static_cast(_out_data_type), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_IN_DATA_TYPE) && @@ -5120,9 +5871,6 @@ struct DequantizeOptionsT : public flatbuffers::NativeTable { struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef DequantizeOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return DequantizeOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5163,9 +5911,6 @@ struct MaximumMinimumOptionsT : public flatbuffers::NativeTable { struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef 
MaximumMinimumOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return MaximumMinimumOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5206,9 +5951,6 @@ struct TileOptionsT : public flatbuffers::NativeTable { struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef TileOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TileOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5251,18 +5993,12 @@ struct ArgMaxOptionsT : public flatbuffers::NativeTable { struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ArgMaxOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ArgMaxOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_OUTPUT_TYPE = 4 }; TensorType output_type() const { return static_cast(GetField(VT_OUTPUT_TYPE, 0)); } - bool mutate_output_type(TensorType _output_type) { - return SetField(VT_OUTPUT_TYPE, static_cast(_output_type), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_OUTPUT_TYPE) && @@ -5311,18 +6047,12 @@ struct ArgMinOptionsT : public flatbuffers::NativeTable { struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ArgMinOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ArgMinOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_OUTPUT_TYPE = 4 }; TensorType output_type() const { return static_cast(GetField(VT_OUTPUT_TYPE, 0)); } - bool mutate_output_type(TensorType _output_type) { - return SetField(VT_OUTPUT_TYPE, static_cast(_output_type), 0); - } bool 
Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_OUTPUT_TYPE) && @@ -5369,9 +6099,6 @@ struct GreaterOptionsT : public flatbuffers::NativeTable { struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef GreaterOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GreaterOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5412,9 +6139,6 @@ struct GreaterEqualOptionsT : public flatbuffers::NativeTable { struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef GreaterEqualOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GreaterEqualOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5455,9 +6179,6 @@ struct LessOptionsT : public flatbuffers::NativeTable { struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LessOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LessOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5498,9 +6219,6 @@ struct LessEqualOptionsT : public flatbuffers::NativeTable { struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LessEqualOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LessEqualOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5541,9 +6259,6 @@ struct NegOptionsT : public flatbuffers::NativeTable { struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef NegOptionsT NativeTableType; - static const 
flatbuffers::TypeTable *MiniReflectTypeTable() { - return NegOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5584,9 +6299,6 @@ struct SelectOptionsT : public flatbuffers::NativeTable { struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SelectOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SelectOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5627,9 +6339,6 @@ struct SliceOptionsT : public flatbuffers::NativeTable { struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SliceOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SliceOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5676,9 +6385,6 @@ struct TransposeConvOptionsT : public flatbuffers::NativeTable { struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef TransposeConvOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TransposeConvOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_PADDING = 4, VT_STRIDE_W = 6, @@ -5687,21 +6393,12 @@ struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table Padding padding() const { return static_cast(GetField(VT_PADDING, 0)); } - bool mutate_padding(Padding _padding) { - return SetField(VT_PADDING, static_cast(_padding), 0); - } int32_t stride_w() const { return GetField(VT_STRIDE_W, 0); } - bool mutate_stride_w(int32_t _stride_w) { - return SetField(VT_STRIDE_W, _stride_w, 0); - } int32_t stride_h() const { return GetField(VT_STRIDE_H, 0); } - bool mutate_stride_h(int32_t _stride_h) { - return 
SetField(VT_STRIDE_H, _stride_h, 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PADDING) && @@ -5760,9 +6457,6 @@ struct ExpandDimsOptionsT : public flatbuffers::NativeTable { struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ExpandDimsOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ExpandDimsOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5797,29 +6491,23 @@ flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::Flat struct SparseToDenseOptionsT : public flatbuffers::NativeTable { typedef SparseToDenseOptions TableType; - bool validateIndices; + bool validate_indices; SparseToDenseOptionsT() - : validateIndices(false) { + : validate_indices(false) { } }; struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SparseToDenseOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SparseToDenseOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALIDATEINDICES = 4 + VT_VALIDATE_INDICES = 4 }; - bool validateIndices() const { - return GetField(VT_VALIDATEINDICES, 0) != 0; - } - bool mutate_validateIndices(bool _validateIndices) { - return SetField(VT_VALIDATEINDICES, static_cast(_validateIndices), 0); + bool validate_indices() const { + return GetField(VT_VALIDATE_INDICES, 0) != 0; } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VALIDATEINDICES) && + VerifyField(verifier, VT_VALIDATE_INDICES) && verifier.EndTable(); } SparseToDenseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -5830,8 +6518,8 @@ struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table struct 
SparseToDenseOptionsBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_validateIndices(bool validateIndices) { - fbb_.AddElement(SparseToDenseOptions::VT_VALIDATEINDICES, static_cast(validateIndices), 0); + void add_validate_indices(bool validate_indices) { + fbb_.AddElement(SparseToDenseOptions::VT_VALIDATE_INDICES, static_cast(validate_indices), 0); } explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { @@ -5847,9 +6535,9 @@ struct SparseToDenseOptionsBuilder { inline flatbuffers::Offset CreateSparseToDenseOptions( flatbuffers::FlatBufferBuilder &_fbb, - bool validateIndices = false) { + bool validate_indices = false) { SparseToDenseOptionsBuilder builder_(_fbb); - builder_.add_validateIndices(validateIndices); + builder_.add_validate_indices(validate_indices); return builder_.Finish(); } @@ -5863,9 +6551,6 @@ struct EqualOptionsT : public flatbuffers::NativeTable { struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef EqualOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return EqualOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5906,9 +6591,6 @@ struct NotEqualOptionsT : public flatbuffers::NativeTable { struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef NotEqualOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return NotEqualOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -5951,18 +6633,12 @@ struct ShapeOptionsT : public flatbuffers::NativeTable { struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ShapeOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ShapeOptionsTypeTable(); - } 
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_OUT_TYPE = 4 }; TensorType out_type() const { return static_cast(GetField(VT_OUT_TYPE, 0)); } - bool mutate_out_type(TensorType _out_type) { - return SetField(VT_OUT_TYPE, static_cast(_out_type), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_OUT_TYPE) && @@ -6001,6 +6677,46 @@ inline flatbuffers::Offset CreateShapeOptions( flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct RankOptionsT : public flatbuffers::NativeTable { + typedef RankOptions TableType; + RankOptionsT() { + } +}; + +struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RankOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + RankOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RankOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + RankOptionsBuilder &operator=(const RankOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRankOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + RankOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, 
const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + struct PowOptionsT : public flatbuffers::NativeTable { typedef PowOptions TableType; PowOptionsT() { @@ -6009,9 +6725,6 @@ struct PowOptionsT : public flatbuffers::NativeTable { struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef PowOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PowOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -6060,9 +6773,6 @@ struct FakeQuantOptionsT : public flatbuffers::NativeTable { struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef FakeQuantOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return FakeQuantOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_MIN = 4, VT_MAX = 6, @@ -6072,27 +6782,15 @@ struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { float min() const { return GetField(VT_MIN, 0.0f); } - bool mutate_min(float _min) { - return SetField(VT_MIN, _min, 0.0f); - } float max() const { return GetField(VT_MAX, 0.0f); } - bool mutate_max(float _max) { - return SetField(VT_MAX, _max, 0.0f); - } int32_t num_bits() const { return GetField(VT_NUM_BITS, 0); } - bool mutate_num_bits(int32_t _num_bits) { - return SetField(VT_NUM_BITS, _num_bits, 0); - } bool narrow_range() const { return GetField(VT_NARROW_RANGE, 0) != 0; } - bool mutate_narrow_range(bool _narrow_range) { - return SetField(VT_NARROW_RANGE, static_cast(_narrow_range), 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_MIN) && @@ -6161,9 +6859,6 @@ struct PackOptionsT : public flatbuffers::NativeTable { struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef PackOptionsT 
NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PackOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_VALUES_COUNT = 4, VT_AXIS = 6 @@ -6171,15 +6866,9 @@ struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { int32_t values_count() const { return GetField(VT_VALUES_COUNT, 0); } - bool mutate_values_count(int32_t _values_count) { - return SetField(VT_VALUES_COUNT, _values_count, 0); - } int32_t axis() const { return GetField(VT_AXIS, 0); } - bool mutate_axis(int32_t _axis) { - return SetField(VT_AXIS, _axis, 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_VALUES_COUNT) && @@ -6232,9 +6921,6 @@ struct LogicalOrOptionsT : public flatbuffers::NativeTable { struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LogicalOrOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LogicalOrOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -6277,18 +6963,12 @@ struct OneHotOptionsT : public flatbuffers::NativeTable { struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef OneHotOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return OneHotOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_AXIS = 4 }; int32_t axis() const { return GetField(VT_AXIS, 0); } - bool mutate_axis(int32_t _axis) { - return SetField(VT_AXIS, _axis, 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS) && @@ -6327,6 +7007,86 @@ inline flatbuffers::Offset CreateOneHotOptions( flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct AbsOptionsT : public flatbuffers::NativeTable { + typedef AbsOptions TableType; + AbsOptionsT() { + } +}; + +struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AbsOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + AbsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AbsOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + AbsOptionsBuilder &operator=(const AbsOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAbsOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + AbsOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HardSwishOptionsT : public flatbuffers::NativeTable { + typedef HardSwishOptions TableType; + HardSwishOptionsT() { + } +}; + +struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HardSwishOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + HardSwishOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void 
UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HardSwishOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + HardSwishOptionsBuilder &operator=(const HardSwishOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHardSwishOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + HardSwishOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + struct LogicalAndOptionsT : public flatbuffers::NativeTable { typedef LogicalAndOptions TableType; LogicalAndOptionsT() { @@ -6335,9 +7095,6 @@ struct LogicalAndOptionsT : public flatbuffers::NativeTable { struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LogicalAndOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LogicalAndOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -6378,9 +7135,6 @@ struct LogicalNotOptionsT : public flatbuffers::NativeTable { struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef LogicalNotOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LogicalNotOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) 
&& verifier.EndTable(); @@ -6425,9 +7179,6 @@ struct UnpackOptionsT : public flatbuffers::NativeTable { struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef UnpackOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return UnpackOptionsTypeTable(); - } enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NUM = 4, VT_AXIS = 6 @@ -6435,15 +7186,9 @@ struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { int32_t num() const { return GetField(VT_NUM, 0); } - bool mutate_num(int32_t _num) { - return SetField(VT_NUM, _num, 0); - } int32_t axis() const { return GetField(VT_AXIS, 0); } - bool mutate_axis(int32_t _axis) { - return SetField(VT_AXIS, _axis, 0); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM) && @@ -6496,9 +7241,6 @@ struct FloorDivOptionsT : public flatbuffers::NativeTable { struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef FloorDivOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return FloorDivOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -6539,9 +7281,6 @@ struct SquareOptionsT : public flatbuffers::NativeTable { struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef SquareOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SquareOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -6582,9 +7321,6 @@ struct ZerosLikeOptionsT : public flatbuffers::NativeTable { struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ZerosLikeOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return 
ZerosLikeOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -6625,9 +7361,6 @@ struct FillOptionsT : public flatbuffers::NativeTable { struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef FillOptionsT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return FillOptionsTypeTable(); - } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && verifier.EndTable(); @@ -6660,6355 +7393,7286 @@ inline flatbuffers::Offset CreateFillOptions( flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct OperatorCodeT : public flatbuffers::NativeTable { - typedef OperatorCode TableType; - BuiltinOperator builtin_code; - std::string custom_code; - int32_t version; - OperatorCodeT() - : builtin_code(BuiltinOperator_ADD), - version(1) { +struct FloorModOptionsT : public flatbuffers::NativeTable { + typedef FloorModOptions TableType; + FloorModOptionsT() { } }; -struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OperatorCodeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return OperatorCodeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BUILTIN_CODE = 4, - VT_CUSTOM_CODE = 6, - VT_VERSION = 8 - }; - BuiltinOperator builtin_code() const { - return static_cast(GetField(VT_BUILTIN_CODE, 0)); - } - bool mutate_builtin_code(BuiltinOperator _builtin_code) { - return SetField(VT_BUILTIN_CODE, static_cast(_builtin_code), 0); - } - const flatbuffers::String *custom_code() const { - return GetPointer(VT_CUSTOM_CODE); +struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FloorModOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + 
return VerifyTableStart(verifier) && + verifier.EndTable(); } - flatbuffers::String *mutable_custom_code() { - return GetPointer(VT_CUSTOM_CODE); + FloorModOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct FloorModOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - int32_t version() const { - return GetField(VT_VERSION, 1); + FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - bool mutate_version(int32_t _version) { - return SetField(VT_VERSION, _version, 1); +}; + +inline flatbuffers::Offset CreateFloorModOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + FloorModOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RangeOptionsT : public flatbuffers::NativeTable { + typedef RangeOptions TableType; + RangeOptionsT() { } +}; + +struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RangeOptionsT NativeTableType; bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BUILTIN_CODE) && - VerifyOffset(verifier, VT_CUSTOM_CODE) && - verifier.VerifyString(custom_code()) && - VerifyField(verifier, VT_VERSION) && verifier.EndTable(); } - OperatorCodeT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + RangeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -struct OperatorCodeBuilder { +struct RangeOptionsBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_builtin_code(BuiltinOperator builtin_code) { - fbb_.AddElement(OperatorCode::VT_BUILTIN_CODE, static_cast(builtin_code), 0); - } - void add_custom_code(flatbuffers::Offset custom_code) { - fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code); - } - void add_version(int32_t version) { - fbb_.AddElement(OperatorCode::VT_VERSION, version, 1); - } - explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } - OperatorCodeBuilder &operator=(const OperatorCodeBuilder &); - flatbuffers::Offset Finish() { + RangeOptionsBuilder &operator=(const RangeOptionsBuilder &); + flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); + auto o = flatbuffers::Offset(end); return o; } }; -inline flatbuffers::Offset CreateOperatorCode( - flatbuffers::FlatBufferBuilder &_fbb, - BuiltinOperator builtin_code = BuiltinOperator_ADD, - flatbuffers::Offset custom_code = 0, - int32_t version = 1) { - OperatorCodeBuilder builder_(_fbb); - builder_.add_version(version); - builder_.add_custom_code(custom_code); - 
builder_.add_builtin_code(builtin_code); +inline flatbuffers::Offset CreateRangeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + RangeOptionsBuilder builder_(_fbb); return builder_.Finish(); } -inline flatbuffers::Offset CreateOperatorCodeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - BuiltinOperator builtin_code = BuiltinOperator_ADD, - const char *custom_code = nullptr, - int32_t version = 1) { - auto custom_code__ = custom_code ? _fbb.CreateString(custom_code) : 0; - return tflite::CreateOperatorCode( - _fbb, - builtin_code, - custom_code__, - version); -} - -flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -struct OperatorT : public flatbuffers::NativeTable { - typedef Operator TableType; - uint32_t opcode_index; - std::vector inputs; - std::vector outputs; - BuiltinOptionsUnion builtin_options; - std::vector custom_options; - CustomOptionsFormat custom_options_format; - std::vector mutating_variable_inputs; - OperatorT() - : opcode_index(0), - custom_options_format(CustomOptionsFormat_FLEXBUFFERS) { +struct LeakyReluOptionsT : public flatbuffers::NativeTable { + typedef LeakyReluOptions TableType; + float alpha; + LeakyReluOptionsT() + : alpha(0.0f) { } }; -struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OperatorT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return OperatorTypeTable(); - } +struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LeakyReluOptionsT NativeTableType; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OPCODE_INDEX = 4, - VT_INPUTS = 6, - VT_OUTPUTS = 8, - VT_BUILTIN_OPTIONS_TYPE = 10, - VT_BUILTIN_OPTIONS = 12, - 
VT_CUSTOM_OPTIONS = 14, - VT_CUSTOM_OPTIONS_FORMAT = 16, - VT_MUTATING_VARIABLE_INPUTS = 18 + VT_ALPHA = 4 }; - uint32_t opcode_index() const { - return GetField(VT_OPCODE_INDEX, 0); - } - bool mutate_opcode_index(uint32_t _opcode_index) { - return SetField(VT_OPCODE_INDEX, _opcode_index, 0); + float alpha() const { + return GetField(VT_ALPHA, 0.0f); } - const flatbuffers::Vector *inputs() const { - return GetPointer *>(VT_INPUTS); + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALPHA) && + verifier.EndTable(); } - flatbuffers::Vector *mutable_inputs() { - return GetPointer *>(VT_INPUTS); + LeakyReluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LeakyReluOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_alpha(float alpha) { + fbb_.AddElement(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); } - const flatbuffers::Vector *outputs() const { - return GetPointer *>(VT_OUTPUTS); + explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - flatbuffers::Vector *mutable_outputs() { - return GetPointer *>(VT_OUTPUTS); + LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - BuiltinOptions builtin_options_type() const { - return static_cast(GetField(VT_BUILTIN_OPTIONS_TYPE, 0)); +}; + +inline flatbuffers::Offset CreateLeakyReluOptions( + flatbuffers::FlatBufferBuilder &_fbb, + float alpha = 0.0f) { + LeakyReluOptionsBuilder builder_(_fbb); + 
builder_.add_alpha(alpha); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SquaredDifferenceOptionsT : public flatbuffers::NativeTable { + typedef SquaredDifferenceOptions TableType; + SquaredDifferenceOptionsT() { } - bool mutate_builtin_options_type(BuiltinOptions _builtin_options_type) { - return SetField(VT_BUILTIN_OPTIONS_TYPE, static_cast(_builtin_options_type), 0); +}; + +struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SquaredDifferenceOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const void *builtin_options() const { - return GetPointer(VT_BUILTIN_OPTIONS); + SquaredDifferenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SquaredDifferenceOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - template const T *builtin_options_as() const; - const Conv2DOptions *builtin_options_as_Conv2DOptions() const { - return builtin_options_type() == BuiltinOptions_Conv2DOptions ? 
static_cast(builtin_options()) : nullptr; + SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const { - return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateSquaredDifferenceOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + SquaredDifferenceOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MirrorPadOptionsT : public flatbuffers::NativeTable { + typedef MirrorPadOptions TableType; + MirrorPadMode mode; + MirrorPadOptionsT() + : mode(MirrorPadMode_REFLECT) { } - const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const { - return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MirrorPadOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MODE = 4 + }; + MirrorPadMode mode() const { + return static_cast(GetField(VT_MODE, 0)); } - const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const { - return builtin_options_type() == BuiltinOptions_LSHProjectionOptions ? static_cast(builtin_options()) : nullptr; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MODE) && + verifier.EndTable(); } - const Pool2DOptions *builtin_options_as_Pool2DOptions() const { - return builtin_options_type() == BuiltinOptions_Pool2DOptions ? 
static_cast(builtin_options()) : nullptr; + MirrorPadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MirrorPadOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_mode(MirrorPadMode mode) { + fbb_.AddElement(MirrorPadOptions::VT_MODE, static_cast(mode), 0); } - const SVDFOptions *builtin_options_as_SVDFOptions() const { - return builtin_options_type() == BuiltinOptions_SVDFOptions ? static_cast(builtin_options()) : nullptr; + explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const RNNOptions *builtin_options_as_RNNOptions() const { - return builtin_options_type() == BuiltinOptions_RNNOptions ? static_cast(builtin_options()) : nullptr; + MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const { - return builtin_options_type() == BuiltinOptions_FullyConnectedOptions ? 
static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateMirrorPadOptions( + flatbuffers::FlatBufferBuilder &_fbb, + MirrorPadMode mode = MirrorPadMode_REFLECT) { + MirrorPadOptionsBuilder builder_(_fbb); + builder_.add_mode(mode); + return builder_.Finish(); +} + +flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UniqueOptionsT : public flatbuffers::NativeTable { + typedef UniqueOptions TableType; + TensorType idx_out_type; + UniqueOptionsT() + : idx_out_type(TensorType_INT32) { } - const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const { - return builtin_options_type() == BuiltinOptions_SoftmaxOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UniqueOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_IDX_OUT_TYPE = 4 + }; + TensorType idx_out_type() const { + return static_cast(GetField(VT_IDX_OUT_TYPE, 2)); } - const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const { - return builtin_options_type() == BuiltinOptions_ConcatenationOptions ? static_cast(builtin_options()) : nullptr; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_IDX_OUT_TYPE) && + verifier.EndTable(); } - const AddOptions *builtin_options_as_AddOptions() const { - return builtin_options_type() == BuiltinOptions_AddOptions ? 
static_cast(builtin_options()) : nullptr; + UniqueOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UniqueOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_idx_out_type(TensorType idx_out_type) { + fbb_.AddElement(UniqueOptions::VT_IDX_OUT_TYPE, static_cast(idx_out_type), 2); } - const L2NormOptions *builtin_options_as_L2NormOptions() const { - return builtin_options_type() == BuiltinOptions_L2NormOptions ? static_cast(builtin_options()) : nullptr; + explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const { - return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions ? static_cast(builtin_options()) : nullptr; + UniqueOptionsBuilder &operator=(const UniqueOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const LSTMOptions *builtin_options_as_LSTMOptions() const { - return builtin_options_type() == BuiltinOptions_LSTMOptions ? 
static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateUniqueOptions( + flatbuffers::FlatBufferBuilder &_fbb, + TensorType idx_out_type = TensorType_INT32) { + UniqueOptionsBuilder builder_(_fbb); + builder_.add_idx_out_type(idx_out_type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReverseV2OptionsT : public flatbuffers::NativeTable { + typedef ReverseV2Options TableType; + ReverseV2OptionsT() { } - const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const { - return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReverseV2OptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const CallOptions *builtin_options_as_CallOptions() const { - return builtin_options_type() == BuiltinOptions_CallOptions ? static_cast(builtin_options()) : nullptr; + ReverseV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReverseV2OptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const ReshapeOptions *builtin_options_as_ReshapeOptions() const { - return builtin_options_type() == BuiltinOptions_ReshapeOptions ? 
static_cast(builtin_options()) : nullptr; + ReverseV2OptionsBuilder &operator=(const ReverseV2OptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const SkipGramOptions *builtin_options_as_SkipGramOptions() const { - return builtin_options_type() == BuiltinOptions_SkipGramOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateReverseV2Options( + flatbuffers::FlatBufferBuilder &_fbb) { + ReverseV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AddNOptionsT : public flatbuffers::NativeTable { + typedef AddNOptions TableType; + AddNOptionsT() { } - const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const { - return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AddNOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const { - return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions ? 
static_cast(builtin_options()) : nullptr; + AddNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AddNOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const MulOptions *builtin_options_as_MulOptions() const { - return builtin_options_type() == BuiltinOptions_MulOptions ? static_cast(builtin_options()) : nullptr; + AddNOptionsBuilder &operator=(const AddNOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const PadOptions *builtin_options_as_PadOptions() const { - return builtin_options_type() == BuiltinOptions_PadOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateAddNOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + AddNOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GatherNdOptionsT : public flatbuffers::NativeTable { + typedef GatherNdOptions TableType; + GatherNdOptionsT() { } - const GatherOptions *builtin_options_as_GatherOptions() const { - return builtin_options_type() == BuiltinOptions_GatherOptions ? 
static_cast(builtin_options()) : nullptr; +}; + +struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GatherNdOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const { - return builtin_options_type() == BuiltinOptions_BatchToSpaceNDOptions ? static_cast(builtin_options()) : nullptr; + GatherNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GatherNdOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const { - return builtin_options_type() == BuiltinOptions_SpaceToBatchNDOptions ? static_cast(builtin_options()) : nullptr; + GatherNdOptionsBuilder &operator=(const GatherNdOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const TransposeOptions *builtin_options_as_TransposeOptions() const { - return builtin_options_type() == BuiltinOptions_TransposeOptions ? 
static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateGatherNdOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + GatherNdOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct WhereOptionsT : public flatbuffers::NativeTable { + typedef WhereOptions TableType; + WhereOptionsT() { } - const ReducerOptions *builtin_options_as_ReducerOptions() const { - return builtin_options_type() == BuiltinOptions_ReducerOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef WhereOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const SubOptions *builtin_options_as_SubOptions() const { - return builtin_options_type() == BuiltinOptions_SubOptions ? static_cast(builtin_options()) : nullptr; + WhereOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct WhereOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const DivOptions *builtin_options_as_DivOptions() const { - return builtin_options_type() == BuiltinOptions_DivOptions ? static_cast(builtin_options()) : nullptr; - } - const SqueezeOptions *builtin_options_as_SqueezeOptions() const { - return builtin_options_type() == BuiltinOptions_SqueezeOptions ? 
static_cast(builtin_options()) : nullptr; - } - const SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const { - return builtin_options_type() == BuiltinOptions_SequenceRNNOptions ? static_cast(builtin_options()) : nullptr; + WhereOptionsBuilder &operator=(const WhereOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const StridedSliceOptions *builtin_options_as_StridedSliceOptions() const { - return builtin_options_type() == BuiltinOptions_StridedSliceOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateWhereOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + WhereOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReverseSequenceOptionsT : public flatbuffers::NativeTable { + typedef ReverseSequenceOptions TableType; + int32_t seq_dim; + int32_t batch_dim; + ReverseSequenceOptionsT() + : seq_dim(0), + batch_dim(0) { } - const ExpOptions *builtin_options_as_ExpOptions() const { - return builtin_options_type() == BuiltinOptions_ExpOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReverseSequenceOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SEQ_DIM = 4, + VT_BATCH_DIM = 6 + }; + int32_t seq_dim() const { + return GetField(VT_SEQ_DIM, 0); } - const TopKV2Options *builtin_options_as_TopKV2Options() const { - return builtin_options_type() == BuiltinOptions_TopKV2Options ? 
static_cast(builtin_options()) : nullptr; + int32_t batch_dim() const { + return GetField(VT_BATCH_DIM, 0); } - const SplitOptions *builtin_options_as_SplitOptions() const { - return builtin_options_type() == BuiltinOptions_SplitOptions ? static_cast(builtin_options()) : nullptr; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SEQ_DIM) && + VerifyField(verifier, VT_BATCH_DIM) && + verifier.EndTable(); } - const LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const { - return builtin_options_type() == BuiltinOptions_LogSoftmaxOptions ? static_cast(builtin_options()) : nullptr; + ReverseSequenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReverseSequenceOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_seq_dim(int32_t seq_dim) { + fbb_.AddElement(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0); } - const CastOptions *builtin_options_as_CastOptions() const { - return builtin_options_type() == BuiltinOptions_CastOptions ? static_cast(builtin_options()) : nullptr; + void add_batch_dim(int32_t batch_dim) { + fbb_.AddElement(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0); } - const DequantizeOptions *builtin_options_as_DequantizeOptions() const { - return builtin_options_type() == BuiltinOptions_DequantizeOptions ? 
static_cast(builtin_options()) : nullptr; + explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const { - return builtin_options_type() == BuiltinOptions_MaximumMinimumOptions ? static_cast(builtin_options()) : nullptr; + ReverseSequenceOptionsBuilder &operator=(const ReverseSequenceOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const ArgMaxOptions *builtin_options_as_ArgMaxOptions() const { - return builtin_options_type() == BuiltinOptions_ArgMaxOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateReverseSequenceOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t seq_dim = 0, + int32_t batch_dim = 0) { + ReverseSequenceOptionsBuilder builder_(_fbb); + builder_.add_batch_dim(batch_dim); + builder_.add_seq_dim(seq_dim); + return builder_.Finish(); +} + +flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MatrixDiagOptionsT : public flatbuffers::NativeTable { + typedef MatrixDiagOptions TableType; + MatrixDiagOptionsT() { } - const LessOptions *builtin_options_as_LessOptions() const { - return builtin_options_type() == BuiltinOptions_LessOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MatrixDiagOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const NegOptions *builtin_options_as_NegOptions() const { - return builtin_options_type() == BuiltinOptions_NegOptions ? 
static_cast(builtin_options()) : nullptr; + MatrixDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MatrixDiagOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const PadV2Options *builtin_options_as_PadV2Options() const { - return builtin_options_type() == BuiltinOptions_PadV2Options ? static_cast(builtin_options()) : nullptr; + MatrixDiagOptionsBuilder &operator=(const MatrixDiagOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const GreaterOptions *builtin_options_as_GreaterOptions() const { - return builtin_options_type() == BuiltinOptions_GreaterOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateMatrixDiagOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + MatrixDiagOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizeOptionsT : public flatbuffers::NativeTable { + typedef QuantizeOptions TableType; + QuantizeOptionsT() { } - const GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const { - return builtin_options_type() == BuiltinOptions_GreaterEqualOptions ? 
static_cast(builtin_options()) : nullptr; +}; + +struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizeOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const LessEqualOptions *builtin_options_as_LessEqualOptions() const { - return builtin_options_type() == BuiltinOptions_LessEqualOptions ? static_cast(builtin_options()) : nullptr; + QuantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizeOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const SelectOptions *builtin_options_as_SelectOptions() const { - return builtin_options_type() == BuiltinOptions_SelectOptions ? static_cast(builtin_options()) : nullptr; + QuantizeOptionsBuilder &operator=(const QuantizeOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const SliceOptions *builtin_options_as_SliceOptions() const { - return builtin_options_type() == BuiltinOptions_SliceOptions ? 
static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateQuantizeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + QuantizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MatrixSetDiagOptionsT : public flatbuffers::NativeTable { + typedef MatrixSetDiagOptions TableType; + MatrixSetDiagOptionsT() { } - const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const { - return builtin_options_type() == BuiltinOptions_TransposeConvOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MatrixSetDiagOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const { - return builtin_options_type() == BuiltinOptions_SparseToDenseOptions ? static_cast(builtin_options()) : nullptr; + MatrixSetDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MatrixSetDiagOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const TileOptions *builtin_options_as_TileOptions() const { - return builtin_options_type() == BuiltinOptions_TileOptions ? 
static_cast(builtin_options()) : nullptr; + MatrixSetDiagOptionsBuilder &operator=(const MatrixSetDiagOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const { - return builtin_options_type() == BuiltinOptions_ExpandDimsOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateMatrixSetDiagOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + MatrixSetDiagOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct IfOptionsT : public flatbuffers::NativeTable { + typedef IfOptions TableType; + int32_t then_subgraph_index; + int32_t else_subgraph_index; + IfOptionsT() + : then_subgraph_index(0), + else_subgraph_index(0) { } - const EqualOptions *builtin_options_as_EqualOptions() const { - return builtin_options_type() == BuiltinOptions_EqualOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef IfOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_THEN_SUBGRAPH_INDEX = 4, + VT_ELSE_SUBGRAPH_INDEX = 6 + }; + int32_t then_subgraph_index() const { + return GetField(VT_THEN_SUBGRAPH_INDEX, 0); } - const NotEqualOptions *builtin_options_as_NotEqualOptions() const { - return builtin_options_type() == BuiltinOptions_NotEqualOptions ? static_cast(builtin_options()) : nullptr; + int32_t else_subgraph_index() const { + return GetField(VT_ELSE_SUBGRAPH_INDEX, 0); } - const ShapeOptions *builtin_options_as_ShapeOptions() const { - return builtin_options_type() == BuiltinOptions_ShapeOptions ? 
static_cast(builtin_options()) : nullptr; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_THEN_SUBGRAPH_INDEX) && + VerifyField(verifier, VT_ELSE_SUBGRAPH_INDEX) && + verifier.EndTable(); } - const PowOptions *builtin_options_as_PowOptions() const { - return builtin_options_type() == BuiltinOptions_PowOptions ? static_cast(builtin_options()) : nullptr; + IfOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct IfOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_then_subgraph_index(int32_t then_subgraph_index) { + fbb_.AddElement(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0); } - const ArgMinOptions *builtin_options_as_ArgMinOptions() const { - return builtin_options_type() == BuiltinOptions_ArgMinOptions ? static_cast(builtin_options()) : nullptr; + void add_else_subgraph_index(int32_t else_subgraph_index) { + fbb_.AddElement(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0); } - const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const { - return builtin_options_type() == BuiltinOptions_FakeQuantOptions ? static_cast(builtin_options()) : nullptr; + explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const PackOptions *builtin_options_as_PackOptions() const { - return builtin_options_type() == BuiltinOptions_PackOptions ? 
static_cast(builtin_options()) : nullptr; + IfOptionsBuilder &operator=(const IfOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const { - return builtin_options_type() == BuiltinOptions_LogicalOrOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateIfOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t then_subgraph_index = 0, + int32_t else_subgraph_index = 0) { + IfOptionsBuilder builder_(_fbb); + builder_.add_else_subgraph_index(else_subgraph_index); + builder_.add_then_subgraph_index(then_subgraph_index); + return builder_.Finish(); +} + +flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct WhileOptionsT : public flatbuffers::NativeTable { + typedef WhileOptions TableType; + int32_t cond_subgraph_index; + int32_t body_subgraph_index; + WhileOptionsT() + : cond_subgraph_index(0), + body_subgraph_index(0) { } - const OneHotOptions *builtin_options_as_OneHotOptions() const { - return builtin_options_type() == BuiltinOptions_OneHotOptions ? static_cast(builtin_options()) : nullptr; +}; + +struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef WhileOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_COND_SUBGRAPH_INDEX = 4, + VT_BODY_SUBGRAPH_INDEX = 6 + }; + int32_t cond_subgraph_index() const { + return GetField(VT_COND_SUBGRAPH_INDEX, 0); } - const LogicalAndOptions *builtin_options_as_LogicalAndOptions() const { - return builtin_options_type() == BuiltinOptions_LogicalAndOptions ? 
static_cast(builtin_options()) : nullptr; + int32_t body_subgraph_index() const { + return GetField(VT_BODY_SUBGRAPH_INDEX, 0); } - const LogicalNotOptions *builtin_options_as_LogicalNotOptions() const { - return builtin_options_type() == BuiltinOptions_LogicalNotOptions ? static_cast(builtin_options()) : nullptr; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_COND_SUBGRAPH_INDEX) && + VerifyField(verifier, VT_BODY_SUBGRAPH_INDEX) && + verifier.EndTable(); } - const UnpackOptions *builtin_options_as_UnpackOptions() const { - return builtin_options_type() == BuiltinOptions_UnpackOptions ? static_cast(builtin_options()) : nullptr; + WhileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct WhileOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_cond_subgraph_index(int32_t cond_subgraph_index) { + fbb_.AddElement(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0); } - const FloorDivOptions *builtin_options_as_FloorDivOptions() const { - return builtin_options_type() == BuiltinOptions_FloorDivOptions ? static_cast(builtin_options()) : nullptr; + void add_body_subgraph_index(int32_t body_subgraph_index) { + fbb_.AddElement(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0); } - const SquareOptions *builtin_options_as_SquareOptions() const { - return builtin_options_type() == BuiltinOptions_SquareOptions ? 
static_cast(builtin_options()) : nullptr; + explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - const ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const { - return builtin_options_type() == BuiltinOptions_ZerosLikeOptions ? static_cast(builtin_options()) : nullptr; + WhileOptionsBuilder &operator=(const WhileOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - const FillOptions *builtin_options_as_FillOptions() const { - return builtin_options_type() == BuiltinOptions_FillOptions ? static_cast(builtin_options()) : nullptr; +}; + +inline flatbuffers::Offset CreateWhileOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t cond_subgraph_index = 0, + int32_t body_subgraph_index = 0) { + WhileOptionsBuilder builder_(_fbb); + builder_.add_body_subgraph_index(body_subgraph_index); + builder_.add_cond_subgraph_index(cond_subgraph_index); + return builder_.Finish(); +} + +flatbuffers::Offset CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NonMaxSuppressionV4OptionsT : public flatbuffers::NativeTable { + typedef NonMaxSuppressionV4Options TableType; + NonMaxSuppressionV4OptionsT() { } - void *mutable_builtin_options() { - return GetPointer(VT_BUILTIN_OPTIONS); +}; + +struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NonMaxSuppressionV4OptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const flatbuffers::Vector *custom_options() const { - return GetPointer *>(VT_CUSTOM_OPTIONS); + NonMaxSuppressionV4OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NonMaxSuppressionV4OptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NonMaxSuppressionV4OptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - flatbuffers::Vector *mutable_custom_options() { - return GetPointer *>(VT_CUSTOM_OPTIONS); + NonMaxSuppressionV4OptionsBuilder &operator=(const NonMaxSuppressionV4OptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - CustomOptionsFormat custom_options_format() const { - return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); +}; + +inline flatbuffers::Offset CreateNonMaxSuppressionV4Options( + flatbuffers::FlatBufferBuilder &_fbb) { + NonMaxSuppressionV4OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NonMaxSuppressionV5OptionsT : public flatbuffers::NativeTable { + typedef NonMaxSuppressionV5Options TableType; + NonMaxSuppressionV5OptionsT() { } - bool mutate_custom_options_format(CustomOptionsFormat _custom_options_format) { - return SetField(VT_CUSTOM_OPTIONS_FORMAT, static_cast(_custom_options_format), 0); +}; + +struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NonMaxSuppressionV5OptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); } - const flatbuffers::Vector *mutating_variable_inputs() const { - return GetPointer 
*>(VT_MUTATING_VARIABLE_INPUTS); + NonMaxSuppressionV5OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NonMaxSuppressionV5OptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); } - flatbuffers::Vector *mutable_mutating_variable_inputs() { - return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); + NonMaxSuppressionV5OptionsBuilder &operator=(const NonMaxSuppressionV5OptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OPCODE_INDEX) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - VerifyField(verifier, VT_BUILTIN_OPTIONS_TYPE) && - VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && - VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && - VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && - verifier.VerifyVector(custom_options()) && - VerifyField(verifier, VT_CUSTOM_OPTIONS_FORMAT) && - VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && - verifier.VerifyVector(mutating_variable_inputs()) && - verifier.EndTable(); - } - OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -template<> inline const Conv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Conv2DOptions(); -} - -template<> inline const DepthwiseConv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_DepthwiseConv2DOptions(); -} - -template<> inline const ConcatEmbeddingsOptions *Operator::builtin_options_as() const { - return builtin_options_as_ConcatEmbeddingsOptions(); -} - -template<> inline const LSHProjectionOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSHProjectionOptions(); -} - -template<> inline const Pool2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Pool2DOptions(); -} - -template<> inline const SVDFOptions *Operator::builtin_options_as() const { - return builtin_options_as_SVDFOptions(); -} - -template<> inline const RNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_RNNOptions(); -} - -template<> inline const FullyConnectedOptions *Operator::builtin_options_as() const { - return builtin_options_as_FullyConnectedOptions(); -} - -template<> inline const SoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_SoftmaxOptions(); -} - -template<> inline const ConcatenationOptions *Operator::builtin_options_as() const { - return builtin_options_as_ConcatenationOptions(); -} - -template<> inline const AddOptions *Operator::builtin_options_as() const { - return builtin_options_as_AddOptions(); -} - -template<> inline const L2NormOptions *Operator::builtin_options_as() const { - return builtin_options_as_L2NormOptions(); -} - -template<> inline const LocalResponseNormalizationOptions *Operator::builtin_options_as() const { - return builtin_options_as_LocalResponseNormalizationOptions(); -} - -template<> inline const LSTMOptions *Operator::builtin_options_as() const { - return 
builtin_options_as_LSTMOptions(); -} - -template<> inline const ResizeBilinearOptions *Operator::builtin_options_as() const { - return builtin_options_as_ResizeBilinearOptions(); -} - -template<> inline const CallOptions *Operator::builtin_options_as() const { - return builtin_options_as_CallOptions(); -} - -template<> inline const ReshapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReshapeOptions(); -} - -template<> inline const SkipGramOptions *Operator::builtin_options_as() const { - return builtin_options_as_SkipGramOptions(); -} - -template<> inline const SpaceToDepthOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToDepthOptions(); -} - -template<> inline const EmbeddingLookupSparseOptions *Operator::builtin_options_as() const { - return builtin_options_as_EmbeddingLookupSparseOptions(); -} - -template<> inline const MulOptions *Operator::builtin_options_as() const { - return builtin_options_as_MulOptions(); +inline flatbuffers::Offset CreateNonMaxSuppressionV5Options( + flatbuffers::FlatBufferBuilder &_fbb) { + NonMaxSuppressionV5OptionsBuilder builder_(_fbb); + return builder_.Finish(); } -template<> inline const PadOptions *Operator::builtin_options_as() const { - return builtin_options_as_PadOptions(); -} +flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -template<> inline const GatherOptions *Operator::builtin_options_as() const { - return builtin_options_as_GatherOptions(); -} +struct ScatterNdOptionsT : public flatbuffers::NativeTable { + typedef ScatterNdOptions TableType; + ScatterNdOptionsT() { + } +}; -template<> inline const BatchToSpaceNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_BatchToSpaceNDOptions(); -} +struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef 
ScatterNdOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + ScatterNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -template<> inline const SpaceToBatchNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToBatchNDOptions(); -} +struct ScatterNdOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ScatterNdOptionsBuilder &operator=(const ScatterNdOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; -template<> inline const TransposeOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeOptions(); +inline flatbuffers::Offset CreateScatterNdOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + ScatterNdOptionsBuilder builder_(_fbb); + return builder_.Finish(); } -template<> inline const ReducerOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReducerOptions(); -} +flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -template<> inline const SubOptions *Operator::builtin_options_as() const { - return builtin_options_as_SubOptions(); -} +struct OperatorCodeT : public flatbuffers::NativeTable { + typedef OperatorCode TableType; + BuiltinOperator builtin_code; + std::string custom_code; + int32_t version; + 
OperatorCodeT() + : builtin_code(BuiltinOperator_ADD), + version(1) { + } +}; -template<> inline const DivOptions *Operator::builtin_options_as() const { - return builtin_options_as_DivOptions(); -} +struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OperatorCodeT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BUILTIN_CODE = 4, + VT_CUSTOM_CODE = 6, + VT_VERSION = 8 + }; + BuiltinOperator builtin_code() const { + return static_cast(GetField(VT_BUILTIN_CODE, 0)); + } + const flatbuffers::String *custom_code() const { + return GetPointer(VT_CUSTOM_CODE); + } + int32_t version() const { + return GetField(VT_VERSION, 1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BUILTIN_CODE) && + VerifyOffset(verifier, VT_CUSTOM_CODE) && + verifier.VerifyString(custom_code()) && + VerifyField(verifier, VT_VERSION) && + verifier.EndTable(); + } + OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -template<> inline const SqueezeOptions *Operator::builtin_options_as() const { - return builtin_options_as_SqueezeOptions(); -} +struct OperatorCodeBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_builtin_code(BuiltinOperator builtin_code) { + fbb_.AddElement(OperatorCode::VT_BUILTIN_CODE, static_cast(builtin_code), 0); + } + void add_custom_code(flatbuffers::Offset custom_code) { + fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code); + } + void add_version(int32_t version) { + fbb_.AddElement(OperatorCode::VT_VERSION, version, 1); + } + explicit 
OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + OperatorCodeBuilder &operator=(const OperatorCodeBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; -template<> inline const SequenceRNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_SequenceRNNOptions(); +inline flatbuffers::Offset CreateOperatorCode( + flatbuffers::FlatBufferBuilder &_fbb, + BuiltinOperator builtin_code = BuiltinOperator_ADD, + flatbuffers::Offset custom_code = 0, + int32_t version = 1) { + OperatorCodeBuilder builder_(_fbb); + builder_.add_version(version); + builder_.add_custom_code(custom_code); + builder_.add_builtin_code(builtin_code); + return builder_.Finish(); } -template<> inline const StridedSliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_StridedSliceOptions(); +inline flatbuffers::Offset CreateOperatorCodeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + BuiltinOperator builtin_code = BuiltinOperator_ADD, + const char *custom_code = nullptr, + int32_t version = 1) { + auto custom_code__ = custom_code ? 
_fbb.CreateString(custom_code) : 0; + return tflite::CreateOperatorCode( + _fbb, + builtin_code, + custom_code__, + version); } -template<> inline const ExpOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpOptions(); -} +flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -template<> inline const TopKV2Options *Operator::builtin_options_as() const { - return builtin_options_as_TopKV2Options(); -} +struct OperatorT : public flatbuffers::NativeTable { + typedef Operator TableType; + uint32_t opcode_index; + std::vector inputs; + std::vector outputs; + BuiltinOptionsUnion builtin_options; + std::vector custom_options; + CustomOptionsFormat custom_options_format; + std::vector mutating_variable_inputs; + std::vector intermediates; + OperatorT() + : opcode_index(0), + custom_options_format(CustomOptionsFormat_FLEXBUFFERS) { + } +}; -template<> inline const SplitOptions *Operator::builtin_options_as() const { - return builtin_options_as_SplitOptions(); -} - -template<> inline const LogSoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogSoftmaxOptions(); -} - -template<> inline const CastOptions *Operator::builtin_options_as() const { - return builtin_options_as_CastOptions(); -} - -template<> inline const DequantizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_DequantizeOptions(); -} - -template<> inline const MaximumMinimumOptions *Operator::builtin_options_as() const { - return builtin_options_as_MaximumMinimumOptions(); -} - -template<> inline const ArgMaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_ArgMaxOptions(); -} - -template<> inline const LessOptions *Operator::builtin_options_as() const { - return builtin_options_as_LessOptions(); -} - -template<> inline const NegOptions *Operator::builtin_options_as() const { - return 
builtin_options_as_NegOptions(); -} - -template<> inline const PadV2Options *Operator::builtin_options_as() const { - return builtin_options_as_PadV2Options(); -} - -template<> inline const GreaterOptions *Operator::builtin_options_as() const { - return builtin_options_as_GreaterOptions(); -} - -template<> inline const GreaterEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_GreaterEqualOptions(); -} - -template<> inline const LessEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_LessEqualOptions(); -} - -template<> inline const SelectOptions *Operator::builtin_options_as() const { - return builtin_options_as_SelectOptions(); -} - -template<> inline const SliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_SliceOptions(); -} - -template<> inline const TransposeConvOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeConvOptions(); -} - -template<> inline const SparseToDenseOptions *Operator::builtin_options_as() const { - return builtin_options_as_SparseToDenseOptions(); -} - -template<> inline const TileOptions *Operator::builtin_options_as() const { - return builtin_options_as_TileOptions(); -} - -template<> inline const ExpandDimsOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpandDimsOptions(); -} - -template<> inline const EqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_EqualOptions(); -} - -template<> inline const NotEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_NotEqualOptions(); -} - -template<> inline const ShapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ShapeOptions(); -} - -template<> inline const PowOptions *Operator::builtin_options_as() const { - return builtin_options_as_PowOptions(); -} - -template<> inline const ArgMinOptions *Operator::builtin_options_as() const { - return 
builtin_options_as_ArgMinOptions(); -} - -template<> inline const FakeQuantOptions *Operator::builtin_options_as() const { - return builtin_options_as_FakeQuantOptions(); -} - -template<> inline const PackOptions *Operator::builtin_options_as() const { - return builtin_options_as_PackOptions(); -} - -template<> inline const LogicalOrOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalOrOptions(); -} - -template<> inline const OneHotOptions *Operator::builtin_options_as() const { - return builtin_options_as_OneHotOptions(); -} - -template<> inline const LogicalAndOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalAndOptions(); -} - -template<> inline const LogicalNotOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalNotOptions(); -} - -template<> inline const UnpackOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnpackOptions(); -} - -template<> inline const FloorDivOptions *Operator::builtin_options_as() const { - return builtin_options_as_FloorDivOptions(); -} - -template<> inline const SquareOptions *Operator::builtin_options_as() const { - return builtin_options_as_SquareOptions(); -} - -template<> inline const ZerosLikeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ZerosLikeOptions(); -} - -template<> inline const FillOptions *Operator::builtin_options_as() const { - return builtin_options_as_FillOptions(); -} - -struct OperatorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_opcode_index(uint32_t opcode_index) { - fbb_.AddElement(Operator::VT_OPCODE_INDEX, opcode_index, 0); - } - void add_inputs(flatbuffers::Offset> inputs) { - fbb_.AddOffset(Operator::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset> outputs) { - fbb_.AddOffset(Operator::VT_OUTPUTS, outputs); - } - void add_builtin_options_type(BuiltinOptions builtin_options_type) { - 
fbb_.AddElement(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast(builtin_options_type), 0); - } - void add_builtin_options(flatbuffers::Offset builtin_options) { - fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options); - } - void add_custom_options(flatbuffers::Offset> custom_options) { - fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options); - } - void add_custom_options_format(CustomOptionsFormat custom_options_format) { - fbb_.AddElement(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast(custom_options_format), 0); - } - void add_mutating_variable_inputs(flatbuffers::Offset> mutating_variable_inputs) { - fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs); - } - explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OperatorBuilder &operator=(const OperatorBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateOperator( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t opcode_index = 0, - flatbuffers::Offset> inputs = 0, - flatbuffers::Offset> outputs = 0, - BuiltinOptions builtin_options_type = BuiltinOptions_NONE, - flatbuffers::Offset builtin_options = 0, - flatbuffers::Offset> custom_options = 0, - CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS, - flatbuffers::Offset> mutating_variable_inputs = 0) { - OperatorBuilder builder_(_fbb); - builder_.add_mutating_variable_inputs(mutating_variable_inputs); - builder_.add_custom_options(custom_options); - builder_.add_builtin_options(builtin_options); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - builder_.add_opcode_index(opcode_index); - builder_.add_custom_options_format(custom_options_format); - builder_.add_builtin_options_type(builtin_options_type); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateOperatorDirect( - 
flatbuffers::FlatBufferBuilder &_fbb, - uint32_t opcode_index = 0, - const std::vector *inputs = nullptr, - const std::vector *outputs = nullptr, - BuiltinOptions builtin_options_type = BuiltinOptions_NONE, - flatbuffers::Offset builtin_options = 0, - const std::vector *custom_options = nullptr, - CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS, - const std::vector *mutating_variable_inputs = nullptr) { - auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; - auto custom_options__ = custom_options ? _fbb.CreateVector(*custom_options) : 0; - auto mutating_variable_inputs__ = mutating_variable_inputs ? _fbb.CreateVector(*mutating_variable_inputs) : 0; - return tflite::CreateOperator( - _fbb, - opcode_index, - inputs__, - outputs__, - builtin_options_type, - builtin_options, - custom_options__, - custom_options_format, - mutating_variable_inputs__); -} - -flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SubGraphT : public flatbuffers::NativeTable { - typedef SubGraph TableType; - std::vector> tensors; - std::vector inputs; - std::vector outputs; - std::vector> operators; - std::string name; - SubGraphT() { - } -}; - -struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SubGraphT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SubGraphTypeTable(); - } +struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OperatorT NativeTableType; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TENSORS = 4, + VT_OPCODE_INDEX = 4, VT_INPUTS = 6, VT_OUTPUTS = 8, - VT_OPERATORS = 10, - VT_NAME = 12 + VT_BUILTIN_OPTIONS_TYPE = 10, + VT_BUILTIN_OPTIONS = 12, + VT_CUSTOM_OPTIONS = 14, + VT_CUSTOM_OPTIONS_FORMAT = 16, + VT_MUTATING_VARIABLE_INPUTS = 18, + 
VT_INTERMEDIATES = 20 }; - const flatbuffers::Vector> *tensors() const { - return GetPointer> *>(VT_TENSORS); - } - flatbuffers::Vector> *mutable_tensors() { - return GetPointer> *>(VT_TENSORS); + uint32_t opcode_index() const { + return GetField(VT_OPCODE_INDEX, 0); } const flatbuffers::Vector *inputs() const { return GetPointer *>(VT_INPUTS); } - flatbuffers::Vector *mutable_inputs() { - return GetPointer *>(VT_INPUTS); - } const flatbuffers::Vector *outputs() const { return GetPointer *>(VT_OUTPUTS); } - flatbuffers::Vector *mutable_outputs() { - return GetPointer *>(VT_OUTPUTS); - } - const flatbuffers::Vector> *operators() const { - return GetPointer> *>(VT_OPERATORS); + BuiltinOptions builtin_options_type() const { + return static_cast(GetField(VT_BUILTIN_OPTIONS_TYPE, 0)); } - flatbuffers::Vector> *mutable_operators() { - return GetPointer> *>(VT_OPERATORS); + const void *builtin_options() const { + return GetPointer(VT_BUILTIN_OPTIONS); } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); + template const T *builtin_options_as() const; + const Conv2DOptions *builtin_options_as_Conv2DOptions() const { + return builtin_options_type() == BuiltinOptions_Conv2DOptions ? static_cast(builtin_options()) : nullptr; } - flatbuffers::String *mutable_name() { - return GetPointer(VT_NAME); + const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const { + return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions ? 
static_cast(builtin_options()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_TENSORS) && - verifier.VerifyVector(tensors()) && - verifier.VerifyVectorOfTables(tensors()) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - VerifyOffset(verifier, VT_OPERATORS) && - verifier.VerifyVector(operators()) && - verifier.VerifyVectorOfTables(operators()) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - verifier.EndTable(); + const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const { + return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions ? static_cast(builtin_options()) : nullptr; } - SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SubGraphBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_tensors(flatbuffers::Offset>> tensors) { - fbb_.AddOffset(SubGraph::VT_TENSORS, tensors); + const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const { + return builtin_options_type() == BuiltinOptions_LSHProjectionOptions ? static_cast(builtin_options()) : nullptr; } - void add_inputs(flatbuffers::Offset> inputs) { - fbb_.AddOffset(SubGraph::VT_INPUTS, inputs); + const Pool2DOptions *builtin_options_as_Pool2DOptions() const { + return builtin_options_type() == BuiltinOptions_Pool2DOptions ? 
static_cast(builtin_options()) : nullptr; } - void add_outputs(flatbuffers::Offset> outputs) { - fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs); + const SVDFOptions *builtin_options_as_SVDFOptions() const { + return builtin_options_type() == BuiltinOptions_SVDFOptions ? static_cast(builtin_options()) : nullptr; } - void add_operators(flatbuffers::Offset>> operators) { - fbb_.AddOffset(SubGraph::VT_OPERATORS, operators); + const RNNOptions *builtin_options_as_RNNOptions() const { + return builtin_options_type() == BuiltinOptions_RNNOptions ? static_cast(builtin_options()) : nullptr; } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(SubGraph::VT_NAME, name); + const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const { + return builtin_options_type() == BuiltinOptions_FullyConnectedOptions ? static_cast(builtin_options()) : nullptr; } - explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const { + return builtin_options_type() == BuiltinOptions_SoftmaxOptions ? static_cast(builtin_options()) : nullptr; } - SubGraphBuilder &operator=(const SubGraphBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const { + return builtin_options_type() == BuiltinOptions_ConcatenationOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateSubGraph( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> tensors = 0, - flatbuffers::Offset> inputs = 0, - flatbuffers::Offset> outputs = 0, - flatbuffers::Offset>> operators = 0, - flatbuffers::Offset name = 0) { - SubGraphBuilder builder_(_fbb); - builder_.add_name(name); - builder_.add_operators(operators); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - builder_.add_tensors(tensors); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSubGraphDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *tensors = nullptr, - const std::vector *inputs = nullptr, - const std::vector *outputs = nullptr, - const std::vector> *operators = nullptr, - const char *name = nullptr) { - auto tensors__ = tensors ? _fbb.CreateVector>(*tensors) : 0; - auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; - auto operators__ = operators ? _fbb.CreateVector>(*operators) : 0; - auto name__ = name ? _fbb.CreateString(name) : 0; - return tflite::CreateSubGraph( - _fbb, - tensors__, - inputs__, - outputs__, - operators__, - name__); -} - -flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BufferT : public flatbuffers::NativeTable { - typedef Buffer TableType; - std::vector data; - BufferT() { + const AddOptions *builtin_options_as_AddOptions() const { + return builtin_options_type() == BuiltinOptions_AddOptions ? 
static_cast(builtin_options()) : nullptr; } -}; - -struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BufferT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return BufferTypeTable(); + const L2NormOptions *builtin_options_as_L2NormOptions() const { + return builtin_options_type() == BuiltinOptions_L2NormOptions ? static_cast(builtin_options()) : nullptr; } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DATA = 4 - }; - const flatbuffers::Vector *data() const { - return GetPointer *>(VT_DATA); + const LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const { + return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions ? static_cast(builtin_options()) : nullptr; } - flatbuffers::Vector *mutable_data() { - return GetPointer *>(VT_DATA); + const LSTMOptions *builtin_options_as_LSTMOptions() const { + return builtin_options_type() == BuiltinOptions_LSTMOptions ? static_cast(builtin_options()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DATA) && - verifier.VerifyVector(data()) && - verifier.EndTable(); + const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const { + return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions ? 
static_cast(builtin_options()) : nullptr; } - BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BufferBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_data(flatbuffers::Offset> data) { - fbb_.AddOffset(Buffer::VT_DATA, data); + const CallOptions *builtin_options_as_CallOptions() const { + return builtin_options_type() == BuiltinOptions_CallOptions ? static_cast(builtin_options()) : nullptr; } - explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const ReshapeOptions *builtin_options_as_ReshapeOptions() const { + return builtin_options_type() == BuiltinOptions_ReshapeOptions ? static_cast(builtin_options()) : nullptr; } - BufferBuilder &operator=(const BufferBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const SkipGramOptions *builtin_options_as_SkipGramOptions() const { + return builtin_options_type() == BuiltinOptions_SkipGramOptions ? static_cast(builtin_options()) : nullptr; } -}; - -inline flatbuffers::Offset CreateBuffer( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> data = 0) { - BufferBuilder builder_(_fbb); - builder_.add_data(data); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateBufferDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *data = nullptr) { - auto data__ = data ? 
_fbb.CreateVector(*data) : 0; - return tflite::CreateBuffer( - _fbb, - data__); -} - -flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ModelT : public flatbuffers::NativeTable { - typedef Model TableType; - uint32_t version; - std::vector> operator_codes; - std::vector> subgraphs; - std::string description; - std::vector> buffers; - std::vector metadata_buffer; - ModelT() - : version(0) { + const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const { + return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions ? static_cast(builtin_options()) : nullptr; } -}; - -struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ModelT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ModelTypeTable(); + const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const { + return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions ? static_cast(builtin_options()) : nullptr; } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VERSION = 4, - VT_OPERATOR_CODES = 6, - VT_SUBGRAPHS = 8, - VT_DESCRIPTION = 10, - VT_BUFFERS = 12, - VT_METADATA_BUFFER = 14 - }; - uint32_t version() const { - return GetField(VT_VERSION, 0); + const MulOptions *builtin_options_as_MulOptions() const { + return builtin_options_type() == BuiltinOptions_MulOptions ? static_cast(builtin_options()) : nullptr; } - bool mutate_version(uint32_t _version) { - return SetField(VT_VERSION, _version, 0); + const PadOptions *builtin_options_as_PadOptions() const { + return builtin_options_type() == BuiltinOptions_PadOptions ? 
static_cast(builtin_options()) : nullptr; } - const flatbuffers::Vector> *operator_codes() const { - return GetPointer> *>(VT_OPERATOR_CODES); + const GatherOptions *builtin_options_as_GatherOptions() const { + return builtin_options_type() == BuiltinOptions_GatherOptions ? static_cast(builtin_options()) : nullptr; } - flatbuffers::Vector> *mutable_operator_codes() { - return GetPointer> *>(VT_OPERATOR_CODES); + const BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const { + return builtin_options_type() == BuiltinOptions_BatchToSpaceNDOptions ? static_cast(builtin_options()) : nullptr; } - const flatbuffers::Vector> *subgraphs() const { - return GetPointer> *>(VT_SUBGRAPHS); + const SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const { + return builtin_options_type() == BuiltinOptions_SpaceToBatchNDOptions ? static_cast(builtin_options()) : nullptr; } - flatbuffers::Vector> *mutable_subgraphs() { - return GetPointer> *>(VT_SUBGRAPHS); + const TransposeOptions *builtin_options_as_TransposeOptions() const { + return builtin_options_type() == BuiltinOptions_TransposeOptions ? static_cast(builtin_options()) : nullptr; } - const flatbuffers::String *description() const { - return GetPointer(VT_DESCRIPTION); + const ReducerOptions *builtin_options_as_ReducerOptions() const { + return builtin_options_type() == BuiltinOptions_ReducerOptions ? static_cast(builtin_options()) : nullptr; } - flatbuffers::String *mutable_description() { - return GetPointer(VT_DESCRIPTION); + const SubOptions *builtin_options_as_SubOptions() const { + return builtin_options_type() == BuiltinOptions_SubOptions ? static_cast(builtin_options()) : nullptr; } - const flatbuffers::Vector> *buffers() const { - return GetPointer> *>(VT_BUFFERS); + const DivOptions *builtin_options_as_DivOptions() const { + return builtin_options_type() == BuiltinOptions_DivOptions ? 
static_cast(builtin_options()) : nullptr; } - flatbuffers::Vector> *mutable_buffers() { - return GetPointer> *>(VT_BUFFERS); + const SqueezeOptions *builtin_options_as_SqueezeOptions() const { + return builtin_options_type() == BuiltinOptions_SqueezeOptions ? static_cast(builtin_options()) : nullptr; } - const flatbuffers::Vector *metadata_buffer() const { - return GetPointer *>(VT_METADATA_BUFFER); + const SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const { + return builtin_options_type() == BuiltinOptions_SequenceRNNOptions ? static_cast(builtin_options()) : nullptr; } - flatbuffers::Vector *mutable_metadata_buffer() { - return GetPointer *>(VT_METADATA_BUFFER); + const StridedSliceOptions *builtin_options_as_StridedSliceOptions() const { + return builtin_options_type() == BuiltinOptions_StridedSliceOptions ? static_cast(builtin_options()) : nullptr; } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VERSION) && - VerifyOffset(verifier, VT_OPERATOR_CODES) && - verifier.VerifyVector(operator_codes()) && - verifier.VerifyVectorOfTables(operator_codes()) && - VerifyOffset(verifier, VT_SUBGRAPHS) && - verifier.VerifyVector(subgraphs()) && - verifier.VerifyVectorOfTables(subgraphs()) && - VerifyOffset(verifier, VT_DESCRIPTION) && - verifier.VerifyString(description()) && - VerifyOffset(verifier, VT_BUFFERS) && - verifier.VerifyVector(buffers()) && - verifier.VerifyVectorOfTables(buffers()) && - VerifyOffset(verifier, VT_METADATA_BUFFER) && - verifier.VerifyVector(metadata_buffer()) && - verifier.EndTable(); + const ExpOptions *builtin_options_as_ExpOptions() const { + return builtin_options_type() == BuiltinOptions_ExpOptions ? 
static_cast(builtin_options()) : nullptr; } - ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ModelBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_version(uint32_t version) { - fbb_.AddElement(Model::VT_VERSION, version, 0); + const TopKV2Options *builtin_options_as_TopKV2Options() const { + return builtin_options_type() == BuiltinOptions_TopKV2Options ? static_cast(builtin_options()) : nullptr; } - void add_operator_codes(flatbuffers::Offset>> operator_codes) { - fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes); + const SplitOptions *builtin_options_as_SplitOptions() const { + return builtin_options_type() == BuiltinOptions_SplitOptions ? static_cast(builtin_options()) : nullptr; } - void add_subgraphs(flatbuffers::Offset>> subgraphs) { - fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs); + const LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const { + return builtin_options_type() == BuiltinOptions_LogSoftmaxOptions ? static_cast(builtin_options()) : nullptr; } - void add_description(flatbuffers::Offset description) { - fbb_.AddOffset(Model::VT_DESCRIPTION, description); + const CastOptions *builtin_options_as_CastOptions() const { + return builtin_options_type() == BuiltinOptions_CastOptions ? static_cast(builtin_options()) : nullptr; } - void add_buffers(flatbuffers::Offset>> buffers) { - fbb_.AddOffset(Model::VT_BUFFERS, buffers); + const DequantizeOptions *builtin_options_as_DequantizeOptions() const { + return builtin_options_type() == BuiltinOptions_DequantizeOptions ? 
static_cast(builtin_options()) : nullptr; } - void add_metadata_buffer(flatbuffers::Offset> metadata_buffer) { - fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer); + const MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const { + return builtin_options_type() == BuiltinOptions_MaximumMinimumOptions ? static_cast(builtin_options()) : nullptr; } - explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); + const ArgMaxOptions *builtin_options_as_ArgMaxOptions() const { + return builtin_options_type() == BuiltinOptions_ArgMaxOptions ? static_cast(builtin_options()) : nullptr; } - ModelBuilder &operator=(const ModelBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; + const LessOptions *builtin_options_as_LessOptions() const { + return builtin_options_type() == BuiltinOptions_LessOptions ? static_cast(builtin_options()) : nullptr; + } + const NegOptions *builtin_options_as_NegOptions() const { + return builtin_options_type() == BuiltinOptions_NegOptions ? static_cast(builtin_options()) : nullptr; + } + const PadV2Options *builtin_options_as_PadV2Options() const { + return builtin_options_type() == BuiltinOptions_PadV2Options ? static_cast(builtin_options()) : nullptr; + } + const GreaterOptions *builtin_options_as_GreaterOptions() const { + return builtin_options_type() == BuiltinOptions_GreaterOptions ? static_cast(builtin_options()) : nullptr; + } + const GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const { + return builtin_options_type() == BuiltinOptions_GreaterEqualOptions ? static_cast(builtin_options()) : nullptr; + } + const LessEqualOptions *builtin_options_as_LessEqualOptions() const { + return builtin_options_type() == BuiltinOptions_LessEqualOptions ? 
static_cast(builtin_options()) : nullptr; + } + const SelectOptions *builtin_options_as_SelectOptions() const { + return builtin_options_type() == BuiltinOptions_SelectOptions ? static_cast(builtin_options()) : nullptr; + } + const SliceOptions *builtin_options_as_SliceOptions() const { + return builtin_options_type() == BuiltinOptions_SliceOptions ? static_cast(builtin_options()) : nullptr; + } + const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const { + return builtin_options_type() == BuiltinOptions_TransposeConvOptions ? static_cast(builtin_options()) : nullptr; + } + const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const { + return builtin_options_type() == BuiltinOptions_SparseToDenseOptions ? static_cast(builtin_options()) : nullptr; + } + const TileOptions *builtin_options_as_TileOptions() const { + return builtin_options_type() == BuiltinOptions_TileOptions ? static_cast(builtin_options()) : nullptr; + } + const ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const { + return builtin_options_type() == BuiltinOptions_ExpandDimsOptions ? static_cast(builtin_options()) : nullptr; + } + const EqualOptions *builtin_options_as_EqualOptions() const { + return builtin_options_type() == BuiltinOptions_EqualOptions ? static_cast(builtin_options()) : nullptr; + } + const NotEqualOptions *builtin_options_as_NotEqualOptions() const { + return builtin_options_type() == BuiltinOptions_NotEqualOptions ? static_cast(builtin_options()) : nullptr; + } + const ShapeOptions *builtin_options_as_ShapeOptions() const { + return builtin_options_type() == BuiltinOptions_ShapeOptions ? static_cast(builtin_options()) : nullptr; + } + const PowOptions *builtin_options_as_PowOptions() const { + return builtin_options_type() == BuiltinOptions_PowOptions ? static_cast(builtin_options()) : nullptr; + } + const ArgMinOptions *builtin_options_as_ArgMinOptions() const { + return builtin_options_type() == BuiltinOptions_ArgMinOptions ? 
static_cast(builtin_options()) : nullptr; + } + const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const { + return builtin_options_type() == BuiltinOptions_FakeQuantOptions ? static_cast(builtin_options()) : nullptr; + } + const PackOptions *builtin_options_as_PackOptions() const { + return builtin_options_type() == BuiltinOptions_PackOptions ? static_cast(builtin_options()) : nullptr; + } + const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const { + return builtin_options_type() == BuiltinOptions_LogicalOrOptions ? static_cast(builtin_options()) : nullptr; + } + const OneHotOptions *builtin_options_as_OneHotOptions() const { + return builtin_options_type() == BuiltinOptions_OneHotOptions ? static_cast(builtin_options()) : nullptr; + } + const LogicalAndOptions *builtin_options_as_LogicalAndOptions() const { + return builtin_options_type() == BuiltinOptions_LogicalAndOptions ? static_cast(builtin_options()) : nullptr; + } + const LogicalNotOptions *builtin_options_as_LogicalNotOptions() const { + return builtin_options_type() == BuiltinOptions_LogicalNotOptions ? static_cast(builtin_options()) : nullptr; + } + const UnpackOptions *builtin_options_as_UnpackOptions() const { + return builtin_options_type() == BuiltinOptions_UnpackOptions ? static_cast(builtin_options()) : nullptr; + } + const FloorDivOptions *builtin_options_as_FloorDivOptions() const { + return builtin_options_type() == BuiltinOptions_FloorDivOptions ? static_cast(builtin_options()) : nullptr; + } + const SquareOptions *builtin_options_as_SquareOptions() const { + return builtin_options_type() == BuiltinOptions_SquareOptions ? static_cast(builtin_options()) : nullptr; + } + const ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const { + return builtin_options_type() == BuiltinOptions_ZerosLikeOptions ? 
static_cast(builtin_options()) : nullptr; + } + const FillOptions *builtin_options_as_FillOptions() const { + return builtin_options_type() == BuiltinOptions_FillOptions ? static_cast(builtin_options()) : nullptr; + } + const BidirectionalSequenceLSTMOptions *builtin_options_as_BidirectionalSequenceLSTMOptions() const { + return builtin_options_type() == BuiltinOptions_BidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; + } + const BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const { + return builtin_options_type() == BuiltinOptions_BidirectionalSequenceRNNOptions ? static_cast(builtin_options()) : nullptr; + } + const UnidirectionalSequenceLSTMOptions *builtin_options_as_UnidirectionalSequenceLSTMOptions() const { + return builtin_options_type() == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; + } + const FloorModOptions *builtin_options_as_FloorModOptions() const { + return builtin_options_type() == BuiltinOptions_FloorModOptions ? static_cast(builtin_options()) : nullptr; + } + const RangeOptions *builtin_options_as_RangeOptions() const { + return builtin_options_type() == BuiltinOptions_RangeOptions ? static_cast(builtin_options()) : nullptr; + } + const ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const { + return builtin_options_type() == BuiltinOptions_ResizeNearestNeighborOptions ? static_cast(builtin_options()) : nullptr; + } + const LeakyReluOptions *builtin_options_as_LeakyReluOptions() const { + return builtin_options_type() == BuiltinOptions_LeakyReluOptions ? static_cast(builtin_options()) : nullptr; + } + const SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const { + return builtin_options_type() == BuiltinOptions_SquaredDifferenceOptions ? 
static_cast(builtin_options()) : nullptr; + } + const MirrorPadOptions *builtin_options_as_MirrorPadOptions() const { + return builtin_options_type() == BuiltinOptions_MirrorPadOptions ? static_cast(builtin_options()) : nullptr; + } + const AbsOptions *builtin_options_as_AbsOptions() const { + return builtin_options_type() == BuiltinOptions_AbsOptions ? static_cast(builtin_options()) : nullptr; + } + const SplitVOptions *builtin_options_as_SplitVOptions() const { + return builtin_options_type() == BuiltinOptions_SplitVOptions ? static_cast(builtin_options()) : nullptr; + } + const UniqueOptions *builtin_options_as_UniqueOptions() const { + return builtin_options_type() == BuiltinOptions_UniqueOptions ? static_cast(builtin_options()) : nullptr; + } + const ReverseV2Options *builtin_options_as_ReverseV2Options() const { + return builtin_options_type() == BuiltinOptions_ReverseV2Options ? static_cast(builtin_options()) : nullptr; + } + const AddNOptions *builtin_options_as_AddNOptions() const { + return builtin_options_type() == BuiltinOptions_AddNOptions ? static_cast(builtin_options()) : nullptr; + } + const GatherNdOptions *builtin_options_as_GatherNdOptions() const { + return builtin_options_type() == BuiltinOptions_GatherNdOptions ? static_cast(builtin_options()) : nullptr; + } + const CosOptions *builtin_options_as_CosOptions() const { + return builtin_options_type() == BuiltinOptions_CosOptions ? static_cast(builtin_options()) : nullptr; + } + const WhereOptions *builtin_options_as_WhereOptions() const { + return builtin_options_type() == BuiltinOptions_WhereOptions ? static_cast(builtin_options()) : nullptr; + } + const RankOptions *builtin_options_as_RankOptions() const { + return builtin_options_type() == BuiltinOptions_RankOptions ? static_cast(builtin_options()) : nullptr; + } + const ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const { + return builtin_options_type() == BuiltinOptions_ReverseSequenceOptions ? 
static_cast(builtin_options()) : nullptr; + } + const MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const { + return builtin_options_type() == BuiltinOptions_MatrixDiagOptions ? static_cast(builtin_options()) : nullptr; + } + const QuantizeOptions *builtin_options_as_QuantizeOptions() const { + return builtin_options_type() == BuiltinOptions_QuantizeOptions ? static_cast(builtin_options()) : nullptr; + } + const MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const { + return builtin_options_type() == BuiltinOptions_MatrixSetDiagOptions ? static_cast(builtin_options()) : nullptr; + } + const HardSwishOptions *builtin_options_as_HardSwishOptions() const { + return builtin_options_type() == BuiltinOptions_HardSwishOptions ? static_cast(builtin_options()) : nullptr; + } + const IfOptions *builtin_options_as_IfOptions() const { + return builtin_options_type() == BuiltinOptions_IfOptions ? static_cast(builtin_options()) : nullptr; + } + const WhileOptions *builtin_options_as_WhileOptions() const { + return builtin_options_type() == BuiltinOptions_WhileOptions ? static_cast(builtin_options()) : nullptr; + } + const DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const { + return builtin_options_type() == BuiltinOptions_DepthToSpaceOptions ? static_cast(builtin_options()) : nullptr; + } + const NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const { + return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV4Options ? static_cast(builtin_options()) : nullptr; + } + const NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const { + return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV5Options ? static_cast(builtin_options()) : nullptr; + } + const ScatterNdOptions *builtin_options_as_ScatterNdOptions() const { + return builtin_options_type() == BuiltinOptions_ScatterNdOptions ? 
static_cast(builtin_options()) : nullptr; + } + const flatbuffers::Vector *custom_options() const { + return GetPointer *>(VT_CUSTOM_OPTIONS); + } + CustomOptionsFormat custom_options_format() const { + return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); + } + const flatbuffers::Vector *mutating_variable_inputs() const { + return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); + } + const flatbuffers::Vector *intermediates() const { + return GetPointer *>(VT_INTERMEDIATES); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OPCODE_INDEX) && + VerifyOffset(verifier, VT_INPUTS) && + verifier.VerifyVector(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && + verifier.VerifyVector(outputs()) && + VerifyField(verifier, VT_BUILTIN_OPTIONS_TYPE) && + VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && + VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && + VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && + verifier.VerifyVector(custom_options()) && + VerifyField(verifier, VT_CUSTOM_OPTIONS_FORMAT) && + VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && + verifier.VerifyVector(mutating_variable_inputs()) && + VerifyOffset(verifier, VT_INTERMEDIATES) && + verifier.VerifyVector(intermediates()) && + verifier.EndTable(); } + OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; -inline flatbuffers::Offset CreateModel( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t version = 0, - flatbuffers::Offset>> operator_codes = 0, - flatbuffers::Offset>> subgraphs = 0, - flatbuffers::Offset description = 0, - flatbuffers::Offset>> buffers = 0, - flatbuffers::Offset> metadata_buffer = 0) { - ModelBuilder 
builder_(_fbb); - builder_.add_metadata_buffer(metadata_buffer); - builder_.add_buffers(buffers); - builder_.add_description(description); - builder_.add_subgraphs(subgraphs); - builder_.add_operator_codes(operator_codes); - builder_.add_version(version); - return builder_.Finish(); +template<> inline const Conv2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Conv2DOptions(); } -inline flatbuffers::Offset CreateModelDirect( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t version = 0, - const std::vector> *operator_codes = nullptr, - const std::vector> *subgraphs = nullptr, - const char *description = nullptr, - const std::vector> *buffers = nullptr, - const std::vector *metadata_buffer = nullptr) { - auto operator_codes__ = operator_codes ? _fbb.CreateVector>(*operator_codes) : 0; - auto subgraphs__ = subgraphs ? _fbb.CreateVector>(*subgraphs) : 0; - auto description__ = description ? _fbb.CreateString(description) : 0; - auto buffers__ = buffers ? _fbb.CreateVector>(*buffers) : 0; - auto metadata_buffer__ = metadata_buffer ? 
_fbb.CreateVector(*metadata_buffer) : 0; - return tflite::CreateModel( - _fbb, - version, - operator_codes__, - subgraphs__, - description__, - buffers__, - metadata_buffer__); +template<> inline const DepthwiseConv2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_DepthwiseConv2DOptions(); } -flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> inline const ConcatEmbeddingsOptions *Operator::builtin_options_as() const { + return builtin_options_as_ConcatEmbeddingsOptions(); +} -inline QuantizationParametersT *QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizationParametersT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const LSHProjectionOptions *Operator::builtin_options_as() const { + return builtin_options_as_LSHProjectionOptions(); } -inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } }; - { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } }; - { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } }; - { auto _e = zeroPoint(); if (_e) { _o->zeroPoint.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zeroPoint[_i] = _e->Get(_i); } } }; +template<> inline const Pool2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Pool2DOptions(); } -inline flatbuffers::Offset QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizationParameters(_fbb, _o, _rehasher); +template<> inline const SVDFOptions *Operator::builtin_options_as() const { + return builtin_options_as_SVDFOptions(); } -inline flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizationParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0; - auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0; - auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0; - auto _zeroPoint = _o->zeroPoint.size() ? _fbb.CreateVector(_o->zeroPoint) : 0; - return tflite::CreateQuantizationParameters( - _fbb, - _min, - _max, - _scale, - _zeroPoint); +template<> inline const RNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_RNNOptions(); } -inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TensorT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const FullyConnectedOptions *Operator::builtin_options_as() const { + return builtin_options_as_FullyConnectedOptions(); } -inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } }; - { auto _e = type(); _o->type = _e; }; - { auto _e = buffer(); _o->buffer = _e; }; - { auto _e = name(); if (_e) _o->name = _e->str(); }; - { auto _e = quantization(); if (_e) _o->quantization = std::unique_ptr(_e->UnPack(_resolver)); }; - { 
auto _e = is_variable(); _o->is_variable = _e; }; +template<> inline const SoftmaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_SoftmaxOptions(); } -inline flatbuffers::Offset Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTensor(_fbb, _o, _rehasher); +template<> inline const ConcatenationOptions *Operator::builtin_options_as() const { + return builtin_options_as_ConcatenationOptions(); } -inline flatbuffers::Offset CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; - auto _type = _o->type; - auto _buffer = _o->buffer; - auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); - auto _quantization = _o->quantization ? 
CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0; - auto _is_variable = _o->is_variable; - return tflite::CreateTensor( - _fbb, - _shape, - _type, - _buffer, - _name, - _quantization, - _is_variable); +template<> inline const AddOptions *Operator::builtin_options_as() const { + return builtin_options_as_AddOptions(); } -inline Conv2DOptionsT *Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Conv2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const L2NormOptions *Operator::builtin_options_as() const { + return builtin_options_as_L2NormOptions(); } -inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; }; - { auto _e = stride_w(); _o->stride_w = _e; }; - { auto _e = stride_h(); _o->stride_h = _e; }; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; - { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; }; - { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }; +template<> inline const LocalResponseNormalizationOptions *Operator::builtin_options_as() const { + return builtin_options_as_LocalResponseNormalizationOptions(); } -inline flatbuffers::Offset Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConv2DOptions(_fbb, _o, _rehasher); +template<> inline const LSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_LSTMOptions(); } -inline flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } 
_va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _fused_activation_function = _o->fused_activation_function; - auto _dilation_w_factor = _o->dilation_w_factor; - auto _dilation_h_factor = _o->dilation_h_factor; - return tflite::CreateConv2DOptions( - _fbb, - _padding, - _stride_w, - _stride_h, - _fused_activation_function, - _dilation_w_factor, - _dilation_h_factor); +template<> inline const ResizeBilinearOptions *Operator::builtin_options_as() const { + return builtin_options_as_ResizeBilinearOptions(); } -inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Pool2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const CallOptions *Operator::builtin_options_as() const { + return builtin_options_as_CallOptions(); } -inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; }; - { auto _e = stride_w(); _o->stride_w = _e; }; - { auto _e = stride_h(); _o->stride_h = _e; }; - { auto _e = filter_width(); _o->filter_width = _e; }; - { auto _e = filter_height(); _o->filter_height = _e; }; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const ReshapeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReshapeOptions(); } -inline flatbuffers::Offset Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePool2DOptions(_fbb, _o, _rehasher); +template<> inline const SkipGramOptions *Operator::builtin_options_as() const { + return builtin_options_as_SkipGramOptions(); } -inline flatbuffers::Offset CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _filter_width = _o->filter_width; - auto _filter_height = _o->filter_height; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreatePool2DOptions( - _fbb, - _padding, - _stride_w, - _stride_h, - _filter_width, - _filter_height, - _fused_activation_function); +template<> inline const SpaceToDepthOptions *Operator::builtin_options_as() const { + return builtin_options_as_SpaceToDepthOptions(); } -inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DepthwiseConv2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const EmbeddingLookupSparseOptions *Operator::builtin_options_as() const { + return builtin_options_as_EmbeddingLookupSparseOptions(); } -inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; }; - { auto _e = stride_w(); _o->stride_w = _e; }; - { auto _e = stride_h(); _o->stride_h = _e; }; - { auto _e = depth_multiplier(); _o->depth_multiplier = _e; }; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; - { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; }; - { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }; +template<> inline const MulOptions *Operator::builtin_options_as() const { + return builtin_options_as_MulOptions(); } -inline flatbuffers::Offset DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, 
const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher); +template<> inline const PadOptions *Operator::builtin_options_as() const { + return builtin_options_as_PadOptions(); } -inline flatbuffers::Offset CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _depth_multiplier = _o->depth_multiplier; - auto _fused_activation_function = _o->fused_activation_function; - auto _dilation_w_factor = _o->dilation_w_factor; - auto _dilation_h_factor = _o->dilation_h_factor; - return tflite::CreateDepthwiseConv2DOptions( - _fbb, - _padding, - _stride_w, - _stride_h, - _depth_multiplier, - _fused_activation_function, - _dilation_w_factor, - _dilation_h_factor); +template<> inline const GatherOptions *Operator::builtin_options_as() const { + return builtin_options_as_GatherOptions(); } -inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ConcatEmbeddingsOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const BatchToSpaceNDOptions *Operator::builtin_options_as() const { + return builtin_options_as_BatchToSpaceNDOptions(); } -inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = num_channels(); _o->num_channels = _e; }; - { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); 
_i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } }; - { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } }; +template<> inline const SpaceToBatchNDOptions *Operator::builtin_options_as() const { + return builtin_options_as_SpaceToBatchNDOptions(); } -inline flatbuffers::Offset ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher); +template<> inline const TransposeOptions *Operator::builtin_options_as() const { + return builtin_options_as_TransposeOptions(); } -inline flatbuffers::Offset CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_channels = _o->num_channels; - auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0; - auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? 
_fbb.CreateVector(_o->embedding_dim_per_channel) : 0; - return tflite::CreateConcatEmbeddingsOptions( - _fbb, - _num_channels, - _num_columns_per_channel, - _embedding_dim_per_channel); +template<> inline const ReducerOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReducerOptions(); } -inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LSHProjectionOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const SubOptions *Operator::builtin_options_as() const { + return builtin_options_as_SubOptions(); } -inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = type(); _o->type = _e; }; +template<> inline const DivOptions *Operator::builtin_options_as() const { + return builtin_options_as_DivOptions(); } -inline flatbuffers::Offset LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLSHProjectionOptions(_fbb, _o, _rehasher); +template<> inline const SqueezeOptions *Operator::builtin_options_as() const { + return builtin_options_as_SqueezeOptions(); } -inline flatbuffers::Offset CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _type = _o->type; - return tflite::CreateLSHProjectionOptions( - _fbb, - _type); +template<> inline const SequenceRNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_SequenceRNNOptions(); } -inline SVDFOptionsT *SVDFOptions::UnPack(const 
flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SVDFOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const StridedSliceOptions *Operator::builtin_options_as() const { + return builtin_options_as_StridedSliceOptions(); } -inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = rank(); _o->rank = _e; }; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const ExpOptions *Operator::builtin_options_as() const { + return builtin_options_as_ExpOptions(); } -inline flatbuffers::Offset SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSVDFOptions(_fbb, _o, _rehasher); +template<> inline const TopKV2Options *Operator::builtin_options_as() const { + return builtin_options_as_TopKV2Options(); } -inline flatbuffers::Offset CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _rank = _o->rank; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateSVDFOptions( - _fbb, - _rank, - _fused_activation_function); +template<> inline const SplitOptions *Operator::builtin_options_as() const { + return builtin_options_as_SplitOptions(); } -inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const LogSoftmaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogSoftmaxOptions(); } -inline void 
RNNOptions::UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const CastOptions *Operator::builtin_options_as() const { + return builtin_options_as_CastOptions(); } -inline flatbuffers::Offset RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRNNOptions(_fbb, _o, _rehasher); +template<> inline const DequantizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_DequantizeOptions(); } -inline flatbuffers::Offset CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateRNNOptions( - _fbb, - _fused_activation_function); +template<> inline const MaximumMinimumOptions *Operator::builtin_options_as() const { + return builtin_options_as_MaximumMinimumOptions(); } -inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SequenceRNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const ArgMaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_ArgMaxOptions(); } -inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = time_major(); _o->time_major = _e; }; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const LessOptions 
*Operator::builtin_options_as() const { + return builtin_options_as_LessOptions(); } -inline flatbuffers::Offset SequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSequenceRNNOptions(_fbb, _o, _rehasher); +template<> inline const NegOptions *Operator::builtin_options_as() const { + return builtin_options_as_NegOptions(); } -inline flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _time_major = _o->time_major; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateSequenceRNNOptions( - _fbb, - _time_major, - _fused_activation_function); +template<> inline const PadV2Options *Operator::builtin_options_as() const { + return builtin_options_as_PadV2Options(); } -inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BidirectionalSequenceRNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const GreaterOptions *Operator::builtin_options_as() const { + return builtin_options_as_GreaterOptions(); } -inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = time_major(); _o->time_major = _e; }; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const GreaterEqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_GreaterEqualOptions(); } -inline 
flatbuffers::Offset BidirectionalSequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher); +template<> inline const LessEqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_LessEqualOptions(); } -inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _time_major = _o->time_major; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateBidirectionalSequenceRNNOptions( - _fbb, - _time_major, - _fused_activation_function); +template<> inline const SelectOptions *Operator::builtin_options_as() const { + return builtin_options_as_SelectOptions(); } -inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FullyConnectedOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const SliceOptions *Operator::builtin_options_as() const { + return builtin_options_as_SliceOptions(); } -inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; - { auto _e = weights_format(); _o->weights_format = _e; }; +template<> inline const TransposeConvOptions *Operator::builtin_options_as() const { + return builtin_options_as_TransposeConvOptions(); } -inline flatbuffers::Offset 
FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFullyConnectedOptions(_fbb, _o, _rehasher); +template<> inline const SparseToDenseOptions *Operator::builtin_options_as() const { + return builtin_options_as_SparseToDenseOptions(); } -inline flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _weights_format = _o->weights_format; - return tflite::CreateFullyConnectedOptions( - _fbb, - _fused_activation_function, - _weights_format); +template<> inline const TileOptions *Operator::builtin_options_as() const { + return builtin_options_as_TileOptions(); } -inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SoftmaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const ExpandDimsOptions *Operator::builtin_options_as() const { + return builtin_options_as_ExpandDimsOptions(); } -inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = beta(); _o->beta = _e; }; +template<> inline const EqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_EqualOptions(); } -inline flatbuffers::Offset SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSoftmaxOptions(_fbb, _o, _rehasher); +template<> inline const NotEqualOptions 
*Operator::builtin_options_as() const { + return builtin_options_as_NotEqualOptions(); } -inline flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _beta = _o->beta; - return tflite::CreateSoftmaxOptions( - _fbb, - _beta); +template<> inline const ShapeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ShapeOptions(); } -inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ConcatenationOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const PowOptions *Operator::builtin_options_as() const { + return builtin_options_as_PowOptions(); } -inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; }; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const ArgMinOptions *Operator::builtin_options_as() const { + return builtin_options_as_ArgMinOptions(); } -inline flatbuffers::Offset ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConcatenationOptions(_fbb, _o, _rehasher); +template<> inline const FakeQuantOptions *Operator::builtin_options_as() const { + return builtin_options_as_FakeQuantOptions(); } -inline flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct 
_VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateConcatenationOptions( - _fbb, - _axis, - _fused_activation_function); +template<> inline const PackOptions *Operator::builtin_options_as() const { + return builtin_options_as_PackOptions(); } -inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AddOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const LogicalOrOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalOrOptions(); } -inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const OneHotOptions *Operator::builtin_options_as() const { + return builtin_options_as_OneHotOptions(); } -inline flatbuffers::Offset AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAddOptions(_fbb, _o, _rehasher); +template<> inline const LogicalAndOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalAndOptions(); } -inline flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateAddOptions( - _fbb, - _fused_activation_function); 
+template<> inline const LogicalNotOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalNotOptions(); } -inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MulOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const UnpackOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnpackOptions(); } -inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const FloorDivOptions *Operator::builtin_options_as() const { + return builtin_options_as_FloorDivOptions(); } -inline flatbuffers::Offset MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMulOptions(_fbb, _o, _rehasher); +template<> inline const SquareOptions *Operator::builtin_options_as() const { + return builtin_options_as_SquareOptions(); } -inline flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateMulOptions( - _fbb, - _fused_activation_function); +template<> inline const ZerosLikeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ZerosLikeOptions(); } -inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new L2NormOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const FillOptions 
*Operator::builtin_options_as() const { + return builtin_options_as_FillOptions(); } -inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; +template<> inline const BidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_BidirectionalSequenceLSTMOptions(); } -inline flatbuffers::Offset L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateL2NormOptions(_fbb, _o, _rehasher); +template<> inline const BidirectionalSequenceRNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_BidirectionalSequenceRNNOptions(); } -inline flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateL2NormOptions( - _fbb, - _fused_activation_function); +template<> inline const UnidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnidirectionalSequenceLSTMOptions(); } -inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LocalResponseNormalizationOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const FloorModOptions *Operator::builtin_options_as() const { + return builtin_options_as_FloorModOptions(); } -inline void 
LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = radius(); _o->radius = _e; }; - { auto _e = bias(); _o->bias = _e; }; - { auto _e = alpha(); _o->alpha = _e; }; - { auto _e = beta(); _o->beta = _e; }; +template<> inline const RangeOptions *Operator::builtin_options_as() const { + return builtin_options_as_RangeOptions(); } -inline flatbuffers::Offset LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher); +template<> inline const ResizeNearestNeighborOptions *Operator::builtin_options_as() const { + return builtin_options_as_ResizeNearestNeighborOptions(); } -inline flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _radius = _o->radius; - auto _bias = _o->bias; - auto _alpha = _o->alpha; - auto _beta = _o->beta; - return tflite::CreateLocalResponseNormalizationOptions( - _fbb, - _radius, - _bias, - _alpha, - _beta); -} - -inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; - { auto _e = cell_clip(); _o->cell_clip = 
_e; }; - { auto _e = proj_clip(); _o->proj_clip = _e; }; - { auto _e = kernel_type(); _o->kernel_type = _e; }; -} - -inline flatbuffers::Offset LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLSTMOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _kernel_type = _o->kernel_type; - return tflite::CreateLSTMOptions( - _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _kernel_type); -} - -inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ResizeBilinearOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const LeakyReluOptions *Operator::builtin_options_as() const { + return builtin_options_as_LeakyReluOptions(); } -inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = alignCorners(); _o->alignCorners = _e; }; +template<> inline const SquaredDifferenceOptions *Operator::builtin_options_as() const { + return builtin_options_as_SquaredDifferenceOptions(); } -inline flatbuffers::Offset ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateResizeBilinearOptions(_fbb, _o, _rehasher); +template<> inline const MirrorPadOptions 
*Operator::builtin_options_as() const { + return builtin_options_as_MirrorPadOptions(); } -inline flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _alignCorners = _o->alignCorners; - return tflite::CreateResizeBilinearOptions( - _fbb, - _alignCorners); +template<> inline const AbsOptions *Operator::builtin_options_as() const { + return builtin_options_as_AbsOptions(); } -inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CallOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const SplitVOptions *Operator::builtin_options_as() const { + return builtin_options_as_SplitVOptions(); } -inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = subgraph(); _o->subgraph = _e; }; +template<> inline const UniqueOptions *Operator::builtin_options_as() const { + return builtin_options_as_UniqueOptions(); } -inline flatbuffers::Offset CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCallOptions(_fbb, _o, _rehasher); +template<> inline const ReverseV2Options *Operator::builtin_options_as() const { + return builtin_options_as_ReverseV2Options(); } -inline flatbuffers::Offset CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _subgraph = _o->subgraph; - return tflite::CreateCallOptions( - _fbb, - _subgraph); +template<> inline const AddNOptions *Operator::builtin_options_as() const { + return builtin_options_as_AddNOptions(); } -inline PadOptionsT *PadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PadOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const GatherNdOptions *Operator::builtin_options_as() const { + return builtin_options_as_GatherNdOptions(); } -inline void PadOptions::UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; +template<> inline const CosOptions *Operator::builtin_options_as() const { + return builtin_options_as_CosOptions(); } -inline flatbuffers::Offset PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadOptions(_fbb, _o, _rehasher); +template<> inline const WhereOptions *Operator::builtin_options_as() const { + return builtin_options_as_WhereOptions(); } -inline flatbuffers::Offset CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePadOptions( - _fbb); +template<> inline const RankOptions *Operator::builtin_options_as() const { + return builtin_options_as_RankOptions(); } -inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PadV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const ReverseSequenceOptions *Operator::builtin_options_as() const { + 
return builtin_options_as_ReverseSequenceOptions(); } -inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; +template<> inline const MatrixDiagOptions *Operator::builtin_options_as() const { + return builtin_options_as_MatrixDiagOptions(); } -inline flatbuffers::Offset PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadV2Options(_fbb, _o, _rehasher); +template<> inline const QuantizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_QuantizeOptions(); } -inline flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePadV2Options( - _fbb); +template<> inline const MatrixSetDiagOptions *Operator::builtin_options_as() const { + return builtin_options_as_MatrixSetDiagOptions(); } -inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReshapeOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const HardSwishOptions *Operator::builtin_options_as() const { + return builtin_options_as_HardSwishOptions(); } -inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } }; +template<> inline const IfOptions *Operator::builtin_options_as() const { + return builtin_options_as_IfOptions(); } 
-inline flatbuffers::Offset ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReshapeOptions(_fbb, _o, _rehasher); +template<> inline const WhileOptions *Operator::builtin_options_as() const { + return builtin_options_as_WhileOptions(); } -inline flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _new_shape = _o->new_shape.size() ? _fbb.CreateVector(_o->new_shape) : 0; - return tflite::CreateReshapeOptions( - _fbb, - _new_shape); +template<> inline const DepthToSpaceOptions *Operator::builtin_options_as() const { + return builtin_options_as_DepthToSpaceOptions(); } -inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceToBatchNDOptionsT(); - UnPackTo(_o, _resolver); - return _o; +template<> inline const NonMaxSuppressionV4Options *Operator::builtin_options_as() const { + return builtin_options_as_NonMaxSuppressionV4Options(); } -inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; +template<> inline const NonMaxSuppressionV5Options *Operator::builtin_options_as() const { + return builtin_options_as_NonMaxSuppressionV5Options(); } -inline flatbuffers::Offset SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher); +template<> inline const ScatterNdOptions *Operator::builtin_options_as() const { 
+ return builtin_options_as_ScatterNdOptions(); } -inline flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSpaceToBatchNDOptions( - _fbb); -} +struct OperatorBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_opcode_index(uint32_t opcode_index) { + fbb_.AddElement(Operator::VT_OPCODE_INDEX, opcode_index, 0); + } + void add_inputs(flatbuffers::Offset> inputs) { + fbb_.AddOffset(Operator::VT_INPUTS, inputs); + } + void add_outputs(flatbuffers::Offset> outputs) { + fbb_.AddOffset(Operator::VT_OUTPUTS, outputs); + } + void add_builtin_options_type(BuiltinOptions builtin_options_type) { + fbb_.AddElement(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast(builtin_options_type), 0); + } + void add_builtin_options(flatbuffers::Offset builtin_options) { + fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options); + } + void add_custom_options(flatbuffers::Offset> custom_options) { + fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options); + } + void add_custom_options_format(CustomOptionsFormat custom_options_format) { + fbb_.AddElement(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast(custom_options_format), 0); + } + void add_mutating_variable_inputs(flatbuffers::Offset> mutating_variable_inputs) { + fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs); + } + void add_intermediates(flatbuffers::Offset> intermediates) { + fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates); + } + explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + OperatorBuilder &operator=(const 
OperatorBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; -inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchToSpaceNDOptionsT(); - UnPackTo(_o, _resolver); - return _o; +inline flatbuffers::Offset CreateOperator( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t opcode_index = 0, + flatbuffers::Offset> inputs = 0, + flatbuffers::Offset> outputs = 0, + BuiltinOptions builtin_options_type = BuiltinOptions_NONE, + flatbuffers::Offset builtin_options = 0, + flatbuffers::Offset> custom_options = 0, + CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS, + flatbuffers::Offset> mutating_variable_inputs = 0, + flatbuffers::Offset> intermediates = 0) { + OperatorBuilder builder_(_fbb); + builder_.add_intermediates(intermediates); + builder_.add_mutating_variable_inputs(mutating_variable_inputs); + builder_.add_custom_options(custom_options); + builder_.add_builtin_options(builtin_options); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + builder_.add_opcode_index(opcode_index); + builder_.add_custom_options_format(custom_options_format); + builder_.add_builtin_options_type(builtin_options_type); + return builder_.Finish(); } -inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; +inline flatbuffers::Offset CreateOperatorDirect( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t opcode_index = 0, + const std::vector *inputs = nullptr, + const std::vector *outputs = nullptr, + BuiltinOptions builtin_options_type = BuiltinOptions_NONE, + flatbuffers::Offset builtin_options = 0, + const std::vector *custom_options = nullptr, + CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS, + const std::vector *mutating_variable_inputs = 
nullptr, + const std::vector *intermediates = nullptr) { + auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; + auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; + auto custom_options__ = custom_options ? _fbb.CreateVector(*custom_options) : 0; + auto mutating_variable_inputs__ = mutating_variable_inputs ? _fbb.CreateVector(*mutating_variable_inputs) : 0; + auto intermediates__ = intermediates ? _fbb.CreateVector(*intermediates) : 0; + return tflite::CreateOperator( + _fbb, + opcode_index, + inputs__, + outputs__, + builtin_options_type, + builtin_options, + custom_options__, + custom_options_format, + mutating_variable_inputs__, + intermediates__); } -inline flatbuffers::Offset BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher); -} +flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -inline flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateBatchToSpaceNDOptions( - _fbb); -} +struct SubGraphT : public flatbuffers::NativeTable { + typedef SubGraph TableType; + std::vector> tensors; + std::vector inputs; + std::vector outputs; + std::vector> operators; + std::string name; + SubGraphT() { + } +}; -inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SkipGramOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} +struct SubGraph FLATBUFFERS_FINAL_CLASS : 
private flatbuffers::Table { + typedef SubGraphT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TENSORS = 4, + VT_INPUTS = 6, + VT_OUTPUTS = 8, + VT_OPERATORS = 10, + VT_NAME = 12 + }; + const flatbuffers::Vector> *tensors() const { + return GetPointer> *>(VT_TENSORS); + } + const flatbuffers::Vector *inputs() const { + return GetPointer *>(VT_INPUTS); + } + const flatbuffers::Vector *outputs() const { + return GetPointer *>(VT_OUTPUTS); + } + const flatbuffers::Vector> *operators() const { + return GetPointer> *>(VT_OPERATORS); + } + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TENSORS) && + verifier.VerifyVector(tensors()) && + verifier.VerifyVectorOfTables(tensors()) && + VerifyOffset(verifier, VT_INPUTS) && + verifier.VerifyVector(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && + verifier.VerifyVector(outputs()) && + VerifyOffset(verifier, VT_OPERATORS) && + verifier.VerifyVector(operators()) && + verifier.VerifyVectorOfTables(operators()) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + verifier.EndTable(); + } + SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = ngram_size(); _o->ngram_size = _e; }; - { auto _e = max_skip_size(); _o->max_skip_size = _e; }; - { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; }; -} +struct SubGraphBuilder { + flatbuffers::FlatBufferBuilder 
&fbb_; + flatbuffers::uoffset_t start_; + void add_tensors(flatbuffers::Offset>> tensors) { + fbb_.AddOffset(SubGraph::VT_TENSORS, tensors); + } + void add_inputs(flatbuffers::Offset> inputs) { + fbb_.AddOffset(SubGraph::VT_INPUTS, inputs); + } + void add_outputs(flatbuffers::Offset> outputs) { + fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs); + } + void add_operators(flatbuffers::Offset>> operators) { + fbb_.AddOffset(SubGraph::VT_OPERATORS, operators); + } + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(SubGraph::VT_NAME, name); + } + explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SubGraphBuilder &operator=(const SubGraphBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; -inline flatbuffers::Offset SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSkipGramOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset CreateSubGraph( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset>> tensors = 0, + flatbuffers::Offset> inputs = 0, + flatbuffers::Offset> outputs = 0, + flatbuffers::Offset>> operators = 0, + flatbuffers::Offset name = 0) { + SubGraphBuilder builder_(_fbb); + builder_.add_name(name); + builder_.add_operators(operators); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + builder_.add_tensors(tensors); + return builder_.Finish(); } -inline flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _ngram_size = _o->ngram_size; - auto 
_max_skip_size = _o->max_skip_size; - auto _include_all_ngrams = _o->include_all_ngrams; - return tflite::CreateSkipGramOptions( +inline flatbuffers::Offset CreateSubGraphDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector> *tensors = nullptr, + const std::vector *inputs = nullptr, + const std::vector *outputs = nullptr, + const std::vector> *operators = nullptr, + const char *name = nullptr) { + auto tensors__ = tensors ? _fbb.CreateVector>(*tensors) : 0; + auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; + auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; + auto operators__ = operators ? _fbb.CreateVector>(*operators) : 0; + auto name__ = name ? _fbb.CreateString(name) : 0; + return tflite::CreateSubGraph( _fbb, - _ngram_size, - _max_skip_size, - _include_all_ngrams); -} - -inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceToDepthOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = block_size(); _o->block_size = _e; }; + tensors__, + inputs__, + outputs__, + operators__, + name__); } -inline flatbuffers::Offset SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceToDepthOptions(_fbb, _o, _rehasher); -} +flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -inline flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; 
const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _block_size = _o->block_size; - return tflite::CreateSpaceToDepthOptions( - _fbb, - _block_size); -} +struct BufferT : public flatbuffers::NativeTable { + typedef Buffer TableType; + std::vector data; + BufferT() { + } +}; -inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SubOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} +struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BufferT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATA = 4 + }; + const flatbuffers::Vector *data() const { + return GetPointer *>(VT_DATA); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DATA) && + verifier.VerifyVector(data()) && + verifier.EndTable(); + } + BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -inline void SubOptions::UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; -} +struct BufferBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_data(flatbuffers::Offset> data) { + fbb_.AddOffset(Buffer::VT_DATA, data); + } + explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + BufferBuilder &operator=(const BufferBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + 
return o; + } +}; -inline flatbuffers::Offset SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSubOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset CreateBuffer( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> data = 0) { + BufferBuilder builder_(_fbb); + builder_.add_data(data); + return builder_.Finish(); } -inline flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateSubOptions( +inline flatbuffers::Offset CreateBufferDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *data = nullptr) { + auto data__ = data ? 
_fbb.CreateVector(*data) : 0; + return tflite::CreateBuffer( _fbb, - _fused_activation_function); -} - -inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DivOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DivOptions::UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; + data__); } -inline flatbuffers::Offset DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDivOptions(_fbb, _o, _rehasher); -} +flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -inline flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateDivOptions( - _fbb, - _fused_activation_function); -} +struct MetadataT : public flatbuffers::NativeTable { + typedef Metadata TableType; + std::string name; + uint32_t buffer; + MetadataT() + : buffer(0) { + } +}; -inline TopKV2OptionsT *TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TopKV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; +struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MetadataT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_BUFFER = 6 + }; + const flatbuffers::String *name() const { + return 
GetPointer(VT_NAME); + } + uint32_t buffer() const { + return GetField(VT_BUFFER, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyField(verifier, VT_BUFFER) && + verifier.EndTable(); + } + MetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MetadataBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(Metadata::VT_NAME, name); + } + void add_buffer(uint32_t buffer) { + fbb_.AddElement(Metadata::VT_BUFFER, buffer, 0); + } + explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + MetadataBuilder &operator=(const MetadataBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMetadata( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset name = 0, + uint32_t buffer = 0) { + MetadataBuilder builder_(_fbb); + builder_.add_buffer(buffer); + builder_.add_name(name); + return builder_.Finish(); } -inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; +inline flatbuffers::Offset CreateMetadataDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + uint32_t buffer = 0) { + auto name__ = name ? 
_fbb.CreateString(name) : 0; + return tflite::CreateMetadata( + _fbb, + name__, + buffer); } -inline flatbuffers::Offset TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTopKV2Options(_fbb, _o, _rehasher); +flatbuffers::Offset CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ModelT : public flatbuffers::NativeTable { + typedef Model TableType; + uint32_t version; + std::vector> operator_codes; + std::vector> subgraphs; + std::string description; + std::vector> buffers; + std::vector metadata_buffer; + std::vector> metadata; + ModelT() + : version(0) { + } +}; + +struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ModelT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VERSION = 4, + VT_OPERATOR_CODES = 6, + VT_SUBGRAPHS = 8, + VT_DESCRIPTION = 10, + VT_BUFFERS = 12, + VT_METADATA_BUFFER = 14, + VT_METADATA = 16 + }; + uint32_t version() const { + return GetField(VT_VERSION, 0); + } + const flatbuffers::Vector> *operator_codes() const { + return GetPointer> *>(VT_OPERATOR_CODES); + } + const flatbuffers::Vector> *subgraphs() const { + return GetPointer> *>(VT_SUBGRAPHS); + } + const flatbuffers::String *description() const { + return GetPointer(VT_DESCRIPTION); + } + const flatbuffers::Vector> *buffers() const { + return GetPointer> *>(VT_BUFFERS); + } + const flatbuffers::Vector *metadata_buffer() const { + return GetPointer *>(VT_METADATA_BUFFER); + } + const flatbuffers::Vector> *metadata() const { + return GetPointer> *>(VT_METADATA); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_VERSION) && + VerifyOffset(verifier, VT_OPERATOR_CODES) && + verifier.VerifyVector(operator_codes()) && + 
verifier.VerifyVectorOfTables(operator_codes()) && + VerifyOffset(verifier, VT_SUBGRAPHS) && + verifier.VerifyVector(subgraphs()) && + verifier.VerifyVectorOfTables(subgraphs()) && + VerifyOffset(verifier, VT_DESCRIPTION) && + verifier.VerifyString(description()) && + VerifyOffset(verifier, VT_BUFFERS) && + verifier.VerifyVector(buffers()) && + verifier.VerifyVectorOfTables(buffers()) && + VerifyOffset(verifier, VT_METADATA_BUFFER) && + verifier.VerifyVector(metadata_buffer()) && + VerifyOffset(verifier, VT_METADATA) && + verifier.VerifyVector(metadata()) && + verifier.VerifyVectorOfTables(metadata()) && + verifier.EndTable(); + } + ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ModelBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_version(uint32_t version) { + fbb_.AddElement(Model::VT_VERSION, version, 0); + } + void add_operator_codes(flatbuffers::Offset>> operator_codes) { + fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes); + } + void add_subgraphs(flatbuffers::Offset>> subgraphs) { + fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs); + } + void add_description(flatbuffers::Offset description) { + fbb_.AddOffset(Model::VT_DESCRIPTION, description); + } + void add_buffers(flatbuffers::Offset>> buffers) { + fbb_.AddOffset(Model::VT_BUFFERS, buffers); + } + void add_metadata_buffer(flatbuffers::Offset> metadata_buffer) { + fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer); + } + void add_metadata(flatbuffers::Offset>> metadata) { + fbb_.AddOffset(Model::VT_METADATA, metadata); + } + explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ModelBuilder 
&operator=(const ModelBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateModel( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t version = 0, + flatbuffers::Offset>> operator_codes = 0, + flatbuffers::Offset>> subgraphs = 0, + flatbuffers::Offset description = 0, + flatbuffers::Offset>> buffers = 0, + flatbuffers::Offset> metadata_buffer = 0, + flatbuffers::Offset>> metadata = 0) { + ModelBuilder builder_(_fbb); + builder_.add_metadata(metadata); + builder_.add_metadata_buffer(metadata_buffer); + builder_.add_buffers(buffers); + builder_.add_description(description); + builder_.add_subgraphs(subgraphs); + builder_.add_operator_codes(operator_codes); + builder_.add_version(version); + return builder_.Finish(); } -inline flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTopKV2Options( - _fbb); +inline flatbuffers::Offset CreateModelDirect( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t version = 0, + const std::vector> *operator_codes = nullptr, + const std::vector> *subgraphs = nullptr, + const char *description = nullptr, + const std::vector> *buffers = nullptr, + const std::vector *metadata_buffer = nullptr, + const std::vector> *metadata = nullptr) { + auto operator_codes__ = operator_codes ? _fbb.CreateVector>(*operator_codes) : 0; + auto subgraphs__ = subgraphs ? _fbb.CreateVector>(*subgraphs) : 0; + auto description__ = description ? _fbb.CreateString(description) : 0; + auto buffers__ = buffers ? _fbb.CreateVector>(*buffers) : 0; + auto metadata_buffer__ = metadata_buffer ? 
_fbb.CreateVector(*metadata_buffer) : 0; + auto metadata__ = metadata ? _fbb.CreateVector>(*metadata) : 0; + return tflite::CreateModel( + _fbb, + version, + operator_codes__, + subgraphs__, + description__, + buffers__, + metadata_buffer__, + metadata__); } -inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EmbeddingLookupSparseOptionsT(); +flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline CustomQuantizationT *CustomQuantization::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new CustomQuantizationT(); UnPackTo(_o, _resolver); return _o; } -inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = combiner(); _o->combiner = _e; }; + { auto _e = custom(); if (_e) { _o->custom.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->custom[_i] = _e->Get(_i); } } }; } -inline flatbuffers::Offset EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset CustomQuantization::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCustomQuantization(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline 
flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _combiner = _o->combiner; - return tflite::CreateEmbeddingLookupSparseOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CustomQuantizationT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _custom = _o->custom.size() ? _fbb.CreateVector(_o->custom) : 0; + return tflite::CreateCustomQuantization( _fbb, - _combiner); + _custom); } -inline GatherOptionsT *GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherOptionsT(); +inline QuantizationParametersT *QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizationParametersT(); UnPackTo(_o, _resolver); return _o; } -inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = axis(); _o->axis = _e; }; + { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } }; + { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } }; + { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } }; + { auto _e = zero_point(); if (_e) { 
_o->zero_point.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } }; + { auto _e = details_type(); _o->details.type = _e; }; + { auto _e = details(); if (_e) _o->details.value = QuantizationDetailsUnion::UnPack(_e, details_type(), _resolver); }; + { auto _e = quantized_dimension(); _o->quantized_dimension = _e; }; } -inline flatbuffers::Offset GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGatherOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizationParameters(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - return tflite::CreateGatherOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizationParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0; + auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0; + auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0; + auto _zero_point = _o->zero_point.size() ? 
_fbb.CreateVector(_o->zero_point) : 0; + auto _details_type = _o->details.type; + auto _details = _o->details.Pack(_fbb); + auto _quantized_dimension = _o->quantized_dimension; + return tflite::CreateQuantizationParameters( _fbb, - _axis); + _min, + _max, + _scale, + _zero_point, + _details_type, + _details, + _quantized_dimension); } -inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeOptionsT(); +inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TensorT(); UnPackTo(_o, _resolver); return _o; } -inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } }; + { auto _e = type(); _o->type = _e; }; + { auto _e = buffer(); _o->buffer = _e; }; + { auto _e = name(); if (_e) _o->name = _e->str(); }; + { auto _e = quantization(); if (_e) _o->quantization = std::unique_ptr(_e->UnPack(_resolver)); }; + { auto _e = is_variable(); _o->is_variable = _e; }; } -inline flatbuffers::Offset TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTransposeOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTensor(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset 
CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTransposeOptions( - _fbb); -} - -inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpOptionsT(); - UnPackTo(_o, _resolver); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; + auto _type = _o->type; + auto _buffer = _o->buffer; + auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + auto _quantization = _o->quantization ? CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0; + auto _is_variable = _o->is_variable; + return tflite::CreateTensor( + _fbb, + _shape, + _type, + _buffer, + _name, + _quantization, + _is_variable); +} + +inline Conv2DOptionsT *Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Conv2DOptionsT(); + UnPackTo(_o, _resolver); return _o; } -inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = padding(); _o->padding = _e; }; + { auto _e = stride_w(); _o->stride_w = _e; }; + { auto _e = stride_h(); _o->stride_h = _e; }; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; }; + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }; } -inline 
flatbuffers::Offset ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConv2DOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateExpOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + return tflite::CreateConv2DOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _fused_activation_function, + _dilation_w_factor, + _dilation_h_factor); } -inline ReducerOptionsT *ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReducerOptionsT(); +inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Pool2DOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { +inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = keepDims(); _o->keepDims = _e; }; + { auto _e = padding(); _o->padding = _e; }; + { auto _e = stride_w(); _o->stride_w = _e; }; + { auto _e = stride_h(); _o->stride_h = _e; }; + { auto _e = filter_width(); _o->filter_width = _e; }; + { auto _e = filter_height(); _o->filter_height = _e; }; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReducerOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePool2DOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _keepDims = _o->keepDims; - return tflite::CreateReducerOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _filter_width = _o->filter_width; + auto _filter_height = 
_o->filter_height; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreatePool2DOptions( _fbb, - _keepDims); + _padding, + _stride_w, + _stride_h, + _filter_width, + _filter_height, + _fused_activation_function); } -inline SqueezeOptionsT *SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SqueezeOptionsT(); +inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new DepthwiseConv2DOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = squeezeDims(); if (_e) { _o->squeezeDims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeezeDims[_i] = _e->Get(_i); } } }; + { auto _e = padding(); _o->padding = _e; }; + { auto _e = stride_w(); _o->stride_w = _e; }; + { auto _e = stride_h(); _o->stride_h = _e; }; + { auto _e = depth_multiplier(); _o->depth_multiplier = _e; }; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; }; + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }; } -inline flatbuffers::Offset SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSqueezeOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset 
CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _squeezeDims = _o->squeezeDims.size() ? _fbb.CreateVector(_o->squeezeDims) : 0; - return tflite::CreateSqueezeOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _depth_multiplier = _o->depth_multiplier; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + return tflite::CreateDepthwiseConv2DOptions( _fbb, - _squeezeDims); + _padding, + _stride_w, + _stride_h, + _depth_multiplier, + _fused_activation_function, + _dilation_w_factor, + _dilation_h_factor); } -inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SplitOptionsT(); +inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ConcatEmbeddingsOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { 
auto _e = num_splits(); _o->num_splits = _e; }; + { auto _e = num_channels(); _o->num_channels = _e; }; + { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } }; + { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } }; } -inline flatbuffers::Offset SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSplitOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_splits = _o->num_splits; - return tflite::CreateSplitOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_channels = _o->num_channels; + auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? 
_fbb.CreateVector(_o->num_columns_per_channel) : 0; + auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? _fbb.CreateVector(_o->embedding_dim_per_channel) : 0; + return tflite::CreateConcatEmbeddingsOptions( _fbb, - _num_splits); + _num_channels, + _num_columns_per_channel, + _embedding_dim_per_channel); } -inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new StridedSliceOptionsT(); +inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LSHProjectionOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = beginMask(); _o->beginMask = _e; }; - { auto _e = endMask(); _o->endMask = _e; }; - { auto _e = ellipsisMask(); _o->ellipsisMask = _e; }; - { auto _e = newAxisMask(); _o->newAxisMask = _e; }; - { auto _e = shrinkAxisMask(); _o->shrinkAxisMask = _e; }; + { auto _e = type(); _o->type = _e; }; } -inline flatbuffers::Offset StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateStridedSliceOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLSHProjectionOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset 
CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _beginMask = _o->beginMask; - auto _endMask = _o->endMask; - auto _ellipsisMask = _o->ellipsisMask; - auto _newAxisMask = _o->newAxisMask; - auto _shrinkAxisMask = _o->shrinkAxisMask; - return tflite::CreateStridedSliceOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type; + return tflite::CreateLSHProjectionOptions( _fbb, - _beginMask, - _endMask, - _ellipsisMask, - _newAxisMask, - _shrinkAxisMask); + _type); } -inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogSoftmaxOptionsT(); +inline SVDFOptionsT *SVDFOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SVDFOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = rank(); _o->rank = _e; }; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogSoftmaxOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSVDFOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogSoftmaxOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _rank = _o->rank; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateSVDFOptions( + _fbb, + _rank, + _fused_activation_function); } -inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CastOptionsT(); +inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RNNOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void CastOptions::UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = in_data_type(); _o->in_data_type = _e; }; - { auto _e = out_data_type(); _o->out_data_type = _e; }; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - 
return CreateCastOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRNNOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _in_data_type = _o->in_data_type; - auto _out_data_type = _o->out_data_type; - return tflite::CreateCastOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateRNNOptions( _fbb, - _in_data_type, - _out_data_type); + _fused_activation_function); } -inline DequantizeOptionsT *DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DequantizeOptionsT(); +inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SequenceRNNOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = time_major(); _o->time_major = _e; }; + { auto _e = fused_activation_function(); 
_o->fused_activation_function = _e; }; } -inline flatbuffers::Offset DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDequantizeOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset SequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSequenceRNNOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateDequantizeOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _time_major = _o->time_major; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateSequenceRNNOptions( + _fbb, + _time_major, + _fused_activation_function); } -inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MaximumMinimumOptionsT(); +inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BidirectionalSequenceRNNOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { +inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = time_major(); _o->time_major = _e; }; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; + { auto _e = merge_outputs(); _o->merge_outputs = _e; }; } -inline flatbuffers::Offset MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMaximumMinimumOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset BidirectionalSequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMaximumMinimumOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _time_major = _o->time_major; + auto _fused_activation_function = _o->fused_activation_function; + auto _merge_outputs = _o->merge_outputs; + return 
tflite::CreateBidirectionalSequenceRNNOptions( + _fbb, + _time_major, + _fused_activation_function, + _merge_outputs); } -inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TileOptionsT(); +inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new FullyConnectedOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void TileOptions::UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; + { auto _e = weights_format(); _o->weights_format = _e; }; + { auto _e = keep_num_dims(); _o->keep_num_dims = _e; }; } -inline flatbuffers::Offset TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTileOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFullyConnectedOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTileOptions( - _fbb); + struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _weights_format = _o->weights_format; + auto _keep_num_dims = _o->keep_num_dims; + return tflite::CreateFullyConnectedOptions( + _fbb, + _fused_activation_function, + _weights_format, + _keep_num_dims); } -inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMaxOptionsT(); +inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SoftmaxOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = output_type(); _o->output_type = _e; }; + { auto _e = beta(); _o->beta = _e; }; } -inline flatbuffers::Offset ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMaxOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSoftmaxOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const 
ArgMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _output_type = _o->output_type; - return tflite::CreateArgMaxOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _beta = _o->beta; + return tflite::CreateSoftmaxOptions( _fbb, - _output_type); + _beta); } -inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMinOptionsT(); +inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ConcatenationOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = output_type(); _o->output_type = _e; }; + { auto _e = axis(); _o->axis = _e; }; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMinOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConcatenationOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const 
ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _output_type = _o->output_type; - return tflite::CreateArgMinOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateConcatenationOptions( _fbb, - _output_type); + _axis, + _fused_activation_function); } -inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GreaterOptionsT(); +inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new AddOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGreaterOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAddOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t 
*_rehasher) { +inline flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGreaterOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateAddOptions( + _fbb, + _fused_activation_function); } -inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GreaterEqualOptionsT(); +inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MulOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGreaterEqualOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMulOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, 
const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGreaterEqualOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateMulOptions( + _fbb, + _fused_activation_function); } -inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LessOptionsT(); +inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new L2NormOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void LessOptions::UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLessOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateL2NormOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLessOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateL2NormOptions( + _fbb, + _fused_activation_function); } -inline LessEqualOptionsT *LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LessEqualOptionsT(); +inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LocalResponseNormalizationOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = radius(); _o->radius = _e; }; + { auto _e = bias(); _o->bias = _e; }; + { auto _e = alpha(); _o->alpha = _e; }; + { auto _e = beta(); _o->beta = _e; }; } -inline flatbuffers::Offset LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLessEqualOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLessEqualOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _radius = _o->radius; + auto _bias = _o->bias; + auto _alpha = _o->alpha; + auto _beta = _o->beta; + return tflite::CreateLocalResponseNormalizationOptions( + _fbb, + _radius, + _bias, + _alpha, + _beta); } -inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NegOptionsT(); +inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LSTMOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void NegOptions::UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; + { auto _e = cell_clip(); _o->cell_clip = _e; }; + { auto _e = proj_clip(); _o->proj_clip = _e; }; + { auto _e = kernel_type(); 
_o->kernel_type = _e; }; } -inline flatbuffers::Offset NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNegOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLSTMOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNegOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _kernel_type = _o->kernel_type; + return tflite::CreateLSTMOptions( + _fbb, + _fused_activation_function, + _cell_clip, + _proj_clip, + _kernel_type); } -inline SelectOptionsT *SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SelectOptionsT(); +inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new UnidirectionalSequenceLSTMOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline 
void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; + { auto _e = cell_clip(); _o->cell_clip = _e; }; + { auto _e = proj_clip(); _o->proj_clip = _e; }; + { auto _e = time_major(); _o->time_major = _e; }; } -inline flatbuffers::Offset SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSelectOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset UnidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSelectOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _time_major = _o->time_major; + return tflite::CreateUnidirectionalSequenceLSTMOptions( + _fbb, + 
_fused_activation_function, + _cell_clip, + _proj_clip, + _time_major); } -inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SliceOptionsT(); +inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BidirectionalSequenceLSTMOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; + { auto _e = cell_clip(); _o->cell_clip = _e; }; + { auto _e = proj_clip(); _o->proj_clip = _e; }; + { auto _e = merge_outputs(); _o->merge_outputs = _e; }; + { auto _e = time_major(); _o->time_major = _e; }; } -inline flatbuffers::Offset SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSliceOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset BidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* 
__o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSliceOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _merge_outputs = _o->merge_outputs; + auto _time_major = _o->time_major; + return tflite::CreateBidirectionalSequenceLSTMOptions( + _fbb, + _fused_activation_function, + _cell_clip, + _proj_clip, + _merge_outputs, + _time_major); } -inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeConvOptionsT(); +inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ResizeBilinearOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = padding(); _o->padding = _e; }; - { auto _e = stride_w(); _o->stride_w = _e; }; - { auto _e = stride_h(); _o->stride_h = _e; }; + { auto _e = align_corners(); _o->align_corners = _e; }; } -inline flatbuffers::Offset TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTransposeConvOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + 
return CreateResizeBilinearOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - return tflite::CreateTransposeConvOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _align_corners = _o->align_corners; + return tflite::CreateResizeBilinearOptions( _fbb, - _padding, - _stride_w, - _stride_h); + _align_corners); } -inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpandDimsOptionsT(); +inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ResizeNearestNeighborOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = align_corners(); _o->align_corners = _e; }; } -inline flatbuffers::Offset ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const 
flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpandDimsOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset ResizeNearestNeighborOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateExpandDimsOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _align_corners = _o->align_corners; + return tflite::CreateResizeNearestNeighborOptions( + _fbb, + _align_corners); } -inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SparseToDenseOptionsT(); +inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new CallOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = validateIndices(); _o->validateIndices = _e; }; + { auto _e 
= subgraph(); _o->subgraph = _e; }; } -inline flatbuffers::Offset SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSparseToDenseOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCallOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _validateIndices = _o->validateIndices; - return tflite::CreateSparseToDenseOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _subgraph = _o->subgraph; + return tflite::CreateCallOptions( _fbb, - _validateIndices); + _subgraph); } -inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EqualOptionsT(); +inline PadOptionsT *PadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PadOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void PadOptions::UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline 
flatbuffers::Offset EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEqualOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePadOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateEqualOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePadOptions( _fbb); } -inline NotEqualOptionsT *NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NotEqualOptionsT(); +inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PadV2OptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNotEqualOptions(_fbb, 
_o, _rehasher); +inline flatbuffers::Offset PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePadV2Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNotEqualOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePadV2Options( _fbb); } -inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ShapeOptionsT(); +inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReshapeOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = out_type(); _o->out_type = _e; }; + { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } }; } -inline flatbuffers::Offset ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t 
*_rehasher) { - return CreateShapeOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReshapeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _out_type = _o->out_type; - return tflite::CreateShapeOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _new_shape = _o->new_shape.size() ? 
_fbb.CreateVector(_o->new_shape) : 0; + return tflite::CreateReshapeOptions( _fbb, - _out_type); + _new_shape); } -inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PowOptionsT(); +inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SpaceToBatchNDOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePowOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePowOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSpaceToBatchNDOptions( _fbb); } 
-inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FakeQuantOptionsT(); +inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BatchToSpaceNDOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = min(); _o->min = _e; }; - { auto _e = max(); _o->max = _e; }; - { auto _e = num_bits(); _o->num_bits = _e; }; - { auto _e = narrow_range(); _o->narrow_range = _e; }; } -inline flatbuffers::Offset FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFakeQuantOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _min = _o->min; - auto _max = _o->max; - auto _num_bits = _o->num_bits; - auto _narrow_range = _o->narrow_range; - return 
tflite::CreateFakeQuantOptions( - _fbb, - _min, - _max, - _num_bits, - _narrow_range); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateBatchToSpaceNDOptions( + _fbb); } -inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PackOptionsT(); +inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SkipGramOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = values_count(); _o->values_count = _e; }; - { auto _e = axis(); _o->axis = _e; }; + { auto _e = ngram_size(); _o->ngram_size = _e; }; + { auto _e = max_skip_size(); _o->max_skip_size = _e; }; + { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; }; } -inline flatbuffers::Offset PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePackOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSkipGramOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs 
{ flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _values_count = _o->values_count; - auto _axis = _o->axis; - return tflite::CreatePackOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _ngram_size = _o->ngram_size; + auto _max_skip_size = _o->max_skip_size; + auto _include_all_ngrams = _o->include_all_ngrams; + return tflite::CreateSkipGramOptions( _fbb, - _values_count, - _axis); + _ngram_size, + _max_skip_size, + _include_all_ngrams); } -inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalOrOptionsT(); +inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SpaceToDepthOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = block_size(); _o->block_size = _e; }; } -inline flatbuffers::Offset LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalOrOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSpaceToDepthOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalOrOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _block_size = _o->block_size; + return tflite::CreateSpaceToDepthOptions( + _fbb, + _block_size); } -inline OneHotOptionsT *OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OneHotOptionsT(); +inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new DepthToSpaceOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = axis(); _o->axis = _e; }; + { auto _e = block_size(); _o->block_size = _e; }; } -inline flatbuffers::Offset OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOneHotOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset DepthToSpaceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDepthToSpaceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset 
CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - return tflite::CreateOneHotOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _block_size = _o->block_size; + return tflite::CreateDepthToSpaceOptions( _fbb, - _axis); + _block_size); } -inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalAndOptionsT(); +inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SubOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SubOptions::UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalAndOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSubOptions(_fbb, _o, _rehasher); } 
-inline flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalAndOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateSubOptions( + _fbb, + _fused_activation_function); } -inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalNotOptionsT(); +inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new DivOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void DivOptions::UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }; } -inline flatbuffers::Offset LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalNotOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return 
CreateDivOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalNotOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateDivOptions( + _fbb, + _fused_activation_function); } -inline UnpackOptionsT *UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UnpackOptionsT(); +inline TopKV2OptionsT *TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TopKV2OptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = num(); _o->num = _e; }; - { auto _e = axis(); _o->axis = _e; }; } -inline flatbuffers::Offset UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUnpackOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) 
{ + return CreateTopKV2Options(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num = _o->num; - auto _axis = _o->axis; - return tflite::CreateUnpackOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateTopKV2Options( + _fbb); +} + +inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new EmbeddingLookupSparseOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = combiner(); _o->combiner = _e; }; +} + +inline flatbuffers::Offset EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _combiner = _o->combiner; + return tflite::CreateEmbeddingLookupSparseOptions( _fbb, - _num, - _axis); + _combiner); } -inline FloorDivOptionsT *FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FloorDivOptionsT(); +inline GatherOptionsT *GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GatherOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; + { auto _e = axis(); _o->axis = _e; }; } -inline flatbuffers::Offset FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFloorDivOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGatherOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFloorDivOptions( - _fbb); + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + return tflite::CreateGatherOptions( + _fbb, + _axis); } -inline SquareOptionsT *SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SquareOptionsT(); +inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TransposeOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSquareOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTransposeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSquareOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return 
tflite::CreateTransposeOptions( _fbb); } -inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ZerosLikeOptionsT(); +inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ExpOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateZerosLikeOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateExpOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateZerosLikeOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateExpOptions( _fbb); } -inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FillOptionsT(); 
+inline CosOptionsT *CosOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new CosOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void FillOptions::UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void CosOptions::UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; } -inline flatbuffers::Offset FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFillOptions(_fbb, _o, _rehasher); +inline flatbuffers::Offset CosOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCosOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFillOptions( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateCosOptions( _fbb); } -inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OperatorCodeT(); +inline ReducerOptionsT *ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReducerOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void OperatorCode::UnPackTo(OperatorCodeT 
*_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = builtin_code(); _o->builtin_code = _e; }; - { auto _e = custom_code(); if (_e) _o->custom_code = _e->str(); }; - { auto _e = version(); _o->version = _e; }; + { auto _e = keep_dims(); _o->keep_dims = _e; }; } -inline flatbuffers::Offset OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOperatorCode(_fbb, _o, _rehasher); +inline flatbuffers::Offset ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReducerOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _builtin_code = _o->builtin_code; - auto _custom_code = _o->custom_code.empty() ? 
0 : _fbb.CreateString(_o->custom_code); - auto _version = _o->version; - return tflite::CreateOperatorCode( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _keep_dims = _o->keep_dims; + return tflite::CreateReducerOptions( _fbb, - _builtin_code, - _custom_code, - _version); + _keep_dims); } -inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OperatorT(); +inline SqueezeOptionsT *SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SqueezeOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void Operator::UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = opcode_index(); _o->opcode_index = _e; }; - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } }; - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } }; - { auto _e = builtin_options_type(); _o->builtin_options.type = _e; }; - { auto _e = builtin_options(); if (_e) _o->builtin_options.value = BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); }; - { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->custom_options[_i] = _e->Get(_i); } } }; - { auto _e = custom_options_format(); _o->custom_options_format = _e; }; - { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < 
_e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } }; + { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } }; } -inline flatbuffers::Offset Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOperator(_fbb, _o, _rehasher); +inline flatbuffers::Offset SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSqueezeOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _opcode_index = _o->opcode_index; - auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; - auto _builtin_options_type = _o->builtin_options.type; - auto _builtin_options = _o->builtin_options.Pack(_fbb); - auto _custom_options = _o->custom_options.size() ? _fbb.CreateVector(_o->custom_options) : 0; - auto _custom_options_format = _o->custom_options_format; - auto _mutating_variable_inputs = _o->mutating_variable_inputs.size() ? 
_fbb.CreateVector(_o->mutating_variable_inputs) : 0; - return tflite::CreateOperator( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _squeeze_dims = _o->squeeze_dims.size() ? _fbb.CreateVector(_o->squeeze_dims) : 0; + return tflite::CreateSqueezeOptions( _fbb, - _opcode_index, - _inputs, - _outputs, - _builtin_options_type, - _builtin_options, - _custom_options, - _custom_options_format, - _mutating_variable_inputs); + _squeeze_dims); } -inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SubGraphT(); +inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SplitOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } }; - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } }; - { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operators[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = name(); if (_e) _o->name = _e->str(); }; + { auto _e = num_splits(); _o->num_splits = _e; }; } -inline 
flatbuffers::Offset SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSubGraph(_fbb, _o, _rehasher); +inline flatbuffers::Offset SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSplitOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _tensors = _o->tensors.size() ? _fbb.CreateVector> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; - auto _operators = _o->operators.size() ? _fbb.CreateVector> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _name = _o->name.empty() ? 
0 : _fbb.CreateString(_o->name); - return tflite::CreateSubGraph( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_splits = _o->num_splits; + return tflite::CreateSplitOptions( _fbb, - _tensors, - _inputs, - _outputs, - _operators, - _name); + _num_splits); } -inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BufferT(); +inline SplitVOptionsT *SplitVOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SplitVOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = data(); if (_e) { _o->data.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->data[_i] = _e->Get(_i); } } }; + { auto _e = num_splits(); _o->num_splits = _e; }; } -inline flatbuffers::Offset Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBuffer(_fbb, _o, _rehasher); +inline flatbuffers::Offset SplitVOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSplitVOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; 
const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0; - return tflite::CreateBuffer( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitVOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_splits = _o->num_splits; + return tflite::CreateSplitVOptions( _fbb, - _data); + _num_splits); } -inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ModelT(); +inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new StridedSliceOptionsT(); UnPackTo(_o, _resolver); return _o; } -inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const { +inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = version(); _o->version = _e; }; - { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operator_codes[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->subgraphs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = description(); if (_e) _o->description = _e->str(); }; - { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffers[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } }; + { auto 
_e = begin_mask(); _o->begin_mask = _e; }; + { auto _e = end_mask(); _o->end_mask = _e; }; + { auto _e = ellipsis_mask(); _o->ellipsis_mask = _e; }; + { auto _e = new_axis_mask(); _o->new_axis_mask = _e; }; + { auto _e = shrink_axis_mask(); _o->shrink_axis_mask = _e; }; } -inline flatbuffers::Offset Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateModel(_fbb, _o, _rehasher); +inline flatbuffers::Offset StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateStridedSliceOptions(_fbb, _o, _rehasher); } -inline flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher) { +inline flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _version = _o->version; - auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description); - auto _buffers = _o->buffers.size() ? 
_fbb.CreateVector> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0; - return tflite::CreateModel( + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _begin_mask = _o->begin_mask; + auto _end_mask = _o->end_mask; + auto _ellipsis_mask = _o->ellipsis_mask; + auto _new_axis_mask = _o->new_axis_mask; + auto _shrink_axis_mask = _o->shrink_axis_mask; + return tflite::CreateStridedSliceOptions( _fbb, - _version, - _operator_codes, - _subgraphs, - _description, - _buffers, - _metadata_buffer); + _begin_mask, + _end_mask, + _ellipsis_mask, + _new_axis_mask, + _shrink_axis_mask); } -inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) { - switch (type) { - case BuiltinOptions_NONE: { - return true; - } - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = 
reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case 
BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case 
BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case 
BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return false; - } -} - -inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyBuiltinOptions( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} - -inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(obj); - return 
ptr->UnPack(resolver); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case 
BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return 
ptr->UnPack(resolver); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = 
reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} - -inline flatbuffers::Offset BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateConv2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthwiseConv2DOptions(_fbb, ptr, 
_rehasher).Union(); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); - return CreatePool2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); - return CreateSVDFOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(value); - return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); - return CreateL2NormOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); - return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); - return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CallOptions: { - 
auto ptr = reinterpret_cast(value); - return CreateCallOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateReshapeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(value); - return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(value); - return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); - return CreateMulOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); - return CreatePadOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToBatchNDOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - return CreateReducerOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - return CreateSubOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - return CreateDivOptions(_fbb, ptr, _rehasher).Union(); - } - 
case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - return CreateTopKV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(value); - return CreateSplitOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - return CreateCastOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - return CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - return CreateNegOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - return CreatePadV2Options(_fbb, ptr, _rehasher).Union(); 
- } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - return CreateSelectOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(value); - return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - return CreateTileOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateShapeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(value); - return CreatePowOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMinOptions(_fbb, ptr, 
_rehasher).Union(); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - return CreatePackOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - return CreateOneHotOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnpackOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquareOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - return CreateFillOptions(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} - -inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - value = new Conv2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DepthwiseConv2DOptions: { - value = new 
DepthwiseConv2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - value = new ConcatEmbeddingsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LSHProjectionOptions: { - value = new LSHProjectionOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_Pool2DOptions: { - value = new Pool2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SVDFOptions: { - value = new SVDFOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_RNNOptions: { - value = new RNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FullyConnectedOptions: { - value = new FullyConnectedOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SoftmaxOptions: { - value = new SoftmaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ConcatenationOptions: { - value = new ConcatenationOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_AddOptions: { - value = new AddOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_L2NormOptions: { - value = new L2NormOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - value = new LocalResponseNormalizationOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LSTMOptions: { - value = new LSTMOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ResizeBilinearOptions: { - value = new ResizeBilinearOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CallOptions: { - value = new CallOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReshapeOptions: { - value = new ReshapeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SkipGramOptions: { - value = new SkipGramOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SpaceToDepthOptions: { - value = new 
SpaceToDepthOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - value = new EmbeddingLookupSparseOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MulOptions: { - value = new MulOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PadOptions: { - value = new PadOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GatherOptions: { - value = new GatherOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BatchToSpaceNDOptions: { - value = new BatchToSpaceNDOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SpaceToBatchNDOptions: { - value = new SpaceToBatchNDOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TransposeOptions: { - value = new TransposeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReducerOptions: { - value = new ReducerOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SubOptions: { - value = new SubOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DivOptions: { - value = new DivOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SqueezeOptions: { - value = new SqueezeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SequenceRNNOptions: { - value = new SequenceRNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_StridedSliceOptions: { - value = new StridedSliceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ExpOptions: { - value = new ExpOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TopKV2Options: { - value = new TopKV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SplitOptions: { - value = new SplitOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogSoftmaxOptions: { - value = new LogSoftmaxOptionsT(*reinterpret_cast(u.value)); - break; 
- } - case BuiltinOptions_CastOptions: { - value = new CastOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DequantizeOptions: { - value = new DequantizeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MaximumMinimumOptions: { - value = new MaximumMinimumOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ArgMaxOptions: { - value = new ArgMaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LessOptions: { - value = new LessOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NegOptions: { - value = new NegOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PadV2Options: { - value = new PadV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GreaterOptions: { - value = new GreaterOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GreaterEqualOptions: { - value = new GreaterEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LessEqualOptions: { - value = new LessEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SelectOptions: { - value = new SelectOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SliceOptions: { - value = new SliceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TransposeConvOptions: { - value = new TransposeConvOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SparseToDenseOptions: { - value = new SparseToDenseOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TileOptions: { - value = new TileOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ExpandDimsOptions: { - value = new ExpandDimsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_EqualOptions: { - value = new EqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NotEqualOptions: { - value = new 
NotEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ShapeOptions: { - value = new ShapeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PowOptions: { - value = new PowOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ArgMinOptions: { - value = new ArgMinOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FakeQuantOptions: { - value = new FakeQuantOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PackOptions: { - value = new PackOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalOrOptions: { - value = new LogicalOrOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_OneHotOptions: { - value = new OneHotOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalAndOptions: { - value = new LogicalAndOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalNotOptions: { - value = new LogicalNotOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_UnpackOptions: { - value = new UnpackOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FloorDivOptions: { - value = new FloorDivOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SquareOptions: { - value = new SquareOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ZerosLikeOptions: { - value = new ZerosLikeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FillOptions: { - value = new FillOptionsT(*reinterpret_cast(u.value)); - break; - } - default: - break; - } -} - -inline void BuiltinOptionsUnion::Reset() { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = 
reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case 
BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - 
} - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - 
case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - default: break; - } - value = nullptr; - type = BuiltinOptions_NONE; +inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LogSoftmaxOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogSoftmaxOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; 
(void)_va; + return tflite::CreateLogSoftmaxOptions( + _fbb); +} + +inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new CastOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void CastOptions::UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = in_data_type(); _o->in_data_type = _e; }; + { auto _e = out_data_type(); _o->out_data_type = _e; }; +} + +inline flatbuffers::Offset CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCastOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _in_data_type = _o->in_data_type; + auto _out_data_type = _o->out_data_type; + return tflite::CreateCastOptions( + _fbb, + _in_data_type, + _out_data_type); +} + +inline DequantizeOptionsT *DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new DequantizeOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDequantizeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateDequantizeOptions( + _fbb); +} + +inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MaximumMinimumOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMaximumMinimumOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateMaximumMinimumOptions( + _fbb); +} + +inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TileOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void TileOptions::UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTileOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset 
CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateTileOptions( + _fbb); +} + +inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ArgMaxOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = output_type(); _o->output_type = _e; }; +} + +inline flatbuffers::Offset ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateArgMaxOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _output_type = _o->output_type; + return tflite::CreateArgMaxOptions( + _fbb, + _output_type); +} + +inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ArgMinOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = output_type(); _o->output_type = _e; }; +} + +inline flatbuffers::Offset ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, 
const flatbuffers::rehasher_function_t *_rehasher) { + return CreateArgMinOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _output_type = _o->output_type; + return tflite::CreateArgMinOptions( + _fbb, + _output_type); +} + +inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GreaterOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGreaterOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateGreaterOptions( + _fbb); +} + +inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GreaterEqualOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset 
GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGreaterEqualOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateGreaterEqualOptions( + _fbb); +} + +inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LessOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LessOptions::UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLessOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLessOptions( + _fbb); +} + +inline LessEqualOptionsT *LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LessEqualOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline 
flatbuffers::Offset LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLessEqualOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLessEqualOptions( + _fbb); +} + +inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new NegOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void NegOptions::UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNegOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNegOptions( + _fbb); +} + +inline SelectOptionsT *SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SelectOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset 
SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSelectOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSelectOptions( + _fbb); +} + +inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SliceOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSliceOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSliceOptions( + _fbb); +} + +inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new TransposeConvOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); 
_o->padding = _e; }; + { auto _e = stride_w(); _o->stride_w = _e; }; + { auto _e = stride_h(); _o->stride_h = _e; }; +} + +inline flatbuffers::Offset TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTransposeConvOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + return tflite::CreateTransposeConvOptions( + _fbb, + _padding, + _stride_w, + _stride_h); +} + +inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ExpandDimsOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateExpandDimsOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateExpandDimsOptions( + _fbb); +} + 
+inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SparseToDenseOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = validate_indices(); _o->validate_indices = _e; }; +} + +inline flatbuffers::Offset SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSparseToDenseOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _validate_indices = _o->validate_indices; + return tflite::CreateSparseToDenseOptions( + _fbb, + _validate_indices); +} + +inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new EqualOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateEqualOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateEqualOptions( + _fbb); +} + +inline NotEqualOptionsT *NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new NotEqualOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNotEqualOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNotEqualOptions( + _fbb); +} + +inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ShapeOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = out_type(); _o->out_type = _e; }; +} + +inline flatbuffers::Offset ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateShapeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + 
(void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _out_type = _o->out_type; + return tflite::CreateShapeOptions( + _fbb, + _out_type); +} + +inline RankOptionsT *RankOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RankOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void RankOptions::UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset RankOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRankOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateRankOptions( + _fbb); +} + +inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PowOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePowOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; 
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePowOptions( + _fbb); +} + +inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new FakeQuantOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = min(); _o->min = _e; }; + { auto _e = max(); _o->max = _e; }; + { auto _e = num_bits(); _o->num_bits = _e; }; + { auto _e = narrow_range(); _o->narrow_range = _e; }; +} + +inline flatbuffers::Offset FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFakeQuantOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _min = _o->min; + auto _max = _o->max; + auto _num_bits = _o->num_bits; + auto _narrow_range = _o->narrow_range; + return tflite::CreateFakeQuantOptions( + _fbb, + _min, + _max, + _num_bits, + _narrow_range); +} + +inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new PackOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = values_count(); _o->values_count = _e; }; + { auto _e = 
axis(); _o->axis = _e; }; +} + +inline flatbuffers::Offset PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePackOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _values_count = _o->values_count; + auto _axis = _o->axis; + return tflite::CreatePackOptions( + _fbb, + _values_count, + _axis); +} + +inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LogicalOrOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalOrOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalOrOptions( + _fbb); +} + +inline OneHotOptionsT *OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new OneHotOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void 
OneHotOptions::UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; }; +} + +inline flatbuffers::Offset OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateOneHotOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + return tflite::CreateOneHotOptions( + _fbb, + _axis); +} + +inline AbsOptionsT *AbsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new AbsOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void AbsOptions::UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset AbsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAbsOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateAbsOptions( + _fbb); +} + +inline HardSwishOptionsT *HardSwishOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new HardSwishOptionsT(); + UnPackTo(_o, _resolver); + 
return _o; +} + +inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset HardSwishOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHardSwishOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHardSwishOptions( + _fbb); +} + +inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LogicalAndOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalAndOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalAndOptions( + _fbb); +} + +inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { 
+ auto _o = new LogicalNotOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalNotOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalNotOptions( + _fbb); +} + +inline UnpackOptionsT *UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new UnpackOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = num(); _o->num = _e; }; + { auto _e = axis(); _o->axis = _e; }; +} + +inline flatbuffers::Offset UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnpackOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num = _o->num; + auto _axis = _o->axis; + return 
tflite::CreateUnpackOptions( + _fbb, + _num, + _axis); +} + +inline FloorDivOptionsT *FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new FloorDivOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFloorDivOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateFloorDivOptions( + _fbb); +} + +inline SquareOptionsT *SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SquareOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSquareOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; + return tflite::CreateSquareOptions( + _fbb); +} + +inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ZerosLikeOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateZerosLikeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateZerosLikeOptions( + _fbb); +} + +inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new FillOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void FillOptions::UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFillOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; + return tflite::CreateFillOptions( + _fbb); +} + +inline FloorModOptionsT *FloorModOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new FloorModOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset FloorModOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFloorModOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorModOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateFloorModOptions( + _fbb); +} + +inline RangeOptionsT *RangeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new RangeOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void RangeOptions::UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset RangeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRangeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; + return tflite::CreateRangeOptions( + _fbb); +} + +inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new LeakyReluOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = alpha(); _o->alpha = _e; }; +} + +inline flatbuffers::Offset LeakyReluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLeakyReluOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _alpha = _o->alpha; + return tflite::CreateLeakyReluOptions( + _fbb, + _alpha); +} + +inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SquaredDifferenceOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SquaredDifferenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *TensorTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 
0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - TensorTypeTypeTable - }; - static const char * const names[] = { - "FLOAT32", - "FLOAT16", - "INT32", - "UINT8", - "INT64", - "STRING", - "BOOL", - "INT16", - "COMPLEX64" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 9, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *BuiltinOperatorTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { 
flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - 
BuiltinOperatorTypeTable - }; - static const int64_t values[] = { 0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94 }; - static const char * const names[] = { - "ADD", - "AVERAGE_POOL_2D", - "CONCATENATION", - "CONV_2D", - "DEPTHWISE_CONV_2D", - "DEQUANTIZE", - "EMBEDDING_LOOKUP", - "FLOOR", - "FULLY_CONNECTED", - "HASHTABLE_LOOKUP", - "L2_NORMALIZATION", - "L2_POOL_2D", - "LOCAL_RESPONSE_NORMALIZATION", - "LOGISTIC", - "LSH_PROJECTION", - "LSTM", - "MAX_POOL_2D", - "MUL", - "RELU", - "RELU_N1_TO_1", - "RELU6", - "RESHAPE", - "RESIZE_BILINEAR", - "RNN", - "SOFTMAX", - "SPACE_TO_DEPTH", - "SVDF", - "TANH", - "CONCAT_EMBEDDINGS", - "SKIP_GRAM", - "CALL", - "CUSTOM", - "EMBEDDING_LOOKUP_SPARSE", - "PAD", - "UNIDIRECTIONAL_SEQUENCE_RNN", - "GATHER", - "BATCH_TO_SPACE_ND", - "SPACE_TO_BATCH_ND", - "TRANSPOSE", - "MEAN", - "SUB", - "DIV", - "SQUEEZE", - "UNIDIRECTIONAL_SEQUENCE_LSTM", - "STRIDED_SLICE", - "BIDIRECTIONAL_SEQUENCE_RNN", - "EXP", - "TOPK_V2", - "SPLIT", - "LOG_SOFTMAX", - "DELEGATE", - "BIDIRECTIONAL_SEQUENCE_LSTM", - "CAST", - "PRELU", - "MAXIMUM", - "ARG_MAX", - "MINIMUM", - "LESS", - "NEG", - "PADV2", - "GREATER", - "GREATER_EQUAL", - "LESS_EQUAL", - "SELECT", - "SLICE", - "SIN", - "TRANSPOSE_CONV", - "SPARSE_TO_DENSE", - "TILE", - "EXPAND_DIMS", - "EQUAL", - "NOT_EQUAL", - "LOG", - "SUM", - "SQRT", - "RSQRT", - "SHAPE", - "POW", - "ARG_MIN", - "FAKE_QUANT", - "REDUCE_PROD", - "REDUCE_MAX", - "PACK", - "LOGICAL_OR", - "ONE_HOT", - "LOGICAL_AND", - "LOGICAL_NOT", - "UNPACK", - "REDUCE_MIN", - "FLOOR_DIV", - "REDUCE_ANY", - "SQUARE", - "ZEROS_LIKE", - "FILL" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 94, type_codes, 
type_refs, values, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *BuiltinOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 2 }, - { flatbuffers::ET_SEQUENCE, 0, 3 }, - { flatbuffers::ET_SEQUENCE, 0, 4 }, - { flatbuffers::ET_SEQUENCE, 0, 5 }, - { flatbuffers::ET_SEQUENCE, 0, 6 }, - { flatbuffers::ET_SEQUENCE, 0, 7 }, - { flatbuffers::ET_SEQUENCE, 0, 8 }, - { flatbuffers::ET_SEQUENCE, 0, 9 }, - { flatbuffers::ET_SEQUENCE, 0, 10 }, - { flatbuffers::ET_SEQUENCE, 0, 11 }, - { flatbuffers::ET_SEQUENCE, 0, 12 }, - { flatbuffers::ET_SEQUENCE, 0, 13 }, - { flatbuffers::ET_SEQUENCE, 0, 14 }, - { flatbuffers::ET_SEQUENCE, 0, 15 }, - { flatbuffers::ET_SEQUENCE, 0, 16 }, - { flatbuffers::ET_SEQUENCE, 0, 17 }, - { flatbuffers::ET_SEQUENCE, 0, 18 }, - { flatbuffers::ET_SEQUENCE, 0, 19 }, - { flatbuffers::ET_SEQUENCE, 0, 20 }, - { flatbuffers::ET_SEQUENCE, 0, 21 }, - { flatbuffers::ET_SEQUENCE, 0, 22 }, - { flatbuffers::ET_SEQUENCE, 0, 23 }, - { flatbuffers::ET_SEQUENCE, 0, 24 }, - { flatbuffers::ET_SEQUENCE, 0, 25 }, - { flatbuffers::ET_SEQUENCE, 0, 26 }, - { flatbuffers::ET_SEQUENCE, 0, 27 }, - { flatbuffers::ET_SEQUENCE, 0, 28 }, - { flatbuffers::ET_SEQUENCE, 0, 29 }, - { flatbuffers::ET_SEQUENCE, 0, 30 }, - { flatbuffers::ET_SEQUENCE, 0, 31 }, - { flatbuffers::ET_SEQUENCE, 0, 32 }, - { flatbuffers::ET_SEQUENCE, 0, 33 }, - { flatbuffers::ET_SEQUENCE, 0, 34 }, - { flatbuffers::ET_SEQUENCE, 0, 35 }, - { flatbuffers::ET_SEQUENCE, 0, 36 }, - { flatbuffers::ET_SEQUENCE, 0, 37 }, - { flatbuffers::ET_SEQUENCE, 0, 38 }, - { flatbuffers::ET_SEQUENCE, 0, 39 }, - { flatbuffers::ET_SEQUENCE, 0, 40 }, - { flatbuffers::ET_SEQUENCE, 0, 41 }, - { flatbuffers::ET_SEQUENCE, 0, 42 }, - { flatbuffers::ET_SEQUENCE, 0, 43 }, - { flatbuffers::ET_SEQUENCE, 0, 44 }, - { flatbuffers::ET_SEQUENCE, 0, 45 }, - { 
flatbuffers::ET_SEQUENCE, 0, 46 }, - { flatbuffers::ET_SEQUENCE, 0, 47 }, - { flatbuffers::ET_SEQUENCE, 0, 48 }, - { flatbuffers::ET_SEQUENCE, 0, 49 }, - { flatbuffers::ET_SEQUENCE, 0, 50 }, - { flatbuffers::ET_SEQUENCE, 0, 51 }, - { flatbuffers::ET_SEQUENCE, 0, 52 }, - { flatbuffers::ET_SEQUENCE, 0, 53 }, - { flatbuffers::ET_SEQUENCE, 0, 54 }, - { flatbuffers::ET_SEQUENCE, 0, 55 }, - { flatbuffers::ET_SEQUENCE, 0, 56 }, - { flatbuffers::ET_SEQUENCE, 0, 57 }, - { flatbuffers::ET_SEQUENCE, 0, 58 }, - { flatbuffers::ET_SEQUENCE, 0, 59 }, - { flatbuffers::ET_SEQUENCE, 0, 60 }, - { flatbuffers::ET_SEQUENCE, 0, 61 }, - { flatbuffers::ET_SEQUENCE, 0, 62 }, - { flatbuffers::ET_SEQUENCE, 0, 63 }, - { flatbuffers::ET_SEQUENCE, 0, 64 }, - { flatbuffers::ET_SEQUENCE, 0, 65 }, - { flatbuffers::ET_SEQUENCE, 0, 66 }, - { flatbuffers::ET_SEQUENCE, 0, 67 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - Conv2DOptionsTypeTable, - DepthwiseConv2DOptionsTypeTable, - ConcatEmbeddingsOptionsTypeTable, - LSHProjectionOptionsTypeTable, - Pool2DOptionsTypeTable, - SVDFOptionsTypeTable, - RNNOptionsTypeTable, - FullyConnectedOptionsTypeTable, - SoftmaxOptionsTypeTable, - ConcatenationOptionsTypeTable, - AddOptionsTypeTable, - L2NormOptionsTypeTable, - LocalResponseNormalizationOptionsTypeTable, - LSTMOptionsTypeTable, - ResizeBilinearOptionsTypeTable, - CallOptionsTypeTable, - ReshapeOptionsTypeTable, - SkipGramOptionsTypeTable, - SpaceToDepthOptionsTypeTable, - EmbeddingLookupSparseOptionsTypeTable, - MulOptionsTypeTable, - PadOptionsTypeTable, - GatherOptionsTypeTable, - BatchToSpaceNDOptionsTypeTable, - SpaceToBatchNDOptionsTypeTable, - TransposeOptionsTypeTable, - ReducerOptionsTypeTable, - SubOptionsTypeTable, - DivOptionsTypeTable, - SqueezeOptionsTypeTable, - SequenceRNNOptionsTypeTable, - StridedSliceOptionsTypeTable, - ExpOptionsTypeTable, - TopKV2OptionsTypeTable, - SplitOptionsTypeTable, - LogSoftmaxOptionsTypeTable, - CastOptionsTypeTable, - 
DequantizeOptionsTypeTable, - MaximumMinimumOptionsTypeTable, - ArgMaxOptionsTypeTable, - LessOptionsTypeTable, - NegOptionsTypeTable, - PadV2OptionsTypeTable, - GreaterOptionsTypeTable, - GreaterEqualOptionsTypeTable, - LessEqualOptionsTypeTable, - SelectOptionsTypeTable, - SliceOptionsTypeTable, - TransposeConvOptionsTypeTable, - SparseToDenseOptionsTypeTable, - TileOptionsTypeTable, - ExpandDimsOptionsTypeTable, - EqualOptionsTypeTable, - NotEqualOptionsTypeTable, - ShapeOptionsTypeTable, - PowOptionsTypeTable, - ArgMinOptionsTypeTable, - FakeQuantOptionsTypeTable, - PackOptionsTypeTable, - LogicalOrOptionsTypeTable, - OneHotOptionsTypeTable, - LogicalAndOptionsTypeTable, - LogicalNotOptionsTypeTable, - UnpackOptionsTypeTable, - FloorDivOptionsTypeTable, - SquareOptionsTypeTable, - ZerosLikeOptionsTypeTable, - FillOptionsTypeTable - }; - static const char * const names[] = { - "NONE", - "Conv2DOptions", - "DepthwiseConv2DOptions", - "ConcatEmbeddingsOptions", - "LSHProjectionOptions", - "Pool2DOptions", - "SVDFOptions", - "RNNOptions", - "FullyConnectedOptions", - "SoftmaxOptions", - "ConcatenationOptions", - "AddOptions", - "L2NormOptions", - "LocalResponseNormalizationOptions", - "LSTMOptions", - "ResizeBilinearOptions", - "CallOptions", - "ReshapeOptions", - "SkipGramOptions", - "SpaceToDepthOptions", - "EmbeddingLookupSparseOptions", - "MulOptions", - "PadOptions", - "GatherOptions", - "BatchToSpaceNDOptions", - "SpaceToBatchNDOptions", - "TransposeOptions", - "ReducerOptions", - "SubOptions", - "DivOptions", - "SqueezeOptions", - "SequenceRNNOptions", - "StridedSliceOptions", - "ExpOptions", - "TopKV2Options", - "SplitOptions", - "LogSoftmaxOptions", - "CastOptions", - "DequantizeOptions", - "MaximumMinimumOptions", - "ArgMaxOptions", - "LessOptions", - "NegOptions", - "PadV2Options", - "GreaterOptions", - "GreaterEqualOptions", - "LessEqualOptions", - "SelectOptions", - "SliceOptions", - "TransposeConvOptions", - "SparseToDenseOptions", - "TileOptions", - 
"ExpandDimsOptions", - "EqualOptions", - "NotEqualOptions", - "ShapeOptions", - "PowOptions", - "ArgMinOptions", - "FakeQuantOptions", - "PackOptions", - "LogicalOrOptions", - "OneHotOptions", - "LogicalAndOptions", - "LogicalNotOptions", - "UnpackOptions", - "FloorDivOptions", - "SquareOptions", - "ZerosLikeOptions", - "FillOptions" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_UNION, 69, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSquaredDifferenceOptions( + _fbb); } -inline const flatbuffers::TypeTable *PaddingTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PaddingTypeTable - }; - static const char * const names[] = { - "SAME", - "VALID" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MirrorPadOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *ActivationFunctionTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - 
ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "NONE", - "RELU", - "RELU_N1_TO_1", - "RELU6", - "TANH", - "SIGN_BIT" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 6, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = mode(); _o->mode = _e; }; +} + +inline flatbuffers::Offset MirrorPadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMirrorPadOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _mode = _o->mode; + return tflite::CreateMirrorPadOptions( + _fbb, + _mode); +} + +inline UniqueOptionsT *UniqueOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new UniqueOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = idx_out_type(); _o->idx_out_type = _e; }; +} + +inline flatbuffers::Offset UniqueOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUniqueOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; 
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _idx_out_type = _o->idx_out_type; + return tflite::CreateUniqueOptions( + _fbb, + _idx_out_type); +} + +inline ReverseV2OptionsT *ReverseV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReverseV2OptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ReverseV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReverseV2Options(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateReverseV2Options( + _fbb); +} + +inline AddNOptionsT *AddNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new AddNOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void AddNOptions::UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset AddNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAddNOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateAddNOptions( + _fbb); } -inline const flatbuffers::TypeTable *LSHProjectionTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - LSHProjectionTypeTypeTable - }; - static const char * const names[] = { - "UNKNOWN", - "SPARSE", - "DENSE" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names - }; - return &tt; +inline GatherNdOptionsT *GatherNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new GatherNdOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *FullyConnectedOptionsWeightsFormatTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - FullyConnectedOptionsWeightsFormatTypeTable - }; - static const char * const names[] = { - "DEFAULT", - "SHUFFLED4x16INT8" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; } -inline const flatbuffers::TypeTable *LSTMKernelTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - LSTMKernelTypeTypeTable - }; - static const 
char * const names[] = { - "FULL", - "BASIC" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset GatherNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGatherNdOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *CombinerTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - CombinerTypeTypeTable - }; - static const char * const names[] = { - "SUM", - "MEAN", - "SQRTN" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateGatherNdOptions( + _fbb); } -inline const flatbuffers::TypeTable *CustomOptionsFormatTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - CustomOptionsFormatTypeTable - }; - static const char * const names[] = { - "FLEXBUFFERS" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline WhereOptionsT *WhereOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new WhereOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const 
flatbuffers::TypeTable *QuantizationParametersTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_LONG, 1, -1 } - }; - static const char * const names[] = { - "min", - "max", - "scale", - "zeroPoint" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names - }; - return &tt; +inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; } -inline const flatbuffers::TypeTable *TensorTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_UINT, 0, -1 }, - { flatbuffers::ET_STRING, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - TensorTypeTypeTable, - QuantizationParametersTypeTable - }; - static const char * const names[] = { - "shape", - "type", - "buffer", - "name", - "quantization", - "is_variable" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset WhereOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateWhereOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *Conv2DOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PaddingTypeTable, - ActivationFunctionTypeTypeTable - }; - static const 
char * const names[] = { - "padding", - "stride_w", - "stride_h", - "fused_activation_function", - "dilation_w_factor", - "dilation_h_factor" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateWhereOptions( + _fbb); } -inline const flatbuffers::TypeTable *Pool2DOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PaddingTypeTable, - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "padding", - "stride_w", - "stride_h", - "filter_width", - "filter_height", - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *DepthwiseConv2DOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PaddingTypeTable, - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "padding", - "stride_w", - 
"stride_h", - "depth_multiplier", - "fused_activation_function", - "dilation_w_factor", - "dilation_h_factor" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 7, type_codes, type_refs, nullptr, names - }; - return &tt; +inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ReverseSequenceOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *ConcatEmbeddingsOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 } - }; - static const char * const names[] = { - "num_channels", - "num_columns_per_channel", - "embedding_dim_per_channel" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names - }; - return &tt; +inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = seq_dim(); _o->seq_dim = _e; }; + { auto _e = batch_dim(); _o->batch_dim = _e; }; } -inline const flatbuffers::TypeTable *LSHProjectionOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - LSHProjectionTypeTypeTable - }; - static const char * const names[] = { - "type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset ReverseSequenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReverseSequenceOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *SVDFOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = 
{ - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "rank", - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _seq_dim = _o->seq_dim; + auto _batch_dim = _o->batch_dim; + return tflite::CreateReverseSequenceOptions( + _fbb, + _seq_dim, + _batch_dim); } -inline const flatbuffers::TypeTable *RNNOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MatrixDiagOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *SequenceRNNOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "time_major", - "fused_activation_function" - }; - static const 
flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; } -inline const flatbuffers::TypeTable *BidirectionalSequenceRNNOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "time_major", - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset MatrixDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMatrixDiagOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *FullyConnectedOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable, - FullyConnectedOptionsWeightsFormatTypeTable - }; - static const char * const names[] = { - "fused_activation_function", - "weights_format" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return 
tflite::CreateMatrixDiagOptions( + _fbb); } -inline const flatbuffers::TypeTable *SoftmaxOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "beta" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline QuantizeOptionsT *QuantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new QuantizeOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *ConcatenationOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "axis", - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; } -inline const flatbuffers::TypeTable *AddOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset QuantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizeOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *MulOptionsTypeTable() { - static const 
flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateQuantizeOptions( + _fbb); } -inline const flatbuffers::TypeTable *L2NormOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MatrixSetDiagOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *LocalResponseNormalizationOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "radius", - "bias", - "alpha", - "beta" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names - }; - return &tt; +inline void 
MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; } -inline const flatbuffers::TypeTable *LSTMOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable, - LSTMKernelTypeTypeTable - }; - static const char * const names[] = { - "fused_activation_function", - "cell_clip", - "proj_clip", - "kernel_type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset MatrixSetDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *ResizeBilinearOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "new_height", - "new_width", - "alignCorners" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixSetDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateMatrixSetDiagOptions( + _fbb); } -inline const flatbuffers::TypeTable *CallOptionsTypeTable() 
{ - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_UINT, 0, -1 } - }; - static const char * const names[] = { - "subgraph" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline IfOptionsT *IfOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new IfOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *PadOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline void IfOptions::UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = then_subgraph_index(); _o->then_subgraph_index = _e; }; + { auto _e = else_subgraph_index(); _o->else_subgraph_index = _e; }; } -inline const flatbuffers::TypeTable *PadV2OptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset IfOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateIfOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *ReshapeOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 } - }; - static const char * const names[] = { - "new_shape" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; + auto _then_subgraph_index = _o->then_subgraph_index; + auto _else_subgraph_index = _o->else_subgraph_index; + return tflite::CreateIfOptions( + _fbb, + _then_subgraph_index, + _else_subgraph_index); } -inline const flatbuffers::TypeTable *SpaceToBatchNDOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new WhileOptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *BatchToSpaceNDOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; }; + { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; }; } -inline const flatbuffers::TypeTable *SkipGramOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "ngram_size", - "max_skip_size", - "include_all_ngrams" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset WhileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateWhileOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *SpaceToDepthOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "block_size" - }; 
- static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _cond_subgraph_index = _o->cond_subgraph_index; + auto _body_subgraph_index = _o->body_subgraph_index; + return tflite::CreateWhileOptions( + _fbb, + _cond_subgraph_index, + _body_subgraph_index); } -inline const flatbuffers::TypeTable *SubOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new NonMaxSuppressionV4OptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *DivOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ActivationFunctionTypeTypeTable - }; - static const char * const names[] = { - "fused_activation_function" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + 
(void)_resolver; } -inline const flatbuffers::TypeTable *TopKV2OptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset NonMaxSuppressionV4Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *EmbeddingLookupSparseOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - CombinerTypeTypeTable - }; - static const char * const names[] = { - "combiner" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNonMaxSuppressionV4Options( + _fbb); } -inline const flatbuffers::TypeTable *GatherOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new NonMaxSuppressionV5OptionsT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable 
*TransposeOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; } -inline const flatbuffers::TypeTable *ExpOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset NonMaxSuppressionV5Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *ReducerOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "keepDims" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNonMaxSuppressionV5Options( + _fbb); } -inline const flatbuffers::TypeTable *SqueezeOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 } - }; - static const char * const names[] = { - "squeezeDims" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline ScatterNdOptionsT 
*ScatterNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ScatterNdOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; } -inline const flatbuffers::TypeTable *SplitOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "num_splits" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset ScatterNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateScatterNdOptions(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *StridedSliceOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "beginMask", - "endMask", - "ellipsisMask", - "newAxisMask", - "shrinkAxisMask" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateScatterNdOptions( + _fbb); } -inline const flatbuffers::TypeTable *LogSoftmaxOptionsTypeTable() { - static const flatbuffers::TypeTable tt 
= { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new OperatorCodeT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *CastOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - TensorTypeTypeTable - }; - static const char * const names[] = { - "in_data_type", - "out_data_type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = builtin_code(); _o->builtin_code = _e; }; + { auto _e = custom_code(); if (_e) _o->custom_code = _e->str(); }; + { auto _e = version(); _o->version = _e; }; } -inline const flatbuffers::TypeTable *DequantizeOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateOperatorCode(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *MaximumMinimumOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t 
*__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _builtin_code = _o->builtin_code; + auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code); + auto _version = _o->version; + return tflite::CreateOperatorCode( + _fbb, + _builtin_code, + _custom_code, + _version); } -inline const flatbuffers::TypeTable *TileOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new OperatorT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *ArgMaxOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - TensorTypeTypeTable - }; - static const char * const names[] = { - "output_type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void Operator::UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = opcode_index(); _o->opcode_index = _e; }; + { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } }; + { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } }; + { auto _e = builtin_options_type(); _o->builtin_options.type = _e; }; + { auto _e = builtin_options(); if (_e) _o->builtin_options.value = BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); }; + { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->custom_options[_i] = 
_e->Get(_i); } } }; + { auto _e = custom_options_format(); _o->custom_options_format = _e; }; + { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } }; + { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } }; } -inline const flatbuffers::TypeTable *ArgMinOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - TensorTypeTypeTable - }; - static const char * const names[] = { - "output_type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateOperator(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *GreaterOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _opcode_index = _o->opcode_index; + auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; + auto _outputs = _o->outputs.size() ? 
_fbb.CreateVector(_o->outputs) : 0; + auto _builtin_options_type = _o->builtin_options.type; + auto _builtin_options = _o->builtin_options.Pack(_fbb); + auto _custom_options = _o->custom_options.size() ? _fbb.CreateVector(_o->custom_options) : 0; + auto _custom_options_format = _o->custom_options_format; + auto _mutating_variable_inputs = _o->mutating_variable_inputs.size() ? _fbb.CreateVector(_o->mutating_variable_inputs) : 0; + auto _intermediates = _o->intermediates.size() ? _fbb.CreateVector(_o->intermediates) : 0; + return tflite::CreateOperator( + _fbb, + _opcode_index, + _inputs, + _outputs, + _builtin_options_type, + _builtin_options, + _custom_options, + _custom_options_format, + _mutating_variable_inputs, + _intermediates); } -inline const flatbuffers::TypeTable *GreaterEqualOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new SubGraphT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *LessOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } }; + { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } }; + { auto _e = operators(); if 
(_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operators[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = name(); if (_e) _o->name = _e->str(); }; } -inline const flatbuffers::TypeTable *LessEqualOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSubGraph(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *NegOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _tensors = _o->tensors.size() ? _fbb.CreateVector> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; + auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; + auto _operators = _o->operators.size() ? _fbb.CreateVector> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _name = _o->name.empty() ? 
0 : _fbb.CreateString(_o->name); + return tflite::CreateSubGraph( + _fbb, + _tensors, + _inputs, + _outputs, + _operators, + _name); } -inline const flatbuffers::TypeTable *SelectOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new BufferT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *SliceOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = data(); if (_e) { _o->data.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->data[_i] = _e->Get(_i); } } }; } -inline const flatbuffers::TypeTable *TransposeConvOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PaddingTypeTable - }; - static const char * const names[] = { - "padding", - "stride_w", - "stride_h" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBuffer(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *ExpandDimsOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0; + return tflite::CreateBuffer( + _fbb, + _data); } -inline const flatbuffers::TypeTable *SparseToDenseOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "validateIndices" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline MetadataT *Metadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new MetadataT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *EqualOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline void Metadata::UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = name(); if (_e) _o->name = _e->str(); }; + { auto _e = buffer(); _o->buffer = _e; }; } -inline const flatbuffers::TypeTable *NotEqualOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset Metadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMetadata(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *ShapeOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - TensorTypeTypeTable - }; - static const char * const names[] 
= { - "out_type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + auto _buffer = _o->buffer; + return tflite::CreateMetadata( + _fbb, + _name, + _buffer); } -inline const flatbuffers::TypeTable *PowOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new ModelT(); + UnPackTo(_o, _resolver); + return _o; } -inline const flatbuffers::TypeTable *FakeQuantOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "min", - "max", - "num_bits", - "narrow_range" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names - }; - return &tt; +inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = version(); _o->version = _e; }; + { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operator_codes[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for 
(flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->subgraphs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = description(); if (_e) _o->description = _e->str(); }; + { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffers[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; + { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } }; + { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; } -inline const flatbuffers::TypeTable *PackOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "values_count", - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateModel(_fbb, _o, _rehasher); } -inline const flatbuffers::TypeTable *LogicalOrOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _version = _o->version; + auto _operator_codes = 
_o->operator_codes.size() ? _fbb.CreateVector> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description); + auto _buffers = _o->buffers.size() ? _fbb.CreateVector> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0; + auto _metadata = _o->metadata.size() ? _fbb.CreateVector> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0; + return tflite::CreateModel( + _fbb, + _version, + _operator_codes, + _subgraphs, + _description, + _buffers, + _metadata_buffer, + _metadata); } -inline const flatbuffers::TypeTable *OneHotOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) { + switch (type) { + case QuantizationDetails_NONE: { + return true; + } + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return false; + } } -inline const flatbuffers::TypeTable *LogicalAndOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - 
flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyQuantizationDetails( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; } -inline const flatbuffers::TypeTable *LogicalNotOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver) { + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; + } } -inline const flatbuffers::TypeTable *UnpackOptionsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "num", - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; +inline flatbuffers::Offset QuantizationDetailsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(value); + return CreateCustomQuantization(_fbb, ptr, _rehasher).Union(); + } + default: return 0; + } } -inline const flatbuffers::TypeTable *FloorDivOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline 
QuantizationDetailsUnion::QuantizationDetailsUnion(const QuantizationDetailsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { + switch (type) { + case QuantizationDetails_CustomQuantization: { + value = new CustomQuantizationT(*reinterpret_cast(u.value)); + break; + } + default: + break; + } } -inline const flatbuffers::TypeTable *SquareOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline void QuantizationDetailsUnion::Reset() { + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + default: break; + } + value = nullptr; + type = QuantizationDetails_NONE; } -inline const flatbuffers::TypeTable *ZerosLikeOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) { + switch (type) { + case BuiltinOptions_NONE: { + return true; + } + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FullyConnectedOptions: 
{ + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MulOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case 
BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReducerOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SubOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ArgMaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case 
BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GreaterOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case 
BuiltinOptions_PackOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_OneHotOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FloorDivOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SquareOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RangeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(obj); + 
return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MirrorPadOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AbsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SplitVOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UniqueOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReverseV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AddNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GatherNdOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CosOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_WhereOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RankOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MatrixSetDiagOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case BuiltinOptions_WhileOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return false; + } } -inline const flatbuffers::TypeTable *FillOptionsTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; +inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyBuiltinOptions( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; } -inline const flatbuffers::TypeTable *OperatorCodeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_STRING, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - BuiltinOperatorTypeTable - }; - static const char * const names[] = { - "builtin_code", - "custom_code", - "version" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *OperatorTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_UINT, 0, -1 }, - { 
flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_UTYPE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_UCHAR, 1, -1 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_BOOL, 1, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - BuiltinOptionsTypeTable, - CustomOptionsFormatTypeTable - }; - static const char * const names[] = { - "opcode_index", - "inputs", - "outputs", - "builtin_options_type", - "builtin_options", - "custom_options", - "custom_options_format", - "mutating_variable_inputs" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 8, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) { + switch (type) { + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FullyConnectedOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); 
+ } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MulOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ReducerOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SubOptions: { + auto ptr = 
reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ArgMaxOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GreaterOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case 
BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_PackOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_OneHotOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(obj); + return 
ptr->UnPack(resolver); + } + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FloorDivOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SquareOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RangeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MirrorPadOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_AbsOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SplitVOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UniqueOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); 
+ } + case BuiltinOptions_ReverseV2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_AddNOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GatherNdOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_CosOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_WhereOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RankOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MatrixSetDiagOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_WhileOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; + } } -inline const 
flatbuffers::TypeTable *SubGraphTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 1, 0 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_SEQUENCE, 1, 1 }, - { flatbuffers::ET_STRING, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - TensorTypeTable, - OperatorTypeTable - }; - static const char * const names[] = { - "tensors", - "inputs", - "outputs", - "operators", - "name" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; +inline flatbuffers::Offset BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { + switch (type) { + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(value); + return CreateConv2DOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(value); + return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(value); + return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(value); + return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(value); + return CreatePool2DOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(value); + return CreateSVDFOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(value); + return CreateRNNOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FullyConnectedOptions: { + auto ptr = reinterpret_cast(value); + return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union(); + } + case 
BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(value); + return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = reinterpret_cast(value); + return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(value); + return CreateAddOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(value); + return CreateL2NormOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(value); + return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(value); + return CreateLSTMOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(value); + return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(value); + return CreateCallOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(value); + return CreateReshapeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(value); + return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(value); + return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + auto ptr = reinterpret_cast(value); + return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MulOptions: { + auto ptr = reinterpret_cast(value); + return CreateMulOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(value); 
+ return CreatePadOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(value); + return CreateGatherOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(value); + return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(value); + return CreateSpaceToBatchNDOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(value); + return CreateTransposeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReducerOptions: { + auto ptr = reinterpret_cast(value); + return CreateReducerOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SubOptions: { + auto ptr = reinterpret_cast(value); + return CreateSubOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(value); + return CreateDivOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(value); + return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(value); + return CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(value); + return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(value); + return CreateExpOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(value); + return CreateTopKV2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(value); + return CreateSplitOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = 
reinterpret_cast(value); + return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(value); + return CreateCastOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(value); + return CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ArgMaxOptions: { + auto ptr = reinterpret_cast(value); + return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(value); + return CreateLessOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(value); + return CreateNegOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(value); + return CreatePadV2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_GreaterOptions: { + auto ptr = reinterpret_cast(value); + return CreateGreaterOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(value); + return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(value); + return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(value); + return CreateSelectOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(value); + return CreateSliceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(value); + return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union(); + } + case 
BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(value); + return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(value); + return CreateTileOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(value); + return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(value); + return CreateEqualOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(value); + return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(value); + return CreateShapeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(value); + return CreatePowOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(value); + return CreateArgMinOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr = reinterpret_cast(value); + return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_PackOptions: { + auto ptr = reinterpret_cast(value); + return CreatePackOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(value); + return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_OneHotOptions: { + auto ptr = reinterpret_cast(value); + return CreateOneHotOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(value); + return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(value); + return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union(); 
+ } + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnpackOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FloorDivOptions: { + auto ptr = reinterpret_cast(value); + return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SquareOptions: { + auto ptr = reinterpret_cast(value); + return CreateSquareOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(value); + return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(value); + return CreateFillOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(value); + return CreateBidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(value); + return CreateBidirectionalSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(value); + return CreateFloorModOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_RangeOptions: { + auto ptr = reinterpret_cast(value); + return CreateRangeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(value); + return CreateResizeNearestNeighborOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(value); + return CreateLeakyReluOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(value); + return CreateSquaredDifferenceOptions(_fbb, ptr, 
_rehasher).Union(); + } + case BuiltinOptions_MirrorPadOptions: { + auto ptr = reinterpret_cast(value); + return CreateMirrorPadOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_AbsOptions: { + auto ptr = reinterpret_cast(value); + return CreateAbsOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SplitVOptions: { + auto ptr = reinterpret_cast(value); + return CreateSplitVOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UniqueOptions: { + auto ptr = reinterpret_cast(value); + return CreateUniqueOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReverseV2Options: { + auto ptr = reinterpret_cast(value); + return CreateReverseV2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_AddNOptions: { + auto ptr = reinterpret_cast(value); + return CreateAddNOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_GatherNdOptions: { + auto ptr = reinterpret_cast(value); + return CreateGatherNdOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CosOptions: { + auto ptr = reinterpret_cast(value); + return CreateCosOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_WhereOptions: { + auto ptr = reinterpret_cast(value); + return CreateWhereOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_RankOptions: { + auto ptr = reinterpret_cast(value); + return CreateRankOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(value); + return CreateReverseSequenceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(value); + return CreateMatrixDiagOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateQuantizeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MatrixSetDiagOptions: { + auto ptr = reinterpret_cast(value); + return 
CreateMatrixSetDiagOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(value); + return CreateHardSwishOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(value); + return CreateIfOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_WhileOptions: { + auto ptr = reinterpret_cast(value); + return CreateWhileOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(value); + return CreateDepthToSpaceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(value); + return CreateNonMaxSuppressionV4Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(value); + return CreateNonMaxSuppressionV5Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(value); + return CreateScatterNdOptions(_fbb, ptr, _rehasher).Union(); + } + default: return 0; + } } -inline const flatbuffers::TypeTable *BufferTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_UCHAR, 1, -1 } - }; - static const char * const names[] = { - "data" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; +inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { + switch (type) { + case BuiltinOptions_Conv2DOptions: { + value = new Conv2DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DepthwiseConv2DOptions: { + value = new DepthwiseConv2DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + value = new ConcatEmbeddingsOptionsT(*reinterpret_cast(u.value)); + break; + } + 
case BuiltinOptions_LSHProjectionOptions: { + value = new LSHProjectionOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_Pool2DOptions: { + value = new Pool2DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SVDFOptions: { + value = new SVDFOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_RNNOptions: { + value = new RNNOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FullyConnectedOptions: { + value = new FullyConnectedOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SoftmaxOptions: { + value = new SoftmaxOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ConcatenationOptions: { + value = new ConcatenationOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_AddOptions: { + value = new AddOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_L2NormOptions: { + value = new L2NormOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + value = new LocalResponseNormalizationOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LSTMOptions: { + value = new LSTMOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ResizeBilinearOptions: { + value = new ResizeBilinearOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CallOptions: { + value = new CallOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReshapeOptions: { + value = new ReshapeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SkipGramOptions: { + value = new SkipGramOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SpaceToDepthOptions: { + value = new SpaceToDepthOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + value = new EmbeddingLookupSparseOptionsT(*reinterpret_cast(u.value)); + break; + } + 
case BuiltinOptions_MulOptions: { + value = new MulOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_PadOptions: { + value = new PadOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GatherOptions: { + value = new GatherOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BatchToSpaceNDOptions: { + value = new BatchToSpaceNDOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SpaceToBatchNDOptions: { + value = new SpaceToBatchNDOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_TransposeOptions: { + value = new TransposeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReducerOptions: { + value = new ReducerOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SubOptions: { + value = new SubOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DivOptions: { + value = new DivOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SqueezeOptions: { + value = new SqueezeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SequenceRNNOptions: { + value = new SequenceRNNOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_StridedSliceOptions: { + value = new StridedSliceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ExpOptions: { + value = new ExpOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_TopKV2Options: { + value = new TopKV2OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SplitOptions: { + value = new SplitOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LogSoftmaxOptions: { + value = new LogSoftmaxOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CastOptions: { + value = new CastOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DequantizeOptions: { + value = new 
DequantizeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_MaximumMinimumOptions: { + value = new MaximumMinimumOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ArgMaxOptions: { + value = new ArgMaxOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LessOptions: { + value = new LessOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_NegOptions: { + value = new NegOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_PadV2Options: { + value = new PadV2OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GreaterOptions: { + value = new GreaterOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GreaterEqualOptions: { + value = new GreaterEqualOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LessEqualOptions: { + value = new LessEqualOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SelectOptions: { + value = new SelectOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SliceOptions: { + value = new SliceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_TransposeConvOptions: { + value = new TransposeConvOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SparseToDenseOptions: { + value = new SparseToDenseOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_TileOptions: { + value = new TileOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ExpandDimsOptions: { + value = new ExpandDimsOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_EqualOptions: { + value = new EqualOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_NotEqualOptions: { + value = new NotEqualOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ShapeOptions: { + value = new ShapeOptionsT(*reinterpret_cast(u.value)); + break; + } + case 
BuiltinOptions_PowOptions: { + value = new PowOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ArgMinOptions: { + value = new ArgMinOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FakeQuantOptions: { + value = new FakeQuantOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_PackOptions: { + value = new PackOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LogicalOrOptions: { + value = new LogicalOrOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_OneHotOptions: { + value = new OneHotOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LogicalAndOptions: { + value = new LogicalAndOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LogicalNotOptions: { + value = new LogicalNotOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnpackOptions: { + value = new UnpackOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FloorDivOptions: { + value = new FloorDivOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SquareOptions: { + value = new SquareOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ZerosLikeOptions: { + value = new ZerosLikeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FillOptions: { + value = new FillOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + value = new BidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + value = new BidirectionalSequenceRNNOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + value = new UnidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FloorModOptions: { + value = new FloorModOptionsT(*reinterpret_cast(u.value)); + 
break; + } + case BuiltinOptions_RangeOptions: { + value = new RangeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + value = new ResizeNearestNeighborOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LeakyReluOptions: { + value = new LeakyReluOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SquaredDifferenceOptions: { + value = new SquaredDifferenceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_MirrorPadOptions: { + value = new MirrorPadOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_AbsOptions: { + value = new AbsOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SplitVOptions: { + value = new SplitVOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UniqueOptions: { + value = new UniqueOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReverseV2Options: { + value = new ReverseV2OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_AddNOptions: { + value = new AddNOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GatherNdOptions: { + value = new GatherNdOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CosOptions: { + value = new CosOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_WhereOptions: { + value = new WhereOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_RankOptions: { + value = new RankOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReverseSequenceOptions: { + value = new ReverseSequenceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_MatrixDiagOptions: { + value = new MatrixDiagOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_QuantizeOptions: { + value = new QuantizeOptionsT(*reinterpret_cast(u.value)); + break; + } + case 
BuiltinOptions_MatrixSetDiagOptions: { + value = new MatrixSetDiagOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HardSwishOptions: { + value = new HardSwishOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_IfOptions: { + value = new IfOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_WhileOptions: { + value = new WhileOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DepthToSpaceOptions: { + value = new DepthToSpaceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + value = new NonMaxSuppressionV4OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + value = new NonMaxSuppressionV5OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ScatterNdOptions: { + value = new ScatterNdOptionsT(*reinterpret_cast(u.value)); + break; + } + default: + break; + } } -inline const flatbuffers::TypeTable *ModelTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_UINT, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 1, 0 }, - { flatbuffers::ET_SEQUENCE, 1, 1 }, - { flatbuffers::ET_STRING, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 1, 2 }, - { flatbuffers::ET_INT, 1, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - OperatorCodeTypeTable, - SubGraphTypeTable, - BufferTypeTable - }; - static const char * const names[] = { - "version", - "operator_codes", - "subgraphs", - "description", - "buffers", - "metadata_buffer" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, names - }; - return &tt; +inline void BuiltinOptionsUnion::Reset() { + switch (type) { + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case 
BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FullyConnectedOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_MulOptions: { + auto ptr = 
reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ReducerOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SubOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ArgMaxOptions: { + 
auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_GreaterOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_PackOptions: { + auto 
ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_OneHotOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FloorDivOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SquareOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_RangeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_MirrorPadOptions: { + auto ptr = 
reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_AbsOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SplitVOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_UniqueOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ReverseV2Options: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_AddNOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_GatherNdOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_CosOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_WhereOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_RankOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_MatrixSetDiagOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_WhileOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case 
BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + default: break; + } + value = nullptr; + type = BuiltinOptions_NONE; } inline const tflite::Model *GetModel(const void *buf) { @@ -13019,10 +14683,6 @@ inline const tflite::Model *GetSizePrefixedModel(const void *buf) { return flatbuffers::GetSizePrefixedRoot(buf); } -inline Model *GetMutableModel(void *buf) { - return flatbuffers::GetMutableRoot(buf); -} - inline const char *ModelIdentifier() { return "TFL3"; } diff --git a/tools/cpp/CMakeLists.txt b/tools/cpp/CMakeLists.txt index 20b5c6774..15e780eae 100644 --- a/tools/cpp/CMakeLists.txt +++ b/tools/cpp/CMakeLists.txt @@ -1,40 +1,34 @@ -# put output to build dir -SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) +add_executable(MNNV2Basic.out ${CMAKE_CURRENT_LIST_DIR}/MNNV2Basic.cpp ${CMAKE_CURRENT_LIST_DIR}/revertMNNModel.cpp) +target_link_libraries(MNNV2Basic.out ${MNN_DEPS}) -add_executable(MNNV2Basic.out MNNV2Basic.cpp revertMNNModel.cpp) -target_link_libraries(MNNV2Basic.out ${MNN_DEPEND}) +add_executable(mobilenetTest.out ${CMAKE_CURRENT_LIST_DIR}/mobilenetTest.cpp ) +target_link_libraries(mobilenetTest.out ${MNN_DEPS}) -include_directories(${MNN_SOURCE_DIR}/3rd_party/imageHelper/) -add_executable(mobilenetTest.out mobilenetTest.cpp ) -target_link_libraries(mobilenetTest.out ${MNN_DEPEND}) +add_executable(backendTest.out ${CMAKE_CURRENT_LIST_DIR}/backendTest.cpp) +target_link_libraries(backendTest.out ${MNN_DEPS}) -add_executable(backendTest.out backendTest.cpp) -target_link_libraries(backendTest.out ${MNN_DEPEND}) +add_executable(testModel.out ${CMAKE_CURRENT_LIST_DIR}/testModel.cpp) +target_link_libraries(testModel.out ${MNN_DEPS}) -add_executable(testModel.out testModel.cpp) -target_link_libraries(testModel.out ${MNN_DEPEND}) +add_executable(testModelWithDescrisbe.out 
${CMAKE_CURRENT_LIST_DIR}/testModelWithDescrisbe.cpp ${CMAKE_CURRENT_LIST_DIR}/Config.cpp) +target_link_libraries(testModelWithDescrisbe.out ${MNN_DEPS}) -add_executable(testModelWithDescrisbe.out testModelWithDescrisbe.cpp Config.cpp) -target_link_libraries(testModelWithDescrisbe.out ${MNN_DEPEND}) +add_executable(getPerformance.out ${CMAKE_CURRENT_LIST_DIR}/getPerformance.cpp) +target_link_libraries(getPerformance.out ${MNN_DEPS}) -add_executable(getPerformance.out getPerformance.cpp) -target_link_libraries(getPerformance.out ${MNN_DEPEND}) +add_executable(checkInvalidValue.out ${CMAKE_CURRENT_LIST_DIR}/checkInvalidValue.cpp) +target_link_libraries(checkInvalidValue.out ${MNN_DEPS}) -add_executable(checkInvalidValue.out checkInvalidValue.cpp) -target_link_libraries(checkInvalidValue.out ${MNN_DEPEND}) - -# include converter's MNN_generated.h -include_directories(..) -add_executable(timeProfile.out timeProfile.cpp revertMNNModel.cpp Profiler.hpp Profiler.cpp) -target_link_libraries(timeProfile.out ${MNN_DEPEND}) +add_executable(timeProfile.out ${CMAKE_CURRENT_LIST_DIR}/timeProfile.cpp ${CMAKE_CURRENT_LIST_DIR}/revertMNNModel.cpp ${CMAKE_CURRENT_LIST_DIR}/Profiler.cpp) +target_link_libraries(timeProfile.out ${MNN_DEPS}) if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - add_executable(checkDir.out checkDir.cpp) - add_executable(checkFile.out checkFile.cpp) - add_executable(winogradExample.out winogradExample.cpp) - target_link_libraries(winogradExample.out ${MNN_DEPEND}) - add_executable(winogradGenerateGLSL.out winogradGenerateGLSL.cpp) - target_link_libraries(winogradGenerateGLSL.out ${MNN_DEPEND}) - add_executable(winogradGenerateCL.out winogradGenerateCL.cpp) - target_link_libraries(winogradGenerateCL.out ${MNN_DEPEND}) + add_executable(checkDir.out ${CMAKE_CURRENT_LIST_DIR}/checkDir.cpp) + add_executable(checkFile.out ${CMAKE_CURRENT_LIST_DIR}/checkFile.cpp) + add_executable(winogradExample.out ${CMAKE_CURRENT_LIST_DIR}/winogradExample.cpp) + 
target_link_libraries(winogradExample.out ${MNN_DEPS}) + add_executable(winogradGenerateGLSL.out ${CMAKE_CURRENT_LIST_DIR}/winogradGenerateGLSL.cpp) + target_link_libraries(winogradGenerateGLSL.out ${MNN_DEPS}) + add_executable(winogradGenerateCL.out ${CMAKE_CURRENT_LIST_DIR}/winogradGenerateCL.cpp) + target_link_libraries(winogradGenerateCL.out ${MNN_DEPS}) endif() diff --git a/tools/cpp/MNNV2Basic.cpp b/tools/cpp/MNNV2Basic.cpp index 1a84002bc..3a9ad682f 100644 --- a/tools/cpp/MNNV2Basic.cpp +++ b/tools/cpp/MNNV2Basic.cpp @@ -24,10 +24,10 @@ #else #include #endif -#include "AutoTime.hpp" -#include "Interpreter.hpp" -#include "MNNDefine.h" -#include "Tensor.hpp" +#include +#include +#include +#include //#define FEED_INPUT_NAME_VALUE @@ -309,7 +309,7 @@ static int test_main(int argc, const char* argv[]) { auto size = givenTensor.elementSize(); for (int i = 0; i < size; ++i) { input >> inputData[i]; - //inputData[i] = 1.0f; + // inputData[i] = 1.0f; } } inputTensor->copyFromHostTensor(&givenTensor); diff --git a/tools/cpp/Profiler.cpp b/tools/cpp/Profiler.cpp index 3ad079e48..9dbb3fcb6 100644 --- a/tools/cpp/Profiler.cpp +++ b/tools/cpp/Profiler.cpp @@ -17,7 +17,7 @@ #include #endif #include "Profiler.hpp" -#include "Macro.h" +#include "core/Macro.h" #define MFLOPS (1e6) diff --git a/tools/cpp/Profiler.hpp b/tools/cpp/Profiler.hpp index 43fdb70ea..272172538 100644 --- a/tools/cpp/Profiler.hpp +++ b/tools/cpp/Profiler.hpp @@ -14,8 +14,8 @@ #include #include #include -#include "Interpreter.hpp" -#include "Tensor.hpp" +#include +#include namespace MNN { diff --git a/tools/cpp/backendTest.cpp b/tools/cpp/backendTest.cpp index 8a98ecdfe..a06eca594 100644 --- a/tools/cpp/backendTest.cpp +++ b/tools/cpp/backendTest.cpp @@ -17,10 +17,10 @@ #include #include #include -#include "AutoTime.hpp" -#include "Interpreter.hpp" -#include "Tensor.hpp" -#include "TensorUtils.hpp" +#include +#include +#include +#include "core/TensorUtils.hpp" using namespace MNN; diff --git 
a/tools/cpp/checkDir.cpp b/tools/cpp/checkDir.cpp index 0abf927de..0305e7d13 100644 --- a/tools/cpp/checkDir.cpp +++ b/tools/cpp/checkDir.cpp @@ -7,14 +7,21 @@ // #include -#include #include -#include #include #include #include #include +#if defined(_MSC_VER) +#include +#undef min +#undef max +#else +#include +#include +#endif + using namespace std; #define NONE "\e[0m" @@ -41,18 +48,34 @@ int main(int argc, char* argv[]) { } printf("tolerance=%f\n", tolerance); + std::vector compareFiles; +#if defined(_MSC_VER) + WIN32_FIND_DATA ffd; + HANDLE hFind = INVALID_HANDLE_VALUE; + hFind = FindFirstFile(argv[1], &ffd); + if (INVALID_HANDLE_VALUE == hFind) { + printf("Error to open %s\n", argv[1]); + return 0; + } + do { + if(INVALID_FILE_ATTRIBUTES != GetFileAttributes(ffd.cFileName) && GetLastError() != ERROR_FILE_NOT_FOUND) { + compareFiles.push_back(ffd.cFileName); + } + } while (FindNextFile(hFind, &ffd) != 0); + FindClose(hFind); +#else // open dir DIR* root = opendir(argv[1]); if (NULL == root) { printf("Error to open %s\n", argv[1]); return 0; } - std::vector compareFiles; struct dirent* ent; while ((ent = readdir(root)) != NULL) { compareFiles.push_back(ent->d_name); } closedir(root); +#endif // compare files for (auto s : compareFiles) { diff --git a/tools/cpp/checkInvalidValue.cpp b/tools/cpp/checkInvalidValue.cpp index 6cc459955..ba5af5315 100644 --- a/tools/cpp/checkInvalidValue.cpp +++ b/tools/cpp/checkInvalidValue.cpp @@ -7,15 +7,22 @@ // #include -#include #include -#include #include #include #include #include #include +#if defined(_MSC_VER) +#include +#undef min +#undef max +#else +#include +#include +#endif + using namespace std; #define NONE "\e[0m" @@ -42,18 +49,34 @@ int main(int argc, char* argv[]) { } printf("limit=%d\n", limit); + std::vector compareFiles; +#if defined(_MSC_VER) + WIN32_FIND_DATA ffd; + HANDLE hFind = INVALID_HANDLE_VALUE; + hFind = FindFirstFile(argv[1], &ffd); + if (INVALID_HANDLE_VALUE == hFind) { + printf("Error to open %s\n", 
argv[1]); + return 0; + } + do { + if(INVALID_FILE_ATTRIBUTES != GetFileAttributes(ffd.cFileName) && GetLastError() != ERROR_FILE_NOT_FOUND) { + compareFiles.push_back(ffd.cFileName); + } + } while (FindNextFile(hFind, &ffd) != 0); + FindClose(hFind); +#else // open dir DIR* root = opendir(argv[1]); if (NULL == root) { printf("Error to open %s\n", argv[1]); return 0; } - std::vector compareFiles; struct dirent* ent; while ((ent = readdir(root)) != NULL) { compareFiles.push_back(ent->d_name); } closedir(root); +#endif auto limitValue = powf(10, limit); // compare files diff --git a/tools/cpp/getPerformance.cpp b/tools/cpp/getPerformance.cpp index 71486d0f9..d56c3c0bb 100644 --- a/tools/cpp/getPerformance.cpp +++ b/tools/cpp/getPerformance.cpp @@ -11,8 +11,8 @@ #include #include #include -#include "MNNDefine.h" -#include "Macro.h" +#include +#include "core/Macro.h" #ifdef MNN_USE_NEON #include #endif diff --git a/tools/cpp/mobilenetTest.cpp b/tools/cpp/mobilenetTest.cpp index f525277f2..8ddf7162d 100644 --- a/tools/cpp/mobilenetTest.cpp +++ b/tools/cpp/mobilenetTest.cpp @@ -7,8 +7,8 @@ // #include -#include "ImageProcess.hpp" -#include "Interpreter.hpp" +#include +#include #define MNN_OPEN_TIME_TRACE #include #include @@ -16,7 +16,7 @@ #include #include #include -#include "AutoTime.hpp" +#include #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" @@ -26,7 +26,7 @@ using namespace MNN; using namespace MNN::CV; int main(int argc, const char* argv[]) { - + if (argc < 3) { MNN_PRINT("Usage: ./mobilenetTest.out model.mnn input.jpg [word.txt]\n"); return 0; @@ -38,30 +38,30 @@ int main(int argc, const char* argv[]) { if (argc >= 4) { config.type = (MNNForwardType)::atoi(argv[4]); } - + Session* session = net->createSession(config); - + Tensor* inputTensor = net->getSessionInput(session, NULL); Tensor* outputTensor = net->getSessionOutput(session, NULL); - + Tensor inputTensorUser(inputTensor, Tensor::DimensionType::TENSORFLOW); Tensor outputTensorUser(outputTensor, 
outputTensor->getDimensionType()); - + //image preproccess { int netInputHeight = inputTensorUser.height(); int netInputWidth = inputTensorUser.width(); - + int imageChannel, imageWidth, imageHeight; unsigned char* inputImage = stbi_load(argv[2], &imageWidth, &imageHeight, &imageChannel, 4); - + Matrix trans; trans.setScale(1.0 / imageWidth, 1.0 / imageHeight); trans.postRotate(0, 0.5f, 0.5f); trans.postScale(netInputWidth, netInputHeight); trans.invert(&trans); - + ImageProcess::Config config; config.filterType = BILINEAR; float mean[3] = {103.94f, 116.78f, 123.68f}; @@ -70,14 +70,14 @@ int main(int argc, const char* argv[]) { ::memcpy(config.normal, normals, sizeof(normals)); config.sourceFormat = RGBA; config.destFormat = RGB; - + std::shared_ptr pretreat(ImageProcess::create(config)); pretreat->setMatrix(trans); pretreat->convert(inputImage, imageWidth, imageHeight, 0, &inputTensorUser); - + stbi_image_free(inputImage); } - + //run { AUTOTIME; @@ -85,11 +85,11 @@ int main(int argc, const char* argv[]) { net->runSession(session); outputTensor->copyToHostTensor(&outputTensorUser); } - - + + //get predict labels { - + std::vector words; if (argc >= 4) { std::ifstream inputOs(argv[3]); @@ -98,10 +98,10 @@ int main(int argc, const char* argv[]) { words.emplace_back(line); } } - + MNN_PRINT("output size:%d\n", outputTensorUser.elementSize()); auto type = outputTensorUser.getType(); - + auto size = outputTensorUser.elementSize(); std::vector> tempValues(size); if (type.code == halide_type_float) { @@ -119,7 +119,7 @@ int main(int argc, const char* argv[]) { // Find Max std::sort(tempValues.begin(), tempValues.end(), [](std::pair a, std::pair b) { return a.second > b.second; }); - + int length = size > 10 ? 
10 : size; if (words.empty()) { for (int i = 0; i < length; ++i) { @@ -131,6 +131,6 @@ int main(int argc, const char* argv[]) { } } } - + return 0; } diff --git a/tools/cpp/revertMNNModel.cpp b/tools/cpp/revertMNNModel.cpp index 6fb1d23fa..96136d12b 100644 --- a/tools/cpp/revertMNNModel.cpp +++ b/tools/cpp/revertMNNModel.cpp @@ -12,7 +12,7 @@ #include #include -#include "MNNDefine.h" +#include #include "revertMNNModel.hpp" const float MIN_VALUE = -2.0; diff --git a/tools/cpp/testModel.cpp b/tools/cpp/testModel.cpp index e3ad65dbb..b1e603447 100644 --- a/tools/cpp/testModel.cpp +++ b/tools/cpp/testModel.cpp @@ -15,13 +15,13 @@ #include #include #include -#include "AutoTime.hpp" -#include "Backend.hpp" -#include "Interpreter.hpp" -#include "MNNDefine.h" -#include "Macro.h" -#include "Tensor.hpp" -#include "TensorUtils.hpp" +#include +#include "core/Backend.hpp" +#include +#include +#include "core/Macro.h" +#include +#include "core/TensorUtils.hpp" #define NONE "\e[0m" #define RED "\e[0;31m" @@ -70,6 +70,9 @@ int main(int argc, const char* argv[]) { // create session MNN::ScheduleConfig config; config.type = type; + MNN::BackendConfig backendConfig; + backendConfig.precision = MNN::BackendConfig::Precision_High; + config.backendConfig = &backendConfig; auto session = net->createSession(config); auto allInput = net->getSessionInputAll(session); diff --git a/tools/cpp/testModelWithDescrisbe.cpp b/tools/cpp/testModelWithDescrisbe.cpp index 80ba0e840..a62ac50f3 100644 --- a/tools/cpp/testModelWithDescrisbe.cpp +++ b/tools/cpp/testModelWithDescrisbe.cpp @@ -9,21 +9,20 @@ #define MNN_OPEN_TIME_TRACE #include -#include #include #include #include #include #include #include -#include "AutoTime.hpp" -#include "Backend.hpp" +#include +#include "core/Backend.hpp" #include "Config.hpp" -#include "Interpreter.hpp" -#include "MNNDefine.h" -#include "Macro.h" -#include "Tensor.hpp" -#include "TensorUtils.hpp" +#include +#include +#include "core/Macro.h" +#include +#include 
"core/TensorUtils.hpp" #define NONE "\e[0m" #define RED "\e[0;31m" diff --git a/tools/cpp/timeProfile.cpp b/tools/cpp/timeProfile.cpp index c4b3d1ce9..01f77d189 100644 --- a/tools/cpp/timeProfile.cpp +++ b/tools/cpp/timeProfile.cpp @@ -11,12 +11,12 @@ #include #include #include -#include "AutoTime.hpp" -#include "Interpreter.hpp" -#include "MNNDefine.h" -#include "Macro.h" +#include +#include +#include +#include "core/Macro.h" #include "Profiler.hpp" -#include "Tensor.hpp" +#include #include "revertMNNModel.hpp" #define MNN_PRINT_TIME_BY_NAME diff --git a/tools/cpp/winogradExample.cpp b/tools/cpp/winogradExample.cpp index 5638a3ab2..456717079 100644 --- a/tools/cpp/winogradExample.cpp +++ b/tools/cpp/winogradExample.cpp @@ -8,7 +8,7 @@ #include #include -#include "MNNDefine.h" +#include #include "math/Matrix.hpp" #include "math/WingoradGenerater.hpp" diff --git a/tools/cpp/winogradGenerateCL.cpp b/tools/cpp/winogradGenerateCL.cpp index 0716e91d3..7f29bef7c 100644 --- a/tools/cpp/winogradGenerateCL.cpp +++ b/tools/cpp/winogradGenerateCL.cpp @@ -10,7 +10,7 @@ #include #include #include -#include "MNNDefine.h" +#include #include "math/Matrix.hpp" #include "math/WingoradGenerater.hpp" diff --git a/tools/cpp/winogradGenerateGLSL.cpp b/tools/cpp/winogradGenerateGLSL.cpp index 88125b65f..43dab4492 100644 --- a/tools/cpp/winogradGenerateGLSL.cpp +++ b/tools/cpp/winogradGenerateGLSL.cpp @@ -9,7 +9,7 @@ #include #include #include -#include "MNNDefine.h" +#include #include "math/Matrix.hpp" #include "math/WingoradGenerater.hpp" diff --git a/tools/evaluation/CMakeLists.txt b/tools/evaluation/CMakeLists.txt index 1a14e4a9a..5d833274d 100644 --- a/tools/evaluation/CMakeLists.txt +++ b/tools/evaluation/CMakeLists.txt @@ -1,22 +1,4 @@ -cmake_minimum_required(VERSION 2.6) - -project(MNNEvaluation) -SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) -if(${CMAKE_SYSTEM_NAME} MATCHES "Android") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie -fPIE -s") -endif() - 
-set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -set(PROJECT_VERSION "0.0.0.1") - -set(MNN_PATH ${MNN_SOURCE_DIR}) - -include_directories(${MNN_PATH}/3rd_party) -include_directories(${MNN_PATH}/3rd_party/imageHelper) -include_directories(${MNN_PATH}/source/core/) -include_directories(${MNN_PATH}/include) - -add_executable(classficationTopkEval.out classficationTopkEval.cpp) -target_link_libraries(classficationTopkEval.out ${MNN_DEPEND}) +IF(MNN_EVALUATION) + add_executable(classficationTopkEval.out ${CMAKE_CURRENT_LIST_DIR}/classficationTopkEval.cpp) + target_link_libraries(classficationTopkEval.out ${MNN_DEPS}) +ENDIF() diff --git a/tools/evaluation/classficationTopkEval.cpp b/tools/evaluation/classficationTopkEval.cpp index 97f90374c..4765dff1e 100644 --- a/tools/evaluation/classficationTopkEval.cpp +++ b/tools/evaluation/classficationTopkEval.cpp @@ -14,8 +14,8 @@ #include #include #include -#include "ImageProcess.hpp" -#include "Interpreter.hpp" +#include +#include #define STB_IMAGE_IMPLEMENTATION #include "rapidjson/document.h" #include "stb_image.h" diff --git a/tools/quantization/CMakeLists.txt b/tools/quantization/CMakeLists.txt index a33faf540..be9622df6 100644 --- a/tools/quantization/CMakeLists.txt +++ b/tools/quantization/CMakeLists.txt @@ -1,26 +1,6 @@ -cmake_minimum_required(VERSION 2.6) - -project(MNNQuan) -SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) -if(${CMAKE_SYSTEM_NAME} MATCHES "Android") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie -fPIE -s") -endif() - -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -set(PROJECT_VERSION "0.0.0.1") - -set(MNN_PATH ${MNN_SOURCE_DIR}) - -include_directories(${MNN_PATH}/3rd_party/imageHelper) -include_directories(${MNN_PATH}/3rd_party) -include_directories(${MNN_PATH}/tools) -include_directories(${MNN_PATH}/source/core/) -include_directories(${MNN_PATH}/include) -file(GLOB BASIC_INCLUDE ${MNN_PATH}/include/*) -file(GLOB SCHEMA ${MNN_PATH}/tools/converter/source/IR/*.h 
${MNN_PATH}/tools/converter/source/IR/flatbuffers/*.h ) -file(GLOB QUANFILES TensorStatistic.hpp TensorStatistic.cpp quantizeWeight.hpp quantizeWeight.cpp calibration.hpp calibration.cpp Helper.hpp Helper.cpp) - -add_executable(quantized.out ${QUANFILES} quantized.cpp) -target_link_libraries(quantized.out ${MNN_DEPEND}) +IF(MNN_BUILD_QUANTOOLS) + file(GLOB QUANFILES ${CMAKE_CURRENT_LIST_DIR}/*.cpp) + add_executable(quantized.out ${QUANFILES}) + target_link_libraries(quantized.out ${MNN_DEPS}) + add_dependencies(quantized.out MNN_SCHEMA_GEN) +ENDIF() diff --git a/tools/quantization/Helper.hpp b/tools/quantization/Helper.hpp index ceea942f8..1fe956ca5 100644 --- a/tools/quantization/Helper.hpp +++ b/tools/quantization/Helper.hpp @@ -8,8 +8,8 @@ #include #include -#include "ImageProcess.hpp" -#include "Tensor.hpp" +#include +#include #include "MNN_generated.h" #include "logkit.h" diff --git a/tools/quantization/TensorStatistic.cpp b/tools/quantization/TensorStatistic.cpp index 5259925e3..a97de4aa5 100644 --- a/tools/quantization/TensorStatistic.cpp +++ b/tools/quantization/TensorStatistic.cpp @@ -10,7 +10,7 @@ #include #include #include -#include "MNNDefine.h" +#include #include "logkit.h" // Given distribution P and Q, KL-Divergence is diff --git a/tools/quantization/TensorStatistic.hpp b/tools/quantization/TensorStatistic.hpp index 0fcf53472..1578a07c8 100644 --- a/tools/quantization/TensorStatistic.hpp +++ b/tools/quantization/TensorStatistic.hpp @@ -8,7 +8,7 @@ #include #include -#include "Tensor.hpp" +#include #include enum GET_THRESHOLD_METHOD { diff --git a/tools/quantization/calibration.cpp b/tools/quantization/calibration.cpp index 4f5d4184e..fa3557478 100644 --- a/tools/quantization/calibration.cpp +++ b/tools/quantization/calibration.cpp @@ -11,15 +11,15 @@ #include #include #include -#include "ImageProcess.hpp" +#include #include "flatbuffers/util.h" #include "logkit.h" #include "quantizeWeight.hpp" #include "rapidjson/document.h" //#define 
MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include #include "Helper.hpp" -#include "TensorUtils.hpp" +#include "core/TensorUtils.hpp" using namespace MNN::CV; diff --git a/tools/quantization/calibration.hpp b/tools/quantization/calibration.hpp index 7418ad307..c3a7ee025 100644 --- a/tools/quantization/calibration.hpp +++ b/tools/quantization/calibration.hpp @@ -11,8 +11,8 @@ #include -#include "ImageProcess.hpp" -#include "Interpreter.hpp" +#include +#include #include "TensorStatistic.hpp" #include "MNN_generated.h" diff --git a/tools/quantization/quantizeWeight.cpp b/tools/quantization/quantizeWeight.cpp index 3325e8627..9ee811569 100644 --- a/tools/quantization/quantizeWeight.cpp +++ b/tools/quantization/quantizeWeight.cpp @@ -11,7 +11,7 @@ #include #include #include "logkit.h" -#include "MNNDefine.h" +#include void InitAlpha(const float* weight, const int weightNum, const int kernelNum, float* alpha, const int quantizeBits) { const int kernelDim = weightNum / kernelNum; diff --git a/tools/train/CMakeLists.txt b/tools/train/CMakeLists.txt index 1c9c8f8b3..acd13e709 100644 --- a/tools/train/CMakeLists.txt +++ b/tools/train/CMakeLists.txt @@ -1,41 +1,45 @@ -cmake_minimum_required(VERSION 2.6) - -project(MNNTrain) -SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../../) -if(${CMAKE_SYSTEM_NAME} MATCHES "Android") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie -fPIE -s") -endif() - -set(CMAKE_C_STANDARD 99) -set(CMAKE_CXX_STANDARD 11) - -set(PROJECT_VERSION "0.0.0.1") - -set(MNN_PATH ../../) - -include_directories(../../3rd_party) -include_directories(${MNN_PATH}/tools) -include_directories(${MNN_PATH}/source/core/) -include_directories(include) -include_directories(${MNN_PATH}/express/include) -include_directories(source/transform) -include_directories(source/train) -file(GLOB BASIC_INCLUDE ${MNN_PATH}/include/*) -file(GLOB TRANSFORMER ./source/transform/*) -file(GLOB OPERATOR ./source/operator/*) - 
-include_directories(source/operator) -add_library(transformer SHARED ${TRANSFORMER} ${BASIC_INCLUDE}) -target_link_libraries(transformer MNN MNN_Express) - -add_executable(transformer.out ./source/exec/transformer.cpp) -target_link_libraries(transformer.out transformer) - -add_executable(train.out ./source/exec/train.cpp ${SCHEMA} ${BASIC_INCLUDE}) -target_link_libraries(train.out ${MNN_DEPEND}) - -add_executable(rawDataTransform.out ./source/exec/rawDataTransform.cpp ${SCHEMA} ${BASIC_INCLUDE}) +include_directories(${CMAKE_CURRENT_LIST_DIR}/source/grad) +include_directories(${CMAKE_CURRENT_LIST_DIR}/source/optimizer) +include_directories(${CMAKE_CURRENT_LIST_DIR}/source/parameters) +include_directories(${CMAKE_CURRENT_LIST_DIR}/source/module) +include_directories(${CMAKE_CURRENT_LIST_DIR}/source/transformer) +include_directories(${CMAKE_CURRENT_LIST_DIR}/source/data) +file(GLOB GRAD ${CMAKE_CURRENT_LIST_DIR}/source/grad/*) +file(GLOB TRANSFORMER ${CMAKE_CURRENT_LIST_DIR}/source/transformer/*) +file(GLOB MODULES ${CMAKE_CURRENT_LIST_DIR}/source/module/*) +file(GLOB PARAMETER ${CMAKE_CURRENT_LIST_DIR}/source/parameters/*) +file(GLOB OPTIMIZER ${CMAKE_CURRENT_LIST_DIR}/source/optimizer/*) +file(GLOB DATALOADER ${CMAKE_CURRENT_LIST_DIR}/source/data/*) + +add_library(MNNTrain SHARED ${GRAD} ${BASIC_INCLUDE} ${PARAMETER} ${OPTIMIZER} ${MODULES} ${DATALOADER} ${TRANSFORMER}) +target_link_libraries(MNNTrain ${MNN_DEPS}) +IF(CMAKE_BUILD_TYPE MATCHES Release) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") +ENDIF() + +add_executable(transformer.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/transformerExecution.cpp) +target_link_libraries(transformer.out MNNTrain) + +add_executable(train.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/train.cpp ${SCHEMA} ${BASIC_INCLUDE}) +target_link_libraries(train.out MNN) + + +add_executable(rawDataTransform.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/rawDataTransform.cpp ${SCHEMA} ${BASIC_INCLUDE}) include_directories(../../3rd_party/imageHelper/) 
-add_executable(dataTransformer.out ./source/exec/dataTransformer.cpp ${SCHEMA} ${BASIC_INCLUDE}) +add_executable(dataTransformer.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/dataTransformer.cpp ${SCHEMA} ${BASIC_INCLUDE}) target_link_libraries(dataTransformer.out MNN) + +option(MNN_USE_OPENCV "Use opencv" OFF) + +file(GLOB DEMOSOURCE ${CMAKE_CURRENT_LIST_DIR}/source/demo/*) +add_executable(runTrainDemo.out ${DEMOSOURCE} ${BASIC_INCLUDE}) +target_link_libraries(runTrainDemo.out MNNTrain) +if (MNN_USE_OPENCV) + set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_ORIGIN}) + set(CMAKE_C_FLAGS ${CMAKE_C_FLAGS_ORIGIN}) + find_package(OpenCV REQUIRED) + include_directories(${OpenCV_INCLUDE_DIRS}) + add_definitions(-D MNN_USE_OPENCV) + target_link_libraries(runTrainDemo.out ${OpenCV_LIBS}) +endif() diff --git a/tools/train/source/data/BlockingQueue.hpp b/tools/train/source/data/BlockingQueue.hpp new file mode 100644 index 000000000..232394a22 --- /dev/null +++ b/tools/train/source/data/BlockingQueue.hpp @@ -0,0 +1,76 @@ +// +// BlockingQueue.hpp +// MNN +// +// Created by MNN on 2019/11/19. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef BlockingQueue_hpp +#define BlockingQueue_hpp +#include +#include +#include +#include + +namespace MNN { +namespace Train { + +template +class BlockingQueue { +public: + BlockingQueue() = default; + BlockingQueue(size_t maxSize) : mMaxSize(maxSize) { + } + + bool isFull() { + return mQueue.size() == mMaxSize; + } + + bool isEmpty() { + return mQueue.empty(); + } + + void push(T value) { + { + std::unique_lock lock(mMutex); + mCondVar.wait(lock, [&] { return !isFull(); }); + MNN_ASSERT(!isFull()); + mQueue.push(std::move(value)); + lock.unlock(); + } + mCondVar.notify_one(); + } + + T pop() { + std::unique_lock lock(mMutex); + mCondVar.wait(lock, [&] { return !isEmpty(); }); + MNN_ASSERT(!isEmpty()); + T value = mQueue.front(); + mQueue.pop(); + mCondVar.notify_one(); + lock.unlock(); + + return std::move(value); + } + + size_t clear() { + std::lock_guard lock(mMutex); + const auto size = mQueue.size(); + while (!isEmpty()) { + mQueue.pop(); + } + return size; + } + +private: + size_t mMaxSize; + std::queue mQueue; + std::mutex mMutex; + std::condition_variable_any mCondVar; +}; + +} // namespace Train +} // namespace MNN + +#endif // BlockingQueue_hpp diff --git a/tools/train/source/data/DataLoader.cpp b/tools/train/source/data/DataLoader.cpp new file mode 100644 index 000000000..132d710a6 --- /dev/null +++ b/tools/train/source/data/DataLoader.cpp @@ -0,0 +1,129 @@ +// +// DataLoader.cpp +// MNN +// +// Created by MNN on 2019/11/15. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "DataLoader.hpp" + +namespace MNN { +namespace Train { + +std::vector DataLoader::next() { + if (mConfig->numWorkers == 0) { + auto batchIndices = mSampler->next(mConfig->batchSize); + MNN_ASSERT(batchIndices.size() != 0); // the sampler is exhausted, should reset the data loader + if (mConfig->dropLast && batchIndices.size() < mConfig->batchSize) { + MNN_ASSERT(false); // the sampler is exhausted + } + auto batch = mDataset->getBatch(batchIndices); + return batch; + } else { + auto batch = mDataQueue->pop(); + prefetch(1); + return batch; + } +} + +void DataLoader::prefetch(size_t nJobs) { + MNN_ASSERT(mJobs != nullptr); + for (int i = 0; i < nJobs; i++) { + auto batchIndices = mSampler->next(mConfig->batchSize); + Job j; + j.job = batchIndices; + if (batchIndices.size() != 0) { + if (mConfig->dropLast && batchIndices.size() < mConfig->batchSize) { + // drop the job + } else { + mJobs->push(std::move(j)); // the job may be empty when sampler is exhausted + } + } + } +} + +void DataLoader::workerThread() { + while (true) { + auto currentJob = mJobs->pop(); + if (currentJob.quit) { + break; + } + // make sure there are no empty jobs, so that there are no empty batch + MNN_ASSERT(currentJob.job.size() != 0); + auto batch = mDataset->getBatch(currentJob.job); + mDataQueue->push(std::move(batch)); + } +} + +void DataLoader::join() { + for (int i = 0; i < mConfig->numWorkers; i++) { + Job j; + j.quit = true; + mJobs->push(std::move(j)); + } + for (auto& worker : mWorkers) { + worker.join(); + } +} + +void DataLoader::reset() { + clean(); + + if (mConfig->numWorkers > 0) { + prefetch(mConfig->numJobs); + for (int i = 0; i < mConfig->numWorkers; i++) { + mWorkers.emplace_back([&] { workerThread(); }); + } + } +} + +void DataLoader::clean() { + if (mJobs != nullptr) { + join(); + mWorkers.clear(); + mJobs->clear(); + mDataQueue->clear(); + } + // should reset sampler before prefetch + 
mSampler->reset(mSampler->size()); +} + +std::shared_ptr DataLoader::makeDataLoader(std::shared_ptr dataset, + std::vector> transforms, + const int batchSize, const bool shuffle, const int numWorkers) { + if (transforms.size() > 0) { + std::shared_ptr transDataset = nullptr; + bool flag = true; + for (int i = 0; i < transforms.size(); i++) { + if (transforms[i] != nullptr) { + if (flag) { + transDataset = std::make_shared(dataset, transforms[i]); + flag = false; + } else { + transDataset = std::make_shared(transDataset, transforms[i]); + } + } + } + + if (transDataset != nullptr) { + auto sampler = std::make_shared(transDataset->size(), shuffle); + auto config = std::make_shared(batchSize, numWorkers); + + return std::make_shared(transDataset, sampler, config); + } else { + auto sampler = std::make_shared(dataset->size(), shuffle); + auto config = std::make_shared(batchSize, numWorkers); + + return std::make_shared(dataset, sampler, config); + } + } else { + auto sampler = std::make_shared(dataset->size(), shuffle); + auto config = std::make_shared(batchSize, numWorkers); + + return std::make_shared(dataset, sampler, config); + } +} + +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/data/DataLoader.hpp b/tools/train/source/data/DataLoader.hpp new file mode 100644 index 000000000..4da67f06d --- /dev/null +++ b/tools/train/source/data/DataLoader.hpp @@ -0,0 +1,83 @@ +// +// DataLoader.hpp +// MNN +// +// Created by MNN on 2019/11/15. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef DataLoader_hpp +#define DataLoader_hpp + +#include +#include +#include +#include "BlockingQueue.hpp" +#include "DataLoaderConfig.hpp" +#include "Dataset.hpp" +#include "Example.hpp" +#include "RandomSampler.hpp" +#include "Sampler.hpp" +#include "Transform.hpp" +#include "TransformDataset.hpp" +#include "StackTransform.hpp" +#include "LambdaTransform.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC DataLoader { +public: + DataLoader(std::shared_ptr dataset, std::shared_ptr sampler, + std::shared_ptr config) { + mDataset = dataset; + mSampler = sampler; + mConfig = config; + if (mConfig->numJobs > 0) { + mJobs = std::make_shared>(mConfig->numJobs); + mDataQueue = std::make_shared>>(mConfig->numJobs); + prefetch(mConfig->numJobs); + for (int i = 0; i < mConfig->numWorkers; i++) { + mWorkers.emplace_back([&] { workerThread(); }); + } + } + } + + virtual ~DataLoader() { + join(); + }; + + void prefetch(size_t nJobs); + + void workerThread(); + + void join(); + + std::vector next(); + + void reset(); + + void clean(); + + static std::shared_ptr makeDataLoader(std::shared_ptr dataset, + std::vector> transforms, + const int batchSize, const bool shuffle = true, + const int numWorkers = 0); + +private: + struct Job { + std::vector job; + bool quit = false; + }; + std::shared_ptr mDataset; + std::shared_ptr mSampler; + std::shared_ptr mConfig; + std::shared_ptr> mJobs; + std::shared_ptr>> mDataQueue; + std::vector mWorkers; +}; + +} // namespace Train +} // namespace MNN + +#endif // DataLoader_hpp \ No newline at end of file diff --git a/tools/train/source/data/DataLoaderConfig.hpp b/tools/train/source/data/DataLoaderConfig.hpp new file mode 100644 index 000000000..4c4080198 --- /dev/null +++ b/tools/train/source/data/DataLoaderConfig.hpp @@ -0,0 +1,30 @@ +// +// DataLoaderConfig.hpp +// MNN +// +// Created by MNN on 2019/11/15. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef DataLoaderConfig_hpp +#define DataLoaderConfig_hpp +#include +namespace MNN { +namespace Train { + +class MNN_PUBLIC DataLoaderConfig { +public: + DataLoaderConfig() = default; + DataLoaderConfig(size_t batchSize, size_t nWorkers = 0) : batchSize(batchSize), numWorkers(nWorkers) { + } + + size_t batchSize = 1; + size_t numWorkers = 0; + size_t numJobs = numWorkers * 2; + bool dropLast = true; +}; + +} // namespace Train +} // namespace MNN + +#endif // DataLoaderConfig diff --git a/tools/train/source/data/Dataset.hpp b/tools/train/source/data/Dataset.hpp new file mode 100644 index 000000000..d1635c11f --- /dev/null +++ b/tools/train/source/data/Dataset.hpp @@ -0,0 +1,49 @@ +// +// Dataset.hpp +// MNN +// +// Created by MNN on 2019/11/14. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Dataset_hpp +#define Dataset_hpp + +#include +#include +#include "Example.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC BatchDataset { +public: + virtual ~BatchDataset() = default; + + // get batch using given indices + virtual std::vector getBatch(std::vector indices) = 0; + + // size of the dataset + virtual size_t size() = 0; +}; + +class MNN_PUBLIC Dataset : public BatchDataset { +public: + // return a specific example with given index + virtual Example get(size_t index) = 0; + + std::vector getBatch(std::vector indices) { + std::vector batch; + batch.reserve(indices.size()); + for (const auto i : indices) { + batch.emplace_back(get(i)); + } + MNN_ASSERT(batch.size() != 0); + return batch; + } +}; + +} // namespace Train +} // namespace MNN + +#endif /* Dataset_hpp */ diff --git a/tools/train/source/data/Example.hpp b/tools/train/source/data/Example.hpp new file mode 100644 index 000000000..44303a3ce --- /dev/null +++ b/tools/train/source/data/Example.hpp @@ -0,0 +1,40 @@ +// +// Example.hpp +// MNN +// +// Created by MNN on 2019/11/14. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Example_hpp +#define Example_hpp + +#include +#include +#include + +using namespace MNN::Express; + +namespace MNN { +namespace Train { + +class MNN_PUBLIC Example { +public: + std::vector data, target; + + Example() = default; + Example(std::vector data, std::vector label) : data(std::move(data)), target(std::move(label)) { + } +}; + +// class MNN_PUBLIC TensorExample { +// public: +// VARP data; +// +// TensorExample(VARP data) : data(std::move(data)) {} +// }; + +} // namespace Train +} // namespace MNN + +#endif /* Example_hpp */ diff --git a/tools/train/source/data/LambdaTransform.hpp b/tools/train/source/data/LambdaTransform.hpp new file mode 100644 index 000000000..366f31819 --- /dev/null +++ b/tools/train/source/data/LambdaTransform.hpp @@ -0,0 +1,50 @@ +// +// LambdaTransform.hpp +// MNN +// +// Created by MNN on 2019/11/14. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef LambdaTransform_hpp +#define LambdaTransform_hpp + +#include +#include "Example.hpp" +#include "Transform.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC BatchLambdaTransform : public BatchTransform { +public: + explicit BatchLambdaTransform(std::function(std::vector)> f) { + func_ = f; + } + + std::vector transformBatch(std::vector batch) override { + return func_(std::move(batch)); + } + +private: + std::function(std::vector)> func_; +}; + +class MNN_PUBLIC LambdaTransform : public Transform { +public: + explicit LambdaTransform(std::function f) { + mFunc = f; + } + + Example transformExample(Example example) override { + return mFunc(std::move(example)); + } + +private: + std::function mFunc; +}; + +} // namespace Train +} // namespace MNN + +#endif // LambdaTransform_hpp \ No newline at end of file diff --git a/tools/train/source/data/MnistDataset.cpp b/tools/train/source/data/MnistDataset.cpp new file mode 100644 index 000000000..9f7e1ef0a --- /dev/null +++ 
b/tools/train/source/data/MnistDataset.cpp @@ -0,0 +1,141 @@ +// +// MnistDataset.cpp +// MNN +// +// Created by MNN on 2019/11/15. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "MnistDataset.hpp" +#include +#include +#include + +// referenced from pytorch C++ frontend mnist.cpp +// https://github.com/pytorch/pytorch/blob/master/torch/csrc/api/src/data/datasets/mnist.cpp +const int32_t kTrainSize = 60000; +const int32_t kTestSize = 10000; +const int32_t kImageMagicNumber = 2051; +const int32_t kTargetMagicNumber = 2049; +const int32_t kImageRows = 28; +const int32_t kImageColumns = 28; +const char* kTrainImagesFilename = "train-images-idx3-ubyte"; +const char* kTrainTargetsFilename = "train-labels-idx1-ubyte"; +const char* kTestImagesFilename = "t10k-images-idx3-ubyte"; +const char* kTestTargetsFilename = "t10k-labels-idx1-ubyte"; + +bool check_is_little_endian() { + const uint32_t word = 1; + return reinterpret_cast(&word)[0] == 1; +} + +constexpr uint32_t flip_endianness(uint32_t value) { + return ((value & 0xffu) << 24u) | ((value & 0xff00u) << 8u) | ((value & 0xff0000u) >> 8u) | + ((value & 0xff000000u) >> 24u); +} + +uint32_t read_int32(std::ifstream& stream) { + static const bool is_little_endian = check_is_little_endian(); + uint32_t value; + stream.read(reinterpret_cast(&value), sizeof value); + return is_little_endian ? flip_endianness(value) : value; +} + +uint32_t expect_int32(std::ifstream& stream, uint32_t expected) { + const auto value = read_int32(stream); + // clang-format off + MNN_ASSERT(value == expected); + // clang-format on + return value; +} + +std::string join_paths(std::string head, const std::string& tail) { + if (head.back() != '/') { + head.push_back('/'); + } + head += tail; + return head; +} + +VARP read_images(const std::string& root, bool train) { + const auto path = join_paths(root, train ? 
kTrainImagesFilename : kTestImagesFilename); + std::ifstream images(path, std::ios::binary); + if (!images.is_open()) { + MNN_PRINT("Error opening images file at %s", path.c_str()); + MNN_ASSERT(false); + } + + const auto count = train ? kTrainSize : kTestSize; + + // From http://yann.lecun.com/exdb/mnist/ + expect_int32(images, kImageMagicNumber); + expect_int32(images, count); + expect_int32(images, kImageRows); + expect_int32(images, kImageColumns); + + std::vector dims = {count, 1, kImageRows, kImageColumns}; + int length = 1; + for (int i = 0; i < dims.size(); ++i) { + length *= dims[i]; + } + auto data = _Input(dims, NCHW, halide_type_of()); + images.read(reinterpret_cast(data->writeMap()), length); + return data; +} + +VARP read_targets(const std::string& root, bool train) { + const auto path = join_paths(root, train ? kTrainTargetsFilename : kTestTargetsFilename); + std::ifstream targets(path, std::ios::binary); + if (!targets.is_open()) { + MNN_PRINT("Error opening images file at %s", path.c_str()); + MNN_ASSERT(false); + } + + const auto count = train ? 
kTrainSize : kTestSize; + + expect_int32(targets, kTargetMagicNumber); + expect_int32(targets, count); + + std::vector dims = {count}; + int length = 1; + for (int i = 0; i < dims.size(); ++i) { + length *= dims[i]; + } + auto labels = _Input(dims, NCHW, halide_type_of()); + targets.read(reinterpret_cast(labels->writeMap()), length); + + return labels; +} + +MnistDataset::MnistDataset(const std::string root, Mode mode) + : mImages(read_images(root, mode == Mode::TRAIN)), mLabels(read_targets(root, mode == Mode::TRAIN)) { + mImagePtr = mImages->readMap(); + mLabelsPtr = mLabels->readMap(); +} + +Example MnistDataset::get(size_t index) { + auto data = _Input({1, kImageRows, kImageColumns}, NCHW, halide_type_of()); + auto label = _Input({}, NCHW, halide_type_of()); + + auto dataPtr = mImagePtr + index * kImageRows * kImageColumns; + ::memcpy(data->writeMap(), dataPtr, kImageRows * kImageColumns); + + auto labelPtr = mLabelsPtr + index; + ::memcpy(label->writeMap(), labelPtr, 1); + + auto returnIndex = _Const(index); + // return the index for test + return {{data, returnIndex}, {label}}; +} + +size_t MnistDataset::size() { + return mImages->getInfo()->dim[0]; +} + +const VARP MnistDataset::images() { + return mImages; +} + +const VARP MnistDataset::labels() { + return mLabels; +} diff --git a/tools/train/source/data/MnistDataset.hpp b/tools/train/source/data/MnistDataset.hpp new file mode 100644 index 000000000..ba7e552f2 --- /dev/null +++ b/tools/train/source/data/MnistDataset.hpp @@ -0,0 +1,39 @@ +// +// MnistDataset.hpp +// MNN +// +// Created by MNN on 2019/11/15. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MnistDataset_hpp +#define MnistDataset_hpp + +#include +#include "Dataset.hpp" +#include "Example.hpp" + +using namespace MNN; +using namespace MNN::Train; + +class MNN_PUBLIC MnistDataset : public Dataset { +public: + enum Mode { TRAIN, TEST }; + + explicit MnistDataset(const std::string path, Mode mode = Mode::TRAIN); + + Example get(size_t index) override; + + size_t size() override; + + const VARP images(); + + const VARP labels(); + +private: + VARP mImages, mLabels; + const uint8_t* mImagePtr = nullptr; + const uint8_t* mLabelsPtr = nullptr; +}; + +#endif // MnistDataset_hpp diff --git a/tools/train/source/data/RandomSampler.cpp b/tools/train/source/data/RandomSampler.cpp new file mode 100644 index 000000000..fe8ac24ba --- /dev/null +++ b/tools/train/source/data/RandomSampler.cpp @@ -0,0 +1,74 @@ +// +// RandomSampler.cpp +// MNN +// +// Created by MNN on 2019/11/14. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "RandomSampler.hpp" +#include +#include +#include +#include "Distributions.hpp" +#include "RandomGenerator.hpp" + +namespace MNN { +namespace Train { + +RandomSampler::RandomSampler(size_t size, bool shuffle) { + mIndices.reserve(size); + for (int i = 0; i < size; i++) { + mIndices.emplace_back(i); + } + + mShuffle = shuffle; + if (mShuffle) { + std::shuffle(mIndices.begin(), mIndices.end(), RandomGenerator::generator()); + } +} + +void RandomSampler::reset(size_t size) { + mIndices.clear(); + mIndices.reserve(size); + for (int i = 0; i < size; i++) { + mIndices.emplace_back(i); + } + + if (mShuffle) { + std::shuffle(mIndices.begin(), mIndices.end(), RandomGenerator::generator()); + } + + mIndex = 0; +} + +size_t RandomSampler::size() { + return mIndices.size(); +} + +const std::vector RandomSampler::indices() { + return mIndices; +} + +size_t RandomSampler::index() { + return mIndex; +} + +std::vector RandomSampler::next(size_t batchSize) { + MNN_ASSERT(mIndex <= 
mIndices.size()); + + auto remainIndices = mIndices.size() - mIndex; + if (remainIndices == 0) { + return {}; + } + + std::vector batchIndex(std::min(batchSize, remainIndices)); + std::copy(mIndices.begin() + mIndex, mIndices.begin() + mIndex + batchIndex.size(), batchIndex.begin()); + + mIndex += batchIndex.size(); + + return batchIndex; +} + +} // namespace Train +} // namespace MNN \ No newline at end of file diff --git a/tools/train/source/data/RandomSampler.hpp b/tools/train/source/data/RandomSampler.hpp new file mode 100644 index 000000000..75d550888 --- /dev/null +++ b/tools/train/source/data/RandomSampler.hpp @@ -0,0 +1,41 @@ +// +// RandomSampler.hpp +// MNN +// +// Created by MNN on 2019/11/14. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef RandomSampler_hpp +#define RandomSampler_hpp + +#include +#include "Sampler.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC RandomSampler : public Sampler { +public: + explicit RandomSampler(size_t size, bool shuffle = true); + + void reset(size_t size) override; + + size_t size() override; + + const std::vector indices(); + + size_t index(); + + std::vector next(size_t batchSize) override; + +private: + std::vector mIndices; + size_t mIndex = 0; + bool mShuffle; +}; + +} // namespace Train +} // namespace MNN + +#endif // RandomSampler diff --git a/tools/train/source/data/Sampler.hpp b/tools/train/source/data/Sampler.hpp new file mode 100644 index 000000000..52875d597 --- /dev/null +++ b/tools/train/source/data/Sampler.hpp @@ -0,0 +1,32 @@ +// +// Sampler.hpp +// MNN +// +// Created by MNN on 2019/11/14. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Sampler_hpp +#define Sampler_hpp + +#include +#include + +namespace MNN { +namespace Train { + +class MNN_PUBLIC Sampler { +public: + virtual ~Sampler() = default; + + virtual void reset(size_t size) = 0; + + virtual size_t size() = 0; + + virtual std::vector next(size_t batchSize) = 0; +}; + +} // namespace Train +} // namespace MNN + +#endif // Sampler diff --git a/tools/train/source/data/StackTransform.hpp b/tools/train/source/data/StackTransform.hpp new file mode 100644 index 000000000..71605959a --- /dev/null +++ b/tools/train/source/data/StackTransform.hpp @@ -0,0 +1,49 @@ +// +// StackTransform.hpp +// MNN +// +// Created by MNN on 2019/11/20. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef StackTransform_hpp +#define StackTransform_hpp + +#include +#include "Transform.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC StackTransform : public BatchTransform { + std::vector transformBatch(std::vector batch) override { + std::vector> batchData(batch[0].data.size()); + std::vector> batchTarget(batch[0].target.size()); + for (int i = 0; i < batch.size(); i++) { + for (int j = 0; j < batchData.size(); j++) { + batchData[j].emplace_back(batch[i].data[j]); + } + } + + for (int i = 0; i < batch.size(); i++) { + for (int j = 0; j < batchTarget.size(); j++) { + batchTarget[j].emplace_back(batch[i].target[j]); + } + } + + Example example; + for (int i = 0; i < batchData.size(); i++) { + example.data.emplace_back(_Stack(batchData[i], 0)); + } + for (int i = 0; i < batchTarget.size(); i++) { + example.target.emplace_back(_Stack(batchTarget[i], 0)); + } + + return {example}; + } +}; + +} // namespace Train +} // namespace MNN + +#endif // StackTransform_hpp diff --git a/tools/train/source/data/Transform.hpp b/tools/train/source/data/Transform.hpp new file mode 100644 index 000000000..ad8a3626e --- /dev/null +++ b/tools/train/source/data/Transform.hpp @@ -0,0 +1,42 @@ +// +// 
Transform.hpp +// MNN +// +// Created by MNN on 2019/11/14. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Transform_hpp +#define Transform_hpp + +#include +#include "Example.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC BatchTransform { +public: + virtual ~BatchTransform() = default; + + virtual std::vector transformBatch(std::vector batch) = 0; +}; + +class MNN_PUBLIC Transform : public BatchTransform { +public: + virtual Example transformExample(Example example) = 0; + + std::vector transformBatch(std::vector batch) { + std::vector outputBatch; + outputBatch.reserve(batch.size()); + for (auto& example : batch) { + outputBatch.emplace_back(transformExample(std::move(example))); + } + return outputBatch; + } +}; + +} // namespace Train +} // namespace MNN + +#endif // Transform_hpp diff --git a/tools/train/source/data/TransformDataset.hpp b/tools/train/source/data/TransformDataset.hpp new file mode 100644 index 000000000..b1d7ca154 --- /dev/null +++ b/tools/train/source/data/TransformDataset.hpp @@ -0,0 +1,75 @@ +// +// TransformDataset.hpp +// MNN +// +// Created by MNN on 2019/11/15. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef TransformDataset_hpp +#define TransformDataset_hpp + +#include +#include "Dataset.hpp" +#include "Example.hpp" +#include "Transform.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC BatchTransformDataset : public BatchDataset { +public: + BatchTransformDataset(std::shared_ptr dataset, std::shared_ptr transform) { + MNN_ASSERT(dataset != nullptr); + mDataset = dataset; + mTransform = transform; + } + + std::vector getBatch(std::vector indices) override { + auto batch = mDataset->getBatch(indices); + if (mTransform != nullptr) { + batch = mTransform->transformBatch(std::move(batch)); + } + + return batch; + } + + size_t size() override { + return mDataset->size(); + } + +private: + std::shared_ptr mDataset; + std::shared_ptr mTransform; +}; + +class MNN_PUBLIC TransformDataset : public Dataset { +public: + TransformDataset(std::shared_ptr dataset, std::shared_ptr transform) { + MNN_ASSERT(dataset != nullptr); + mDataset = dataset; + mTransform = transform; + } + + Example get(size_t index) override { + auto example = mDataset->get(index); + if (mTransform != nullptr) { + example = mTransform->transformExample(std::move(example)); + } + + return example; + } + + size_t size() override { + return mDataset->size(); + } + +private: + std::shared_ptr mDataset; + std::shared_ptr mTransform; +}; + +} // namespace Train +} // namespace MNN + +#endif diff --git a/tools/train/source/demo/DemoUnit.cpp b/tools/train/source/demo/DemoUnit.cpp new file mode 100644 index 000000000..003e5649b --- /dev/null +++ b/tools/train/source/demo/DemoUnit.cpp @@ -0,0 +1,39 @@ +// +// DemoUnit.cpp +// MNN +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "DemoUnit.hpp" +#include + +DemoUnitSet* DemoUnitSet::gInstance = NULL; + +DemoUnitSet* DemoUnitSet::get() { + if (gInstance == NULL) + gInstance = new DemoUnitSet; + return gInstance; +} + +DemoUnitSet::~DemoUnitSet() { + for (auto iter : mUnit) { + delete iter.second; + } +} + +void DemoUnitSet::add(DemoUnit* test, const char* name) { + test->name = name; + mUnit.insert(std::make_pair(name, test)); +} + +DemoUnit* DemoUnitSet::search(const char* key) { + std::string prefix = key; + std::vector wrongs; + auto iter = mUnit.find(prefix); + if (iter == mUnit.end()) { + return nullptr; + } + return iter->second; +} diff --git a/tools/train/source/demo/DemoUnit.hpp b/tools/train/source/demo/DemoUnit.hpp new file mode 100644 index 000000000..59f231e24 --- /dev/null +++ b/tools/train/source/demo/DemoUnit.hpp @@ -0,0 +1,106 @@ +// +// DemoUnit.hpp +// MNN +// +// Created by MNN on 2019/11/27. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef DemoUnit_hpp +#define DemoUnit_hpp + +#include +#include +#include +#include +#include + +/** test case */ +class DemoUnit { + friend class DemoUnitSet; + +public: + /** + * @brief deinitializer + */ + virtual ~DemoUnit() = default; + /** + * @brief run test case + */ + virtual int run(int argc, const char* argv[]) = 0; + +private: + /** case name */ + std::string name; +}; + +/** test suite */ +class DemoUnitSet { +public: + /** + * @brief deinitializer + */ + ~DemoUnitSet(); + /** + * @brief get shared instance + * @return shared instance + */ + static DemoUnitSet* get(); + +public: + /** + * @brief register runable test case + * @param test test case + * @param name case name + */ + void add(DemoUnit* test, const char* name); + + /** + * @brief run registered test case that matches in name + * @param name case name + */ + DemoUnit* search(const char* name); + + const std::map& list() const { + return mUnit; + } + +private: + DemoUnitSet(){}; + /** 
get shared instance */ + static DemoUnitSet* gInstance; + /** registered test cases */ + std::map mUnit; +}; + +/** + static register for test case + */ +template +class DemoUnitRegister { +public: + /** + * @brief initializer. register test case to suite. + * @param name test case name + */ + DemoUnitRegister(const char* name) { + DemoUnitSet::get()->add(new Case, name); + } + /** + * @brief deinitializer + */ + ~DemoUnitRegister() { + } +}; + +#define DemoUnitSetRegister(Case, name) static DemoUnitRegister __r##Case(name) +#define MNNTEST_ASSERT(x) \ + { \ + int res = (x); \ + if (!res) { \ + MNN_ERROR("Error for %s, %d\n", __func__, __LINE__); \ + return false; \ + } \ + } + +#endif diff --git a/tools/train/source/demo/dataLoaderDemo.cpp b/tools/train/source/demo/dataLoaderDemo.cpp new file mode 100644 index 000000000..6e8abc48d --- /dev/null +++ b/tools/train/source/demo/dataLoaderDemo.cpp @@ -0,0 +1,112 @@ +// +// dataLoaderDemo.cpp +// MNN +// +// Created by MNN on 2019/11/20. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "DataLoader.hpp" +#include "DataLoaderConfig.hpp" +#include "DemoUnit.hpp" +#include "LambdaTransform.hpp" +#include "MNN_generated.h" +#include "MnistDataset.hpp" +#include "RandomSampler.hpp" +#include "StackTransform.hpp" +#include "TransformDataset.hpp" + +#ifdef MNN_USE_OPENCV +#include // use opencv to show pictures +using namespace cv; +#endif + +using namespace std; + +/* + * this is an demo for how to use the DataLoader + */ + +class DataLoaderDemo : public DemoUnit { +public: + // this function is an example to use the lambda transform + // here we use lambda transform to normalize data from 0~255 to 0~1 + static Example func(Example example) { + // // an easier way to do this + auto cast = _Cast(example.data[0], halide_type_of()); + example.data[0] = _Multiply(cast, _Const(1.0f / 255.0f)); + return example; + } + virtual int run(int argc, const char* argv[]) override { + if (argc != 2) { + cout << 
"usage: ./runTrainDemo.out DataLoaderDemo /path/to/unzipped/mnist/data/" << endl; + return 0; + } + + std::string root = argv[1]; + + // train data loader + const size_t trainDatasetSize = 60000; + auto trainDataset = std::make_shared(root, MnistDataset::Mode::TRAIN); + + // the lambda transform for one example, we also can do it in batch + auto trainTransform = std::make_shared(func); + + // // the stack transform, stack [1, 28, 28] to [n, 1, 28, 28] + // auto trainTransform = std::make_shared(); + + const int trainBatchSize = 7; + const int trainNumWorkers = 4; + + auto trainDataLoader = + DataLoader::makeDataLoader(trainDataset, {trainTransform}, trainBatchSize, true, trainNumWorkers); + + // test data loader + const size_t testDatasetSize = 10000; + auto testDataset = std::make_shared(root, MnistDataset::Mode::TEST); + + // the lambda transform for one example, we also can do it in batch + auto testTransform = std::make_shared(func); + + // // the stack transform, stack [1, 28, 28] to [n, 1, 28, 28] + // auto testTransform = std::make_shared(); + + const int testBatchSize = 3; + const int testNumWorkers = 4; + + auto testDataLoader = + DataLoader::makeDataLoader(testDataset, {testTransform}, testBatchSize, false, testNumWorkers); + + const size_t iterations = testDatasetSize / testBatchSize; + + for (int i = 0; i < iterations; i++) { + auto trainData = trainDataLoader->next(); + auto testData = testDataLoader->next(); + + auto data = trainData[0].data[0]->readMap(); + auto label = trainData[0].target[0]->readMap(); + + cout << "index: " << i << " train label: " << int(label[0]) << endl; + + // // only show the first picture in the batch + // imshow("train", Mat(28, 28, CV_32FC1, (void*)data)); + + data = testData[0].data[0]->readMap(); + label = testData[0].target[0]->readMap(); + + cout << "index: " << i << " test label: " << int(label[0]) << endl; + + // // only show the first picture in the batch + // imshow("test", Mat(28, 28, CV_32FC1, (void*)data)); + // 
waitKey(-1); + } + // this will reset the sampler's internal state, not necessary here + trainDataLoader->reset(); + + // this will reset the sampler's internal state, necessary here, because the test dataset is exhausted + testDataLoader->reset(); + return 0; + } +}; +DemoUnitSetRegister(DataLoaderDemo, "DataLoaderDemo"); diff --git a/tools/train/source/demo/dataLoaderTest.cpp b/tools/train/source/demo/dataLoaderTest.cpp new file mode 100644 index 000000000..b1fadf00d --- /dev/null +++ b/tools/train/source/demo/dataLoaderTest.cpp @@ -0,0 +1,298 @@ +// +// dataLoaderTest.cpp +// MNN +// +// Created by MNN on 2019/11/20. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include +#include "DataLoader.hpp" +#include "DataLoaderConfig.hpp" +#include "DemoUnit.hpp" +#include "LambdaTransform.hpp" +#include "MnistDataset.hpp" +#include "RandomSampler.hpp" +#include "StackTransform.hpp" +#include "TransformDataset.hpp" + +using namespace std; + +class DataLoaderTest : public DemoUnit { +public: + // this function is an example to use the lambda transform + // here we use lambda transform to normalize data from 0~255 to 0~1 + static Example func(Example example) { + // an easier way to do this + auto cast = _Cast(example.data[0], halide_type_of()); + return {{_Multiply(cast, _Const(1.0f / 255.0f)), example.data[1]}, {example.target}}; + } + + virtual int run(int argc, const char* argv[]) override { + if (argc != 2) { + cout << "usage: ./runTrainDemo.out DataLoaderTest /path/to/unzipped/mnist/data/" << endl; + return 0; + } + + const int testCount = 6; + int passedTestCount = 0; + + std::string root = argv[1]; + + // train data loader + const size_t trainDatasetSize = 60000; + auto trainDataset = std::make_shared(root, MnistDataset::Mode::TRAIN); + + auto trainSampler = std::make_shared(trainDataset->size()); + + const size_t trainBatchSize = 7; + const size_t trainNumWorkers = 4; + auto trainConfig = 
std::make_shared(trainBatchSize, trainNumWorkers); + + DataLoader trainDataLoader(trainDataset, trainSampler, trainConfig); + + auto images = trainDataset->images(); + auto labels = trainDataset->labels(); + const int32_t kImageRows = 28; + const int32_t kImageColumns = 28; + + const size_t iterations = trainDatasetSize / trainBatchSize; + + auto samplerIndices = trainSampler->indices(); + sort(samplerIndices.begin(), samplerIndices.end()); + for (int i = 0; i < samplerIndices.size(); i++) { + MNN_ASSERT(samplerIndices[i] == i); + } + + for (int i = 0; i < iterations; i++) { + auto trainData = trainDataLoader.next(); + + for (int j = 0; j < trainData.size(); j++) { + auto index = int(trainData[j].data[1]->readMap()[0]); + + auto data = trainData[j].data[0]->readMap(); + auto label = trainData[j].target[0]->readMap(); + + auto trueData = images->readMap() + kImageRows * kImageColumns * index; + auto trueLabel = labels->readMap() + index; + + for (int k = 0; k < kImageRows * kImageColumns; k++) { + MNN_ASSERT(data[k] == trueData[k]); + } + MNN_ASSERT(label[0] == trueLabel[0]); + } + } + trainDataLoader.clean(); + + passedTestCount++; + cout << "[" << passedTestCount << " / " << testCount << "] passed." 
<< endl; + + // the lambda transform for one example, we also can do it in batch + auto trainLambdaTransform = std::make_shared(func); + auto trainLambdaTransDataset = std::make_shared(trainDataset, trainLambdaTransform); + + DataLoader trainLambdaDataLoader(trainLambdaTransDataset, trainSampler, trainConfig); + + samplerIndices = trainSampler->indices(); + sort(samplerIndices.begin(), samplerIndices.end()); + for (int i = 0; i < samplerIndices.size(); i++) { + MNN_ASSERT(samplerIndices[i] == i); + } + std::vector tempIndex; + for (int i = 0; i < iterations; i++) { + auto trainData = trainLambdaDataLoader.next(); + + for (int j = 0; j < trainData.size(); j++) { + auto index = int(trainData[j].data[1]->readMap()[0]); + tempIndex.emplace_back(index); + + auto data = trainData[j].data[0]->readMap(); + auto label = trainData[j].target[0]->readMap(); + + auto trueData = images->readMap() + kImageRows * kImageColumns * index; + auto trueLabel = labels->readMap() + index; + + for (int k = 0; k < kImageRows * kImageColumns; k++) { + MNN_ASSERT(fabs(data[k] - (trueData[k] / 255.0f)) < 1e-6); + } + MNN_ASSERT(label[0] == trueLabel[0]); + } + } + trainLambdaDataLoader.clean(); + + passedTestCount++; + cout << "[" << passedTestCount << " / " << testCount << "] passed." 
<< endl; + + // the stack transform, stack [1, 28, 28] to [n, 1, 28, 28] + auto trainStackTransform = std::make_shared(); + auto trainStackTransDataset = std::make_shared(trainDataset, trainStackTransform); + + DataLoader trainStackDataLoader(trainStackTransDataset, trainSampler, trainConfig); + + samplerIndices = trainSampler->indices(); + sort(samplerIndices.begin(), samplerIndices.end()); + for (int i = 0; i < samplerIndices.size(); i++) { + MNN_ASSERT(samplerIndices[i] == i); + } + + for (int i = 0; i < iterations; i++) { + auto trainData = trainStackDataLoader.next(); + MNN_ASSERT(trainData.size() == 1); + + std::vector shape = {trainBatchSize, 1, 28, 28}; + MNN_ASSERT(trainData[0].data[0]->getInfo()->dim == shape); + + shape = {trainBatchSize}; + MNN_ASSERT(trainData[0].target[0]->getInfo()->dim == shape); + + auto data = trainData[0].data[0]->readMap(); + auto label = trainData[0].target[0]->readMap(); + + for (int j = 0; j < trainBatchSize; j++) { + auto index = int(trainData[0].data[1]->readMap()[j]); + + auto trueData = images->readMap() + kImageRows * kImageColumns * index; + auto trueLabel = labels->readMap() + index; + + for (int k = 0; k < kImageRows * kImageColumns; k++) { + int dataIndex = j * (kImageRows * kImageColumns) + k; + MNN_ASSERT(data[dataIndex] == trueData[k]); + } + MNN_ASSERT(label[j] == trueLabel[0]); + } + } + trainStackDataLoader.clean(); + + passedTestCount++; + cout << "[" << passedTestCount << " / " << testCount << "] passed." 
<< endl; + + // here we test Lambda + Stack + auto trainLambdaStackTransDataset = + std::make_shared(trainLambdaTransDataset, trainStackTransform); + + DataLoader trainLambdaStackDataLoader(trainLambdaStackTransDataset, trainSampler, trainConfig); + + samplerIndices = trainSampler->indices(); + sort(samplerIndices.begin(), samplerIndices.end()); + for (int i = 0; i < samplerIndices.size(); i++) { + MNN_ASSERT(samplerIndices[i] == i); + } + + for (int i = 0; i < iterations; i++) { + auto trainData = trainLambdaStackDataLoader.next(); + MNN_ASSERT(trainData.size() == 1); + + std::vector shape = {trainBatchSize, 1, 28, 28}; + MNN_ASSERT(trainData[0].data[0]->getInfo()->dim == shape); + + shape = {trainBatchSize}; + MNN_ASSERT(trainData[0].target[0]->getInfo()->dim == shape); + + auto data = trainData[0].data[0]->readMap(); + auto label = trainData[0].target[0]->readMap(); + + for (int j = 0; j < trainBatchSize; j++) { + auto index = int(trainData[0].data[1]->readMap()[j]); + + auto trueData = images->readMap() + kImageRows * kImageColumns * index; + auto trueLabel = labels->readMap() + index; + + for (int k = 0; k < kImageRows * kImageColumns; k++) { + int dataIndex = j * (kImageRows * kImageColumns) + k; + MNN_ASSERT(fabs(data[dataIndex] - (trueData[k] / 255.0f)) < 1e-6); + } + MNN_ASSERT(label[j] == trueLabel[0]); + } + } + trainLambdaStackDataLoader.clean(); + + passedTestCount++; + cout << "[" << passedTestCount << " / " << testCount << "] passed." 
<< endl; + + // here we test Stack + Lambda + auto trainStackLambdaTransDataset = + std::make_shared(trainStackTransDataset, trainLambdaTransform); + + DataLoader trainStackLamdaDataLoader(trainStackLambdaTransDataset, trainSampler, trainConfig); + + samplerIndices = trainSampler->indices(); + sort(samplerIndices.begin(), samplerIndices.end()); + for (int i = 0; i < samplerIndices.size(); i++) { + MNN_ASSERT(samplerIndices[i] == i); + } + + for (int i = 0; i < iterations; i++) { + auto trainData = trainStackLamdaDataLoader.next(); + MNN_ASSERT(trainData.size() == 1); + + std::vector shape = {trainBatchSize, 1, 28, 28}; + MNN_ASSERT(trainData[0].data[0]->getInfo()->dim == shape); + + shape = {trainBatchSize}; + MNN_ASSERT(trainData[0].target[0]->getInfo()->dim == shape); + + auto data = trainData[0].data[0]->readMap(); + auto label = trainData[0].target[0]->readMap(); + + for (int j = 0; j < trainBatchSize; j++) { + auto index = int(trainData[0].data[1]->readMap()[j]); + + auto trueData = images->readMap() + kImageRows * kImageColumns * index; + auto trueLabel = labels->readMap() + index; + + for (int k = 0; k < kImageRows * kImageColumns; k++) { + int dataIndex = j * (kImageRows * kImageColumns) + k; + MNN_ASSERT(fabs(data[dataIndex] - (trueData[k] / 255.0f)) < 1e-6); + } + MNN_ASSERT(label[j] == trueLabel[0]); + } + } + trainStackLamdaDataLoader.clean(); + + passedTestCount++; + cout << "[" << passedTestCount << " / " << testCount << "] passed." 
<< endl; + + // test makeDataLoader + auto madeDataLoader = DataLoader::makeDataLoader( + trainDataset, {nullptr, trainStackTransform, nullptr, trainLambdaTransform, nullptr}, 7); + + for (int i = 0; i < iterations; i++) { + auto trainData = madeDataLoader->next(); + MNN_ASSERT(trainData.size() == 1); + + std::vector shape = {trainBatchSize, 1, 28, 28}; + MNN_ASSERT(trainData[0].data[0]->getInfo()->dim == shape); + + shape = {trainBatchSize}; + MNN_ASSERT(trainData[0].target[0]->getInfo()->dim == shape); + + auto data = trainData[0].data[0]->readMap(); + auto label = trainData[0].target[0]->readMap(); + + for (int j = 0; j < trainBatchSize; j++) { + auto index = int(trainData[0].data[1]->readMap()[j]); + + auto trueData = images->readMap() + kImageRows * kImageColumns * index; + auto trueLabel = labels->readMap() + index; + + for (int k = 0; k < kImageRows * kImageColumns; k++) { + int dataIndex = j * (kImageRows * kImageColumns) + k; + MNN_ASSERT(fabs(data[dataIndex] - (trueData[k] / 255.0f)) < 1e-6); + } + MNN_ASSERT(label[j] == trueLabel[0]); + } + } + madeDataLoader->clean(); + + passedTestCount++; + cout << "[" << passedTestCount << " / " << testCount << "] passed." << endl; + + return 0; + } +}; + +DemoUnitSetRegister(DataLoaderTest, "DataLoaderTest"); diff --git a/tools/train/source/demo/demoMain.cpp b/tools/train/source/demo/demoMain.cpp new file mode 100644 index 000000000..a7987143b --- /dev/null +++ b/tools/train/source/demo/demoMain.cpp @@ -0,0 +1,29 @@ +// +// demoMain.cpp +// MNN +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "DemoUnit.hpp" +int main(int argc, const char* argv[]) { + if (argc < 2) { + MNN_ERROR("Usage: ./runTrainDemo.out CASENAME [ARGS]\n"); + auto& list = DemoUnitSet::get()->list(); + MNN_PRINT("Valid Case: \n"); + + for (auto iter : list) { + MNN_PRINT("%s\n", iter.first.c_str()); + } + return 0; + } + auto demo = DemoUnitSet::get()->search(argv[1]); + if (nullptr == demo) { + MNN_ERROR("Can't find demo %s\n", argv[1]); + return 0; + } + demo->run(argc - 1, argv + 1); + return 0; +} diff --git a/tools/train/source/demo/linearRegression.cpp b/tools/train/source/demo/linearRegression.cpp new file mode 100644 index 000000000..154551fa4 --- /dev/null +++ b/tools/train/source/demo/linearRegression.cpp @@ -0,0 +1,58 @@ +// +// linearRegression.cpp +// MNN +// +// Created by MNN on 2019/11/22. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include "DemoUnit.hpp" +#include "SGD.hpp" +using namespace MNN::Express; +using namespace MNN::Train; +std::random_device gRandom; +class LinearRegress : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + VARP w = _Const(0.3f); + VARP b = _Const(0.1f); + + std::shared_ptr opt(new SGD); + opt->append({w, b}); + opt->setLearningRate(0.1f); + + const int number = 10; + const int limit = 300; + for (int i = 0; i < limit; ++i) { + VARP x = _Input({number}, NHWC); + // Fill x + auto xPtr = x->writeMap(); + for (int v = 0; v < number; ++v) { + xPtr[v] = (gRandom() % 10000) / 10000.0f; + } + VARP label = _Input({number}, NHWC); + // Fill label + auto ptr = label->writeMap(); + for (int v = 0; v < number; ++v) { + ptr[v] = xPtr[v] * 0.8f + 0.7f; + } + VARP y = x * w + b; + + VARP diff = y - label; + VARP loss = (diff * diff).mean({}); + + if (i == limit - 1) { + MNN_PRINT("w = %f, b = %f, Target w = 0.8f, Target b = 0.7f\n", w->readMap()[0], + b->readMap()[0]); + Variable::save({y}, "linear.mnn"); + } 
else { + opt->step(loss); + } + } + return 0; + } +}; + +DemoUnitSetRegister(LinearRegress, "LinearRegress"); diff --git a/tools/train/source/demo/mnistTrain.cpp b/tools/train/source/demo/mnistTrain.cpp new file mode 100644 index 000000000..f8d65a10e --- /dev/null +++ b/tools/train/source/demo/mnistTrain.cpp @@ -0,0 +1,241 @@ +// +// mnistTrain.cpp +// MNN +// +// Created by MNN on 2019/11/27. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include +#include "DataLoader.hpp" +#include "MnistDataset.hpp" +#include "DemoUnit.hpp" +#include "NN.hpp" +#include "SGD.hpp" +#define MNN_OPEN_TIME_TRACE +#include +#include "ADAM.hpp" +#include "LearningRateScheduler.hpp" +#include "Loss.hpp" +#include "RandomGenerator.hpp" +#include "Transformer.hpp" + +using namespace MNN::Train; +using namespace MNN::Express; +class MnistV2 : public Module { +public: + MnistV2() { + NN::ConvOption convOption; + convOption.kernelSize = {5, 5}; + convOption.channel = {1, 10}; + convOption.depthwise = false; + conv1 = NN::Conv(convOption); + convOption.reset(); + convOption.kernelSize = {5, 5}; + convOption.channel = {10, 10}; + convOption.depthwise = true; + conv2 = NN::Conv(convOption); + ip1 = NN::Linear(160, 100); + ip2 = NN::Linear(100, 10); + registerModel({conv1, conv2, ip1, ip2}); + } + + virtual std::vector onForward(const std::vector& inputs) override { + VARP x = inputs[0]; + x = conv1->forward(x); + x = _MaxPool(x, {2, 2}, {2, 2}); + x = conv2->forward(x); + x = _MaxPool(x, {2, 2}, {2, 2}); + x = _Convert(x, NCHW); + x = _Reshape(x, {0, -1}); + x = ip1->forward(x); + x = _Relu(x); + x = ip2->forward(x); + x = _Softmax(x, 1); + return {x}; + } + std::shared_ptr conv1; + std::shared_ptr conv2; + std::shared_ptr ip1; + std::shared_ptr ip2; +}; +class Mnist : public Module { +public: + Mnist() { + NN::ConvOption convOption; + convOption.kernelSize = {5, 5}; + convOption.channel = {1, 20}; + conv1 = NN::Conv(convOption); + 
convOption.reset(); + convOption.kernelSize = {5, 5}; + convOption.channel = {20, 50}; + conv2 = NN::Conv(convOption); + ip1 = NN::Linear(800, 500); + ip2 = NN::Linear(500, 10); + dropout = NN::Dropout(0.5); + registerModel({conv1, conv2, ip1, ip2, dropout}); + AUTOTIME; + } + + virtual std::vector onForward(const std::vector& inputs) override { + VARP x = inputs[0]; + x = conv1->forward(x); + x = _MaxPool(x, {2, 2}, {2, 2}); + x = conv2->forward(x); + x = _MaxPool(x, {2, 2}, {2, 2}); + x = _Convert(x, NCHW); + x = _Reshape(x, {0, -1}); + x = ip1->forward(x); + x = _Relu(x); + x = dropout->forward(x); + x = ip2->forward(x); + x = _Softmax(x, 1); + return {x}; + } + std::shared_ptr conv1; + std::shared_ptr conv2; + std::shared_ptr ip1; + std::shared_ptr ip2; + std::shared_ptr dropout; +}; + +static void train(std::shared_ptr model, std::string root) { + auto exe = Executor::getGlobalExecutor(); + BackendConfig config; + exe->setGlobalExecutorConfig(MNN_FORWARD_CPU, config, 2); + std::shared_ptr sgd(new SGD); + sgd->append(model->parameters()); + sgd->setMomentum(0.9f); + // sgd->setMomentum2(0.99f); + sgd->setWeightDecay(0.0005f); + + auto dataset = std::make_shared(root, MnistDataset::Mode::TRAIN); + // the stack transform, stack [1, 28, 28] to [n, 1, 28, 28] + auto transform = std::make_shared(); + + const size_t batchSize = 64; + const size_t numWorkers = 4; + bool shuffle = true; + + auto dataLoader = DataLoader::makeDataLoader(dataset, {transform}, batchSize, shuffle, numWorkers); + + const size_t iterations = dataset->size() / batchSize; + + auto testDataset = std::make_shared(root, MnistDataset::Mode::TEST); + const size_t testBatchSize = 20; + const size_t testNumWorkers = 1; + shuffle = false; + + auto testDataLoader = DataLoader::makeDataLoader(testDataset, {transform}, testBatchSize, shuffle, testNumWorkers); + + const size_t testIterations = testDataset->size() / testBatchSize; + + for (int epoch = 0; epoch < 50; ++epoch) { + exe->gc(); + int correct = 
0; + testDataLoader->reset(); + model->setIsTraining(false); + for (int i = 0; i < testIterations; i++) { + if ((i + 1) % 100 == 0) { + std::cout << "test iteration: " << (i + 1) << std::endl; + } + auto data = testDataLoader->next(); + auto example = data[0]; + auto cast = _Cast(example.data[0]); + example.data[0] = cast * _Const(1.0f / 255.0f); + auto predict = model->forward(example.data[0]); + predict = _ArgMax(predict, 1); + auto accu = _Cast(_Equal(predict, _Cast(example.target[0]))).sum({}); + correct += accu->readMap()[0]; + } + auto accu = (float)correct / (float)testDataset->size(); + std::cout << "epoch: " << epoch << " accuracy: " << accu << std::endl; + + dataLoader->reset(); + AUTOTIME; + model->setIsTraining(true); + for (int i = 0; i < iterations; i++) { + // AUTOTIME; + auto trainData = dataLoader->next(); + auto example = trainData[0]; + auto cast = _Cast(example.data[0]); + example.data[0] = cast * _Const(1.0f / 255.0f); + + // Compute One-Hot + auto newTarget = _OneHot(_Cast(example.target[0]), _Scalar(10), _Scalar(1.0f), + _Scalar(0.0f)); + + auto predict = model->forward(example.data[0]); + auto loss = _CrossEntropy(predict, newTarget); + float rate = LrScheduler::inv(0.01, epoch * iterations + i, 0.0001, 0.75); + sgd->setLearningRate(rate); + if ((epoch * iterations + i) % 100 == 0) { + std::cout << "train iteration: " << epoch * iterations + i; + std::cout << " loss: " << loss->readMap()[0]; + std::cout << " lr: " << rate << std::endl; + } + sgd->step(loss); + if (i == iterations - 1) { + model->setIsTraining(false); + predict = model->forward(_Input({1, 1, 28, 28}, NCHW)); + Variable::save({predict}, "temp.mnist.mnn"); + } + } + } +} + +class MnistTrain : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + if (argc < 2) { + std::cout << "usage: ./runTrainDemo.out MnistTrain /path/to/unzipped/mnist/data/ [depthwise]" << std::endl; + return 0; + } + // global random number generator, should invoke before 
construct the model and dataset + RandomGenerator::generator(17); + + auto exe = Executor::getGlobalExecutor(); + BackendConfig config; + exe->setGlobalExecutorConfig(MNN_FORWARD_CPU, config, 2); + + std::string root = argv[1]; + std::shared_ptr model(new Mnist); + if (argc >= 3) { + model.reset(new MnistV2); + } + train(model, root); + return 0; + } +}; + +class PostTrain : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + if (argc < 3) { + std::cout << "usage: ./runTrainDemo.out PostTrain /path/to/mnistModel /path/to/unzipped/mnist/data/ " + << std::endl; + return 0; + } + std::string root = argv[2]; + auto varMap = Variable::loadMap(argv[1]); + if (varMap.empty()) { + MNN_ERROR("Can not load model %s\n", argv[1]); + return 0; + } + auto inputOutputs = Variable::getInputAndOutput(varMap); + Transformer::turnModelToTrainable(Transformer::TrainConfig()) + ->onExecute(Variable::mapToSequence(inputOutputs.second)); + std::shared_ptr model(Module::transform(Variable::mapToSequence(inputOutputs.first), + (Variable::mapToSequence(inputOutputs.second)))); + + train(model, root); + return 0; + } +}; + +DemoUnitSetRegister(MnistTrain, "MnistTrain"); +DemoUnitSetRegister(PostTrain, "PostTrain"); diff --git a/tools/train/source/demo/nnGradTest.cpp b/tools/train/source/demo/nnGradTest.cpp new file mode 100644 index 000000000..452dac537 --- /dev/null +++ b/tools/train/source/demo/nnGradTest.cpp @@ -0,0 +1,314 @@ +// +// nnGradTest.cpp +// MNN +// +// Created by MNN on 2019/11/27. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "DemoUnit.hpp" +#include "NN.hpp" +#include "SGD.hpp" +#include +using namespace MNN::Express; +using namespace MNN::Train; +#include +std::random_device gDevice; +class NNGrad : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + MNN_PRINT("Test grad for convolution, pool, concat\n"); + int ic = 13; + int oc = 11; + int kw = 3; + int kh = 4; + int iw = 100; + int ih = 120; + int weightSize = ic * oc * kw * kh; + std::vector targetVecs(weightSize); + for (int i = 0; i < weightSize; ++i) { + auto v = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + targetVecs[i] = v; + } + auto weightTarget = _Const(targetVecs.data(), {oc, ic, kh, kw}, NCHW); + std::vector targetVecsBias(oc); + for (int i = 0; i < oc; ++i) { + targetVecsBias[i] = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + } + auto biasTarget = _Const(targetVecsBias.data(), {oc}, NCHW); + + NN::ConvOption convOption; + convOption.channel = {ic, oc}; + convOption.kernelSize = {kw, kh}; + convOption.stride = {2, 2}; + convOption.dilate = {1, 2}; + auto convModule = NN::Conv(convOption); + + std::shared_ptr sgd(new SGD); + sgd->setLearningRate(0.01f); + sgd->append(convModule->parameters()); + std::vector randomInputs(1 * ic * ih * iw); + for (int i = 0; i < randomInputs.size(); ++i) { + randomInputs[i] = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + } + + for (int i = 0; i < 100; ++i) { + auto input = _Input({1, ic, ih, iw}, NCHW); + auto inputPtr = input->writeMap(); + ::memcpy(inputPtr, randomInputs.data(), randomInputs.size() * sizeof(float)); + + auto targetValue = _Conv(weightTarget, biasTarget, _Convert(input, NC4HW4), convOption.padMode, + convOption.stride, convOption.dilate); + auto predictValue = convModule->forward(input); + + auto targetValue1 = _MaxPool(targetValue, {2, 2}, {2, 2}); + auto targetValue2 = _AvePool(targetValue, {2, 2}, {2, 2}); + auto predictValue1 = _MaxPool(predictValue, 
{2, 2}, {2, 2}); + auto predictValue2 = _AvePool(predictValue, {2, 2}, {2, 2}); + targetValue = _Concat({targetValue1, targetValue2}, 1); + predictValue = _Concat({predictValue1, predictValue2}, 1); + targetValue = _Convert(targetValue, NCHW); + predictValue = _Convert(predictValue, NCHW); + auto loss = _ReduceMean(_Square(_Subtract(targetValue, predictValue)), {}); + MNN_PRINT("Loss = %f\n", loss->readMap()[0]); + sgd->step(loss); + } + return 0; + } +}; +class NNGradV2 : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + MNN_PRINT("Test grad for concat, split, transpose\n"); + int ic = 7; + int oc = 7; + int kw = 3; + int kh = 4; + int iw = 100; + int ih = 120; + int weightSize = ic * oc * kw * kh; + std::vector targetVecs(weightSize); + for (int i = 0; i < weightSize; ++i) { + auto v = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + targetVecs[i] = v; + } + auto weightTarget = _Const(targetVecs.data(), {1, ic, kh, kw}, NCHW); + std::vector targetVecsBias(oc); + for (int i = 0; i < oc; ++i) { + targetVecsBias[i] = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + } + auto biasTarget = _Const(targetVecsBias.data(), {oc}, NCHW); + + NN::ConvOption convOption; + convOption.channel = {ic, oc}; + convOption.kernelSize = {kw, kh}; + convOption.stride = {2, 2}; + convOption.dilate = {1, 2}; + convOption.depthwise = true; + auto convModule = NN::Conv(convOption); + + std::shared_ptr sgd(new SGD); + sgd->setLearningRate(0.1f); + sgd->append(convModule->parameters()); + sgd->setWeightDecay(0.0f); + sgd->setMomentum(0.0f); + + std::vector randomInputs(1 * ic * ih * iw); + for (int i = 0; i < randomInputs.size(); ++i) { + randomInputs[i] = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + } + + for (int i = 0; i < 100; ++i) { + auto input = _Input({1, ic, ih, iw}, NCHW); + auto inputPtr = input->writeMap(); + ::memcpy(inputPtr, randomInputs.data(), randomInputs.size() * sizeof(float)); + auto targetValue = _Conv(weightTarget, 
biasTarget, _Convert(input, NC4HW4), convOption.padMode, + convOption.stride, convOption.dilate, ic); + auto predictValue = convModule->forward(input); + + auto targetValue1 = _MaxPool(targetValue, {2, 2}, {2, 2}); + auto targetValue2 = _AvePool(targetValue, {2, 2}, {2, 2}); + auto predictValue1 = _MaxPool(predictValue, {2, 2}, {2, 2}); + auto predictValue2 = _AvePool(predictValue, {2, 2}, {2, 2}); + targetValue = _Concat({targetValue1, targetValue2}, 1); + predictValue = _Concat({predictValue1, predictValue2}, 1); + + auto slicetarget = _Split(targetValue, {2}, 2); + auto slicePredict = _Split(predictValue, {2}, 2); + targetValue = slicetarget[0]; + predictValue = slicePredict[0]; + targetValue = _Convert(targetValue, NCHW); + targetValue = _Transpose(targetValue, {1, 3, 2, 0}); + predictValue = _Convert(predictValue, NCHW); + predictValue = _Transpose(predictValue, {1, 3, 2, 0}); + auto loss = _ReduceMean(_Square(_Subtract(targetValue, predictValue)), {}); + MNN_PRINT("Loss = %f\n", loss->readMap()[0]); + sgd->step(loss); + } + return 0; + } +}; +class NNGradV3 : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + MNN_PRINT("Test grad for Deconvolution(+dw), Resize\n"); + int ic = 13; + int oc = 11; + int kw = 3; + int kh = 4; + int iw = 100; + int ih = 120; + int weightSize = ic * oc * kw * kh; + std::vector targetVecs(weightSize); + for (int i = 0; i < weightSize; ++i) { + auto v = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + targetVecs[i] = v; + } + auto weightTarget = _Const(targetVecs.data(), {ic, oc, kh, kw}, NCHW); + std::vector targetVecsBias(oc); + for (int i = 0; i < oc; ++i) { + targetVecsBias[i] = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + } + auto biasTarget = _Const(targetVecsBias.data(), {oc}, NCHW); + + NN::ConvOption convOption; + convOption.channel = {ic, oc}; + convOption.kernelSize = {kw, kh}; + convOption.stride = {2, 2}; + convOption.dilate = {1, 2}; + auto convModule = 
NN::ConvTranspose(convOption); + + convOption.depthwise = true; + convOption.channel = {oc, oc}; + auto convModule2 = NN::ConvTranspose(convOption, false); + VARP weightTarget2; + { + int weightSize = oc * kw * kh; + std::vector targetVecs(weightSize); + for (int i = 0; i < weightSize; ++i) { + auto v = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + targetVecs[i] = v; + } + weightTarget2 = _Const(targetVecs.data(), {1, oc, kh, kw}, NCHW); + } + + std::shared_ptr sgd(new SGD); + sgd->setLearningRate(0.01f); + sgd->append(convModule->parameters()); + std::vector randomInputs(1 * ic * ih * iw); + for (int i = 0; i < randomInputs.size(); ++i) { + randomInputs[i] = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + } + + for (int i = 0; i < 1000; ++i) { + auto input = _Input({1, ic, ih, iw}, NCHW); + auto inputPtr = input->writeMap(); + ::memcpy(inputPtr, randomInputs.data(), randomInputs.size() * sizeof(float)); + + auto targetValue = _Deconv(weightTarget, biasTarget, _Convert(input, NC4HW4), convOption.padMode, + convOption.stride, convOption.dilate); + auto predictValue = convModule->forward(input); + targetValue = _Deconv(weightTarget2, nullptr, targetValue, convOption.padMode, convOption.stride, + convOption.dilate, oc); + predictValue = convModule2->forward(predictValue); + + auto targetValue1 = _MaxPool(targetValue, {2, 2}, {2, 2}); + auto targetValue2 = _AvePool(targetValue, {2, 2}, {2, 2}); + auto predictValue1 = _MaxPool(predictValue, {2, 2}, {2, 2}); + auto predictValue2 = _AvePool(predictValue, {2, 2}, {2, 2}); + targetValue = _Concat({targetValue1, targetValue2}, 1); + predictValue = _Concat({predictValue1, predictValue2}, 1); + targetValue = _Resize(targetValue, 0.5f, 0.5f); + predictValue = _Resize(predictValue, 0.5f, 0.5f); + + targetValue = _Convert(targetValue, NCHW); + predictValue = _Convert(predictValue, NCHW); + auto loss = _ReduceMean(_Square(_Subtract(targetValue, predictValue)), {}); + MNN_PRINT("Loss = %f\n", loss->readMap()[0]); + 
sgd->step(loss); + } + return 0; + } +}; +class MatMulGradTest : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + MNN_PRINT("Test grad for MatMul, BatchMatMul\n"); + { + int e = 13; + int l = 11; + int h = 30; + int weightSize = l * h; + std::vector targetVecs(weightSize); + for (int i = 0; i < weightSize; ++i) { + auto v = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + targetVecs[i] = v; + } + auto weightTarget = _Const(targetVecs.data(), {l, h}, NCHW); + auto weightOrigin = _Const(0.0f, {l, h}, NCHW); + std::shared_ptr sgd(new SGD); + sgd->setLearningRate(0.01f); + sgd->append({weightOrigin}); + std::vector randomInputs(e * l); + for (int i = 0; i < randomInputs.size(); ++i) { + randomInputs[i] = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + } + + for (int i = 0; i < 1000; ++i) { + auto input = _Input({e, l}, NCHW); + auto inputPtr = input->writeMap(); + ::memcpy(inputPtr, randomInputs.data(), randomInputs.size() * sizeof(float)); + + auto targetValue = _MatMul(input, weightTarget); + auto predictValue = _MatMul(input, weightOrigin); + auto loss = _ReduceMean(_Square(_Subtract(targetValue, predictValue)), {}); + if (i % 100 == 0) { + MNN_PRINT("Loss = %f\n", loss->readMap()[0]); + } + sgd->step(loss); + } + } + MNN_PRINT("Test for BatchMatMul\n"); + { + int e = 13; + int l = 11; + int h = 30; + int b = 5; + int weightSize = b * l * h; + std::vector targetVecs(weightSize); + for (int i = 0; i < weightSize; ++i) { + auto v = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + targetVecs[i] = v; + } + auto weightTarget = _Const(targetVecs.data(), {b, l, h}, NCHW); + auto weightOrigin = _Const(0.0f, {b, l, h}, NCHW); + std::shared_ptr sgd(new SGD); + sgd->setLearningRate(0.01f); + sgd->append({weightOrigin}); + std::vector randomInputs(b * e * l); + for (int i = 0; i < randomInputs.size(); ++i) { + randomInputs[i] = ((float)(gDevice() % 2000) - 1000.0f) / 1000.0f; + } + + for (int i = 0; i < 10000; ++i) { + auto input = 
_Input({b, e, l}, NCHW); + auto inputPtr = input->writeMap(); + ::memcpy(inputPtr, randomInputs.data(), randomInputs.size() * sizeof(float)); + + auto targetValue = _BatchMatMul(input, weightTarget); + auto predictValue = _BatchMatMul(input, weightOrigin); + auto loss = _ReduceMean(_Square(_Subtract(targetValue, predictValue)), {}); + if (i % 1000 == 0) { + MNN_PRINT("Loss = %f\n", loss->readMap()[0]); + } + sgd->step(loss); + } + } + return 0; + } +}; + +DemoUnitSetRegister(NNGrad, "NNGrad"); +DemoUnitSetRegister(NNGradV2, "NNGradV2"); +DemoUnitSetRegister(NNGradV3, "NNGradV3"); +DemoUnitSetRegister(MatMulGradTest, "MatMulGradTest"); diff --git a/tools/train/source/exec/dataTransformer.cpp b/tools/train/source/exec/dataTransformer.cpp index 3ee5cf7db..8a36408f9 100644 --- a/tools/train/source/exec/dataTransformer.cpp +++ b/tools/train/source/exec/dataTransformer.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // +#include +#include #include #include -#include "ImageProcess.hpp" -#include "Interpreter.hpp" #include "MNN_generated.h" #include "rapidjson/document.h" using namespace MNN; @@ -97,7 +97,7 @@ int main(int argc, const char* argv[]) { std::shared_ptr probUserTensor(new Tensor(probTensor, probTensor->getDimensionType())); probTensor->copyToHostTensor(probUserTensor.get()); - //FUNC_PRINT(probTensor->elementSize()); + // FUNC_PRINT(probTensor->elementSize()); result.emplace_back(std::make_pair(userTensor, probUserTensor)); stbi_image_free(inputImage); diff --git a/tools/train/source/exec/rawDataTransform.cpp b/tools/train/source/exec/rawDataTransform.cpp index 64a997412..8ab1a2b7a 100644 --- a/tools/train/source/exec/rawDataTransform.cpp +++ b/tools/train/source/exec/rawDataTransform.cpp @@ -6,10 +6,10 @@ // Copyright © 2018, Alibaba Group Holding Limited // +#include #include #include #include -#include "MNNDefine.h" #include "MNN_generated.h" #include "rapidjson/document.h" using namespace MNN; diff --git 
a/tools/train/source/exec/train.cpp b/tools/train/source/exec/train.cpp index 47b95c2c5..a3d3f665d 100644 --- a/tools/train/source/exec/train.cpp +++ b/tools/train/source/exec/train.cpp @@ -6,8 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // +#include #include #include +#include +#include #include #include #include @@ -16,13 +19,10 @@ #include #include #include -#include "Interpreter.hpp" -#include "MNNDefine.h" #include "MNN_generated.h" -#include "Macro.h" -#include "Tensor.hpp" +#include "core/Macro.h" //#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" +#include using namespace MNN; using namespace std; std::random_device gDevice; @@ -32,33 +32,37 @@ inline std::string numberToString(int index) { return os.str(); } static void dumpTensorToFile(const Tensor* tensor, std::string fileName) { + std::unique_ptr hostTensor(new Tensor(tensor, MNN::Tensor::TENSORFLOW, true)); + tensor->copyToHostTensor(hostTensor.get()); if (tensor->getType().code == halide_type_float) { - auto origin0 = tensor->host(); + auto origin0 = hostTensor->host(); std::ofstream prob(fileName); - auto size = tensor->elementSize(); + auto size = hostTensor->elementSize(); for (int i = 0; i < size; ++i) { prob << origin0[i] << "\n"; } } else if (tensor->getType().code == halide_type_int && tensor->getType().bytes() == 4) { - auto origin0 = tensor->host(); + auto origin0 = hostTensor->host(); std::ofstream prob(fileName); - auto size = tensor->elementSize(); + auto size = hostTensor->elementSize(); for (int i = 0; i < size; ++i) { prob << origin0[i] << "\n"; } } } -//#define TEST_TRAIN +#define TEST_TRAIN int main(int argc, const char* argv[]) { if (argc < 5) { - MNN_PRINT("Usage: ./train.out model.mnn data.bin test.bin times [learningRate] [LossName]\n"); + MNN_PRINT( + "Usage: ./train.out model.mnn data.bin test.bin times [learningRate] [LossName] [backend " + "{0:CPU,1:OPENCL}]\n"); return 0; } unique_ptr net(Interpreter::createFromFile(argv[1])); - int time = atoi(argv[4]); - 
int trainStep = 500; - float lr = 0.00001f; + int time = atoi(argv[4]); + int trainStep = 1; + float lr = 0.00001f; if (argc > 5) { lr = atof(argv[5]); } @@ -67,10 +71,20 @@ int main(int argc, const char* argv[]) { lossName = argv[6]; } ScheduleConfig config; + if (argc > 7) { + int backend = atoi(argv[7]); + if (backend == 1) { + config.type = MNN_FORWARD_OPENCL; + } + } config.numThread = 1; config.saveTensors.emplace_back(lossName); - auto session = net->createSession(config); - auto loss = net->getSessionOutput(session, lossName.c_str()); + BackendConfig backendConfig; + backendConfig.precision = MNN::BackendConfig::Precision_High; + config.backendConfig = &backendConfig; + auto session = net->createSession(config); + auto loss = net->getSessionOutput(session, lossName.c_str()); + std::unique_ptr lossHost(Tensor::createHostTensorFromDevice(loss, false)); int maxBatch = 0; if (nullptr == loss) { MNN_ERROR("Can't find loss\n"); @@ -113,25 +127,27 @@ int main(int argc, const char* argv[]) { ::memcpy(tensor->host(), sourcePtr, tensor->size()); auto name = netC->oplists()->GetAs(i)->name()->str(); auto inputOrigin = net->getSessionInput(session, name.c_str()); - batch = inputOrigin->shape()[0]; + batch = inputOrigin->shape()[0]; FUNC_PRINT(batch); - std::unique_ptr inputOriginUser(new Tensor(inputOrigin, inputOrigin->getDimensionType())); + std::unique_ptr inputOriginUser(new Tensor(inputOrigin, dimType)); tensorInputStorage.insert( std::make_pair(name, std::make_tuple(std::move(tensor), std::move(inputOriginUser), inputOrigin))); FUNC_PRINT_ALL(name.c_str(), s); } } - auto learnRate = net->getSessionInput(session, "LearningRate"); - TensorCallBack begin = [](const std::vector& inputs, const std::string& name) { return true; }; - TensorCallBack afterEval = [lossName](const std::vector& output, const std::string& name) { + auto learnRate = net->getSessionInput(session, "LearningRate"); + std::unique_ptr learnRateHost(Tensor::createHostTensorFromDevice(learnRate, 
false)); + learnRateHost->host()[0] = lr; + TensorCallBack begin = [](const std::vector& inputs, const std::string& name) { return true; }; + TensorCallBack afterEval = [lossName](const std::vector& output, const std::string& name) { if (name == lossName) { return false; } return true; }; - int offset = 0; + int offset = 0; for (int l = 0; l < time; ++l) { AUTOTIME; @@ -155,9 +171,10 @@ int main(int argc, const char* argv[]) { auto& dst = get<2>(iter.second); dst->copyFromHostTensor(src.get()); } - learnRate->host()[0] = lr; - net->runSessionWithCallBack(session, begin, afterEval); - meanloss += loss->host()[0]; + learnRate->copyFromHostTensor(learnRateHost.get()); + net->runSessionWithCallBack(session, begin, afterEval, true); + loss->copyToHostTensor(lossHost.get()); + meanloss += lossHost->host()[0]; } meanloss = meanloss / ((float)batchSize * batch); FUNC_PRINT_ALL(meanloss, f); @@ -181,11 +198,12 @@ int main(int argc, const char* argv[]) { auto& dst = get<2>(iter.second); dst->copyFromHostTensor(src.get()); } - learnRate->host()[0] = lr; + learnRate->copyFromHostTensor(learnRateHost.get()); net->runSession(session); #ifdef TEST_TRAIN static float historyLossValue = 1000000.0f; - auto lossValue = loss->host()[0]; + loss->copyToHostTensor(lossHost.get()); + auto lossValue = lossHost->host()[0]; FUNC_PRINT_ALL(lossValue, f); if (lossValue > historyLossValue) { MNN_ERROR("Loss value error, from %f to %f \n", historyLossValue, lossValue); @@ -209,7 +227,7 @@ int main(int argc, const char* argv[]) { } return true; }; - TensorCallBack after = [](const std::vector& output, const std::string& oname) { + TensorCallBack after = [lossName](const std::vector& output, const std::string& oname) { std::string name = oname; for (int i = 0; i < name.size(); ++i) { if (name[i] == '/') { @@ -217,13 +235,16 @@ int main(int argc, const char* argv[]) { } } float maxValue = 0.0f; + std::unique_ptr hostOutput; for (int index = 0; index < output.size(); ++index) { if 
(output[index]->getType().code != halide_type_float) { continue; } std::ofstream prob("output/" + name + "_" + numberToString(index)); - auto origin0 = output[index]->host(); - auto size = output[index]->elementSize(); + hostOutput.reset(new Tensor(output[index], MNN::Tensor::TENSORFLOW, true)); + output[index]->copyToHostTensor(hostOutput.get()); + auto origin0 = hostOutput->host(); + auto size = hostOutput->elementSize(); for (int i = 0; i < size; ++i) { auto value = origin0[i]; if ((!(value > 0.0f)) && (!(value <= 0.0f))) { @@ -252,17 +273,17 @@ int main(int argc, const char* argv[]) { auto& src = get<1>(iter.second); auto& dst = get<2>(iter.second); dst->copyFromHostTensor(src.get()); -// auto fileName = iter.first; -// for (int i = 0; i < fileName.size(); ++i) { -// if (fileName[i] == '/') { -// fileName[i] = '_'; -// } -// } -// dumpTensorToFile(src.get(), "output/Input_Src_" + fileName); -// dumpTensorToFile(dst, "output/Input_Dst_" + fileName); + // auto fileName = iter.first; + // for (int i = 0; i < fileName.size(); ++i) { + // if (fileName[i] == '/') { + // fileName[i] = '_'; + // } + // } + // dumpTensorToFile(src.get(), "output/Input_Src_" + fileName); + // dumpTensorToFile(dst, "output/Input_Dst_" + fileName); } - learnRate->host()[0] = lr; - net->runSessionWithCallBack(session, begin, after); + learnRate->copyFromHostTensor(learnRateHost.get()); + net->runSessionWithCallBack(session, begin, after, true); } net->updateSessionToModel(session); { diff --git a/tools/train/source/exec/transformer.cpp b/tools/train/source/exec/transformer.cpp deleted file mode 100644 index e21b7a90f..000000000 --- a/tools/train/source/exec/transformer.cpp +++ /dev/null @@ -1,272 +0,0 @@ -// -// transformer.cpp -// MNN -// -// Created by MNN on 2019/05/05. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include -#include -#include -#include -#include -#include -#include -#include "Interpreter.hpp" -#include "OpConverter.hpp" -#include "Macro.h" -#include "OpGrad.hpp" -#include "ExprCreator.hpp" -#define MNN_OPEN_TIME_TRACE -#include "AutoTime.hpp" -#include "rapidjson/document.h" - -using namespace MNN; -using namespace MNN::Express; -using namespace std; - -int main(int argc, const char* argv[]) { - if (argc < 4) { - MNN_PRINT("Usage: ./transformer.out temp.bin dst.bin config.json\n"); - return 0; - } - rapidjson::Document document; - { - std::ifstream fileNames(argv[3]); - std::ostringstream output; - output << fileNames.rdbuf(); - auto outputStr = output.str(); - document.Parse(outputStr.c_str()); - if (document.HasParseError()) { - MNN_ERROR("Invalid json\n"); - return 0; - } - FUNC_PRINT(document.HasParseError()); - FUNC_PRINT(document.IsArray()); - FUNC_PRINT(document.IsObject()); - } - auto configObject = document.GetObject(); - std::vector variableLimits; - if (configObject.HasMember("Optimizor")) { - auto optimizor = configObject["Optimizor"].GetObject(); - if (optimizor.HasMember("Variables")) { - auto limitArray = optimizor["Variables"].GetArray(); - for (auto vIter = limitArray.begin(); vIter != limitArray.end(); vIter++) { - variableLimits.emplace_back(vIter->GetString()); - MNN_PRINT("Variabale contain : %s \n", vIter->GetString()); - } - } - } - const char* inputModeFileName = argv[1]; - FUNC_PRINT_ALL(inputModeFileName, s); - auto inputsOutputs = Variable::getInputAndOutput(Variable::loadMap(argv[1])); - auto variables = Variable::getExecuteOrder(Variable::mapToSequence( inputsOutputs.second)); - if (configObject.HasMember("Shape")) { - auto shapeArray = configObject["Shape"].GetObject(); - for (auto shapeIter = shapeArray.begin(); shapeIter != shapeArray.end(); shapeIter++) { - auto dimArray = shapeIter->value.GetArray(); - std::vector dims; - for (auto dimIter = dimArray.begin(); dimIter 
!= dimArray.end(); dimIter++) { - dims.emplace_back(dimIter->GetInt()); - } - FUNC_PRINT_ALL(shapeIter->name.GetString(), s); - std::string key = shapeIter->name.GetString(); - for (auto& var : variables) { - if (var->name() == key) { - var->resize(dims); - break; - } - } - } - } - { - AUTOTIME; - // Turn convolution be trainable convolution - for (auto current : variables) { - auto expr = current->expr(); - FUNC_PRINT_ALL(expr.first->name().c_str(), s); - expr.first = OpConverter::convert(expr.first); - Variable::setExpr(current, expr.first, expr.second); - } - } - variables = Variable::getExecuteOrder(Variable::mapToSequence(inputsOutputs.second)); - - // Collect Const Variable - std::set updateExprs; - for (auto v : variables) { - if (v->expr().first->get()->type() == OpType_Const) { - auto name = v->name(); - bool match = variableLimits.empty(); - for (auto limit : variableLimits) { - if (name.find(limit) != std::string::npos) { - match = true; - break; - } - } - if (match) { - MNN_PRINT("Add Variable: %s\n", name.c_str()); - updateExprs.insert(v->expr().first); - } - } - } - - VARP loss; - bool hasLoss = configObject.HasMember("Loss"); - if (!hasLoss) { - auto output = inputsOutputs.second.begin()->second; - auto outputShape = output->getInfo(); - if (outputShape->order == NC4HW4) { - auto outputName = output->name(); - output->setName(outputName + "Origin"); - output = _Convert(output, NHWC); - outputShape = output->getInfo(); - output->setName(outputName); - } - auto outputReal = _Input(outputShape->dim, outputShape->order); - outputReal->setName(output->name() + "_Compare"); -#ifdef USE_ELU - auto sub = _Sub(output, outputReal); - sub->setName(output->name() + "_Sub"); - loss = (_Sum(_Mul(sub, sub), {})); -#else - auto mul = _Mul(_Log(output), outputReal); - mul->setName(output->name() + "_Mul"); - loss = _Neg(_Sum(mul, {})); -#endif - auto l2 = _Const(0.0f); - for (auto expr : updateExprs) { - auto var = expr->outputs().begin()->lock(); - 
MNN_ASSERT(nullptr != var); - l2 = _Add(l2, _Sum(_Mul(var, var), {})); - } - loss = _Add(loss, _Mul(l2, _Const(0.0005f))); - loss->setName("Loss"); - inputsOutputs.second.insert(std::make_pair("Loss", loss)); - variables = Variable::getExecuteOrder(Variable::mapToSequence( inputsOutputs.second)); - } else { - for (auto v : variables) { - auto name = v->expr().first->get()->name()->str(); - if (name == configObject["Loss"].GetObject()["op"].GetString()) { - loss = v; - break; - } - } - } - MNN_ASSERT(nullptr != loss); - std::map> backwardMap; - { - auto shape = loss->getInfo(); - MNN_ASSERT(shape->size == 1); - auto init = _Const(1.0f, shape->dim, shape->order); - backwardMap[loss->expr().first] = std::vector{init}; - } - { - AUTOTIME; - std::map exprRef; - std::set exprSet; - std::stack exprExecuteOrder; - std::map> exprExecuted; - for (auto v : variables) { - auto express = v->expr().first; - if (exprSet.find(express) != exprSet.end()) { - continue; - } - exprExecuteOrder.push(express); - exprSet.insert(express); - } - while (!exprExecuteOrder.empty()) { - auto expr = exprExecuteOrder.top(); - exprExecuteOrder.pop(); - auto& inputs = expr->inputs(); - if (backwardMap.find(expr) == backwardMap.end()) { - continue; - } - auto grad = OpGrad::get(expr->get()->type()); - if (nullptr == grad) { - continue; - } - std::vector outputs(expr->outputSize()); - for (auto v : expr->outputs()) { - auto vp = v.lock(); - if (nullptr == vp) { - continue; - } - outputs[vp->expr().second] = vp; - } - auto inputGrad = grad->onGrad(expr, outputs, backwardMap[expr]); - if (inputGrad.empty()) { - continue; - } - MNN_ASSERT(inputGrad.size() == inputs.size()); - for (int i=0; iexpr().first; - auto index = inputs[i]->expr().second; - auto backward = inputGrad[i]; - if (nullptr == backward) { - continue; - } - if (backwardMap.find(inputExpr) == backwardMap.end()) { - backwardMap.insert(std::make_pair(inputExpr, std::vector(inputExpr->outputSize()))); - } - auto& inputVarMap = 
backwardMap[inputExpr]; - if (nullptr == inputVarMap[index]) { - inputVarMap[index] = backward; - } else { - inputVarMap[index] = _Add(inputVarMap[index], backward); - } - } - } - } - //Make Update - std::map varUpdateMap; - auto learningRate = _Input(); - learningRate->setName("LearningRate"); - for (auto expr : updateExprs) { - auto iter = backwardMap.find(expr); - if (iter == backwardMap.end()) { - continue; - } - auto& vars = iter->second; - MNN_ASSERT(vars.size() == 1); - auto originVar = expr->outputs(); - auto var = originVar.begin()->lock(); - MNN_ASSERT(nullptr != var); - vars[0] = _Sub(var, _Mul(vars[0], learningRate)); - vars[0]->setName("update_" + var->name()); - varUpdateMap[var] = vars[0]; - } - std::unique_ptr netStruct(new MNN::NetT); - std::vector resultOutputs{loss}; - for (auto output : inputsOutputs.second) { - resultOutputs.emplace_back(output.second); - } - for (auto iter : varUpdateMap) { - resultOutputs.emplace_back(iter.second); - } - Variable::save(resultOutputs, netStruct.get()); - for (int i=0; ioplists.size(); ++i) { - auto& op = netStruct->oplists[i]; - for (auto iter : varUpdateMap) { - if (iter.second->name() == op->name) { - for (int j=0; joplists.size(); ++j) { - auto& opSub = netStruct->oplists[j]; - if (opSub->name == iter.first->name()) { - op->outputIndexes = opSub->outputIndexes; - } - } - } - } - } - { - flatbuffers::FlatBufferBuilder builder(1024); - auto offset = Net::Pack(builder, netStruct.get()); - builder.Finish(offset); - // TODO, use FileWriter instead - FILE* f = fopen(argv[2], "wb"); - fwrite(builder.GetBufferPointer(), 1, builder.GetSize(), f); - fclose(f); - } - - return 0; -} diff --git a/tools/train/source/exec/transformerExecution.cpp b/tools/train/source/exec/transformerExecution.cpp new file mode 100644 index 000000000..1b2e08f9c --- /dev/null +++ b/tools/train/source/exec/transformerExecution.cpp @@ -0,0 +1,184 @@ +// +// transformerExecution.cpp +// MNN +// +// Created by MNN on 2019/05/05. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include "Module.hpp" +#include "OpGrad.hpp" +#include "Transformer.hpp" +#include "core/Macro.h" +#define MNN_OPEN_TIME_TRACE +#include +#include "rapidjson/document.h" + +using namespace MNN; +using namespace MNN::Express; +using namespace MNN::Train; +using namespace std; + +int main(int argc, const char* argv[]) { + if (argc < 4) { + MNN_PRINT("Usage: ./transformer.out temp.bin dst.bin config.json\n"); + return 0; + } + rapidjson::Document document; + { + std::ifstream fileNames(argv[3]); + std::ostringstream output; + output << fileNames.rdbuf(); + auto outputStr = output.str(); + document.Parse(outputStr.c_str()); + if (document.HasParseError()) { + MNN_ERROR("Invalid json\n"); + return 0; + } + FUNC_PRINT(document.HasParseError()); + FUNC_PRINT(document.IsArray()); + FUNC_PRINT(document.IsObject()); + } + auto configObject = document.GetObject(); + std::vector variableLimits; + if (configObject.HasMember("Optimizor")) { + auto optimizor = configObject["Optimizor"].GetObject(); + if (optimizor.HasMember("Variables")) { + auto limitArray = optimizor["Variables"].GetArray(); + for (auto vIter = limitArray.begin(); vIter != limitArray.end(); vIter++) { + variableLimits.emplace_back(vIter->GetString()); + MNN_PRINT("Variabale contain : %s \n", vIter->GetString()); + } + } + } + const char* inputModeFileName = argv[1]; + FUNC_PRINT_ALL(inputModeFileName, s); + auto inputsOutputs = Variable::getInputAndOutput(Variable::loadMap(argv[1])); + Transformer::TrainConfig trainConfig; + trainConfig.variableLimits = std::move(variableLimits); + Transformer::turnModelToTrainable(trainConfig)->onExecute(Variable::mapToSequence(inputsOutputs.second)); + if (configObject.HasMember("Shape")) { + auto shapeArray = configObject["Shape"].GetObject(); + for (auto shapeIter = shapeArray.begin(); shapeIter != shapeArray.end(); shapeIter++) { + auto 
dimArray = shapeIter->value.GetArray(); + std::vector dims; + for (auto dimIter = dimArray.begin(); dimIter != dimArray.end(); dimIter++) { + dims.emplace_back(dimIter->GetInt()); + } + FUNC_PRINT_ALL(shapeIter->name.GetString(), s); + std::string key = shapeIter->name.GetString(); + for (auto& varIter : inputsOutputs.second) { + auto var = varIter.second; + if (var->name() == key) { + var->resize(dims); + break; + } + } + } + } + auto exprs = Variable::getExecuteOrder(Variable::mapToSequence(inputsOutputs.second)); + + // Collect Const Variable + std::set parameters; + for (auto v : exprs) { + if (v->get() == nullptr && VARP::TRAINABLE == v->inputType()) { + auto va = Variable::create(v, 0); + parameters.insert(va); + } + } + + VARP loss; + bool hasLoss = configObject.HasMember("Loss"); + if (!hasLoss) { + auto output = inputsOutputs.second.begin()->second; + auto outputShape = output->getInfo(); + if (outputShape->order == NC4HW4) { + auto outputName = output->name(); + output->setName(outputName + "Origin"); + output = _Convert(output, NHWC); + outputShape = output->getInfo(); + output->setName(outputName); + } + auto outputReal = _Input(outputShape->dim, outputShape->order); + outputReal->setName(output->name() + "_Compare"); +#ifdef USE_ELU + auto sub = _Subtract(output, outputReal); + sub->setName(output->name() + "_Sub"); + loss = (_ReduceSum(_Multiply(sub, sub), {})); +#else + auto mul = _Multiply(_Log(output), outputReal); + mul->setName(output->name() + "_Mul"); + loss = _Negative(_ReduceSum(mul, {})); +#endif + auto l2 = _Const(0.0f); + for (auto var : parameters) { + l2 = l2 + (var * var).sum({}); + } + loss = loss + _Multiply(l2, _Const(0.0005f)); + loss->setName("Loss"); + inputsOutputs.second.insert(std::make_pair("Loss", loss)); + exprs = Variable::getExecuteOrder(Variable::mapToSequence(inputsOutputs.second)); + } else { + for (auto expr : exprs) { + if (expr->name() == configObject["Loss"].GetObject()["op"].GetString()) { + loss = 
Variable::create(expr); + break; + } + } + } + MNN_ASSERT(nullptr != loss); + auto gradMap = OpGrad::grad(loss, parameters); + // Make Update + std::map varUpdateMap; + auto learningRate = _Input(); + learningRate->setName("LearningRate"); + for (auto iter : gradMap) { + auto p = iter.first; + auto q = iter.second; + q = _Subtract(p, _Multiply(q, learningRate)); + q->setName("update_" + p->name()); + varUpdateMap[p] = q; + } + std::unique_ptr netStruct(new MNN::NetT); + netStruct->usage = Usage_TRAIN; + std::vector resultOutputs{loss}; + for (auto output : inputsOutputs.second) { + resultOutputs.emplace_back(output.second); + } + for (auto iter : varUpdateMap) { + resultOutputs.emplace_back(iter.second); + } + Variable::save(resultOutputs, netStruct.get()); + for (int i = 0; i < netStruct->oplists.size(); ++i) { + auto& op = netStruct->oplists[i]; + for (auto iter : varUpdateMap) { + if (iter.second->name() == op->name) { + for (int j = 0; j < netStruct->oplists.size(); ++j) { + auto& opSub = netStruct->oplists[j]; + if (opSub->name == iter.first->name()) { + op->outputIndexes = opSub->outputIndexes; + } + } + } + } + } + { + flatbuffers::FlatBufferBuilder builder(1024); + auto offset = Net::Pack(builder, netStruct.get()); + builder.Finish(offset); + // TODO, use FileWriter instead + FILE* f = fopen(argv[2], "wb"); + fwrite(builder.GetBufferPointer(), 1, builder.GetSize(), f); + fclose(f); + } + + return 0; +} diff --git a/tools/train/source/grad/BatchNormGrad.cpp b/tools/train/source/grad/BatchNormGrad.cpp new file mode 100644 index 000000000..1a729f991 --- /dev/null +++ b/tools/train/source/grad/BatchNormGrad.cpp @@ -0,0 +1,55 @@ +// +// BatchNormGrad.cpp +// MNN +// +// Created by MNN on 2019/11/07. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "BatchNormGrad.hpp" +#include +#include +#include "core/Macro.h" + +using namespace std; +using namespace MNN; +using namespace MNN::Express; + +class BatchNormGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& outputs, + const std::vector& backDiff) override { + // input, scale, bias, running_mean, running_variance, epsilon, momentum, is_training + // scale and bias are learnable + std::shared_ptr forwardOp(expr->get()->UnPack()); + std::vector res; + auto inputs = expr->inputs(); + res.resize(inputs.size()); // only back propgate to input, scale, bias + + auto input = inputs[0]; + auto scale = inputs[1]; + auto bias = inputs[2]; + auto output = outputs[0]; + auto normalizedData = outputs[3]; // (input - sample_mean) / sqrt(sample_variance + epsilon) + auto rSampleStd = outputs[4]; // rsqrt(sample_variance + epsilon) + + MNN_ASSERT(scale->getInfo()->dim.size() == 1); + // reshape in order to use broadcast + auto factor = _Reshape(_Multiply(scale, rSampleStd), {1, scale->getInfo()->dim[0], 1, 1}, NCHW); + res[0] = _Multiply(backDiff[0], factor); + res[0]->setName(forwardOp->name + "_BN_Input_Grad"); + + res[1] = _ReduceSum(_Multiply(backDiff[0], normalizedData), {0, 2, 3}, false); + res[1]->setName(forwardOp->name + "_BN_Scale_Grad"); + + res[2] = _ReduceSum(backDiff[0], {0, 2, 3}, false); + res[2]->setName(forwardOp->name + "_BN_Bias_Grad"); + + return res; + } +}; +static const auto gRegister = []() { + static BatchNormGrad _c; + OpGrad::insert(OpType_BatchNorm, &_c); + return true; +}(); diff --git a/tools/train/source/grad/BatchNormGrad.hpp b/tools/train/source/grad/BatchNormGrad.hpp new file mode 100644 index 000000000..7904de1de --- /dev/null +++ b/tools/train/source/grad/BatchNormGrad.hpp @@ -0,0 +1,14 @@ +// +// BatchNormGrad.hpp +// MNN +// +// Created by MNN on 2019/11/07. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef BatchNormGrad_hpp +#define BatchNormGrad_hpp + +#include "OpGrad.hpp" + +#endif /* BatchNormGrad_hpp */ diff --git a/tools/train/source/grad/BinaryGrad.cpp b/tools/train/source/grad/BinaryGrad.cpp new file mode 100644 index 000000000..e1538aa10 --- /dev/null +++ b/tools/train/source/grad/BinaryGrad.cpp @@ -0,0 +1,169 @@ +// +// BinaryGrad.cpp +// MNN +// +// Created by MNN on 2019/05/04. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "BinaryGrad.hpp" +#include "core/Macro.h" +using namespace std; +using namespace MNN; +using namespace MNN::Express; +class EltwiseGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector res; + auto inputs = expr->inputs(); + res.resize(inputs.size()); + auto op = expr->get(); + auto outputDiff = backwardOutput[0]; + switch (op->main_as_Eltwise()->type()) { + case MNN::EltwiseType_SUM: { + for (int i = 0; i < res.size(); ++i) { + res[i] = outputDiff; + } + break; + } + case MNN::EltwiseType_SUB: { + res[0] = outputDiff; + auto negDiff = _Negative(outputDiff); + for (int i = 1; i < res.size(); ++i) { + res[i] = negDiff; + } + break; + } + case MNN::EltwiseType_PROD: { + for (int i = 0; i < res.size(); ++i) { + std::vector prods{outputDiff}; + for (int j = 0; j < inputs.size(); ++j) { + if (j == i) { + continue; + } + prods.emplace_back(inputs[j]); + } + std::unique_ptr eltOp(new OpT); + eltOp->type = OpType_Eltwise; + eltOp->main.type = OpParameter_Eltwise; + eltOp->main.value = new EltwiseT; + eltOp->main.AsEltwise()->type = EltwiseType_PROD; + res[i] = Variable::create(Expr::create(eltOp.get(), prods)); + } + break; + } + case MNN::EltwiseType_MAXIMUM: { + for (int i = 0; i < inputs.size(); ++i) { + auto mask = _Sign(inputs[i] - output[i]) + _Const(1.0f, {}, NCHW); + res[i] = mask * outputDiff; + } + break; + } + default: + return res; 
+ } + return res; + } +}; +class BinaryGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector res; + auto inputs = expr->inputs(); + res.resize(inputs.size()); + auto op = expr->get(); + auto outputDiff = backwardOutput[0]; + switch (op->main_as_BinaryOp()->opType()) { + case BinaryOpOperation_ADD: { + res[0] = outputDiff; + res[1] = outputDiff; + break; + } + case BinaryOpOperation_SUB: { + res[0] = outputDiff; + res[1] = _Negative(outputDiff); + break; + } + case BinaryOpOperation_MUL: { + res[0] = outputDiff * inputs[1]; + res[1] = outputDiff * inputs[0]; + break; + } + case BinaryOpOperation_MAXIMUM: { + auto mask0 = _Sign(inputs[0] - output[0]) + _Const(1.0f, {}, NCHW); + auto mask1 = _Sign(inputs[1] - output[0]) + _Const(1.0f, {}, NCHW); + res[0] = outputDiff * mask0; + res[1] = outputDiff * mask1; + break; + } + case BinaryOpOperation_MINIMUM: { + auto mask0 = _Sign(output[0] - inputs[0]) + _Const(1.0f, {}, NCHW); + auto mask1 = _Sign(output[0] - inputs[1]) + _Const(1.0f, {}, NCHW); + res[0] = outputDiff * mask0; + res[1] = outputDiff * mask1; + break; + } + case BinaryOpOperation_REALDIV: { + res[0] = _Divide(outputDiff, inputs[1]); + // d (u / v) = dx / v , -dx*u(1/v)*(1/v) + res[1] = _Negative(_Multiply(outputDiff, _Divide(output[0], inputs[1]))); + break; + } + default: + return res; + } + for (int i = 0; i < inputs.size(); ++i) { + auto inputShape = inputs[i]->getInfo(); + auto backShape = res[i]->getInfo(); + std::vector reduceDims; + bool keepDim = true; + MNN_ASSERT(inputShape->dim.size() <= backShape->dim.size()); + if (inputShape->dim.size() < backShape->dim.size()) { + // case like: shape(7, 2, 3, 3) + shape(2, 3, 1) + // will only be handled a part here + // because we need keepDim = false for dim[0] = 7 + // and keepDim = true for dim[-1] = 3 + auto diff = (int)backShape->dim.size() - (int)inputShape->dim.size(); + for (int i = 0; 
i < diff; ++i) { + reduceDims.emplace_back(i); + } + keepDim = false; + } else { + for (int i = 0; i < backShape->dim.size(); ++i) { + if (backShape->dim[i] > 1 && inputShape->dim[i] == 1) { + reduceDims.emplace_back(i); + } + } + keepDim = true; + } + if (!reduceDims.empty()) { + res[i] = _ReduceSum(res[i], reduceDims, keepDim); + // for case like: shape(7, 2, 3, 3) + shape(2, 3, 1) + if (keepDim == false) { + reduceDims.clear(); + auto diff = (int)backShape->dim.size() - (int)inputShape->dim.size(); + for (int j = 0; j < inputShape->dim.size(); j++) { + if (backShape->dim[j+diff] > 1 && inputShape->dim[j] == 1) { + reduceDims.emplace_back(j); + } + } + keepDim = true; + if (!reduceDims.empty()) { + res[i] = _ReduceSum(res[i], reduceDims, keepDim); + } + } + } + } + return res; + } +}; + +static const auto gRegister = []() { + static BinaryGrad _c; + OpGrad::insert((int)OpType_BinaryOp, &_c); + static EltwiseGrad _d; + OpGrad::insert((int)OpType_Eltwise, &_d); + return true; +}(); diff --git a/tools/train/source/transform/BinaryGrad.hpp b/tools/train/source/grad/BinaryGrad.hpp similarity index 100% rename from tools/train/source/transform/BinaryGrad.hpp rename to tools/train/source/grad/BinaryGrad.hpp diff --git a/tools/train/source/grad/ConcatGrad.cpp b/tools/train/source/grad/ConcatGrad.cpp new file mode 100644 index 000000000..0f0131816 --- /dev/null +++ b/tools/train/source/grad/ConcatGrad.cpp @@ -0,0 +1,41 @@ +// +// ConcatGrad.cpp +// MNN +// +// Created by MNN on 2019/12/11. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpGrad.hpp" +#include "core/Macro.h" +using namespace std; +using namespace MNN; +using namespace MNN::Express; + +class ConcatGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector res(expr->inputs().size()); + if (!expr->requireInfo()) { + return res; + } + auto axis = expr->get()->main_as_Axis()->axis(); + if (axis < 0) { + axis = expr->outputInfo(0)->dim.size() + axis; + } + std::vector points(res.size()); + for (int i = 0; i < res.size(); ++i) { + auto input = expr->inputs()[i]; + points[i] = input->getInfo()->dim[axis]; + } + res = _Split(backwardOutput[0], points, axis); + return res; + } +}; + +static const auto gRegister = []() { + static ConcatGrad _c; + OpGrad::insert((int)OpType_Concat, &_c); + return true; +}(); diff --git a/tools/train/source/grad/ConvGrad.cpp b/tools/train/source/grad/ConvGrad.cpp new file mode 100644 index 000000000..d6965c3a4 --- /dev/null +++ b/tools/train/source/grad/ConvGrad.cpp @@ -0,0 +1,133 @@ +// +// ConvGrad.cpp +// MNN +// +// Created by MNN on 2019/04/22. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "ConvGrad.hpp" +#include "core/Macro.h" +using namespace std; +using namespace MNN::Express; +using namespace MNN; + +class ConvGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + auto inputs = expr->inputs(); + if (inputs.size() == 1) { + return std::vector{nullptr}; + } + std::vector res(inputs.size(), nullptr); + auto forwardName = expr->name(); + std::shared_ptr forwardOp(expr->get()->UnPack()); + auto outputDiff = backwardOutput[0]; + { + // Create Input Grad + unique_ptr newOp(new OpT); + if (forwardOp->type == OpType_Convolution) { + newOp->type = OpType_Deconvolution; + } else if (forwardOp->type == OpType_ConvolutionDepthwise) { + newOp->type = OpType_DeconvolutionDepthwise; + } + newOp->main.type = OpParameter_Convolution2D; + auto conv2D = new Convolution2DT; + conv2D->common.reset(new Convolution2DCommonT(*forwardOp->main.AsConvolution2D()->common)); + auto inputCount = conv2D->common->inputCount; + auto outputCount = conv2D->common->outputCount; + conv2D->common->inputCount = outputCount; + conv2D->common->outputCount = inputCount; + newOp->main.value = conv2D; + + auto expr = Expr::create(std::move(newOp), {outputDiff, inputs[1]}); + res[0] = Variable::create(expr); + res[0]->setName(forwardName + "_Input_Grad"); + } + // Add Filter Grad + { + unique_ptr newOp(new OpT); + newOp->type = OpType_Conv2DBackPropFilter; + newOp->main.type = OpParameter_Convolution2D; + auto conv2D = new Convolution2DT; + conv2D->common.reset(new Convolution2DCommonT(*forwardOp->main.AsConvolution2D()->common)); + newOp->main.value = conv2D; + auto expr = Expr::create(std::move(newOp), {inputs[1], inputs[0], outputDiff}); + res[1] = Variable::create(expr); + res[1]->setName(forwardName + "_Filter_Grad"); + } + // Add Bias Grad + if (inputs.size() > 2) { + auto gradConvert = _Convert(outputDiff, NHWC); + 
res[2] = _ReduceSum(gradConvert, {0, 1, 2}); + res[2]->setName(forwardName + "_Bias_Grad"); + } + return res; + } +}; + +class DeconvGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + auto inputs = expr->inputs(); + if (inputs.size() == 1) { + return std::vector{nullptr}; + } + std::vector res(inputs.size(), nullptr); + auto forwardName = expr->name(); + std::shared_ptr forwardOp(expr->get()->UnPack()); + auto outputDiff = backwardOutput[0]; + { + // Create Input Grad + unique_ptr newOp(new OpT); + if (forwardOp->type == OpType_Deconvolution) { + newOp->type = OpType_Convolution; + } else if (forwardOp->type == OpType_DeconvolutionDepthwise) { + newOp->type = OpType_ConvolutionDepthwise; + } + newOp->main.type = OpParameter_Convolution2D; + auto conv2D = new Convolution2DT; + conv2D->common.reset(new Convolution2DCommonT(*forwardOp->main.AsConvolution2D()->common)); + auto inputCount = conv2D->common->inputCount; + auto outputCount = conv2D->common->outputCount; + conv2D->common->inputCount = outputCount; + conv2D->common->outputCount = inputCount; + newOp->main.value = conv2D; + + auto expr = Expr::create(std::move(newOp), {outputDiff, inputs[1]}); + res[0] = Variable::create(expr); + res[0]->setName(forwardName + "_Input_Grad"); + } + // Add Filter Grad + { + unique_ptr newOp(new OpT); + newOp->type = OpType_Conv2DBackPropFilter; + newOp->main.type = OpParameter_Convolution2D; + auto conv2D = new Convolution2DT; + conv2D->common.reset(new Convolution2DCommonT(*forwardOp->main.AsConvolution2D()->common)); + newOp->main.value = conv2D; + auto expr = Expr::create(std::move(newOp), {inputs[1], outputDiff, inputs[0]}); + res[1] = Variable::create(expr); + res[1]->setName(forwardName + "_Filter_Grad"); + } + // Add Bias Grad + if (inputs.size() > 2) { + auto gradConvert = _Convert(outputDiff, NHWC); + res[2] = _ReduceSum(gradConvert, {0, 1, 2}); + 
res[2]->setName(forwardName + "_Bias_Grad"); + } + return res; + } +}; + +static const auto gRegister = []() { + static ConvGrad _c; + OpGrad::insert(OpType_Convolution, &_c); + OpGrad::insert(OpType_ConvolutionDepthwise, &_c); + static DeconvGrad _d; + OpGrad::insert(OpType_Deconvolution, &_d); + OpGrad::insert(OpType_DeconvolutionDepthwise, &_d); + return true; +}(); diff --git a/tools/train/source/transform/ConvGrad.hpp b/tools/train/source/grad/ConvGrad.hpp similarity index 100% rename from tools/train/source/transform/ConvGrad.hpp rename to tools/train/source/grad/ConvGrad.hpp diff --git a/tools/train/source/grad/InterpGrad.cpp b/tools/train/source/grad/InterpGrad.cpp new file mode 100644 index 000000000..bedf497fc --- /dev/null +++ b/tools/train/source/grad/InterpGrad.cpp @@ -0,0 +1,48 @@ +// +// InterpGrad.cpp +// MNN +// +// Created by MNN on 2019/12/13. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpGrad.hpp" +using namespace std; +using namespace MNN; +using namespace MNN::Express; + +class InterpGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + auto op = expr->get(); + // FIXME, the grad may be compute a little error + auto shapeInfo = expr->inputs()[0]->getInfo(); + MNN_ASSERT(nullptr != shapeInfo && shapeInfo->dim.size() == 4); + std::vector res{nullptr}; + std::vector shapeSize(shapeInfo->dim[2], shapeInfo->dim[3]); + VARP interpShape = _Const(shapeSize.data(), {2}, NHWC); + std::unique_ptr interpOp(new OpT); + interpOp->type = OpType_Interp; + interpOp->main.type = OpParameter_Interp; + interpOp->main.value = new InterpT; + if (OpType_Resize == op->type()) { + interpOp->main.AsInterp()->alignCorners = false; + interpOp->main.AsInterp()->resizeType = 2; // Bilinear + } else { + MNN_ASSERT(OpType_Interp == op->type()); + auto originInterpParam = op->main_as_Interp(); + interpOp->main.AsInterp()->resizeType = 
originInterpParam->resizeType(); + interpOp->main.AsInterp()->alignCorners = originInterpParam->alignCorners(); + } + res[0] = Variable::create(Expr::create(interpOp.get(), {backwardOutput[0], interpShape})); + return res; + } +}; + +static const auto gRegister = []() { + static InterpGrad _c; + OpGrad::insert((int)OpType_Interp, &_c); + OpGrad::insert((int)OpType_Resize, &_c); + return true; +}(); diff --git a/tools/train/source/grad/MatMulGrad.cpp b/tools/train/source/grad/MatMulGrad.cpp new file mode 100644 index 000000000..8bcd577a6 --- /dev/null +++ b/tools/train/source/grad/MatMulGrad.cpp @@ -0,0 +1,202 @@ +// +// MatMulGrad.cpp +// MNN +// +// Created by MNN on 2019/05/27. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "MatMulGrad.hpp" +using namespace std; +using namespace MNN; +using namespace MNN::Express; +class BatchMatMulGrad : public OpGrad { +public: + BatchMatMulGrad() { + mType = LINEAR; + } + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector res; + auto inputs = expr->inputs(); + res.resize(inputs.size()); + auto outputDiff = backwardOutput[0]; + + const bool transA = expr->get()->main_as_BatchMatMulParam()->adjX(); + const bool transB = expr->get()->main_as_BatchMatMulParam()->adjY(); + + if (!transA && !transB) { + { + // A' = C' * BT + res[0] = _BatchMatMul(outputDiff, inputs[1], false, true); + // B' = AT * C' + res[1] = _BatchMatMul(inputs[0], outputDiff, true, false); + } + } + + if (transA && !transB) { + { + // AT' = C' * BT ==> A' = B * CT' + res[0] = _BatchMatMul(inputs[1], outputDiff, false, true); + } + + { + // B' = ATT * C' = A * C' + res[1] = _BatchMatMul(inputs[0], outputDiff, false, false); + } + } + + if (!transA && transB) { + { + // A' = C' * BTT = C' * B + res[0] = _BatchMatMul(outputDiff, inputs[1], false, false); + } + + { + // BT' = AT * C' ==> B' = CT' * A + res[1] = _BatchMatMul(outputDiff, inputs[0], true, false); 
+ } + } + + if (transA && transB) { + { + // AT' = C' * BTT ==> A' = BT * CT' + res[0] = _BatchMatMul(inputs[1], outputDiff, true, true); + } + + { + // BT' = ATT * C' ==> B' = CT' * AT + res[1] = _BatchMatMul(outputDiff, inputs[0], true, true); + } + } + + return res; + } +}; +class MatMulGrad : public OpGrad { +public: + MatMulGrad() { + mType = LINEAR; + } + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector res; + auto inputs = expr->inputs(); + res.resize(inputs.size()); + auto outputDiff = backwardOutput[0]; + + const bool transA = expr->get()->main_as_MatMul()->transposeA(); + const bool transB = expr->get()->main_as_MatMul()->transposeB(); + + if (!transA && !transB) { + { + // A' = C' * BT + unique_ptr newOp(new OpT); + newOp->type = OpType_MatMul; + newOp->main.type = OpParameter_MatMul; + newOp->main.value = new MatMulT; + newOp->main.AsMatMul()->transposeB = true; + auto expr = Expr::create(std::move(newOp), {outputDiff, inputs[1]}); + res[0] = Variable::create(expr); + } + + { + // B' = AT * C' + unique_ptr newOp(new OpT); + newOp->type = OpType_MatMul; + newOp->main.type = OpParameter_MatMul; + newOp->main.value = new MatMulT; + newOp->main.AsMatMul()->transposeA = true; + auto expr = Expr::create(std::move(newOp), {inputs[0], outputDiff}); + res[1] = Variable::create(expr); + } + } + + if (transA && !transB) { + { + // AT' = C' * BT ==> A' = B * CT' + unique_ptr newOp(new OpT); + newOp->type = OpType_MatMul; + newOp->main.type = OpParameter_MatMul; + newOp->main.value = new MatMulT; + newOp->main.AsMatMul()->transposeA = false; + newOp->main.AsMatMul()->transposeB = true; + auto expr = Expr::create(std::move(newOp), {inputs[1], outputDiff}); + res[0] = Variable::create(expr); + } + + { + // B' = ATT * C' = A * C' + unique_ptr newOp(new OpT); + newOp->type = OpType_MatMul; + newOp->main.type = OpParameter_MatMul; + newOp->main.value = new MatMulT; + 
newOp->main.AsMatMul()->transposeA = false; + newOp->main.AsMatMul()->transposeB = false; + auto expr = Expr::create(std::move(newOp), {inputs[0], outputDiff}); + res[1] = Variable::create(expr); + } + } + + if (!transA && transB) { + { + // A' = C' * BTT = C' * B + unique_ptr newOp(new OpT); + newOp->type = OpType_MatMul; + newOp->main.type = OpParameter_MatMul; + newOp->main.value = new MatMulT; + newOp->main.AsMatMul()->transposeA = false; + newOp->main.AsMatMul()->transposeB = false; + auto expr = Expr::create(std::move(newOp), {outputDiff, inputs[1]}); + res[0] = Variable::create(expr); + } + + { + // BT' = AT * C' ==> B' = CT' * A + unique_ptr newOp(new OpT); + newOp->type = OpType_MatMul; + newOp->main.type = OpParameter_MatMul; + newOp->main.value = new MatMulT; + newOp->main.AsMatMul()->transposeA = true; + newOp->main.AsMatMul()->transposeB = false; + auto expr = Expr::create(std::move(newOp), {outputDiff, inputs[0]}); + res[1] = Variable::create(expr); + } + } + + if (transA && transB) { + { + // AT' = C' * BTT ==> A' = BT * CT' + unique_ptr newOp(new OpT); + newOp->type = OpType_MatMul; + newOp->main.type = OpParameter_MatMul; + newOp->main.value = new MatMulT; + newOp->main.AsMatMul()->transposeA = true; + newOp->main.AsMatMul()->transposeB = true; + auto expr = Expr::create(std::move(newOp), {inputs[1], outputDiff}); + res[0] = Variable::create(expr); + } + + { + // BT' = ATT * C' ==> B' = CT' * AT + unique_ptr newOp(new OpT); + newOp->type = OpType_MatMul; + newOp->main.type = OpParameter_MatMul; + newOp->main.value = new MatMulT; + newOp->main.AsMatMul()->transposeA = true; + newOp->main.AsMatMul()->transposeB = true; + auto expr = Expr::create(std::move(newOp), {outputDiff, inputs[0]}); + res[1] = Variable::create(expr); + } + } + + return res; + } +}; +static const auto gRegister = []() { + static MatMulGrad _c; + OpGrad::insert(OpType_MatMul, &_c); + static BatchMatMulGrad _d; + OpGrad::insert(OpType_BatchMatMul, &_d); + return true; +}(); diff 
--git a/tools/train/source/transform/MatMulGrad.hpp b/tools/train/source/grad/MatMulGrad.hpp similarity index 100% rename from tools/train/source/transform/MatMulGrad.hpp rename to tools/train/source/grad/MatMulGrad.hpp diff --git a/tools/train/source/grad/OpGrad.cpp b/tools/train/source/grad/OpGrad.cpp new file mode 100644 index 000000000..b761670d5 --- /dev/null +++ b/tools/train/source/grad/OpGrad.cpp @@ -0,0 +1,105 @@ +// +// OpGrad.cpp +// MNN +// +// Created by MNN on 2019/05/05. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpGrad.hpp" +using namespace std; +using namespace MNN::Express; +namespace MNN { +static std::map& getConverter() { + static std::map gConverterMap; + return gConverterMap; +} + +OpGrad* OpGrad::get(int type) { + auto& converterMap = getConverter(); + auto iter = converterMap.find(type); + if (iter != converterMap.end()) { + return iter->second; + } + return nullptr; +} + +void OpGrad::insert(int type, OpGrad* converter) { + auto& converterMap = getConverter(); + converterMap.insert(std::make_pair(type, converter)); +} + +std::map OpGrad::grad(VARP loss, const std::set& parameters) { + std::map> backwardMap; + { + auto shape = loss->getInfo(); + MNN_ASSERT(shape->size == 1); + auto init = _Const(1.0f, shape->dim, shape->order); + backwardMap[loss->expr().first] = std::vector{init}; + } + auto executeOrder = Variable::getExecuteOrder({loss}); + for (auto iter = executeOrder.rbegin(); iter != executeOrder.rend(); iter++) { + auto expr = *iter; + auto& inputs = expr->inputs(); + if (backwardMap.find(expr) == backwardMap.end()) { + continue; + } + if (nullptr == expr->get()) { + continue; + } + auto grad = OpGrad::get(expr->get()->type()); + if (nullptr == grad) { + // MNN_PRINT("Can't grad for %s, %d\n", expr->name().c_str(), expr->get()->type()); + continue; + } + std::vector outputs(expr->outputSize()); + for (int i = 0; i < expr->outputSize(); ++i) { + outputs[i] = Variable::create(expr, i); + } + auto inputGrad = 
grad->onGrad(expr, outputs, backwardMap[expr]); + auto empty = true; + for (auto grad : inputGrad) { + if (nullptr != grad) { + empty = false; + break; + } + } + if (empty) { + MNN_PRINT("Can't grad for %s, %d\n", expr->name().c_str(), expr->get()->type()); + continue; + } + MNN_ASSERT(inputGrad.size() <= inputs.size()); + for (int i = 0; i < inputGrad.size(); ++i) { + auto inputExpr = inputs[i]->expr().first; + auto index = inputs[i]->expr().second; + auto backward = inputGrad[i]; + if (nullptr == backward) { + continue; + } + if (backwardMap.find(inputExpr) == backwardMap.end()) { + backwardMap.insert(std::make_pair(inputExpr, std::vector(inputExpr->outputSize()))); + } + auto& inputVarMap = backwardMap[inputExpr]; + if (nullptr == inputVarMap[index]) { + inputVarMap[index] = backward; + } else { + inputVarMap[index] = _Add(inputVarMap[index], backward); + } + } + } + std::map grads; + std::map parametersExpr; + for (auto p : parameters) { + parametersExpr.insert(std::make_pair(p->expr().first.get(), p)); + } + for (auto iter : backwardMap) { + auto expr = iter.first.get(); + if (parametersExpr.find(expr) != parametersExpr.end()) { + auto parameter = parametersExpr[expr]; + grads[parameter] = iter.second[parameter->expr().second]; + } + } + return grads; +} + +} // namespace MNN diff --git a/tools/train/source/grad/OpGrad.hpp b/tools/train/source/grad/OpGrad.hpp new file mode 100644 index 000000000..c9049b0cf --- /dev/null +++ b/tools/train/source/grad/OpGrad.hpp @@ -0,0 +1,42 @@ +// +// OpGrad.hpp +// MNN +// +// Created by MNN on 2019/05/05. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef OpGrad_hpp +#define OpGrad_hpp +#include +#include +#include +#include +#include +#include "MNN_generated.h" + +namespace MNN { +class MNN_PUBLIC OpGrad { +public: + enum Type { LINEAR, SEMI_LINEAR, NO_LINEAR }; + + OpGrad() = default; + virtual ~OpGrad() = default; + + Type type() const { + return mType; + } + + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) = 0; + + static OpGrad* get(int type); + static void insert(int type, OpGrad* creator); + static std::map grad(Express::VARP loss, const std::set& parameters); + +protected: + Type mType = LINEAR; +}; +} // namespace MNN + +#endif diff --git a/tools/train/source/grad/PermuteGrad.cpp b/tools/train/source/grad/PermuteGrad.cpp new file mode 100644 index 000000000..0886c70f8 --- /dev/null +++ b/tools/train/source/grad/PermuteGrad.cpp @@ -0,0 +1,76 @@ +// +// PermuteGrad.cpp +// MNN +// +// Created by MNN on 2019/12/11. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpGrad.hpp" +using namespace std; +using namespace MNN; +using namespace MNN::Express; + +class TransposeGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + auto shapeInfo = expr->inputs()[1]->getInfo(); + auto shape = expr->inputs()[1]->readMap(); + std::vector res{nullptr}; + if (nullptr == shape || nullptr == shapeInfo) { + MNN_ERROR("Can't get shape info\n"); + return res; + } + MNN_ASSERT(nullptr != shape); + auto dimSize = shapeInfo->size; + std::vector dims(dimSize); + for (int i = 0; i < dimSize; ++i) { + for (int j = 0; j < dimSize; ++j) { + if (shape[j] == i) { + dims[i] = j; + break; + } + } + } + res[0] = _Transpose(backwardOutput[0], dims); + return res; + } +}; + +class PermuteGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + MNN_ASSERT(expr->inputs().size() == 1); + auto op = expr->get(); + auto shape = op->main_as_Permute()->dims(); + std::vector res{nullptr}; + MNN_ASSERT(nullptr != shape); + std::unique_ptr permuteOp(new OpT); + permuteOp->type = OpType_Permute; + permuteOp->main.type = OpParameter_Permute; + permuteOp->main.value = new PermuteT; + auto dimSize = shape->size(); + std::vector dims(dimSize); + for (int i = 0; i < dimSize; ++i) { + for (int j = 0; j < dimSize; ++j) { + if (shape->data()[j] == i) { + dims[i] = j; + break; + } + } + } + permuteOp->main.AsPermute()->dims = dims; + res[0] = Variable::create(Expr::create(permuteOp.get(), {backwardOutput[0]})); + return res; + } +}; + +static const auto gRegister = []() { + static PermuteGrad _c; + OpGrad::insert((int)OpType_Permute, &_c); + static TransposeGrad _d; + OpGrad::insert((int)OpType_Transpose, &_d); + return true; +}(); diff --git a/tools/train/source/transform/PoolGrad.cpp 
b/tools/train/source/grad/PoolGrad.cpp similarity index 62% rename from tools/train/source/transform/PoolGrad.cpp rename to tools/train/source/grad/PoolGrad.cpp index 627c6efbb..8cc3f2dd7 100644 --- a/tools/train/source/transform/PoolGrad.cpp +++ b/tools/train/source/grad/PoolGrad.cpp @@ -7,7 +7,7 @@ // #include "PoolGrad.hpp" -#include "Macro.h" +#include "core/Macro.h" using namespace std; using namespace MNN; using namespace MNN::Express; @@ -18,17 +18,19 @@ class PoolGrad : public OpGrad { mType = SEMI_LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector result{nullptr}; + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector result(1, nullptr); auto outputDiff = backwardOutput[0]; std::unique_ptr forwardOp(expr->get()->UnPack()); unique_ptr newOp(new OpT); - newOp->type = OpType_PoolGrad; - auto copyP = new PoolT(*forwardOp->main.AsPool()); - newOp->main.type = OpParameter_Pool; - newOp->main.value = copyP; - + newOp->type = OpType_PoolGrad; + auto copyP = new PoolT(*forwardOp->main.AsPool()); + newOp->main.type = OpParameter_Pool; + newOp->main.value = copyP; + result[0] = Variable::create(Expr::create(std::move(newOp), {expr->inputs()[0], output[0], outputDiff})); + result[0]->setName(expr->name() + "_Grad"); return result; } }; diff --git a/tools/train/source/transform/PoolGrad.hpp b/tools/train/source/grad/PoolGrad.hpp similarity index 100% rename from tools/train/source/transform/PoolGrad.hpp rename to tools/train/source/grad/PoolGrad.hpp diff --git a/tools/train/source/grad/ReduceGrad.cpp b/tools/train/source/grad/ReduceGrad.cpp new file mode 100644 index 000000000..c638aea44 --- /dev/null +++ b/tools/train/source/grad/ReduceGrad.cpp @@ -0,0 +1,88 @@ +// +// ReduceGrad.cpp +// MNN +// +// Created by MNN on 2019/05/24. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpGrad.hpp" +using namespace std; +using namespace MNN; +using namespace MNN::Express; + +class ReduceGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector result; + auto inputs = expr->inputs(); + result.resize(inputs.size()); + std::unique_ptr forwardOp(expr->get()->UnPack()); + std::vector reductionDims = forwardOp->main.AsReductionParam()->dim; + auto keepDim = forwardOp->main.AsReductionParam()->keepDims; + if (inputs.size() > 1) { + reductionDims.clear(); + auto ptr = inputs[1]->readMap(); + auto shape = inputs[1]->getInfo(); + for (int i = 0; i < shape->size; ++i) { + reductionDims.emplace_back(ptr[i]); + } + inputs[1]->unMap(); + } + if (reductionDims.empty()) { + auto shape = inputs[0]->getInfo(); + for (int i = 0; i < shape->dim.size(); ++i) { + reductionDims.emplace_back(i); + } + } + VARP init; + { + unique_ptr newOp(new OpT); + newOp->name = forwardOp->name + "__Zero"; + newOp->type = OpType_ZerosLike; + init = Variable::create(Expr::create(std::move(newOp), {inputs[0]})); + } + auto outputDiff = backwardOutput[0]; + + // implement other reduction op's grad below + if (forwardOp->main.AsReductionParam()->operation == ReductionType_SUM) { + // do not need to modify grads, just copy them, so, pass + } + if (forwardOp->main.AsReductionParam()->operation == ReductionType_MEAN) { + float gradCount = outputDiff->getInfo()->size; + float inputCount = inputs[0]->getInfo()->size; + outputDiff = _Multiply(outputDiff, _Const(gradCount / inputCount)); + } + + // this should be common operations, to expand grads to inputs shape + if (!keepDim) { + // Create Unsqueeze Op + unique_ptr newOp(new OpT); + newOp->name = forwardOp->name + "__Unsqueeze"; + newOp->type = OpType_Unsqueeze; + newOp->main.type = OpParameter_SqueezeParam; + newOp->main.value = new SqueezeParamT; + 
newOp->main.AsSqueezeParam()->squeezeDims = reductionDims; + outputDiff = Variable::create(Expr::create(std::move(newOp), {outputDiff})); + } + result[0] = _Add(init, outputDiff); + + return result; + } +}; +class FillGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + return {backwardOutput[0].sum({})}; + } +}; + +static const auto gRegister = []() { + static ReduceGrad _c; + OpGrad::insert(OpType_Reduction, &_c); + static FillGrad _d; + OpGrad::insert(OpType_Fill, &_d); + return true; +}(); diff --git a/tools/train/source/grad/ReluGrad.cpp b/tools/train/source/grad/ReluGrad.cpp new file mode 100644 index 000000000..3a10abdfb --- /dev/null +++ b/tools/train/source/grad/ReluGrad.cpp @@ -0,0 +1,59 @@ +// +// ReluGrad.cpp +// MNN +// +// Created by MNN on 2019/04/22. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "ReluGrad.hpp" +#include "core/Macro.h" +using namespace std; +using namespace MNN; + +class ReluGrad : public OpGrad { +public: + ReluGrad() { + mType = SEMI_LINEAR; + } + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector result(1, nullptr); + + unique_ptr newOp(new OpT); + newOp->type = OpType_ReluGrad; + newOp->main.type = OpParameter_Relu; + newOp->main.value = new ReluT; + + result[0] = + Express::Variable::create(Express::Expr::create(std::move(newOp), {expr->inputs()[0], backwardOutput[0]})); + result[0]->setName(expr->name() + "_Grad"); + + return result; + } +}; +class Relu6Grad : public OpGrad { +public: + Relu6Grad() { + mType = SEMI_LINEAR; + } + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector result{nullptr}; + + unique_ptr newOp(new OpT); + newOp->type = OpType_Relu6Grad; + newOp->main.type = OpParameter_NONE; + result[0] = + 
Express::Variable::create(Express::Expr::create(std::move(newOp), {expr->inputs()[0], backwardOutput[0]})); + result[0]->setName(expr->name() + "_Grad"); + return result; + } +}; +static const auto gRegister = []() { + static ReluGrad _c; + OpGrad::insert(OpType_ReLU, &_c); + static Relu6Grad _d; + OpGrad::insert(OpType_ReLU6, &_d); + return true; +}(); diff --git a/tools/train/source/transform/ReluGrad.hpp b/tools/train/source/grad/ReluGrad.hpp similarity index 100% rename from tools/train/source/transform/ReluGrad.hpp rename to tools/train/source/grad/ReluGrad.hpp diff --git a/tools/train/source/transform/ReshapeGrad.cpp b/tools/train/source/grad/ReshapeGrad.cpp similarity index 62% rename from tools/train/source/transform/ReshapeGrad.cpp rename to tools/train/source/grad/ReshapeGrad.cpp index ba81d6a03..de4baf233 100644 --- a/tools/train/source/transform/ReshapeGrad.cpp +++ b/tools/train/source/grad/ReshapeGrad.cpp @@ -7,24 +7,25 @@ // #include "ReshapeGrad.hpp" -#include "Macro.h" +#include "core/Macro.h" using namespace std; using namespace MNN; using namespace MNN::Express; class ReshapeGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector result; + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { auto inputs = expr->inputs(); - result.resize(inputs.size()); + std::vector result(inputs.size(), nullptr); // Create Shape Op and Tensor unique_ptr newOp(new OpT); - newOp->type = OpType_Shape; - auto shape = Variable::create(Expr::create(std::move(newOp), {inputs[0]})); + newOp->type = OpType_Shape; + auto shape = Variable::create(Expr::create(std::move(newOp), {inputs[0]})); // Create Reshape Op result[0] = _Reshape(backwardOutput[0], shape); + result[0]->setName(expr->name() + "_Grad"); return result; } }; @@ -33,5 +34,6 @@ static const auto gRegister = []() { static 
ReshapeGrad _c; OpGrad::insert(OpType_Reshape, &_c); OpGrad::insert(OpType_Squeeze, &_c); + OpGrad::insert(OpType_Unsqueeze, &_c); return true; }(); diff --git a/tools/train/source/transform/ReshapeGrad.hpp b/tools/train/source/grad/ReshapeGrad.hpp similarity index 100% rename from tools/train/source/transform/ReshapeGrad.hpp rename to tools/train/source/grad/ReshapeGrad.hpp diff --git a/tools/train/source/transform/SelectGrad.cpp b/tools/train/source/grad/SelectGrad.cpp similarity index 76% rename from tools/train/source/transform/SelectGrad.cpp rename to tools/train/source/grad/SelectGrad.cpp index c97b7c536..42972dc8c 100644 --- a/tools/train/source/transform/SelectGrad.cpp +++ b/tools/train/source/grad/SelectGrad.cpp @@ -7,7 +7,7 @@ // #include "SelectGrad.hpp" -#include "Macro.h" +#include "core/Macro.h" using namespace std; using namespace MNN; using namespace MNN::Express; @@ -17,7 +17,8 @@ class SelectGrad : public OpGrad { SelectGrad() { mType = SEMI_LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { auto inputs = expr->inputs(); std::vector result(inputs.size(), nullptr); auto outputDiff = backwardOutput[0]; @@ -30,18 +31,17 @@ class SelectGrad : public OpGrad { mask->main.value = new CastParamT; mask->main.AsCastParam()->dstT = DataType_DT_FLOAT; mask->main.AsCastParam()->srcT = DataType_DT_BOOL; - + auto maskVar = Variable::create(Expr::create(std::move(mask), {inputs[0]})); // da * (x>0) - result[1] = _Mul(outputDiff, maskVar); + result[1] = _Multiply(outputDiff, maskVar); // db * -((x>0)-1) - auto one = _Const(1.0f); - auto sub = _Sub(maskVar, one); - auto neg = _Neg(sub); - result[2] = _Mul(outputDiff, neg); - + auto one = _Const(1.0f); + auto sub = _Subtract(maskVar, one); + auto neg = _Negative(sub); + result[2] = _Multiply(outputDiff, neg); } 
return result; diff --git a/tools/train/source/transform/SelectGrad.hpp b/tools/train/source/grad/SelectGrad.hpp similarity index 100% rename from tools/train/source/transform/SelectGrad.hpp rename to tools/train/source/grad/SelectGrad.hpp diff --git a/tools/train/source/grad/SliceGrad.cpp b/tools/train/source/grad/SliceGrad.cpp new file mode 100644 index 000000000..ed17f647a --- /dev/null +++ b/tools/train/source/grad/SliceGrad.cpp @@ -0,0 +1,46 @@ +// +// SliceGrad.cpp +// MNN +// +// Created by MNN on 2019/12/11. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpGrad.hpp" +using namespace std; +using namespace MNN; +using namespace MNN::Express; + +class SliceGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + MNN_ASSERT(expr->inputs().size() == 1); + auto slice = expr->get()->main_as_Slice(); + auto axis = slice->axis(); + std::vector res{nullptr}; + std::vector validBackward(backwardOutput.size()); + for (int i = 0; i < backwardOutput.size(); ++i) { + auto origin = output[i]; + if (nullptr != backwardOutput[i]) { + validBackward[i] = backwardOutput[i]; + continue; + } + auto info = origin->getInfo(); + if (nullptr == info) { + MNN_ERROR("Error for sliceGrad's %d output\n", i); + return res; + } + validBackward[i] = _Const(0.0f, info->dim, info->order); + } + res[0] = _Concat(validBackward, axis); + // FUNC_PRINT_ALL(_Sum(res[0], {})->readMap()[0], f); + return res; + } +}; + +static const auto gRegister = []() { + static SliceGrad _c; + OpGrad::insert((int)OpType_Slice, &_c); + return true; +}(); diff --git a/tools/train/source/transform/SoftmaxGrad.cpp b/tools/train/source/grad/SoftmaxGrad.cpp similarity index 62% rename from tools/train/source/transform/SoftmaxGrad.cpp rename to tools/train/source/grad/SoftmaxGrad.cpp index 09a4500a5..f7163988f 100644 --- a/tools/train/source/transform/SoftmaxGrad.cpp +++ 
b/tools/train/source/grad/SoftmaxGrad.cpp @@ -7,7 +7,7 @@ // #include "SoftmaxGrad.hpp" -#include "Macro.h" +#include "core/Macro.h" using namespace std; using namespace MNN; @@ -16,14 +16,16 @@ class SoftmaxGrad : public OpGrad { SoftmaxGrad() { mType = NO_LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector result{nullptr}; + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector result(1, nullptr); unique_ptr newOp(new OpT); - newOp->type = OpType_SoftmaxGrad; - newOp->main.type = OpParameter_Axis; - newOp->main.value = new AxisT; + newOp->type = OpType_SoftmaxGrad; + newOp->main.type = OpParameter_Axis; + newOp->main.value = new AxisT; newOp->main.AsAxis()->axis = expr->get()->main_as_Axis()->axis(); result[0] = Express::Variable::create(Express::Expr::create(std::move(newOp), {output[0], backwardOutput[0]})); + result[0]->setName(expr->name() + "_Grad"); return result; } }; diff --git a/tools/train/source/transform/SoftmaxGrad.hpp b/tools/train/source/grad/SoftmaxGrad.hpp similarity index 100% rename from tools/train/source/transform/SoftmaxGrad.hpp rename to tools/train/source/grad/SoftmaxGrad.hpp diff --git a/tools/train/source/grad/TensorConvertGrad.cpp b/tools/train/source/grad/TensorConvertGrad.cpp new file mode 100644 index 000000000..e05990446 --- /dev/null +++ b/tools/train/source/grad/TensorConvertGrad.cpp @@ -0,0 +1,28 @@ +// +// TensorConvertGrad.cpp +// MNN +// +// Created by MNN on 2019/05/04. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "TensorConvertGrad.hpp" +using namespace std; +using namespace MNN; +using namespace MNN::Express; + +class TensorConvertGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector result{nullptr}; + auto originInput = expr->inputs()[0]; + result[0] = _Convert(backwardOutput[0], originInput->getInfo()->order); + return result; + } +}; +static const auto gRegister = []() { + static TensorConvertGrad _c; + OpGrad::insert(OpType_ConvertTensor, &_c); + return true; +}(); diff --git a/tools/train/source/transform/TensorConvertGrad.hpp b/tools/train/source/grad/TensorConvertGrad.hpp similarity index 100% rename from tools/train/source/transform/TensorConvertGrad.hpp rename to tools/train/source/grad/TensorConvertGrad.hpp diff --git a/tools/train/source/grad/UnaryGrad.cpp b/tools/train/source/grad/UnaryGrad.cpp new file mode 100644 index 000000000..200bbd09d --- /dev/null +++ b/tools/train/source/grad/UnaryGrad.cpp @@ -0,0 +1,106 @@ +// +// UnaryGrad.cpp +// MNN +// +// Created by MNN on 2019/05/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "OpGrad.hpp" +#include "core/Macro.h" +using namespace std; +using namespace MNN; +using namespace MNN::Express; + +class UnaryGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::unique_ptr forwardOp(expr->get()->UnPack()); + auto outputDiff = backwardOutput[0]; + auto input = expr->inputs()[0]; + std::vector res(1, nullptr); + + switch (forwardOp->main.AsUnaryOp()->opType) { + case MNN::UnaryOpOperation_LOG1P: { + // d log(1+x) = 1/(1+x) * dx = dx / (1+x) + auto oneConst = _Const(1.0f, {}, NHWC); + auto addOne = _Add(input, oneConst); + res[0] = _Divide(outputDiff, addOne); + break; + } + case MNN::UnaryOpOperation_EXP: { + // d Exp(x) = Exp(x) * dx + res[0] = _Multiply(outputDiff, output[0]); + break; + } + case MNN::UnaryOpOperation_LOG: { + // d Log(x) = dx / x + res[0] = _Divide(outputDiff, input); + break; + } + case MNN::UnaryOpOperation_NEG: { + // d (-x) = - dx + res[0] = _Negative(outputDiff); + break; + } + case MNN::UnaryOpOperation_SQRT: { + // d (-sqrt(x)) = 0.5 / sqrt(x) * dx + auto oneConst = _Const(0.5f, {}, NHWC); + auto mul = _Multiply(outputDiff, oneConst); + res[0] = _Divide(mul, output[0]); + break; + } + case MNN::UnaryOpOperation_SQUARE: { + // d (x^2) = (x*dx + x*dx) + auto mul = _Multiply(input, outputDiff); + res[0] = _Add(mul, mul); + break; + } + default: + MNN_ASSERT(false); + return res; + } + + res[0]->setName(expr->name() + "_Grad"); + return res; + } +}; +class SigmoidGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector result(1, nullptr); + auto outputDiff = backwardOutput[0]; + + // y = (1/(1+e(-x))) , dy = y(1-y) * dx = (y*y - y)*dx + auto mul = _Multiply(output[0], output[0]); + auto sub = _Subtract(mul, output[0]); + auto grad = 
_Multiply(sub, outputDiff); + result[0] = grad; + result[0]->setName(expr->name() + "_Grad"); + return result; + } +}; + +class TanhGrad : public OpGrad { +public: + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + const std::vector& backwardOutput) override { + std::vector result{nullptr}; + auto outputDiff = backwardOutput[0]; + // d tanh(x) = (1-tanh(x)^2)dx + result[0] = (_Const(1.0f, {}, NCHW) - _Square(output[0])) * outputDiff; + return result; + } +}; + +static const auto gRegister = []() { + static UnaryGrad _c; + static SigmoidGrad _s; + static TanhGrad _t; + OpGrad::insert(OpType_UnaryOp, &_c); + OpGrad::insert(OpType_Sigmoid, &_s); + OpGrad::insert(OpType_TanH, &_t); + return true; +}(); diff --git a/tools/train/source/module/FixModule.cpp b/tools/train/source/module/FixModule.cpp new file mode 100644 index 000000000..7a1a8ddcb --- /dev/null +++ b/tools/train/source/module/FixModule.cpp @@ -0,0 +1,32 @@ +// +// FixModule.cpp +// MNN +// +// Created by MNN on 2019/12/16. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "FixModule.hpp" +#include +using namespace MNN::Express; +namespace MNN { +namespace Train { +FixModule::FixModule(std::vector output, std::vector parameters, + std::vector> inputs) { + for (auto p : parameters) { + addParameter(p); + } + mInputs = std::move(inputs); + mOutput = std::move(output); +} +std::vector FixModule::onForward(const std::vector& inputs) { + MNN_ASSERT(inputs.size() == mInputs.size()); + for (int i = 0; i < inputs.size(); ++i) { + auto var = inputs[i]; + var = _Convert(var, mInputs[i].second); + Variable::replace(mInputs[i].first, var); + } + return mOutput; +} +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/module/FixModule.hpp b/tools/train/source/module/FixModule.hpp new file mode 100644 index 000000000..785094973 --- /dev/null +++ b/tools/train/source/module/FixModule.hpp @@ -0,0 +1,29 @@ +// +// FixModule.hpp +// MNN +// +// Created by MNN on 2019/12/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef FixModule_hpp +#define FixModule_hpp +#include "Module.hpp" +namespace MNN { +namespace Train { + +class FixModule : public Module { +public: + FixModule(std::vector output, std::vector parameters, + std::vector> inputs); + virtual ~FixModule() = default; + virtual std::vector onForward(const std::vector& inputs) override; + +private: + std::vector> mInputs; + std::vector mOutput; +}; +} // namespace Train +} // namespace MNN + +#endif diff --git a/tools/train/source/module/Module.cpp b/tools/train/source/module/Module.cpp new file mode 100644 index 000000000..a4125bff4 --- /dev/null +++ b/tools/train/source/module/Module.cpp @@ -0,0 +1,87 @@ +// +// Module.cpp +// MNN +// +// Created by MNN on 2019/11/25. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "Module.hpp" +#include +#include "FixModule.hpp" +using namespace MNN::Express; +namespace MNN { +namespace Train { +Express::VARP Module::forward(Express::VARP input) { + return this->onForward({input})[0]; +} +std::set Module::parameters() const { + std::set result; + _collectParameters(result); + return result; +} + +void Module::setIsTraining(const bool isTraining) { + mIsTraining = isTraining; + for (auto c : mChildren) { + c->setIsTraining(isTraining); + } +} + +bool Module::getIsTraining() { + return mIsTraining; +} + +void Module::registerModel(const std::vector>& children) { + mChildren.insert(mChildren.begin(), children.begin(), children.end()); +} +void Module::addParameter(VARP parameter) { + mParameters.emplace_back(parameter); +} +void Module::_collectParameters(std::set& result) const { + for (auto p : mParameters) { + result.insert(p); + } + for (auto c : mChildren) { + c->_collectParameters(result); + } +} +std::shared_ptr Module::transform(const std::vector& inputs, + const std::vector& outputs) { + std::vector> inputsPair; + for (auto i : inputs) { + auto info = i->getInfo(); + if (nullptr == info) { + MNN_ERROR("Error to load inputs info for module\n"); + return nullptr; + } + inputsPair.emplace_back(std::make_pair(i, info->order)); + } + + // Load Parameters + auto order = Variable::getExecuteOrder(outputs); + std::vector parameters; + for (auto v : order) { + if (v->get() != nullptr) { + continue; + } + auto type = v->inputType(); + if (VARP::TRAINABLE == type) { + parameters.emplace_back(Variable::create(v, 0)); + } + } + + // FIXME: Find better way to tread NC4HW4 outputs + std::vector newOutputs = outputs; + for (auto& v : newOutputs) { + if (v->getInfo() != nullptr) { + if (v->getInfo()->order == NC4HW4) { + v = _Convert(v, NCHW); + } + } + } + std::shared_ptr m(new FixModule(newOutputs, parameters, inputsPair)); + return m; +} +} // namespace Train +} // namespace MNN 
diff --git a/tools/train/source/module/Module.hpp b/tools/train/source/module/Module.hpp new file mode 100644 index 000000000..80c378c30 --- /dev/null +++ b/tools/train/source/module/Module.hpp @@ -0,0 +1,40 @@ +// +// Module.hpp +// MNN +// +// Created by MNN on 2019/11/25. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MNN_Train_Module_hpp +#define MNN_Train_Module_hpp +#include +#include +namespace MNN { +namespace Train { +class MNN_PUBLIC Module { +public: + Module() = default; + virtual ~Module() = default; + virtual std::vector onForward(const std::vector& inputs) = 0; + Express::VARP forward(Express::VARP input); + std::set parameters() const; + void setIsTraining(const bool isTraining); + bool getIsTraining(); + static std::shared_ptr transform(const std::vector& inputs, + const std::vector& outputs); + +protected: + void registerModel(const std::vector>& children); + void addParameter(Express::VARP parameter); + +private: + void _collectParameters(std::set& result) const; + std::vector> mChildren; + std::vector mParameters; + bool mIsTraining = true; +}; +} // namespace Train +} // namespace MNN + +#endif diff --git a/tools/train/source/module/NN.cpp b/tools/train/source/module/NN.cpp new file mode 100644 index 000000000..a8a4796fa --- /dev/null +++ b/tools/train/source/module/NN.cpp @@ -0,0 +1,231 @@ +// +// NN.cpp +// MNN +// +// Created by MNN on 2019/11/25. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "NN.hpp" +#include "Distributions.hpp" +#include "FixModule.hpp" +#include "Initializer.hpp" +#include "RandomGenerator.hpp" + +using namespace MNN::Express; +namespace MNN { +namespace Train { +class DropoutModule : public Module { +public: + DropoutModule(const float dropRatio) { + mDropRatio = dropRatio; + } + + virtual std::vector onForward(const std::vector& inputs) override { + Express::VARP x = inputs[0]; + + if (getIsTraining()) { + float scale = 1. / (1. 
- mDropRatio); + auto mask = _Input(x->getInfo()->dim, x->getInfo()->order, x->getInfo()->type); + auto maskPtr = mask->writeMap(); + auto eltSize = x->getInfo()->size; + Distributions::uniform(eltSize, 0, 1, maskPtr, RandomGenerator::generator()); + for (int i = 0; i < eltSize; i++) { + maskPtr[i] = maskPtr[i] < mDropRatio ? 0.0f : scale; + } + x = x * mask; + } + + return {x}; + } + +private: + float mDropRatio; +}; + +class BatchNormModule : public Module { +public: + BatchNormModule(const int channels, const int dims = 4, const float m = 0.999, const float e = 1e-5) { + mMomentum = m; + mEps = e; + mChannels = channels; + + MNN_ASSERT((dims == 2) || (dims == 4)); + + std::vector statShape; + std::vector reductionDims; + if (dims == 2) { + statShape = {channels}; + mReductionDims = {0}; + } + if (dims == 4) { + statShape = {channels, 1 , 1}; + mReductionDims = {0, 2, 3}; + } + + mScale = _Const(1.0f, statShape, NCHW); + mBias = _Const(0.0f, statShape, NCHW); + mRunningMean = _Const(0.0f, statShape, NCHW); + mRunningVariance = _Const(0.0f, statShape, NCHW); + + addParameter(mScale); + addParameter(mBias); + } + + virtual std::vector onForward(const std::vector& inputs) override { + Express::VARP x = inputs[0]; + + MNN_ASSERT(x->getInfo()->dim[1] == mChannels); + + auto dimFormat = x->getInfo()->order; + if (dimFormat == NC4HW4 || dimFormat == NHWC) { + x = _Convert(x, NCHW); + } + + VARP outputData = nullptr; + + if (getIsTraining()) { + auto sampleMean = _ReduceMean(x, mReductionDims, true); // mean for each channel in the batch + auto sampleVar = _ReduceMean(_Square(_Subtract(x, sampleMean)), mReductionDims, true); // variance for each channel in the batch + auto rSampleStd = _Const(1.0f) / _Sqrt(sampleVar + _Const(mEps)); + auto normalizedData = _Subtract(x, sampleMean) * rSampleStd; + outputData = normalizedData * mScale + mBias; + + mRunningMean = _Const(mMomentum) * mRunningMean + _Const(1 - mMomentum) * sampleMean; + mRunningMean.fix(Express::VARP::CONST); 
+ mRunningVariance = _Const(mMomentum) * mRunningVariance + _Const(1 - mMomentum) * sampleVar; + mRunningVariance.fix(Express::VARP::CONST); + } else { + auto rStd = _Const(1.0f) / _Sqrt(mRunningVariance + _Const(mEps)); + auto normalizedData = _Subtract(x, mRunningMean) * rStd; + outputData = normalizedData * mScale + mBias; + } + + if (dimFormat != NCHW) { + outputData = _Convert(outputData, dimFormat); + } + + return {outputData}; + } + +private: + float mMomentum = 0.999; + float mEps = 1e-5; + VARP mScale = nullptr; + VARP mBias = nullptr; + VARP mRunningMean = nullptr; + VARP mRunningVariance = nullptr; + int mChannels; + std::vector mReductionDims; +}; + +void NN::ConvOption::reset(int size) { + stride = std::vector(size, 1); + channel = std::vector(size, 0); + kernelSize = std::vector(size, 1); + dilate = std::vector(size, 1); + padMode = VALID; + pads = std::vector(size, 0); + depthwise = false; +} + +static std::tuple _initParameters(const NN::ConvOption& option, bool hasBias, + std::shared_ptr weightInit, + std::shared_ptr biasInit) { + std::tuple defaultRes; + if (nullptr == weightInit) { + weightInit.reset(Initializer::xavier()); + } + if (nullptr == biasInit) { + biasInit.reset(Initializer::constValue(0.0f)); + } + VARP weight; + int group = 1; + if (option.depthwise) { + if (option.channel[1] != option.channel[0]) { + MNN_ERROR("Can't support not the same channel for convolution depthwise\n"); + return defaultRes; + } + weight = weightInit->createConstVar({1, option.channel[0], option.kernelSize[1], option.kernelSize[0]}, NCHW); + group = option.channel[0]; + } else { + weight = weightInit->createConstVar( + {option.channel[1], option.channel[0], option.kernelSize[1], option.kernelSize[0]}, NCHW); + } + VARP bias; + if (hasBias) { + bias = biasInit->createConstVar({option.channel[1]}, NCHW); + } + return std::make_tuple(weight, bias, group); +} + +std::shared_ptr NN::ConvTranspose(const ConvOption& option, bool hasBias, + std::shared_ptr weightInit, 
+ std::shared_ptr biasInit) { + VARP input = _Input({1, option.channel[0], 1, 1}, NC4HW4); + auto tuple = _initParameters(option, hasBias, weightInit, biasInit); + auto weight = std::get<0>(tuple); + if (nullptr == weight) { + return nullptr; + } + if (!option.depthwise) { + weight = _Transpose(weight, {1, 0, 2, 3}); + weight.fix(VARP::TRAINABLE); + } + auto bias = std::get<1>(tuple); + auto group = std::get<2>(tuple); + if (nullptr != bias) { + auto tempOutput = _Deconv(weight, bias, input, option.padMode, option.stride, option.dilate, group); + return std::shared_ptr(new FixModule({tempOutput}, {weight, bias}, {{input, NC4HW4}})); + } + auto tempOutput = _Deconv(weight, nullptr, input, option.padMode, option.stride, option.dilate, group); + return std::shared_ptr(new FixModule({tempOutput}, {weight}, {{input, NC4HW4}})); +} + +std::shared_ptr NN::Conv(const ConvOption& option, bool hasBias, std::shared_ptr weightInit, + std::shared_ptr biasInit) { + VARP input = _Input({1, option.channel[0], 1, 1}, NC4HW4); + auto tuple = _initParameters(option, hasBias, weightInit, biasInit); + auto weight = std::get<0>(tuple); + if (nullptr == weight) { + return nullptr; + } + auto bias = std::get<1>(tuple); + auto group = std::get<2>(tuple); + if (nullptr != bias) { + auto tempOutput = _Conv(weight, bias, input, option.padMode, option.stride, option.dilate, group); + return std::shared_ptr(new FixModule({tempOutput}, {weight, bias}, {{input, NC4HW4}})); + } + auto tempOutput = _Conv(weight, nullptr, input, option.padMode, option.stride, option.dilate, group); + return std::shared_ptr(new FixModule({tempOutput}, {weight}, {{input, NC4HW4}})); +} + +std::shared_ptr NN::Linear(int l, int t, bool hasBias, std::shared_ptr weightInit, + std::shared_ptr biasInit) { + if (nullptr == weightInit) { + weightInit.reset(Initializer::xavier()); + } + if (nullptr == biasInit) { + biasInit.reset(Initializer::constValue(0.0f)); + } + auto weight = weightInit->createConstVar({t, l}, NCHW); + 
auto input = _Input({l}, NCHW); + auto output = _MatMul(input, weight, false, true); + if (!hasBias) { + return std::shared_ptr(new FixModule({output}, {weight}, {{input, NCHW}})); + } + auto bias = biasInit->createConstVar({1, t}, NCHW); + output = _Add(output, bias); + return std::shared_ptr(new FixModule({output}, {weight, bias}, {{input, NCHW}})); +} + +std::shared_ptr NN::Dropout(const float dropRatio) { + return std::shared_ptr(new DropoutModule(dropRatio)); +} + +std::shared_ptr NN::BatchNorm(const int channels, const int dims, const float m, const float e) { + return std::shared_ptr(new BatchNormModule(channels, dims, m, e)); +} + +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/module/NN.hpp b/tools/train/source/module/NN.hpp new file mode 100644 index 000000000..b9b6bf9eb --- /dev/null +++ b/tools/train/source/module/NN.hpp @@ -0,0 +1,48 @@ +// +// NN.hpp +// MNN +// +// Created by MNN on 2019/11/25. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MNN_Train_NN_hpp +#define MNN_Train_NN_hpp +#include +#include "Distributions.hpp" +#include "Module.hpp" +namespace MNN { +namespace Train { +class Initializer; + +class MNN_PUBLIC NN { +public: + struct ConvOption { + Express::INTS kernelSize = {1, 1}; + Express::INTS channel = {0, 0}; + Express::INTS stride = {1, 1}; + Express::INTS dilate = {1, 1}; + Express::PaddingMode padMode = Express::VALID; + Express::INTS pads = {0, 0}; + bool depthwise = false; + + void reset(int size = 2); + }; + static std::shared_ptr Conv(const ConvOption& option, bool bias = true, + std::shared_ptr weightInit = nullptr, + std::shared_ptr biasInit = nullptr); + static std::shared_ptr ConvTranspose(const ConvOption& option, bool bias = true, + std::shared_ptr weightInit = nullptr, + std::shared_ptr biasInit = nullptr); + static std::shared_ptr Linear(int l, int t, bool hasBias = true, + std::shared_ptr weightInit = nullptr, + std::shared_ptr biasInit = nullptr); + static std::shared_ptr 
Dropout(const float dropRatio); + static std::shared_ptr BatchNorm(const int channels, const int dims = 4, + const float m = 0.999, const float e = 1e-5); +}; + +} // namespace Train +} // namespace MNN + +#endif diff --git a/tools/train/source/optimizer/ADAM.cpp b/tools/train/source/optimizer/ADAM.cpp new file mode 100644 index 000000000..dc171fd25 --- /dev/null +++ b/tools/train/source/optimizer/ADAM.cpp @@ -0,0 +1,65 @@ +// +// ADAM.cpp +// MNN +// +// Created by MNN on 2019/12/03. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "ADAM.hpp" +#include "OpGrad.hpp" + +using namespace MNN::Express; + +namespace MNN { +namespace Train { + +void ADAM::setMomentum2(float momentum2) { + mMomentum2 = momentum2; +} + +void ADAM::setEps(float eps) { + mEps = eps; +} + +void ADAM::append(const std::set& parameters) { + for (auto p : parameters) { + mParameters.insert(p); + mHistory[p] = _Const(0.0f, p->getInfo()->dim, p->getInfo()->order); + mHistory2[p] = _Const(0.0f, p->getInfo()->dim, p->getInfo()->order); + } +} + +void ADAM::remove(const std::set& parameters) { + for (auto p : parameters) { + mParameters.erase(p); + mHistory.erase(p); + mHistory2.erase(p); + } +} + +Express::VARP ADAM::computeUpdateValue(Express::VARP param, Express::VARP grad) { + auto lr = _Const(mLearningRate, {}, NCHW); + auto step = _Const(mStep, {}, NCHW); + auto beta1 = _Const(mMomentum, {}, NCHW); + auto beta2 = _Const(mMomentum2, {}, NCHW); + auto eps = _Const(mEps, {}, NCHW); + // auto m = mHistory[param]; + // auto v = mHistory2[param]; + + auto correction = _Sqrt(_Const(1.0f, {}, NCHW) - _Pow(beta2, step)) / (_Const(1.0f, {}, NCHW) - _Pow(beta1, step)); + + mHistory[param] = beta1 * mHistory[param] + (_Const(1.0f, {}, NCHW) - beta1) * grad; + mHistory[param].fix(Express::VARP::CONST); + + mHistory2[param] = beta2 * mHistory2[param] + (_Const(1.0f, {}, NCHW) - beta2) * _Square(grad); + mHistory2[param].fix(Express::VARP::CONST); + + auto updateValue = lr * correction * 
(mHistory[param] / (_Sqrt(mHistory2[param]) + eps)); + updateValue.fix(Express::VARP::CONST); + + return updateValue; +} + +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/optimizer/ADAM.hpp b/tools/train/source/optimizer/ADAM.hpp new file mode 100644 index 000000000..bb78114b6 --- /dev/null +++ b/tools/train/source/optimizer/ADAM.hpp @@ -0,0 +1,42 @@ +// +// ADAM.hpp +// MNN +// +// Created by MNN on 2019/12/03. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef ADAM_hpp +#define ADAM_hpp + +#include +#include +#include +#include "ParameterOptimizer.hpp" +#include "SGD.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC ADAM : public SGD { +public: + void append(const std::set& parameters); + + void remove(const std::set& parameters); + + Express::VARP computeUpdateValue(Express::VARP param, Express::VARP grad) override; + + void setMomentum2(float momentum2); + + void setEps(float eps); + +private: + float mMomentum2 = 0.999; // default 0.999 + float mEps = 1e-8; + std::map mHistory2; +}; + +} // namespace Train +} // namespace MNN + +#endif // ADAM_hpp diff --git a/tools/train/source/optimizer/LearningRateScheduler.cpp b/tools/train/source/optimizer/LearningRateScheduler.cpp new file mode 100644 index 000000000..2a472912f --- /dev/null +++ b/tools/train/source/optimizer/LearningRateScheduler.cpp @@ -0,0 +1,46 @@ +// +// LearningRateScheduler.cpp +// MNN +// +// Created by MNN on 2019/12/05. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "LearningRateScheduler.hpp" +#include +#include + +namespace MNN { +namespace Train { + +float LrScheduler::multiStep(const float baseLr, const int step, std::vector stepIterations, + std::vector lrMulti) { + float lr = baseLr; + std::sort(stepIterations.begin(), stepIterations.end()); + for (int i = 0; i < stepIterations.size(); i++) { + if (step == stepIterations[i]) { + float multi; + if ((i + 1) > lrMulti.size()) { + multi = lrMulti[lrMulti.size() - 1]; + } else { + multi = lrMulti[i]; + } + lr *= multi; + } + } + + return lr; +} + +float LrScheduler::inv(const float baseLr, const int step, const float gamma, const float power) { + float lr = baseLr * std::pow(1 + gamma * step, -power); + return lr; +} + +float LrScheduler::exp(const float baseLr, const int step, const float gamma) { + float lr = baseLr * std::pow(gamma, step); + return lr; +} + +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/optimizer/LearningRateScheduler.hpp b/tools/train/source/optimizer/LearningRateScheduler.hpp new file mode 100644 index 000000000..385016b46 --- /dev/null +++ b/tools/train/source/optimizer/LearningRateScheduler.hpp @@ -0,0 +1,31 @@ +// +// LearningRateScheduler.hpp +// MNN +// +// Created by MNN on 2019/12/03. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef LearningRateScheduler_hpp +#define LearningRateScheduler_hpp + +#include +#include + +namespace MNN { +namespace Train { + +class MNN_PUBLIC LrScheduler { +public: + static float multiStep(const float baseLr, const int step, std::vector stepIterations, + std::vector lrMulti); + + static float inv(const float baseLr, const int step, const float gamma, const float power); + + static float exp(const float baseLr, const int step, const float gamma); +}; + +} // namespace Train +} // namespace MNN + +#endif // LearningRateScheduler_hpp diff --git a/tools/train/source/optimizer/Loss.cpp b/tools/train/source/optimizer/Loss.cpp new file mode 100644 index 000000000..0e2b67c99 --- /dev/null +++ b/tools/train/source/optimizer/Loss.cpp @@ -0,0 +1,52 @@ +// +// Loss.cpp +// MNN +// +// Created by MNN on 2019/11/29. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "Loss.hpp" + +using namespace MNN::Express; + +namespace MNN { +namespace Train { + +Express::VARP _CrossEntropy(Express::VARP predicts, Express::VARP oneHotTargets) { + MNN_ASSERT(predicts->getInfo()->dim.size() == 2); + MNN_ASSERT(predicts->getInfo()->dim == oneHotTargets->getInfo()->dim); + auto loss = _Negative(_ReduceMean(_ReduceSum(_Log(predicts) * oneHotTargets, {1}), {})); + return loss; +} + +Express::VARP _KLDivergence(Express::VARP predicts, Express::VARP oneHotTargets) { + MNN_ASSERT(predicts->getInfo()->dim.size() == 2); + MNN_ASSERT(predicts->getInfo()->dim == oneHotTargets->getInfo()->dim); + auto loss = _ReduceMean(_ReduceSum(_Multiply(predicts, _Log(predicts) - _Log(oneHotTargets)), {1}), {}); + return loss; +} + +Express::VARP _MSE(Express::VARP predicts, Express::VARP oneHotTargets) { + MNN_ASSERT(predicts->getInfo()->dim.size() == 2); + MNN_ASSERT(predicts->getInfo()->dim == oneHotTargets->getInfo()->dim); + auto loss = _ReduceMean(_ReduceSum(_Square(predicts - oneHotTargets), {1}), {}); + return loss; +} + 
+Express::VARP _MAE(Express::VARP predicts, Express::VARP oneHotTargets) { + MNN_ASSERT(predicts->getInfo()->dim.size() == 2); + MNN_ASSERT(predicts->getInfo()->dim == oneHotTargets->getInfo()->dim); + auto loss = _ReduceMean(_ReduceSum(_Abs(predicts - oneHotTargets), {1}), {}); + return loss; +} + +Express::VARP _Hinge(Express::VARP predicts, Express::VARP oneHotTargets) { + MNN_ASSERT(predicts->getInfo()->dim.size() == 2); + MNN_ASSERT(predicts->getInfo()->dim == oneHotTargets->getInfo()->dim); + auto loss = _ReduceMean(_ReduceSum(_Maximum(_Const(0.), _Const(1.) - predicts * oneHotTargets), {1}), {}); + return loss; +} + +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/optimizer/Loss.hpp b/tools/train/source/optimizer/Loss.hpp new file mode 100644 index 000000000..4f5eaa714 --- /dev/null +++ b/tools/train/source/optimizer/Loss.hpp @@ -0,0 +1,30 @@ +// +// Loss.hpp +// MNN +// +// Created by MNN on 2019/11/29. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Loss_hpp +#define Loss_hpp + +#include + +namespace MNN { +namespace Train { + +MNN_PUBLIC Express::VARP _CrossEntropy(Express::VARP predicts, Express::VARP oneHotTargets); + +MNN_PUBLIC Express::VARP _KLDivergence(Express::VARP predicts, Express::VARP oneHotTargets); + +MNN_PUBLIC Express::VARP _MSE(Express::VARP predicts, Express::VARP oneHotTargets); + +MNN_PUBLIC Express::VARP _MAE(Express::VARP predicts, Express::VARP oneHotTargets); + +MNN_PUBLIC Express::VARP _Hinge(Express::VARP predicts, Express::VARP oneHotTargets); + +} // namespace Train +} // namespace MNN + +#endif // Loss_hpp diff --git a/tools/train/source/optimizer/ParameterOptimizer.cpp b/tools/train/source/optimizer/ParameterOptimizer.cpp new file mode 100644 index 000000000..c2fe598ad --- /dev/null +++ b/tools/train/source/optimizer/ParameterOptimizer.cpp @@ -0,0 +1,26 @@ +// +// ParameterOptimizer.cpp +// MNN +// +// Created by MNN on 2019/11/22. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "ParameterOptimizer.hpp" + +namespace MNN { +namespace Train { + +bool ParameterOptimizer::step(Express::VARP loss) { + auto res = this->onGetNextParameter(loss); + for (auto iter : res) { + iter.second.fix(Express::VARP::CONST); + } + for (auto iter : res) { + Express::Variable::replace(iter.first, iter.second); + } + return !res.empty(); +} + +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/optimizer/ParameterOptimizer.hpp b/tools/train/source/optimizer/ParameterOptimizer.hpp new file mode 100644 index 000000000..0259ed726 --- /dev/null +++ b/tools/train/source/optimizer/ParameterOptimizer.hpp @@ -0,0 +1,27 @@ +// +// ParameterOptimizer.hpp +// MNN +// +// Created by MNN on 2019/11/22. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef ParameterOptimizer_hpp +#define ParameterOptimizer_hpp +#include + +namespace MNN { +namespace Train { + +class MNN_PUBLIC ParameterOptimizer { +public: + ParameterOptimizer() = default; + virtual ~ParameterOptimizer() = default; + bool step(Express::VARP loss); + virtual std::map onGetNextParameter(Express::VARP loss) = 0; +}; + +} // namespace Train +} // namespace MNN + +#endif diff --git a/tools/train/source/optimizer/SGD.cpp b/tools/train/source/optimizer/SGD.cpp new file mode 100644 index 000000000..e177faff1 --- /dev/null +++ b/tools/train/source/optimizer/SGD.cpp @@ -0,0 +1,90 @@ +// +// SGD.cpp +// MNN +// +// Created by MNN on 2019/11/22. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "SGD.hpp" +#include "OpGrad.hpp" + +using namespace MNN::Express; + +namespace MNN { +namespace Train { + +void SGD::setLearningRate(float rate) { + mLearningRate = rate; +} + +void SGD::setMomentum(float momentum) { + mMomentum = momentum; +} + +void SGD::setWeightDecay(float decay) { + mWeightDecay = decay; +} + +void SGD::setRegularizationMethod(RegularizationMethod method) { + mRegularizationMethod = method; +} + +void SGD::append(const std::set& parameters) { + for (auto p : parameters) { + mParameters.insert(p); + mHistory[p] = _Const(0.0f, p->getInfo()->dim, p->getInfo()->order); + } +} + +void SGD::remove(const std::set& parameters) { + for (auto p : parameters) { + mParameters.erase(p); + mHistory.erase(p); + } +} + +const std::set& SGD::parameters() const { + return mParameters; +} + +Express::VARP SGD::regularizeParameters(Express::VARP param, Express::VARP grad) { + VARP addWeightDecayGrad; + if (mRegularizationMethod == L1) { + auto temp = _Sign(param); + addWeightDecayGrad = _Const(mWeightDecay, {}, NCHW) * temp + grad; + } else if (mRegularizationMethod == L2) { + addWeightDecayGrad = _Const(mWeightDecay, {}, NCHW) * param + grad; + } + + return addWeightDecayGrad; +} + +Express::VARP SGD::computeUpdateValue(Express::VARP param, Express::VARP grad) { + auto lr = _Const(mLearningRate, {}, NCHW); + mHistory[param] = lr * grad + _Const(mMomentum, {}, NCHW) * mHistory[param]; + mHistory[param].fix(Express::VARP::CONST); + + return mHistory[param]; +} + +std::map SGD::onGetNextParameter(Express::VARP loss) { + mStep++; + + auto grad = OpGrad::grad(loss, mParameters); + + for (auto& iter : grad) { + // apply regularization + auto addWeightDecayGrad = regularizeParameters(iter.first, iter.second); + addWeightDecayGrad.fix(Express::VARP::CONST); + // apply momentum, etc. 
+ auto updateValue = computeUpdateValue(iter.first, addWeightDecayGrad); + // apply update + auto newParameter = iter.first - updateValue; + iter.second = newParameter; + } + return grad; +} + +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/optimizer/SGD.hpp b/tools/train/source/optimizer/SGD.hpp new file mode 100644 index 000000000..662ad020e --- /dev/null +++ b/tools/train/source/optimizer/SGD.hpp @@ -0,0 +1,65 @@ +// +// SGD.hpp +// MNN +// +// Created by MNN on 2019/11/22. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef SGD_hpp +#define SGD_hpp + +#include +#include +#include +#include +#include "ParameterOptimizer.hpp" + +namespace MNN { +namespace Train { + +class MNN_PUBLIC SGD : public ParameterOptimizer { +public: + enum RegularizationMethod { + L1, + L2, + }; + + virtual std::map onGetNextParameter(Express::VARP loss) override; + + virtual Express::VARP regularizeParameters(Express::VARP param, Express::VARP grad); + + virtual Express::VARP computeUpdateValue(Express::VARP param, Express::VARP grad); + + void setLearningRate(float rate); + + void setMomentum(float momentum); + + void setWeightDecay(float decay); + + void setRegularizationMethod(RegularizationMethod method); + + void append(const std::set& parameters); + + void remove(const std::set& parameters); + + const std::set& parameters() const; + +protected: + float mLearningRate = 0.001f; + float mMomentum = 0; + float mWeightDecay = 0; + RegularizationMethod mRegularizationMethod = L2; + std::set mParameters; + std::map mHistory; + int mStep = 0; + + // For Cache + const Express::Expr* mLoss = nullptr; + int mLossFromIndex = 0; +}; + +} // namespace Train +} // namespace MNN + +#endif // SGD_hpp diff --git a/tools/train/source/parameters/Distributions.cpp b/tools/train/source/parameters/Distributions.cpp new file mode 100644 index 000000000..7f6fafa07 --- /dev/null +++ b/tools/train/source/parameters/Distributions.cpp @@ -0,0 +1,31 @@ +// +// 
Distributions.cpp +// MNN +// +// Created by MNN on 2019/11/28. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "Distributions.hpp" +#include +#include + +namespace MNN { +namespace Train { + +void Distributions::uniform(const int count, const float min, const float max, float *r, std::mt19937 gen) { + std::uniform_real_distribution dis(min, std::nextafter(max, std::numeric_limits::max())); + for (int i = 0; i < count; i++) { + r[i] = dis(gen); + } +} + +void Distributions::gaussian(const int count, const float mu, const float sigma, float *r, std::mt19937 gen) { + std::normal_distribution dis(mu, sigma); + for (int i = 0; i < count; i++) { + r[i] = dis(gen); + } +} + +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/parameters/Distributions.hpp b/tools/train/source/parameters/Distributions.hpp new file mode 100644 index 000000000..c1ef1f64b --- /dev/null +++ b/tools/train/source/parameters/Distributions.hpp @@ -0,0 +1,27 @@ +// +// Distributions.hpp +// MNN +// +// Created by MNN on 2019/11/28. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Distributions_hpp +#define Distributions_hpp + +#include +#include + +namespace MNN { +namespace Train { + +class Distributions { +public: + static void uniform(const int count, const float min, const float max, float* r, std::mt19937 gen); + static void gaussian(const int count, const float mu, const float sigma, float* r, std::mt19937 gen); +}; + +} // namespace Train +} // namespace MNN + +#endif // Distritutions_hpp diff --git a/tools/train/source/parameters/Initializer.cpp b/tools/train/source/parameters/Initializer.cpp new file mode 100644 index 000000000..99ec9208f --- /dev/null +++ b/tools/train/source/parameters/Initializer.cpp @@ -0,0 +1,208 @@ +// +// Initializer.cpp +// MNN +// +// Created by MNN on 2019/11/28. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "Initializer.hpp" +#include +#include +#include +#include "Distributions.hpp" +#include "RandomGenerator.hpp" + +namespace MNN { +namespace Train { + +Express::VARP Initializer::createConstVar(Express::INTS dim, Express::Dimensionformat format) { + auto res = Express::_Input(dim, format, halide_type_of()); + this->onExecute(res); + res.fix(Express::VARP::CONST); + return res; +} + +class ConstantInitializer : public Initializer { +public: + ConstantInitializer(float value) : mConstant(value) { + } + + virtual void onExecute(Express::VARP p) override { + const int count = p->getInfo()->size; + MNN_ASSERT(count > 0); + auto ptr = p->writeMap(); + for (int i = 0; i < count; i++) { + ptr[i] = mConstant; + } + } + +private: + float mConstant; +}; +Initializer* Initializer::constValue(float value) { + return new ConstantInitializer(value); +} + +class UniformInitializer : public Initializer { +public: + UniformInitializer(float min = 0, float max = 1) { + mMin = min; + mMax = max; + } + + virtual void onExecute(Express::VARP p) override { + const int count = p->getInfo()->size; + MNN_ASSERT(count > 0); + Distributions::uniform(count, mMin, mMax, p->writeMap(), RandomGenerator::generator()); + } + +private: + float mMin; + float mMax; +}; + +Initializer* Initializer::uniform(float minValue, float maxValue) { + return new UniformInitializer(minValue, maxValue); +} + +class XavierInitializer : public Initializer { +public: + XavierInitializer(VarianceNorm norm = FANIN) { + mNorm = norm; + } + + virtual void onExecute(Express::VARP p) override { + const int count = p->getInfo()->size; + MNN_ASSERT(count > 0); + const std::vector dims = p->getInfo()->dim; + // referenced from Caffe + // https://github.com/BVLC/caffe/blob/master/include/caffe/filler.hpp + int fanIn = count / dims[0]; + int fanOut = dims.size() > 1 ? 
count / dims[1] : count; + float n = fanIn; // default: FANIN + if (mNorm == VarianceNorm::AVERAGE) { + n = (fanIn + fanOut) / 2.0f; + } else if (mNorm == VarianceNorm::FANOUT) { + n = fanOut; + } + float scale = sqrtf(3.0f / n); + + Distributions::uniform(count, -scale, scale, p->writeMap(), RandomGenerator::generator()); + } + +private: + VarianceNorm mNorm; +}; +Initializer* Initializer::xavier(VarianceNorm norm) { + return new XavierInitializer(norm); +} + +class GaussianInitializer : public Initializer { +public: + GaussianInitializer(float mean = 0, float std = 1) { + mMean = mean; + mStd = std; + } + + virtual void onExecute(Express::VARP p) override { + const int count = p->getInfo()->size; + MNN_ASSERT(count > 0); + Distributions::gaussian(count, mMean, mStd, p->writeMap(), RandomGenerator::generator()); + } + +private: + float mMean; + float mStd; +}; +Initializer* Initializer::gauss(float mean, float std) { + return new GaussianInitializer(mean, std); +} + +class MSRAInitializer : public Initializer { +public: + MSRAInitializer(VarianceNorm norm = FANIN) { + mNorm = norm; + } + + virtual void onExecute(Express::VARP p) override { + const int count = p->getInfo()->size; + MNN_ASSERT(count > 0); + const std::vector dims = p->getInfo()->dim; + // referenced from Caffe + // https://github.com/BVLC/caffe/blob/master/include/caffe/filler.hpp + int fanIn = count / dims[0]; + int fanOut = dims.size() > 1 ? 
count / dims[1] : count; + float n = fanIn; // default: FANIN + if (mNorm == VarianceNorm::AVERAGE) { + n = (fanIn + fanOut) / 2.0f; + } else if (mNorm == VarianceNorm::FANOUT) { + n = fanOut; + } + float std = sqrtf(2.0f / n); + + Distributions::gaussian(count, 0.0f, std, p->writeMap(), RandomGenerator::generator()); + } + +private: + VarianceNorm mNorm; +}; +Initializer* Initializer::MSRA(VarianceNorm norm) { + return new MSRAInitializer(norm); +} + +class BilinearInitializer : public Initializer { +public: + BilinearInitializer() = default; + + virtual void onExecute(Express::VARP p) override { + const int count = p->getInfo()->size; + MNN_ASSERT(count > 0); + const std::vector dims = p->getInfo()->dim; + MNN_ASSERT(dims.size() == 4); + MNN_ASSERT(dims[2] == dims[3]); // NCHW, H == W + // referenced from Caffe + // https://github.com/BVLC/caffe/blob/master/include/caffe/filler.hpp + int f = ceilf(dims[3] / 2.0f); + float c = (dims[3] - 1) / (2.0f * f); + + for (int i = 0; i < count; i++) { + float x = i % dims[3]; + float y = (i / dims[3]) % dims[2]; + p->writeMap()[i] = (1 - std::fabs(x / f - c)) * (1 - std::fabs(y / f - c)); + } + } +}; +Initializer* Initializer::bilinear() { + return new BilinearInitializer(); +} + +class PositiveUnitball : public Initializer { +public: + PositiveUnitball() = default; + + virtual void onExecute(Express::VARP p) override { + const int count = p->getInfo()->size; + MNN_ASSERT(count > 0); + const std::vector dims = p->getInfo()->dim; + + Distributions::uniform(count, 0, 1, p->writeMap(), RandomGenerator::generator()); + + int dim = count / dims[0]; + for (int i = 0; i < dims[0]; i++) { + float sum = 0; + for (int j = 0; j < dim; j++) { + sum += p->readMap()[i * dim + j]; + } + for (int j = 0; j < dim; j++) { + p->writeMap()[i * dim + j] = p->readMap()[i * dim + j] / sum; + } + } + } +}; +Initializer* Initializer::positiveUnitball() { + return new PositiveUnitball(); +} + +} // namespace Train +} // namespace MNN diff --git 
a/tools/train/source/parameters/Initializer.hpp b/tools/train/source/parameters/Initializer.hpp new file mode 100644 index 000000000..25b449d2c --- /dev/null +++ b/tools/train/source/parameters/Initializer.hpp @@ -0,0 +1,43 @@ +// +// Initializer.hpp +// MNN +// +// Created by MNN on 2019/11/28. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Initializer_hpp +#define Initializer_hpp + +#include + +namespace MNN { +namespace Train { +class RandomGenerator; +class Initializer { +public: + Initializer() = default; + virtual ~Initializer() = default; + Express::VARP createConstVar(Express::INTS dim, Express::Dimensionformat format = Express::NCHW); + virtual void onExecute(Express::VARP p) = 0; + + static Initializer* constValue(float value); + static Initializer* uniform(float minValue = 0.0f, float maxValue = 1.0f); + + enum VarianceNorm { + FANIN, + FANOUT, + AVERAGE, + }; + + static Initializer* xavier(VarianceNorm norm = FANIN); + static Initializer* gauss(float mean = 0.0f, float std = 1.0f); + static Initializer* MSRA(VarianceNorm norm = FANIN); + static Initializer* bilinear(); + static Initializer* positiveUnitball(); +}; + +} // namespace Train +} // namespace MNN + +#endif // Initializer_hpp diff --git a/tools/train/source/parameters/RandomGenerator.hpp b/tools/train/source/parameters/RandomGenerator.hpp new file mode 100644 index 000000000..f82aaa6c0 --- /dev/null +++ b/tools/train/source/parameters/RandomGenerator.hpp @@ -0,0 +1,45 @@ +// +// RandomGenerator.hpp +// MNN +// +// Created by MNN on 2019/11/28. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef RandomGenerator_hpp +#define RandomGenerator_hpp + +#include +#include + +namespace MNN { +namespace Train { + +class MNN_PUBLIC RandomGenerator { +private: + RandomGenerator(int seed = std::random_device()()) { + mSeed = seed; + mGenerator.seed(mSeed); + } + + ~RandomGenerator() = default; + + RandomGenerator(RandomGenerator &); + + RandomGenerator &operator=(const RandomGenerator &); + +private: + int mSeed; + std::mt19937 mGenerator; + +public: + static std::mt19937 &generator(int seed = std::random_device()()) { + static RandomGenerator rng(seed); + return rng.mGenerator; + } +}; + +} // namespace Train +} // namespace MNN + +#endif // RandomGenerator_hpp \ No newline at end of file diff --git a/tools/train/source/transform/BinaryGrad.cpp b/tools/train/source/transform/BinaryGrad.cpp deleted file mode 100644 index 83be6445a..000000000 --- a/tools/train/source/transform/BinaryGrad.cpp +++ /dev/null @@ -1,80 +0,0 @@ -// -// BinaryGrad.cpp -// MNN -// -// Created by MNN on 2019/05/04. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "BinaryGrad.hpp" -#include "Macro.h" -using namespace std; -using namespace MNN; -using namespace MNN::Express; - -class BinaryGrad : public OpGrad { -public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector res; - auto inputs = expr->inputs(); - res.resize(inputs.size()); - auto op = expr->get(); - auto outputDiff = backwardOutput[0]; - switch (op->main_as_BinaryOp()->opType()) { - case BinaryOpOperation_ADD: { - res[0] = outputDiff; - res[1] = outputDiff; - break; - } - case BinaryOpOperation_SUB: { - res[0] = outputDiff; - res[1] = _Neg(outputDiff); - break; - } - case BinaryOpOperation_MUL: { - res[0] = _Mul(outputDiff, inputs[1]); - res[1] = _Mul(outputDiff, inputs[0]); - break; - } - case BinaryOpOperation_REALDIV: { - res[0] = _Div(outputDiff, inputs[1]); - // d (u / v) = dx / v , -dx*u(1/v)*(1/v) - res[1] = _Neg(_Mul(outputDiff, _Div(output[0], inputs[1]))); - break; - } - default: - break; - } - for (int i=0; igetInfo(); - auto backShape = res[i]->getInfo(); - std::vector reduceDims; - bool keepDim = true; - MNN_ASSERT(inputShape->dim.size() <= backShape->dim.size()); - if (inputShape->dim.size() < backShape->dim.size()) { - auto diff = (int)backShape->dim.size() - (int)inputShape->dim.size(); - for (int i=0; idim.size(); ++i) { - if (backShape->dim[i] > 1 && inputShape->dim[i] == 1) { - reduceDims.emplace_back(i); - } - } - keepDim = true; - } - if (!reduceDims.empty()) { - res[i] = _Sum(res[i], reduceDims, keepDim); - } - } - return res; - } -}; - -static const auto gRegister = []() { - static BinaryGrad _c; - OpGrad::insert((int)OpType_BinaryOp, &_c); - return true; -}(); diff --git a/tools/train/source/transform/ConvGrad.cpp b/tools/train/source/transform/ConvGrad.cpp deleted file mode 100644 index 4643194be..000000000 --- a/tools/train/source/transform/ConvGrad.cpp +++ /dev/null @@ -1,77 +0,0 @@ 
-// -// ConvGrad.cpp -// MNN -// -// Created by MNN on 2019/04/22. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "ConvGrad.hpp" -#include "Macro.h" -using namespace std; -using namespace MNN::Express; -using namespace MNN; - -class ConvGrad : public OpGrad { -public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - auto inputs = expr->inputs(); - if (inputs.size() < 3) { - return std::vector{}; - } - std::shared_ptr forwardOp(expr->get()->UnPack()); - std::vector res; - res.resize(inputs.size()); - auto outputDiff = backwardOutput[0]; - { - // Create Input Grad - unique_ptr newOp(new OpT); - if (forwardOp->type == OpType_Convolution) { - newOp->type = OpType_Deconvolution; - } else if (forwardOp->type == OpType_ConvolutionDepthwise) { - newOp->type = OpType_DeconvolutionDepthwise; - } - newOp->main.type = OpParameter_Convolution2D; - auto conv2D = new Convolution2DT; - conv2D->common.reset(new Convolution2DCommonT(*forwardOp->main.AsConvolution2D()->common)); - auto inputCount = conv2D->common->inputCount; - auto outputCount = conv2D->common->outputCount; - conv2D->common->inputCount = outputCount; - conv2D->common->outputCount = inputCount; - newOp->main.value = conv2D; - - // Create Zero Bias - auto newConstBias = _Const(0.0f, {inputCount}, NHWC); - - auto expr = Expr::create(std::move(newOp), {outputDiff, inputs[1], newConstBias}); - res[0] = Variable::create(expr); - res[0]->setName(forwardOp->name + "_Input_Grad"); - } - // Add Filter Grad - { - unique_ptr newOp(new OpT); - newOp->type = OpType_Conv2DBackPropFilter; - newOp->main.type = OpParameter_Convolution2D; - auto conv2D = new Convolution2DT; - conv2D->common.reset(new Convolution2DCommonT(*forwardOp->main.AsConvolution2D()->common)); - newOp->main.value = conv2D; - auto expr = Expr::create(std::move(newOp), {inputs[1], inputs[0], outputDiff}); - res[1] = Variable::create(expr); - 
res[1]->setName(forwardOp->name + "_Filter_Grad"); - } - // Add Bias Grad - { - auto gradConvert = _Convert(outputDiff, NHWC); - res[2] = _Sum(gradConvert, {0, 1, 2}); - res[2]->setName(forwardOp->name + "_Bias_Grad"); - } - return res; - } -}; - -static const auto gRegister = []() { - static ConvGrad _c; - OpGrad::insert(OpType_Convolution, &_c); - OpGrad::insert(OpType_ConvolutionDepthwise, &_c); - return true; -}(); diff --git a/tools/train/source/transform/MatMulGrad.cpp b/tools/train/source/transform/MatMulGrad.cpp deleted file mode 100644 index 9c7310aa4..000000000 --- a/tools/train/source/transform/MatMulGrad.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// -// MatMulGrad.cpp -// MNN -// -// Created by MNN on 2019/05/27. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "MatMulGrad.hpp" -using namespace std; -using namespace MNN; -using namespace MNN::Express; - -class MatMulGrad : public OpGrad { -public: - MatMulGrad() { - mType = LINEAR; - } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector res; - auto inputs = expr->inputs(); - res.resize(inputs.size()); - auto outputDiff = backwardOutput[0]; - - { - // A' = C' * BT - unique_ptr newOp(new OpT); -// newOp->inputIndexes = {outputDiff, forwardOp->inputIndexes[1]}; - newOp->type = OpType_MatMul; - newOp->main.type = OpParameter_MatMul; - newOp->main.value = new MatMulT; - newOp->main.AsMatMul()->transposeB = true; - auto expr = Expr::create(std::move(newOp), {outputDiff, inputs[1]}); - res[0] = Variable::create(expr); - } - { - // B' = AT * C' - unique_ptr newOp(new OpT); -// newOp->inputIndexes = {forwardOp->inputIndexes[0], outputDiff}; -// newOp->outputIndexes = {gradTensors[1]}; - newOp->type = OpType_MatMul; - newOp->main.type = OpParameter_MatMul; - newOp->main.value = new MatMulT; - newOp->main.AsMatMul()->transposeA = true; - auto expr = Expr::create(std::move(newOp), {inputs[0], outputDiff}); - res[1] = 
Variable::create(expr); - } - return res; - } -}; -static const auto gRegister = []() { - static MatMulGrad _c; - OpGrad::insert(OpType_MatMul, &_c); - return true; -}(); diff --git a/tools/train/source/transform/OpGrad.cpp b/tools/train/source/transform/OpGrad.cpp deleted file mode 100644 index 5f30ac93c..000000000 --- a/tools/train/source/transform/OpGrad.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// -// OpGrad.cpp -// MNN -// -// Created by MNN on 2019/05/05. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "OpGrad.hpp" -#include -using namespace std; -namespace MNN { -static std::map& getConverter() { - static std::map gConverterMap; - return gConverterMap; -} - -OpGrad* OpGrad::get(int type) { - auto& converterMap = getConverter(); - auto iter = converterMap.find(type); - if (iter != converterMap.end()) { - return iter->second; - } - return nullptr; -} - -void OpGrad::insert(int type, OpGrad* converter) { - auto& converterMap = getConverter(); - converterMap.insert(std::make_pair(type, converter)); -} - -std::string numberToString(int index) { - std::ostringstream os; - os << index; - return os.str(); -} -} diff --git a/tools/train/source/transform/OpGrad.hpp b/tools/train/source/transform/OpGrad.hpp deleted file mode 100644 index c5a32e427..000000000 --- a/tools/train/source/transform/OpGrad.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// -// OpGrad.hpp -// MNN -// -// Created by MNN on 2019/05/05. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef OpGrad_hpp -#define OpGrad_hpp -#include -#include -#include "Expr.hpp" -#include "ExprCreator.hpp" -#include "MNN_generated.h" - -namespace MNN { - class MNN_PUBLIC OpGrad { - public: - enum Type { LINEAR, SEMI_LINEAR, NO_LINEAR }; - - OpGrad() = default; - virtual ~OpGrad() = default; - - Type type() const { - return mType; - } - - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) = 0; - - static OpGrad* get(int type); - static void insert(int type, OpGrad* creator); - - protected: - Type mType = LINEAR; - }; -} - - -MNN_PUBLIC std::string numberToString(int index); -#endif diff --git a/tools/train/source/transform/ReduceGrad.cpp b/tools/train/source/transform/ReduceGrad.cpp deleted file mode 100644 index 6bf0ef32c..000000000 --- a/tools/train/source/transform/ReduceGrad.cpp +++ /dev/null @@ -1,162 +0,0 @@ -// -// ReduceGrad.cpp -// MNN -// -// Created by MNN on 2019/05/24. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "Macro.h" -#include "OpGrad.hpp" -using namespace std; -using namespace MNN; -using namespace MNN::Express; - -class ReduceGrad : public OpGrad { -public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector result; - auto inputs = expr->inputs(); - result.resize(inputs.size()); - std::unique_ptr forwardOp(expr->get()->UnPack()); - std::vector dim = forwardOp->main.AsReductionParam()->dim; - auto keepDim = forwardOp->main.AsReductionParam()->keepDims; - if (inputs.size() > 1) { - dim.clear(); - auto ptr = inputs[1]->readMap(); - auto shape = inputs[1]->getInfo(); - for (int i=0; isize; ++i) { - dim.emplace_back(ptr[i]); - } - inputs[1]->unMap(); - } - if (dim.empty()) { - auto shape = inputs[0]->getInfo(); - for (int i=0; idim.size(); ++i) { - dim.emplace_back(i); - } - } - if (forwardOp->main.AsReductionParam()->operation == ReductionType_SUM) { - VARP init; - { - unique_ptr newOp(new OpT); - newOp->name = forwardOp->name + "__Zero"; - newOp->type = OpType_ZerosLike; - init = Variable::create(Expr::create(std::move(newOp), {inputs[0]})); - } - auto outputDiff = backwardOutput[0]; - auto currentOutput = outputDiff; - if (!keepDim) { - // Create Unsqueeze Op - unique_ptr newOp(new OpT); - newOp->name = forwardOp->name + "__Unsqueeze"; - newOp->type = OpType_Unsqueeze; - newOp->main.type = OpParameter_SqueezeParam; - newOp->main.value = new SqueezeParamT; - newOp->main.AsSqueezeParam()->squeezeDims = dim; - outputDiff = Variable::create(Expr::create(std::move(newOp), {outputDiff})); - } - result[0] = _Add(init, outputDiff); - } - return result; - } -}; -//class ReduceMeanGrad : public ReduceGrad { -//public: -// ReduceMeanGrad(const std::vector& dims, const std::vector& inputs) : ReduceGrad(dims) { -// auto input = inputs[0]; -// float size = 1.0f; -// for (int i = 0; i < dims.size(); ++i) { -// size *= 
(float)input->length(i); -// } -// mScale = 1.0f / size; -// } -// virtual OpConverter::Result onGrad(const MNN::NetT* net, const MNN::OpT* forwardOp, -// std::map>& backwardTensors, -// const std::vector& gradTensors) { -// OpConverter::Result result; -// result.newTensorOffset = net->tensorName.size(); -// // Create Shape Op -// auto shapeId = result.newTensorOffset + 0; -// { -// unique_ptr newOp(new OpT); -// newOp->name = forwardOp->name + "__Shape"; -// newOp->inputIndexes = {forwardOp->inputIndexes[0]}; -// newOp->outputIndexes = {shapeId}; -// newOp->type = OpType_Shape; -// result.tensorNames.emplace_back(newOp->name); -// result.opLists.emplace_back(std::move(newOp)); -// } -// auto scaleId = result.newTensorOffset + 1; -// // Create scale -// { -// unique_ptr newOp(new OpT); -// newOp->name = forwardOp->name + "__ScaleConst"; -// newOp->inputIndexes = {}; -// newOp->outputIndexes = {scaleId}; -// newOp->type = OpType_Const; -// newOp->main.type = OpParameter_Blob; -// newOp->main.value = new BlobT; -// newOp->main.AsBlob()->dataType = DataType_DT_FLOAT; -// newOp->main.AsBlob()->float32s = {mScale}; -// newOp->main.AsBlob()->dataFormat = MNN_DATA_FORMAT_NHWC; -// result.tensorNames.emplace_back(newOp->name); -// result.opLists.emplace_back(std::move(newOp)); -// } -// // Create Fill -// auto fillId = result.newTensorOffset + 2; -// { -// unique_ptr newOp(new OpT); -// newOp->name = forwardOp->name + "__Fill"; -// newOp->inputIndexes = {shapeId, scaleId}; -// newOp->outputIndexes = {fillId}; -// newOp->type = OpType_Fill; -// result.tensorNames.emplace_back(newOp->name); -// result.opLists.emplace_back(std::move(newOp)); -// } -// auto zeroId = fillId; -// -// auto outputIndex = forwardOp->outputIndexes[0]; -// auto outputDiff = backwardTensors.find(outputIndex)->second[0]; -// auto currentOutput = outputDiff; -// auto dim = mDims; -// if (dim.size() > 0) { -// // Create Unsqueeze Op -// unique_ptr newOp(new OpT); -// newOp->name = forwardOp->name + 
"__Unsqueeze"; -// newOp->inputIndexes = {currentOutput}; -// newOp->outputIndexes = {result.newTensorOffset + 3}; -// newOp->type = OpType_Unsqueeze; -// newOp->main.type = OpParameter_SqueezeParam; -// newOp->main.value = new SqueezeParamT; -// newOp->main.AsSqueezeParam()->squeezeDims = dim; -// currentOutput = newOp->outputIndexes[0]; -// result.tensorNames.emplace_back(newOp->name); -// result.opLists.emplace_back(std::move(newOp)); -// } -// -// // Create Binary Op -// { -// unique_ptr newOp(new OpT); -// newOp->name = forwardOp->name + "__Grad"; -// newOp->inputIndexes = {zeroId, currentOutput}; -// newOp->outputIndexes = {gradTensors[0]}; -// newOp->type = OpType_BinaryOp; -// newOp->main.type = OpParameter_BinaryOp; -// newOp->main.value = new BinaryOpT; -// newOp->main.AsBinaryOp()->T = DataType_DT_FLOAT; -// newOp->main.AsBinaryOp()->opType = BinaryOpOperation_ADD; -// result.opLists.emplace_back(std::move(newOp)); -// } -// return result; -// } -// -//private: -// float mScale; -//}; - -static const auto gRegister = []() { - static ReduceGrad _c; - OpGrad::insert(OpType_Reduction, &_c); - return true; -}(); diff --git a/tools/train/source/transform/ReluGrad.cpp b/tools/train/source/transform/ReluGrad.cpp deleted file mode 100644 index a312a47e1..000000000 --- a/tools/train/source/transform/ReluGrad.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// -// ReluGrad.cpp -// MNN -// -// Created by MNN on 2019/04/22. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "ReluGrad.hpp" -#include "Macro.h" -using namespace std; -using namespace MNN; - -class ReluGrad : public OpGrad { -public: - ReluGrad() { - mType = SEMI_LINEAR; - } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector result{nullptr}; - - unique_ptr newOp(new OpT); - newOp->type = OpType_ReluGrad; - newOp->main.type = OpParameter_Relu; - newOp->main.value = new ReluT; - - result[0] = Express::Variable::create(Express::Expr::create(std::move(newOp), {expr->inputs()[0], backwardOutput[0]})); - - return result; - } -}; -class Relu6Grad : public OpGrad { -public: - Relu6Grad() { - mType = SEMI_LINEAR; - } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector result{nullptr}; - - unique_ptr newOp(new OpT); - newOp->type = OpType_Relu6Grad; - newOp->main.type = OpParameter_NONE; - result[0] = Express::Variable::create(Express::Expr::create(std::move(newOp), {expr->inputs()[0], backwardOutput[0]})); - return result; - } -}; -static const auto gRegister = []() { - static ReluGrad _c; - OpGrad::insert(OpType_ReLU, &_c); - static Relu6Grad _d; - OpGrad::insert(OpType_ReLU6, &_d); - return true; -}(); diff --git a/tools/train/source/transform/TensorConvertGrad.cpp b/tools/train/source/transform/TensorConvertGrad.cpp deleted file mode 100644 index 07ce34e6d..000000000 --- a/tools/train/source/transform/TensorConvertGrad.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// -// TensorConvertGrad.cpp -// MNN -// -// Created by MNN on 2019/05/04. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "TensorConvertGrad.hpp" -#include "Macro.h" -using namespace std; -using namespace MNN; - -class TensorConvertGrad : public OpGrad { -public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector result{nullptr}; - std::unique_ptr forwardOp(expr->get()->UnPack()); - - unique_ptr newOp(new OpT); -// newOp->name = forwardOp->name + "_Grad"; -// newOp->inputIndexes = {outputDiff}; -// newOp->outputIndexes = {gradTensors[0]}; - newOp->type = OpType_ConvertTensor; - newOp->main.type = OpParameter_TensorConvertInfo; - auto cInfo = new TensorConvertInfoT; - cInfo->dest = forwardOp->main.AsTensorConvertInfo()->source; - cInfo->source = forwardOp->main.AsTensorConvertInfo()->dest; - newOp->main.value = cInfo; - - result[0] = Express::Variable::create(Express::Expr::create(std::move(newOp), {backwardOutput[0]})); - return result; - } -}; -static const auto gRegister = []() { - static TensorConvertGrad _c; - OpGrad::insert(OpType_ConvertTensor, &_c); - return true; -}(); diff --git a/tools/train/source/transform/UnaryGrad.cpp b/tools/train/source/transform/UnaryGrad.cpp deleted file mode 100644 index 0fa068ac0..000000000 --- a/tools/train/source/transform/UnaryGrad.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// -// UnaryGrad.cpp -// MNN -// -// Created by MNN on 2019/05/25. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "Macro.h" -#include "OpGrad.hpp" -using namespace std; -using namespace MNN; -using namespace MNN::Express; - -class UnaryGrad : public OpGrad { -public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::unique_ptr forwardOp(expr->get()->UnPack()); - auto outputDiff = backwardOutput[0]; - auto input = expr->inputs()[0]; - - switch (forwardOp->main.AsUnaryOp()->opType) { - case MNN::UnaryOpOperation_LOG1P: { - // d log(1+x) = 1/(1+x) * dx = dx / (1+x) - auto oneConst = _Const(1.0f, {}, NHWC); - auto addOne = _Add(input, oneConst); - auto div = _Div(outputDiff, addOne); - return {div}; - } - case MNN::UnaryOpOperation_EXP: { - // d Exp(x) = Exp(x) * dx - return {_Mul(outputDiff, output[0])}; - } - case MNN::UnaryOpOperation_LOG: { - // d Log(x) = dx / x - return {_Div(outputDiff, input)}; - } - case MNN::UnaryOpOperation_NEG: { - // d (-x) = - dx - return {_Neg(outputDiff)}; - } - case MNN::UnaryOpOperation_SQRT: { - // d (-sqrt(x)) = 0.5 / sqrt(x) * dx - auto oneConst = _Const(0.5f, {}, NHWC); - auto mul = _Mul(outputDiff, oneConst); - auto div = _Div(mul, output[0]); - return {div}; - } - case MNN::UnaryOpOperation_SQUARE: { - // d (x^2) = (x*dx + x*dx) - auto mul = _Mul(input, outputDiff); - return {_Add(mul, mul)}; - } - default: - MNN_ASSERT(false); - break; - } - - return {}; - } -}; -class SigmoidGrad : public OpGrad { -public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, const std::vector& backwardOutput) override { - std::vector result{nullptr}; - auto outputDiff = backwardOutput[0]; - - // y = (1/(1+e(-x))) , dy = y(1-y) * dx = (y*y - y)*dx - auto mul = _Mul(output[0], output[0]); - auto sub = _Sub(mul, output[0]); - auto grad = _Mul(sub, outputDiff); - result[0] = grad; - return result; - } -}; - - -static const auto gRegister = []() { - static UnaryGrad _c; - static SigmoidGrad 
_s; - OpGrad::insert(OpType_UnaryOp, &_c); - OpGrad::insert(OpType_Sigmoid, &_s); - return true; -}(); diff --git a/tools/train/source/transform/ConvolutionConverter.cpp b/tools/train/source/transformer/ConvolutionConverter.cpp similarity index 97% rename from tools/train/source/transform/ConvolutionConverter.cpp rename to tools/train/source/transformer/ConvolutionConverter.cpp index 63471dc7f..9b810af19 100644 --- a/tools/train/source/transform/ConvolutionConverter.cpp +++ b/tools/train/source/transformer/ConvolutionConverter.cpp @@ -7,8 +7,8 @@ // #include "ConvolutionConverter.hpp" -#include "Macro.h" -#include "Tensor.hpp" +#include +#include "core/Macro.h" using namespace MNN; OpConverter::ReductResult ConvolutionConverter::onReduct(int opIndex, MNN::OpT* op, MNN::NetT* net) { diff --git a/tools/train/source/transform/ConvolutionConverter.hpp b/tools/train/source/transformer/ConvolutionConverter.hpp similarity index 100% rename from tools/train/source/transform/ConvolutionConverter.hpp rename to tools/train/source/transformer/ConvolutionConverter.hpp diff --git a/tools/train/source/transform/OpConverter.cpp b/tools/train/source/transformer/OpConverter.cpp similarity index 78% rename from tools/train/source/transform/OpConverter.cpp rename to tools/train/source/transformer/OpConverter.cpp index 78a8d7b25..a0bdbd554 100644 --- a/tools/train/source/transform/OpConverter.cpp +++ b/tools/train/source/transformer/OpConverter.cpp @@ -7,8 +7,9 @@ // #include "OpConverter.hpp" +#include #include -#include "ExprCreator.hpp" + using namespace MNN; using namespace MNN::Express; static std::map& getConverter() { @@ -32,6 +33,9 @@ void OpConverter::insert(MNN::OpType type, OpConverter* converter) { EXPRP OpConverter::convert(EXPRP source) { auto opOrigin = source->get(); + if (nullptr == opOrigin) { + return source; + } std::unique_ptr op(opOrigin->UnPack()); if (op->type != OpType_Convolution && op->type != OpType_ConvolutionDepthwise) { return source; @@ -50,22 +54,22 @@ 
EXPRP OpConverter::convert(EXPRP source) { auto srcCount = (int)conv2D->weight.size() * conv2DCommon->group / conv2DCommon->outputCount / conv2DCommon->kernelX / conv2DCommon->kernelY; weight->main.value = new BlobT; - weight->main.AsBlob()->dims = {conv2DCommon->outputCount, srcCount / conv2DCommon->group, conv2DCommon->kernelY, conv2DCommon->kernelX}; + weight->main.AsBlob()->dims = {conv2DCommon->outputCount, srcCount / conv2DCommon->group, conv2DCommon->kernelY, + conv2DCommon->kernelX}; weight->main.AsBlob()->dataType = DataType_DT_FLOAT; weight->main.AsBlob()->dataFormat = MNN_DATA_FORMAT_NCHW; weight->main.AsBlob()->float32s = std::move(op->main.AsConvolution2D()->weight); - EXPRP weightExpr = Expr::create(std::move(weight), {}, 1); - weightValue = Variable::create(weightExpr, 0); - conv2DCommon->inputCount = srcCount; + EXPRP weightExpr = Expr::create(std::move(weight), {}, 1); + weightValue = Variable::create(weightExpr, 0); + conv2DCommon->inputCount = srcCount; } biasValue = _Const((const void*)conv2D->bias.data(), {(int)conv2D->bias.size()}, NCHW); - weightValue->setName(op->name + "_Weight"); - biasValue->setName(op->name + "_Bias"); + weightValue->setName(source->name() + "_Weight"); + biasValue->setName(source->name() + "_Bias"); // Origin Convolution std::unique_ptr newConvOp(new OpT); { newConvOp->type = op->type; - newConvOp->name = op->name; newConvOp->main.type = OpParameter_Convolution2D; newConvOp->main.value = new Convolution2DT; newConvOp->main.AsConvolution2D()->common.reset(new Convolution2DCommonT(*conv2DCommon)); @@ -79,13 +83,15 @@ EXPRP OpConverter::convert(EXPRP source) { auto relu = conv2DCommon->relu; auto relu6 = conv2DCommon->relu6; - EXPRP newConv = Expr::create(std::move(newConvOp), {inputs[0], weightValue, biasValue}); + EXPRP newConv = Expr::create(std::move(newConvOp), {inputs[0], weightValue, biasValue}); VARP resultVariable = Variable::create(newConv, 0); + resultVariable->setName(source->name()); if (relu) { resultVariable 
= _Relu(resultVariable); + resultVariable->setName(source->name() + "_Relu"); } else if (relu6) { resultVariable = _Relu6(resultVariable); + resultVariable->setName(source->name() + "_Relu6"); } - resultVariable->setName(op->name); return resultVariable->expr().first; } diff --git a/tools/train/source/transform/OpConverter.hpp b/tools/train/source/transformer/OpConverter.hpp similarity index 83% rename from tools/train/source/transform/OpConverter.hpp rename to tools/train/source/transformer/OpConverter.hpp index 02709c3d5..819803aff 100644 --- a/tools/train/source/transform/OpConverter.hpp +++ b/tools/train/source/transformer/OpConverter.hpp @@ -8,8 +8,8 @@ #ifndef OpConverter_hpp #define OpConverter_hpp -#include "Expr.hpp" -#include "MNNDefine.h" +#include +#include #include "MNN_generated.h" class MNN_PUBLIC OpConverter { @@ -18,7 +18,7 @@ class MNN_PUBLIC OpConverter { static MNN::Express::EXPRP convert(MNN::Express::EXPRP source); - virtual ~OpConverter() = default; + virtual ~OpConverter() = default; static OpConverter* get(MNN::OpType type); static void insert(MNN::OpType type, OpConverter* converter); diff --git a/tools/train/source/transformer/Transformer.cpp b/tools/train/source/transformer/Transformer.cpp new file mode 100644 index 000000000..a21114ccc --- /dev/null +++ b/tools/train/source/transformer/Transformer.cpp @@ -0,0 +1,78 @@ +// +// Transformer.cpp +// MNN +// +// Created by MNN on 2019/12/16. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "Transformer.hpp" +#include "OpConverter.hpp" +using namespace MNN::Express; +namespace MNN { +namespace Train { + +class TurnTrainable : public Express::Optimizer { +public: + TurnTrainable(Transformer::TrainConfig config) { + mConfig = std::move(config); + } + virtual Cost onMeasure(const std::vector& outputs, + std::shared_ptr parameters = nullptr) override { + return Cost(); + } + virtual bool onExecute(const std::vector& outputs, std::shared_ptr p) override { + auto exprs = Variable::getExecuteOrder(outputs); + { + // Turn convolution be trainable convolution + for (auto expr : exprs) { + auto newExpr = OpConverter::convert(expr); + if (newExpr.get() != expr.get()) { + Expr::replace(expr, newExpr); + } + } + } + exprs = Variable::getExecuteOrder(outputs); + auto& variableLimits = mConfig.variableLimits; + // Collect Const Variable and turn to Trainable + for (auto v : exprs) { + if (v->get() == nullptr && VARP::INPUT != v->inputType()) { + auto name = v->name(); + auto info = v->outputInfo(0); + if (halide_type_float != info->type.code) { + continue; + } + bool match = variableLimits.empty(); + for (auto limit : variableLimits) { + if (name.find(limit) != std::string::npos) { + match = true; + break; + } + } + auto va = Variable::create(v, 0); + if (match) { + MNN_PRINT("Add Variable: %s\n", name.c_str()); + va.fix(VARP::TRAINABLE); + } else { + va.fix(VARP::CONST); + } + } + } + return true; + } + +private: + Transformer::TrainConfig mConfig; +}; + +std::shared_ptr Transformer::turnModelToTrainable(TrainConfig config) { + std::shared_ptr res; + res.reset(new TurnTrainable(std::move(config))); + return res; +} + +std::shared_ptr Transformer::turnModelToInfer() { + return nullptr; +} +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/transformer/Transformer.hpp b/tools/train/source/transformer/Transformer.hpp new file mode 100644 index 000000000..d2f756d5f --- /dev/null 
+++ b/tools/train/source/transformer/Transformer.hpp @@ -0,0 +1,26 @@ +// +// Transformer.hpp +// MNN +// +// Created by MNN on 2019/12/16. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef Transformer_hpp +#define Transformer_hpp +#include + +namespace MNN { +namespace Train { +class MNN_PUBLIC Transformer { +public: + struct TrainConfig { + std::vector variableLimits; + }; + + static std::shared_ptr turnModelToTrainable(TrainConfig config); + static std::shared_ptr turnModelToInfer(); +}; +} // namespace Train +} // namespace MNN +#endif