diff --git a/.github/workflows/issue.yml b/.github/workflows/issue.yml new file mode 100644 index 00000000..0f662e74 --- /dev/null +++ b/.github/workflows/issue.yml @@ -0,0 +1,22 @@ +name: issues +on: + schedule: + - cron: "0 0 * * 0" + +jobs: + close-issues: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v9.0.0 + with: + days-before-issue-stale: 30 + days-before-issue-close: 7 + stale-issue-label: "stale" + stale-issue-message: "This issue is stale because it has been open for 30 days with no activity." + close-issue-message: "This issue was closed because it has been inactive for 7 days since being marked as stale." + days-before-pr-stale: -1 + days-before-pr-close: -1 + repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index d16a6c2d..481cf8dc 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,7 @@ build-debug.sh __pycache__ scripts prebuilt -debug.txt \ No newline at end of file +debug.txt +tmp +third_party +build/ diff --git a/CMakeLists.txt b/CMakeLists.txt index eaffa6bf..bf20fbfa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,81 +1,83 @@ cmake_minimum_required(VERSION 3.10) project(lite.ai.toolkit) -set(CMAKE_CXX_STANDARD 11) -set(VERSION_STRING 0.1.1) -set(SOVERSION_STRING 0.1.1) -include(cmake/platform.cmake) # checking platform +set(CMAKE_CXX_STANDARD 17) +set(VERSION_STRING 0.2.0) +set(SOVERSION_STRING 0.2.0) +include(cmake/utils.cmake) -message(STATUS "Lite.AI.ToolKit ${VERSION_STRING}") -message(STATUS "Project: lite.ai.toolkit") -message(STATUS "Version: ${VERSION_STRING}") -message(STATUS "SO Version: ${SOVERSION_STRING}") -message(STATUS "Build Type: ${CMAKE_BUILD_TYPE}") -message(STATUS "Platform Name: ${PLATFORM_NAME}") -message(STATUS "Root Path: ${CMAKE_SOURCE_DIR}") +if (NOT (UNIX AND NOT APPLE)) + message(FATAL_ERROR "lite.ai.toolkit>=0.2 not support for windows/mac now!") +endif() + +set(THIRD_PARTY_PATH "${CMAKE_SOURCE_DIR}/third_party") 
+if(NOT EXISTS ${THIRD_PARTY_PATH}) + file(MAKE_DIRECTORY ${THIRD_PARTY_PATH}) +endif() # Linux GCC Compiler Options if (CMAKE_COMPILER_IS_GNUCXX) - set(CMAKE_CXX_FLAGS "-std=c++11 -Wno-deprecated ${CMAKE_CXX_FLAGS} ") - message(STATUS "[Linux GCC Compiler Options]+:-std=c++11 -Wno-deprecated") + set(CMAKE_CXX_FLAGS "-std=c++17 -Wno-deprecated ${CMAKE_CXX_FLAGS} ") + message(STATUS "[Linux GCC Compiler Options]+:-std=c++17 -Wno-deprecated") endif () # message(STATUS "CMAKE_CXX_COMPILER: [${CMAKE_CXX_COMPILER}]") # root dir set(LITE_AI_ROOT_DIR ${CMAKE_SOURCE_DIR}) -# set default build dir for lite.ai.toolkit -if (NOT DEFINED BUILD_LITE_AI_DIR) - set(BUILD_LITE_AI_DIR ${LITE_AI_ROOT_DIR}/build/lite.ai.toolkit) -endif () -set(LIBRARY_OUTPUT_PATH ${BUILD_LITE_AI_DIR}/lib) -set(EXECUTABLE_OUTPUT_PATH ${BUILD_LITE_AI_DIR}/bin) # compile options for lite.ai.toolkit -option(LITE_AI_BUILD_LIB "build shared libraries." ON) # now, ON only option(LITE_AI_BUILD_TEST "build test examples." ON) -option(INCLUDE_OPENCV "package OpenCV into lite.ai.toolkit." 
ON) # inference engines setups: config.h.in -> config.h option(ENABLE_DEBUG_STRING "enable DEBUG string or not" ON) option(ENABLE_ONNXRUNTIME "enable ONNXRuntime engine" ON) -option(ENABLE_MNN "enable MNN engine" ON) # unstable now, DON'T use -option(ENABLE_NCNN "enable NCNN engine" ON) # unstable now, DON'T use -option(ENABLE_TNN "enable TNN engine" ON) # unstable now, DON'T use +option(ENABLE_MNN "enable MNN engine" OFF) # unstable now, DON'T use +option(ENABLE_NCNN "enable NCNN engine" OFF) # unstable now, DON'T use +option(ENABLE_TNN "enable TNN engine" OFF) # unstable now, DON'T use # cuda provider setups: config.h.in -> config.h (only for onnxruntime) option(ENABLE_ONNXRUNTIME_CUDA "enable ONNXRuntime engine with CUDA provider" OFF) -# openmp/opengl/vulkan/cuda setups: config.h.in -> config.h (for future use) -option(ENABLE_LITE_OPENMP "enable OPENMP accelerate for some post processes" OFF) # for future use, DON'T use NOW! -option(ENABLE_LITE_OPENGL "enable OPENGL accelerate for some post processes" OFF) # for future use, DON'T use NOW! -option(ENABLE_LITE_VULKAN "enable VULKAN accelerate for some post processes" OFF) # for future use, DON'T use NOW! -option(ENABLE_LITE_CUDA "enable CUDA accelerate for some post processes" OFF) # for future use, DON'T use NOW! -# videoio interface setups, for future use option(ENABLE_OPENCV_VIDEOIO "enable opencv videoio modules for detect_video apis" ON) # now, ON only -# inference engines backend setups for lite.ai.toolkit -option(BACKEND_ONNXRUNTIME "set ONNXRuntime as the main backend of lite.ai.toolkit" ON) -option(BACKEND_MNN "set MNN as the main backend of lite.ai.toolkit" OFF) # now, OFF only -option(BACKEND_NCNN "set NCNN as the main backend of lite.ai.toolkit" OFF) # now, OFF only -option(BACKEND_TNN "set TNN as the main backend of lite.ai.toolkit" OFF) # now, OFF only -message(STATUS "Engines Enable Details ... 
") -message(STATUS "INCLUDE_OPENCV: ${INCLUDE_OPENCV}") -message(STATUS "ENABLE_ONNXRUNTIME: ${ENABLE_ONNXRUNTIME}") -message(STATUS "ENABLE_MNN: ${ENABLE_MNN}") -message(STATUS "ENABLE_NCNN: ${ENABLE_NCNN}") -message(STATUS "ENABLE_TNN: ${ENABLE_TNN}") +if (NOT ENABLE_ONNXRUNTIME) + message(FATAL_ERROR "ENABLE_ONNXRUNTIME must be enbled now!") +endif() # setup include dir and lib dir include_directories(${LITE_AI_ROOT_DIR}) -link_directories(${LITE_AI_ROOT_DIR}/lib/${PLATFORM_NAME}) +configure_file(cmake/lite.ai.toolkit.cmake.in ${CMAKE_SOURCE_DIR}/cmake/lite.ai.toolkit.cmake @ONLY) # include custom cmake files. include(cmake/opencv.cmake) -include(cmake/command.cmake) -# configuration for lite.ai shared lib. -if (LITE_AI_BUILD_LIB) - include(cmake/lite.ai.toolkit.cmake) -endif () +add_lite_ai_toolkit_shared_library(${VERSION_STRING} ${SOVERSION_STRING}) +install(TARGETS lite.ai.toolkit LIBRARY DESTINATION lib) +install(DIRECTORY ${CMAKE_SOURCE_DIR}/lite + DESTINATION ${CMAKE_INSTALL_PREFIX}/include + FILES_MATCHING + PATTERN "*.h") +install(DIRECTORY ${THIRD_PARTY_PATH} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/) +install(FILES ${CMAKE_SOURCE_DIR}/cmake/lite.ai.toolkit.cmake + DESTINATION ${CMAKE_INSTALL_PREFIX}) +install(FILES ${CMAKE_SOURCE_DIR}/cmake/lite.ai.toolkit-config.cmake + DESTINATION ${CMAKE_INSTALL_PREFIX}) # configuration for test examples. 
-if (LITE_AI_BUILD_LIB AND LITE_AI_BUILD_TEST) +if (LITE_AI_BUILD_TEST) + set(EXECUTABLE_OUTPUT_PATH ${CMAKE_INSTALL_PREFIX}/bin) add_subdirectory(examples/lite) + if ((UNIX AND NOT APPLE)) + file(GLOB_RECURSE ALL_THIRD_LIBS ${THIRD_PARTY_PATH} FOLLOW_SYMLINKS *.so*) + install(FILES ${ALL_THIRD_LIBS} DESTINATION ${EXECUTABLE_OUTPUT_PATH}) + endif() endif () + +message(STATUS "-------------------------- lite.ai.toolkit Configuration Summary --------------------------") +message(STATUS " Version: ${VERSION_STRING}") +message(STATUS " SO Version: ${SOVERSION_STRING}") +message(STATUS " Build Type: ${CMAKE_BUILD_TYPE}") +message(STATUS " Root Path: ${CMAKE_SOURCE_DIR}") +message(STATUS " OpenCV: ON Version: ${OpenCV_Version}") +message(STATUS " ONNXRUNTIME: ${ENABLE_ONNXRUNTIME} Version: ${OnnxRuntime_Version}") +message(STATUS " MNN: ${ENABLE_MNN} Version: ${MNN_Version}") +message(STATUS " NCNN: ${ENABLE_NCNN} Version: ${NCNN_Version}") +message(STATUS " TNN: ${ENABLE_TNN} Version: ${TNN_Version}") +message(STATUS "-------------------------- lite.ai.toolkit Configuration Summary --------------------------") diff --git a/LICENSE b/LICENSE index 53d1f3d0..f288702d 100644 --- a/LICENSE +++ b/LICENSE @@ -672,4 +672,3 @@ may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . - diff --git a/MNN/AutoTime.hpp b/MNN/AutoTime.hpp deleted file mode 100644 index 40e47740..00000000 --- a/MNN/AutoTime.hpp +++ /dev/null @@ -1,58 +0,0 @@ -// -// AutoTime.hpp -// MNN -// -// Created by MNN on 2018/07/27. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef AutoTime_hpp -#define AutoTime_hpp - -#include -#include -#include - -namespace MNN { - -class MNN_PUBLIC Timer { -public: - Timer(); - ~Timer(); - Timer(const Timer&) = delete; - Timer(const Timer&&) = delete; - Timer& operator=(const Timer&) = delete; - Timer& operator=(const Timer&&) = delete; - - // reset timer - void reset(); - // get duration (us) from init or latest reset. - uint64_t durationInUs(); - -protected: - uint64_t mLastResetTime; -}; - -/** time tracing util. prints duration between init and deinit. */ -class MNN_PUBLIC AutoTime : Timer { -public: - AutoTime(int line, const char* func); - ~AutoTime(); - AutoTime(const AutoTime&) = delete; - AutoTime(const AutoTime&&) = delete; - AutoTime& operator=(const AutoTime&) = delete; - AutoTime& operator=(const AutoTime&&) = delete; - -private: - int mLine; - char* mName; -}; -} // namespace MNN - -#ifdef MNN_OPEN_TIME_TRACE -#define AUTOTIME MNN::AutoTime ___t(__LINE__, __func__) -#else -#define AUTOTIME -#endif - -#endif /* AutoTime_hpp */ diff --git a/MNN/ErrorCode.hpp b/MNN/ErrorCode.hpp deleted file mode 100644 index d3268016..00000000 --- a/MNN/ErrorCode.hpp +++ /dev/null @@ -1,34 +0,0 @@ -// -// ErrorCode.hpp -// MNN -// -// Created by MNN on 2018/09/18. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef ErrorCode_h -#define ErrorCode_h - -namespace MNN { -enum ErrorCode { -#ifdef NO_ERROR -#undef NO_ERROR -#endif // NO_ERROR - NO_ERROR = 0, - OUT_OF_MEMORY = 1, - NOT_SUPPORT = 2, - COMPUTE_SIZE_ERROR = 3, - NO_EXECUTION = 4, - INVALID_VALUE = 5, - - // User error - INPUT_DATA_ERROR = 10, - CALL_BACK_STOP = 11, - - // Op Resize Error - TENSOR_NOT_SUPPORT = 20, - TENSOR_NEED_DIVIDE = 21, -}; -} // namespace MNN - -#endif /* ErrorCode_h */ diff --git a/MNN/HalideRuntime.h b/MNN/HalideRuntime.h deleted file mode 100644 index bc4b0b52..00000000 --- a/MNN/HalideRuntime.h +++ /dev/null @@ -1,307 +0,0 @@ -#ifndef HALIDE_HALIDERUNTIME_H -#define HALIDE_HALIDERUNTIME_H - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -// Note that you should not use "inline" along with HALIDE_ALWAYS_INLINE; -// it is not necessary, and may produce warnings for some build configurations. -#ifdef _MSC_VER -#define HALIDE_ALWAYS_INLINE __forceinline -#define HALIDE_NEVER_INLINE __declspec(noinline) -#else -#define HALIDE_ALWAYS_INLINE __attribute__((always_inline)) inline -#define HALIDE_NEVER_INLINE __attribute__((noinline)) -#endif - -/** \file - * - * This file declares the routines used by Halide internally in its - * runtime. On platforms that support weak linking, these can be - * replaced with user-defined versions by defining an extern "C" - * function with the same name and signature. - * - * When doing Just In Time (JIT) compilation methods on the Func being - * compiled must be called instead. The corresponding methods are - * documented below. 
- * - * All of these functions take a "void *user_context" parameter as their - * first argument; if the Halide kernel that calls back to any of these - * functions has been compiled with the UserContext feature set on its Target, - * then the value of that pointer passed from the code that calls the - * Halide kernel is piped through to the function. - * - * Some of these are also useful to call when using the default - * implementation. E.g. halide_shutdown_thread_pool. - * - * Note that even on platforms with weak linking, some linker setups - * may not respect the override you provide. E.g. if the override is - * in a shared library and the halide object files are linked directly - * into the output, the builtin versions of the runtime functions will - * be called. See your linker documentation for more details. On - * Linux, LD_DYNAMIC_WEAK=1 may help. - * - */ - -// Forward-declare to suppress warnings if compiling as C. -struct halide_buffer_t; - -/** Types in the halide type system. They can be ints, unsigned ints, - * or floats (of various bit-widths), or a handle (which is always 64-bits). - * Note that the int/uint/float values do not imply a specific bit width - * (the bit width is expected to be encoded in a separate value). - */ -typedef enum halide_type_code_t -{ - halide_type_int = 0, //!< signed integers - halide_type_uint = 1, //!< unsigned integers - halide_type_float = 2, //!< floating point numbers - halide_type_handle = 3 //!< opaque pointer type (void *) -} halide_type_code_t; - -// Note that while __attribute__ can go before or after the declaration, -// __declspec apparently is only allowed before. -#ifndef HALIDE_ATTRIBUTE_ALIGN - #ifdef _MSC_VER - #define HALIDE_ATTRIBUTE_ALIGN(x) __declspec(align(x)) - #else - #define HALIDE_ATTRIBUTE_ALIGN(x) __attribute__((aligned(x))) - #endif -#endif - -/** A runtime tag for a type in the halide type system. Can be ints, - * unsigned ints, or floats of various bit-widths (the 'bits' - * field). 
Can also be vectors of the same (by setting the 'lanes' - * field to something larger than one). This struct should be - * exactly 32-bits in size. */ -struct halide_type_t { - /** The basic type code: signed integer, unsigned integer, or floating point. */ -#if __cplusplus >= 201103L - HALIDE_ATTRIBUTE_ALIGN(1) halide_type_code_t code; // halide_type_code_t -#else - HALIDE_ATTRIBUTE_ALIGN(1) uint8_t code; // halide_type_code_t -#endif - - /** The number of bits of precision of a single scalar value of this type. */ - HALIDE_ATTRIBUTE_ALIGN(1) uint8_t bits; - - /** How many elements in a vector. This is 1 for scalar types. */ - HALIDE_ATTRIBUTE_ALIGN(2) uint16_t lanes; - -#ifdef __cplusplus - /** Construct a runtime representation of a Halide type from: - * code: The fundamental type from an enum. - * bits: The bit size of one element. - * lanes: The number of vector elements in the type. */ - HALIDE_ALWAYS_INLINE halide_type_t(halide_type_code_t code, uint8_t bits, uint16_t lanes = 1) - : code(code), bits(bits), lanes(lanes) { - } - - /** Default constructor is required e.g. to declare halide_trace_event - * instances. */ - HALIDE_ALWAYS_INLINE halide_type_t() : code((halide_type_code_t)0), bits(0), lanes(0) {} - - /** Compare two types for equality. */ - HALIDE_ALWAYS_INLINE bool operator==(const halide_type_t &other) const { - return (code == other.code && - bits == other.bits && - lanes == other.lanes); - } - - HALIDE_ALWAYS_INLINE bool operator!=(const halide_type_t &other) const { - return !(*this == other); - } - - /** Size in bytes for a single element, even if width is not 1, of this type. */ - HALIDE_ALWAYS_INLINE int bytes() const { return (bits + 7) / 8; } -#endif -}; - -/** An opaque struct containing per-GPU API implementations of the - * device functions. */ -struct halide_device_interface_impl_t; - -/** Each GPU API provides a halide_device_interface_t struct pointing - * to the code that manages device allocations. 
You can access these - * functions directly from the struct member function pointers, or by - * calling the functions declared below. Note that the global - * functions are not available when using Halide as a JIT compiler. - * If you are using raw halide_buffer_t in that context you must use - * the function pointers in the device_interface struct. - * - * The function pointers below are currently the same for every GPU - * API; only the impl field varies. These top-level functions do the - * bookkeeping that is common across all GPU APIs, and then dispatch - * to more API-specific functions via another set of function pointers - * hidden inside the impl field. - */ -struct halide_device_interface_t { - int (*device_malloc)(void *user_context, struct halide_buffer_t *buf, - const struct halide_device_interface_t *device_interface); - int (*device_free)(void *user_context, struct halide_buffer_t *buf); - int (*device_sync)(void *user_context, struct halide_buffer_t *buf); - void (*device_release)(void *user_context, - const struct halide_device_interface_t *device_interface); - int (*copy_to_host)(void *user_context, struct halide_buffer_t *buf); - int (*copy_to_device)(void *user_context, struct halide_buffer_t *buf, - const struct halide_device_interface_t *device_interface); - int (*device_and_host_malloc)(void *user_context, struct halide_buffer_t *buf, - const struct halide_device_interface_t *device_interface); - int (*device_and_host_free)(void *user_context, struct halide_buffer_t *buf); - int (*buffer_copy)(void *user_context, struct halide_buffer_t *src, - const struct halide_device_interface_t *dst_device_interface, struct halide_buffer_t *dst); - int (*device_crop)(void *user_context, const struct halide_buffer_t *src, - struct halide_buffer_t *dst); - int (*device_release_crop)(void *user_context, struct halide_buffer_t *buf); - int (*wrap_native)(void *user_context, struct halide_buffer_t *buf, uint64_t handle, - const struct halide_device_interface_t 
*device_interface); - int (*detach_native)(void *user_context, struct halide_buffer_t *buf); - const struct halide_device_interface_impl_t *impl; -}; - -typedef struct halide_dimension_t { - int32_t min, extent, stride; - - // Per-dimension flags. None are defined yet (This is reserved for future use). - uint32_t flags; - -#ifdef __cplusplus - HALIDE_ALWAYS_INLINE halide_dimension_t() : min(0), extent(0), stride(0), flags(0) {} - HALIDE_ALWAYS_INLINE halide_dimension_t(int32_t m, int32_t e, int32_t s, uint32_t f = 0) : - min(m), extent(e), stride(s), flags(f) {} - - HALIDE_ALWAYS_INLINE bool operator==(const halide_dimension_t &other) const { - return (min == other.min) && - (extent == other.extent) && - (stride == other.stride) && - (flags == other.flags); - } - - HALIDE_ALWAYS_INLINE bool operator!=(const halide_dimension_t &other) const { - return !(*this == other); - } -#endif -} halide_dimension_t; - -#ifdef __cplusplus -} // extern "C" -#endif - -typedef enum {halide_buffer_flag_host_dirty = 1, - halide_buffer_flag_device_dirty = 2} halide_buffer_flags; - -/** - * The raw representation of an image passed around by generated - * Halide code. It includes some stuff to track whether the image is - * not actually in main memory, but instead on a device (like a - * GPU). For a more convenient C++ wrapper, use Halide::Buffer. */ -typedef struct halide_buffer_t { - /** A device-handle for e.g. GPU memory used to back this buffer. */ - uint64_t device; - - /** The interface used to interpret the above handle. */ - const struct halide_device_interface_t *device_interface; - - /** A pointer to the start of the data in main memory. In terms of - * the Halide coordinate system, this is the address of the min - * coordinates (defined below). */ - uint8_t* host; - - /** flags with various meanings. */ - uint64_t flags; - - /** The type of each buffer element. */ - struct halide_type_t type; - - /** The dimensionality of the buffer. 
*/ - int32_t dimensions; - - /** The shape of the buffer. Halide does not own this array - you - * must manage the memory for it yourself. */ - halide_dimension_t *dim; - - /** Pads the buffer up to a multiple of 8 bytes */ - void *padding; -} halide_buffer_t; - - -#ifdef __cplusplus - -namespace { -template struct check_is_pointer; -template struct check_is_pointer {}; -} - -/** Construct the halide equivalent of a C type */ -template -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - // Create a compile-time error if T is not a pointer (without - // using any includes - this code goes into the runtime). - check_is_pointer check; - (void)check; - return halide_type_t(halide_type_handle, 64); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_float, 32); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_float, 64); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_uint, 1); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_uint, 8); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_uint, 16); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_uint, 32); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_uint, 64); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_int, 8); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_int, 16); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_int, 32); -} - -template<> -HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() { - return halide_type_t(halide_type_int, 
64); -} - -#endif - -#endif // HALIDE_HALIDERUNTIME_H diff --git a/MNN/ImageProcess.hpp b/MNN/ImageProcess.hpp deleted file mode 100644 index d5c6c748..00000000 --- a/MNN/ImageProcess.hpp +++ /dev/null @@ -1,154 +0,0 @@ -// -// ImageProcess.hpp -// MNN -// -// Created by MNN on 2018/09/19. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef ImageProcess_hpp -#define ImageProcess_hpp - -#include -#include -#include - -namespace MNN { -namespace CV { -enum ImageFormat { - RGBA = 0, - RGB, - BGR, - GRAY, - BGRA, - YUV_NV21 = 11, - YUV_NV12 = 12, - YUV_I420 = 13, -}; - -enum Filter { NEAREST = 0, BILINEAR = 1, BICUBIC = 2 }; - -enum Wrap { CLAMP_TO_EDGE = 0, ZERO = 1, REPEAT = 2 }; - -/** - * handle image process for tensor. - * step: - * 1: Do transform compute and get points - * 2: Sample line and do format convert - * 3: Turn RGBA to float tensor, and do sub and normalize - */ -class MNN_PUBLIC ImageProcess { -public: - struct Inside; - struct Config { - /** data filter */ - Filter filterType = NEAREST; - /** format of source data */ - ImageFormat sourceFormat = RGBA; - /** format of destination data */ - ImageFormat destFormat = RGBA; - - // Only valid if the dest type is float - float mean[4] = {0.0f, 0.0f, 0.0f, 0.0f}; - float normal[4] = {1.0f, 1.0f, 1.0f, 1.0f}; - - /** edge wrapper */ - Wrap wrap = CLAMP_TO_EDGE; - }; - -public: - /** - * @brief create image process with given config for given tensor. - * @param config given config. - * @param dstTensor given tensor. - * @return image processor. - */ - static ImageProcess* create(const Config& config, const Tensor* dstTensor = nullptr); - - /** - * @brief create image process with given config for given tensor. - * @param means given means - * @param meanCount given means count - * @param normals given normals - * @param normalCount given normal count - * @param sourceFormat format of source data - * @param destFormat format of destination data - * @param dstTensor given tensor. 
- * @return image processor. - */ - static ImageProcess* create(const ImageFormat sourceFormat = RGBA, const ImageFormat destFormat = RGBA, - const float* means = nullptr, const int meanCount = 0, const float* normals = nullptr, - const int normalCount = 0, const Tensor* dstTensor = nullptr); - - ~ImageProcess(); - - /** - * @brief get affine transform matrix. - * @return affine transform matrix. - */ - inline const Matrix& matrix() const { - return mTransform; - } - void setMatrix(const Matrix& matrix); - - /** - * @brief convert source data to given tensor. - * @param source source data. - * @param iw source width. - * @param ih source height. - * @param stride number of elements per row. eg: 100 width RGB contains at least 300 elements. - * @param dest given tensor. - * @return result code. - */ - ErrorCode convert(const uint8_t* source, int iw, int ih, int stride, Tensor* dest); - - /** - * @brief convert source data to given tensor. - * @param source source data. - * @param iw source width. - * @param ih source height. - * @param stride number of elements per row. eg: 100 width RGB contains at least 300 elements. - * @param dest dest data. - * @param ow output width. - * @param oh output height. - * @param outputBpp output bpp, if 0, set as the save and config.destFormat. - * @param outputStride output stride, if 0, set as ow * outputBpp. - * @param type Only support halide_type_of and halide_type_of. - * @return result code. - */ - ErrorCode convert(const uint8_t* source, int iw, int ih, int stride, void* dest, int ow, int oh, int outputBpp = 0, - int outputStride = 0, halide_type_t type = halide_type_of()); - - /** - * @brief create tensor with given data. - * @param w image width. - * @param h image height. - * @param bpp bytes per pixel. - * @param p pixel data pointer. - * @return created tensor. 
- */ - template - static Tensor* createImageTensor(int w, int h, int bpp, void* p = nullptr) { - return createImageTensor(halide_type_of(), w, h, bpp, p); - } - static Tensor* createImageTensor(halide_type_t type, int w, int h, int bpp, void* p = nullptr); - - /** - * @brief set padding value when wrap=ZERO. - * @param value padding value. - * @return void. - */ - void setPadding(uint8_t value) { - mPaddingValue = value; - } -private: - ImageProcess(const Config& config); - Matrix mTransform; - Matrix mTransformInvert; - Inside* mInside; - uint8_t mPaddingValue = 0; -}; -} // namespace CV -} // namespace MNN - -#endif /* ImageProcess_hpp */ diff --git a/MNN/Interpreter.hpp b/MNN/Interpreter.hpp deleted file mode 100644 index f1e7d04c..00000000 --- a/MNN/Interpreter.hpp +++ /dev/null @@ -1,350 +0,0 @@ -// -// Interpreter.hpp -// MNN -// -// Created by MNN on 2018/07/23. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef Interpreter_hpp -#define Interpreter_hpp - -#include -#include -#include -#include -#include -#include -#include - -namespace MNN { - -/** session schedule config */ -struct ScheduleConfig { - /** which tensor should be kept */ - std::vector saveTensors; - /** forward type */ - MNNForwardType type = MNN_FORWARD_CPU; - /** CPU:number of threads in parallel , Or GPU: mode setting*/ - union { - int numThread = 4; - int mode; - }; - - /** subpath to run */ - struct Path { - std::vector inputs; - std::vector outputs; - - enum Mode { - /** - * Op Mode - * - inputs means the source op, can NOT be empty. - * - outputs means the sink op, can be empty. - * The path will start from source op, then flow when encounter the sink op. - * The sink op will not be compute in this path. - */ - Op = 0, - - /** - * Tensor Mode - * - inputs means the inputs tensors, can NOT be empty. - * - outputs means the outputs tensors, can NOT be empty. - * It will find the pipeline that compute outputs from inputs. 
- */ - Tensor = 1 - }; - - /** running mode */ - Mode mode = Op; - }; - Path path; - - /** backup backend used to create execution when desinated backend do NOT support any op */ - MNNForwardType backupType = MNN_FORWARD_CPU; - - /** extra backend config */ - BackendConfig* backendConfig = nullptr; -}; - -class Session; -struct Content; -class Tensor; -class Backend; -class Runtime; - -class MNN_PUBLIC OperatorInfo { - struct Info; - -public: - /** Operator's name*/ - const std::string& name() const; - - /** Operator's type*/ - const std::string& type() const; - - /** Operator's flops, in M*/ - float flops() const; - -protected: - OperatorInfo(); - ~OperatorInfo(); - Info* mContent; -}; - -typedef std::function&, const std::string& /*opName*/)> TensorCallBack; -typedef std::function&, const OperatorInfo*)> TensorCallBackWithInfo; -typedef std::pair>, std::shared_ptr> RuntimeInfo; - -/** net data holder. multiple sessions could share same net. */ -class MNN_PUBLIC Interpreter { -public: - /** - * @brief create net from file. - * @param file given file. - * @return created net if success, NULL otherwise. - */ - static Interpreter* createFromFile(const char* file); - /** - * @brief create net from buffer. - * @param buffer given data buffer. - * @param size size of data buffer. - * @return created net if success, NULL otherwise. 
- */ - static Interpreter* createFromBuffer(const void* buffer, size_t size); - ~Interpreter(); - - enum SessionMode { - /** About CallBack, Default Session_Debug*/ - /** runSessionWithCallBack is allowed and can get internal op info*/ - Session_Debug = 0, - /** runSessionWithCallBack is not valid and can't get any info of op in session*/ - Session_Release = 1, - - /** About input tenosr, Default Session_Input_Inside*/ - /** The input tensor is alloced by session, input data after session resized*/ - Session_Input_Inside = 2, - /** The input tensor is alloced by user, set input data before session resize*/ - Session_Input_User = 3, - }; - /** - * @brief The API shoud be called before create session. - * @param mode session mode - */ - void setSessionMode(SessionMode mode); - - /** - * @brief The API shoud be called before create session. - * If the cache exist, try to load cache from file. - * After createSession, try to save cache to file. - * @param cacheFile cache file name - * @param keySize the first `keySize` bytes used as the key to check if the `cacheFile` exists. - */ - void setCacheFile(const char* cacheFile, size_t keySize = 128); - - /** - * @brief The API shoud be called after last resize session. - * If resize session generate new cache info, try to rewrite cache file. - * If resize session do not generate any new cache info, just do nothing. - * @param session giveb session - * @param flag Protected param, not used now - */ - ErrorCode updateCacheFile(Session *session, int flag = 0); - -public: - /** - * @brief create runtimeInfo seperately with schedule config. - * @param configs session schedule configs. - */ - static RuntimeInfo createRuntime(const std::vector& configs); - - /** - * @brief create session with schedule config. created session will be managed in net. - * @param config session schedule config. - * @return created session if success, NULL otherwise. 
- */ - Session* createSession(const ScheduleConfig& config); - - /** - * @brief create session with schedule config and user-specified runtime. - * @param config session schedule config, runtime runtimeInfo used by the created session. - * @return created session if success, NULL otherwise. - */ - Session* createSession(const ScheduleConfig& config, const RuntimeInfo& runtime); - - /** - * @brief create multi-path session with schedule configs. created session will be managed in net. - * @param configs session schedule configs. - * @return created session if success, NULL otherwise. - */ - Session* createMultiPathSession(const std::vector& configs); - - /** - * @brief create multi-path session with schedule configs and user-specified runtime. - created session will be managed in net. - * @param configs session schedule configs. - * @return created session if success, NULL otherwise. - */ - Session* createMultiPathSession(const std::vector& configs, const RuntimeInfo& runtime); - - /** - * @brief release session. - * @param session given session. - * @return true if given session is held by net and is freed. - */ - bool releaseSession(Session* session); - - /** - * @brief call this function to get tensors ready. output tensor buffer (host or deviceId) should be retrieved - * after resize of any input tensor. - * @param session given session. - */ - void resizeSession(Session* session); - - /** - * @brief call this function if don't need resize or create session any more, it will save a few memory that equal - * to the size of model buffer - */ - void releaseModel(); - - /** - * @brief Get the model buffer for user to save - * @return std::make_pair(modleBuffer, modelSize). - * @example: - * std::ofstream output("trainResult.alinn") - * auto buffer = net->getModelBuffer(); - * output.write((const char*)buffer.first, buffer.second); - */ - std::pair getModelBuffer() const; - - /** - * @brief update Session's Tensor to model's Const Op - * @param session given session. 
- * @return result of running. - */ - ErrorCode updateSessionToModel(Session* session); - - /** - * @brief run session. - * @param session given session. - * @return result of running. - */ - ErrorCode runSession(Session* session) const; - - /* - * @brief run session. - * @param session given session. - * @param before callback before each op. return true to run the op; return false to skip the op. - * @param after callback after each op. return true to continue running; return false to interrupt the session. - * @param sync synchronously wait for finish of execution or not. - * @return result of running. - */ - ErrorCode runSessionWithCallBack(const Session* session, const TensorCallBack& before, const TensorCallBack& end, - bool sync = false) const; - - /* - * @brief run session. - * @param session given session. - * @param before callback before each op. return true to run the op; return false to skip the op. - * @param after callback after each op. return true to continue running; return false to interrupt the session. - * @param sync synchronously wait for finish of execution or not. - * @return result of running. - */ - ErrorCode runSessionWithCallBackInfo(const Session* session, const TensorCallBackWithInfo& before, - const TensorCallBackWithInfo& end, bool sync = false) const; - - /** - * @brief get input tensor for given name. - * @param session given session. - * @param name given name. if NULL, return first input. - * @return tensor if found, NULL otherwise. - */ - Tensor* getSessionInput(const Session* session, const char* name); - /** - * @brief get output tensor for given name. - * @param session given session. - * @param name given name. if NULL, return first output. - * @return tensor if found, NULL otherwise. 
- */ - Tensor* getSessionOutput(const Session* session, const char* name); - - enum SessionInfoCode { - /** memory session used in MB, float* */ - MEMORY = 0, - - /** float operation needed in session in M, float* */ - FLOPS = 1, - - /** Backends in session in M, int*, length >= 1 + number of configs when create session */ - BACKENDS = 2, - - ALL - }; - - /** - * @brief get session info - * @param session given session. - * @param code given info code. - * @param ptr given info ptr, see SessionInfoCode for detail - * @return true if support the code, false otherwise. - */ - bool getSessionInfo(const Session* session, SessionInfoCode code, void* ptr); - - /** - * @brief get all output tensors. - * @param session given session. - * @return all output tensors mapped with name. - */ - const std::map& getSessionOutputAll(const Session* session) const; - /** - * @brief get all input tensors. - * @param session given session. - * @return all input tensors mapped with name. - */ - const std::map& getSessionInputAll(const Session* session) const; - -public: - /** - * @brief resize given tensor. - * @param tensor given tensor. - * @param dims new dims. at most 6 dims. - */ - void resizeTensor(Tensor* tensor, const std::vector& dims); - - /** - * @brief resize given tensor by nchw. - * @param batch / N. - * @param channel / C. - * @param height / H. - * @param width / W - */ - void resizeTensor(Tensor* tensor, int batch, int channel, int height, int width); - - /** - * @brief get backend used to create given tensor. - * @param session given session. - * @param tensor given tensor. - * @return backend used to create given tensor, may be NULL. - */ - const Backend* getBackend(const Session* session, const Tensor* tensor) const; - - /** - * @brief get business code (model identifier). - * @return business code. 
- */ - const char* bizCode() const; - -private: - static Interpreter* createFromBufferInternal(Content* net); - - Content* mNet = nullptr; - Interpreter(Content* net); - - Interpreter(const Interpreter&) = delete; - Interpreter(const Interpreter&&) = delete; - Interpreter& operator=(const Interpreter&) = delete; - Interpreter& operator=(const Interpreter&&) = delete; -}; -} // namespace MNN - -#endif /* Interpreter_hpp */ diff --git a/MNN/MNNDefine.h b/MNN/MNNDefine.h deleted file mode 100644 index e03ef4cb..00000000 --- a/MNN/MNNDefine.h +++ /dev/null @@ -1,64 +0,0 @@ -// -// MNNDefine.h -// MNN -// -// Created by MNN on 2018/08/09. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNNDefine_h -#define MNNDefine_h - -#include -#include - -#if defined(__APPLE__) -#include -#if TARGET_OS_IPHONE -#define MNN_BUILD_FOR_IOS -#endif -#endif - -#ifdef MNN_USE_LOGCAT -#include -#define MNN_ERROR(format, ...) __android_log_print(ANDROID_LOG_ERROR, "MNNJNI", format, ##__VA_ARGS__) -#define MNN_PRINT(format, ...) __android_log_print(ANDROID_LOG_INFO, "MNNJNI", format, ##__VA_ARGS__) -#else -#define MNN_PRINT(format, ...) printf(format, ##__VA_ARGS__) -#define MNN_ERROR(format, ...) 
printf(format, ##__VA_ARGS__) -#endif - -#ifdef DEBUG -#define MNN_ASSERT(x) \ - { \ - int res = (x); \ - if (!res) { \ - MNN_ERROR("Error for %s, %d\n", __FILE__, __LINE__); \ - assert(res); \ - } \ - } -#else -#define MNN_ASSERT(x) -#endif - -#define FUNC_PRINT(x) MNN_PRINT(#x "=%d in %s, %d \n", x, __func__, __LINE__); -#define FUNC_PRINT_ALL(x, type) MNN_PRINT(#x "=" #type " %" #type " in %s, %d \n", x, __func__, __LINE__); - -#define MNN_CHECK(success, log) \ -if(!(success)){ \ -MNN_ERROR("Check failed: %s ==> %s\n", #success, #log); \ -} - -#if defined(_MSC_VER) -#if defined(BUILDING_MNN_DLL) -#define MNN_PUBLIC __declspec(dllexport) -#elif defined(USING_MNN_DLL) -#define MNN_PUBLIC __declspec(dllimport) -#else -#define MNN_PUBLIC -#endif -#else -#define MNN_PUBLIC __attribute__((visibility("default"))) -#endif - -#endif /* MNNDefine_h */ diff --git a/MNN/MNNForwardType.h b/MNN/MNNForwardType.h deleted file mode 100644 index 838b1875..00000000 --- a/MNN/MNNForwardType.h +++ /dev/null @@ -1,91 +0,0 @@ -// -// MNNForwardType.h -// MNN -// -// Created by MNN on 2019/01/19. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNNForwardType_h -#define MNNForwardType_h -#include -#include - -typedef enum { - MNN_FORWARD_CPU = 0, - - /* - Firtly find the first available backends not equal to CPU - If no other backends, use cpu - */ - MNN_FORWARD_AUTO = 4, - - /*Hand write metal*/ - MNN_FORWARD_METAL = 1, - - /*NVIDIA GPU API*/ - MNN_FORWARD_CUDA = 2, - - /*Android / Common Device GPU API*/ - MNN_FORWARD_OPENCL = 3, - MNN_FORWARD_OPENGL = 6, - MNN_FORWARD_VULKAN = 7, - - /*Android 8.1's NNAPI, Not Support yet. 
CoreML Now*/ - MNN_FORWARD_NN = 5, - - /*User can use API from Backend.hpp to add or search Backend*/ - MNN_FORWARD_USER_0 = 8, - MNN_FORWARD_USER_1 = 9, - MNN_FORWARD_USER_2 = 10, - MNN_FORWARD_USER_3 = 11, - - MNN_FORWARD_ALL, - - /* Apply arm extension instruction set to accelerate some Ops, this forward type - is only used in MNN internal, and will be active automatically when user set forward type - to be MNN_FORWARD_CPU and extension instruction set is valid on hardware. - */ - MNN_FORWARD_CPU_EXTENSION - -} MNNForwardType; - -typedef enum { - // choose one tuning mode Only - MNN_GPU_TUNING_NONE = 1 << 0,/* Forbidden tuning, performance not good */ - MNN_GPU_TUNING_HEAVY = 1 << 1,/* heavily tuning, usually not suggested */ - MNN_GPU_TUNING_WIDE = 1 << 2,/* widely tuning, performance good. Default */ - MNN_GPU_TUNING_NORMAL = 1 << 3,/* normal tuning, performance may be ok */ - MNN_GPU_TUNING_FAST = 1 << 4,/* fast tuning, performance may not good */ - - // choose one opencl memory mode Only - /* User can try OpenCL_MEMORY_BUFFER and OpenCL_MEMORY_IMAGE both, - then choose the better one according to performance*/ - MNN_GPU_MEMORY_BUFFER = 1 << 6,/* User assign mode */ - MNN_GPU_MEMORY_IMAGE = 1 << 7,/* User assign mode */ -} MNNGpuMode; - -#ifdef __cplusplus -namespace MNN { -struct BackendConfig { - enum MemoryMode { Memory_Normal = 0, Memory_High, Memory_Low }; - - MemoryMode memory = Memory_Normal; - - enum PowerMode { Power_Normal = 0, Power_High, Power_Low }; - - PowerMode power = Power_Normal; - - enum PrecisionMode { Precision_Normal = 0, Precision_High, Precision_Low }; - - PrecisionMode precision = Precision_Normal; - - /** user defined context */ - union { - void* sharedContext = nullptr; - size_t flags; // Valid for CPU Backend - }; -}; -}; // namespace MNN -#endif -#endif /* MNNForwardType_h */ diff --git a/MNN/MNNSharedContext.h b/MNN/MNNSharedContext.h deleted file mode 100644 index 100a4ca8..00000000 --- a/MNN/MNNSharedContext.h +++ /dev/null @@ 
-1,35 +0,0 @@ -// -// MNNSharedContext.h -// MNN -// -// Created by MNN on 2018/10/11. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNNSharedContext_h -#define MNNSharedContext_h -#ifdef __cplusplus -extern "C" { -#endif - -#include /*uint32_t*/ - -#ifndef VK_DEFINE_HANDLE -#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; -VK_DEFINE_HANDLE(VkInstance) -VK_DEFINE_HANDLE(VkPhysicalDevice) -VK_DEFINE_HANDLE(VkDevice) -VK_DEFINE_HANDLE(VkQueue) -#endif -struct MNNVulkanContext { - VkInstance pInstance; - VkPhysicalDevice pPhysicalDevice; - VkDevice pDevice; - VkQueue pQueue; - uint32_t iQueueFamilyIndex; -}; -#ifdef __cplusplus -} -#endif - -#endif /* MNNSharedContext_h */ diff --git a/MNN/Matrix.h b/MNN/Matrix.h deleted file mode 100644 index 0d59f663..00000000 --- a/MNN/Matrix.h +++ /dev/null @@ -1,1615 +0,0 @@ -/* - * Copyright 2006 The Android Open Source Project - * - * Use of this source code is governed by a BSD-style license that can be - * found in the LICENSE file. - */ - -/* Generated by tools/bookmaker from include/core/Matrix.h and docs/SkMatrix_Reference.bmh - on 2018-07-13 08:15:11. Additional documentation and examples can be found at: - https://skia.org/user/api/SkMatrix_Reference - - You may edit either file directly. Structural changes to public interfaces require - editing both files. After editing docs/SkMatrix_Reference.bmh, run: - bookmaker -b docs -i include/core/Matrix.h -p - to create an updated version of this file. - */ - - -// -// Modified by jiangxiaotang on 2018/09/19. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef SkMatrix_DEFINED -#define SkMatrix_DEFINED - -#include -#include -#include - -namespace MNN { -namespace CV { - -/** \class Matrix - Matrix holds a 3x3 matrix for transforming coordinates. This allows mapping - Point and vectors with translation, scaling, skewing, rotation, and - perspective. - - Matrix elements are in row major order. 
Matrix does not have a constructor, - so it must be explicitly initialized. setIdentity() initializes Matrix - so it has no effect. setTranslate(), setScale(), setSkew(), setRotate(), set9 and setAll() - initializes all Matrix elements with the corresponding mapping. - - Matrix includes a hidden variable that classifies the type of matrix to - improve performance. Matrix is not thread safe unless getType() is called first. -*/ - -class MNN_PUBLIC Matrix { -public: - Matrix() { - setIdentity(); - } - - /** Sets Matrix to scale by (sx, sy). Returned matrix is: - - | sx 0 0 | - | 0 sy 0 | - | 0 0 1 | - - @param sx horizontal scale factor - @param sy vertical scale factor - @return Matrix with scale - */ - static Matrix MakeScale(float sx, float sy) { - Matrix m; - m.setScale(sx, sy); - return m; - } - - /** Sets Matrix to scale by (scale, scale). Returned matrix is: - - | scale 0 0 | - | 0 scale 0 | - | 0 0 1 | - - @param scale horizontal and vertical scale factor - @return Matrix with scale - */ - static Matrix MakeScale(float scale) { - Matrix m; - m.setScale(scale, scale); - return m; - } - - /** Sets Matrix to translate by (dx, dy). 
Returned matrix is: - - | 1 0 dx | - | 0 1 dy | - | 0 0 1 | - - @param dx horizontal translation - @param dy vertical translation - @return Matrix with translation - */ - static Matrix MakeTrans(float dx, float dy) { - Matrix m; - m.setTranslate(dx, dy); - return m; - } - - /** Sets Matrix to: - - | scaleX skewX transX | - | skewY scaleY transY | - | pers0 pers1 pers2 | - - @param scaleX horizontal scale factor - @param skewX horizontal skew factor - @param transX horizontal translation - @param skewY vertical skew factor - @param scaleY vertical scale factor - @param transY vertical translation - @param pers0 input x-axis perspective factor - @param pers1 input y-axis perspective factor - @param pers2 perspective scale factor - @return Matrix constructed from parameters - */ - static Matrix MakeAll(float scaleX, float skewX, float transX, float skewY, float scaleY, float transY, float pers0, - float pers1, float pers2) { - Matrix m; - m.setAll(scaleX, skewX, transX, skewY, scaleY, transY, pers0, pers1, pers2); - return m; - } - - /** \enum Matrix::TypeMask - Enum of bit fields for mask returned by getType(). - Used to identify the complexity of Matrix, to optimize performance. - */ - enum TypeMask { - kIdentity_Mask = 0, //!< identity Matrix; all bits clear - kTranslate_Mask = 0x01, //!< translation Matrix - kScale_Mask = 0x02, //!< scale Matrix - kAffine_Mask = 0x04, //!< skew or rotate Matrix - kPerspective_Mask = 0x08, //!< perspective Matrix - }; - - /** Returns a bit field describing the transformations the matrix may - perform. The bit field is computed conservatively, so it may include - false positives. For example, when kPerspective_Mask is set, all - other bits are set. 
- - @return kIdentity_Mask, or combinations of: kTranslate_Mask, kScale_Mask, - kAffine_Mask, kPerspective_Mask - */ - TypeMask getType() const { - if (fTypeMask & kUnknown_Mask) { - fTypeMask = this->computeTypeMask(); - } - // only return the public masks - return (TypeMask)(fTypeMask & 0xF); - } - - /** Returns true if Matrix is identity. Identity matrix is: - - | 1 0 0 | - | 0 1 0 | - | 0 0 1 | - - @return true if Matrix has no effect - */ - bool isIdentity() const { - return this->getType() == 0; - } - - /** Returns true if Matrix at most scales and translates. Matrix may be identity, - contain only scale elements, only translate elements, or both. Matrix form is: - - | scale-x 0 translate-x | - | 0 scale-y translate-y | - | 0 0 1 | - - @return true if Matrix is identity; or scales, translates, or both - */ - bool isScaleTranslate() const { - return !(this->getType() & ~(kScale_Mask | kTranslate_Mask)); - } - - /** Returns true if Matrix is identity, or translates. Matrix form is: - - | 1 0 translate-x | - | 0 1 translate-y | - | 0 0 1 | - - @return true if Matrix is identity, or translates - */ - bool isTranslate() const { - return !(this->getType() & ~(kTranslate_Mask)); - } - - /** Returns true Matrix maps Rect to another Rect. If true, Matrix is identity, - or scales, or rotates a multiple of 90 degrees, or mirrors on axes. In all - cases, Matrix may also have translation. Matrix form is either: - - | scale-x 0 translate-x | - | 0 scale-y translate-y | - | 0 0 1 | - - or - - | 0 rotate-x translate-x | - | rotate-y 0 translate-y | - | 0 0 1 | - - for non-zero values of scale-x, scale-y, rotate-x, and rotate-y. - - Also called preservesAxisAlignment(); use the one that provides better inline - documentation. 
- - @return true if Matrix maps one Rect into another - */ - bool rectStaysRect() const { - if (fTypeMask & kUnknown_Mask) { - fTypeMask = this->computeTypeMask(); - } - return (fTypeMask & kRectStaysRect_Mask) != 0; - } - - /** Returns true Matrix maps Rect to another Rect. If true, Matrix is identity, - or scales, or rotates a multiple of 90 degrees, or mirrors on axes. In all - cases, Matrix may also have translation. Matrix form is either: - - | scale-x 0 translate-x | - | 0 scale-y translate-y | - | 0 0 1 | - - or - - | 0 rotate-x translate-x | - | rotate-y 0 translate-y | - | 0 0 1 | - - for non-zero values of scale-x, scale-y, rotate-x, and rotate-y. - - Also called rectStaysRect(); use the one that provides better inline - documentation. - - @return true if Matrix maps one Rect into another - */ - bool preservesAxisAlignment() const { - return this->rectStaysRect(); - } - - /** Matrix organizes its values in row order. These members correspond to - each value in Matrix. - */ - static constexpr int kMScaleX = 0; //!< horizontal scale factor - static constexpr int kMSkewX = 1; //!< horizontal skew factor - static constexpr int kMTransX = 2; //!< horizontal translation - static constexpr int kMSkewY = 3; //!< vertical skew factor - static constexpr int kMScaleY = 4; //!< vertical scale factor - static constexpr int kMTransY = 5; //!< vertical translation - static constexpr int kMPersp0 = 6; //!< input x perspective factor - static constexpr int kMPersp1 = 7; //!< input y perspective factor - static constexpr int kMPersp2 = 8; //!< perspective bias - - /** Affine arrays are in column major order to match the matrix used by - PDF and XPS. 
- */ - static constexpr int kAScaleX = 0; //!< horizontal scale factor - static constexpr int kASkewY = 1; //!< vertical skew factor - static constexpr int kASkewX = 2; //!< horizontal skew factor - static constexpr int kAScaleY = 3; //!< vertical scale factor - static constexpr int kATransX = 4; //!< horizontal translation - static constexpr int kATransY = 5; //!< vertical translation - - /** Returns one matrix value. Asserts if index is out of range and SK_DEBUG is - defined. - - @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, - kMPersp0, kMPersp1, kMPersp2 - @return value corresponding to index - */ - float operator[](int index) const { - MNN_ASSERT((unsigned)index < 9); - return fMat[index]; - } - - /** Returns one matrix value. Asserts if index is out of range and SK_DEBUG is - defined. - - @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, - kMPersp0, kMPersp1, kMPersp2 - @return value corresponding to index - */ - float get(int index) const { - MNN_ASSERT((unsigned)index < 9); - return fMat[index]; - } - - /** Returns scale factor multiplied by x-axis input, contributing to x-axis output. - With mapPoints(), scales Point along the x-axis. - - @return horizontal scale factor - */ - float getScaleX() const { - return fMat[kMScaleX]; - } - - /** Returns scale factor multiplied by y-axis input, contributing to y-axis output. - With mapPoints(), scales Point along the y-axis. - - @return vertical scale factor - */ - float getScaleY() const { - return fMat[kMScaleY]; - } - - /** Returns scale factor multiplied by x-axis input, contributing to y-axis output. - With mapPoints(), skews Point along the y-axis. - Skewing both axes can rotate Point. - - @return vertical skew factor - */ - float getSkewY() const { - return fMat[kMSkewY]; - } - - /** Returns scale factor multiplied by y-axis input, contributing to x-axis output. - With mapPoints(), skews Point along the x-axis. 
- Skewing both axes can rotate Point. - - @return horizontal scale factor - */ - float getSkewX() const { - return fMat[kMSkewX]; - } - - /** Returns translation contributing to x-axis output. - With mapPoints(), moves Point along the x-axis. - - @return horizontal translation factor - */ - float getTranslateX() const { - return fMat[kMTransX]; - } - - /** Returns translation contributing to y-axis output. - With mapPoints(), moves Point along the y-axis. - - @return vertical translation factor - */ - float getTranslateY() const { - return fMat[kMTransY]; - } - - /** Returns factor scaling input x-axis relative to input y-axis. - - @return input x-axis perspective factor - */ - float getPerspX() const { - return fMat[kMPersp0]; - } - - /** Returns factor scaling input y-axis relative to input x-axis. - - @return input y-axis perspective factor - */ - float getPerspY() const { - return fMat[kMPersp1]; - } - - /** Returns writable Matrix value. Asserts if index is out of range and SK_DEBUG is - defined. Clears internal cache anticipating that caller will change Matrix value. - - Next call to read Matrix state may recompute cache; subsequent writes to Matrix - value must be followed by dirtyMatrixTypeCache(). - - @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, - kMPersp0, kMPersp1, kMPersp2 - @return writable value corresponding to index - */ - float& operator[](int index) { - MNN_ASSERT((unsigned)index < 9); - this->setTypeMask(kUnknown_Mask); - return fMat[index]; - } - - /** Sets Matrix value. Asserts if index is out of range and SK_DEBUG is - defined. Safer than operator[]; internal cache is always maintained. 
- - @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, - kMPersp0, kMPersp1, kMPersp2 - @param value scalar to store in Matrix - */ - void set(int index, float value) { - MNN_ASSERT((unsigned)index < 9); - fMat[index] = value; - this->setTypeMask(kUnknown_Mask); - } - - /** Sets horizontal scale factor. - - @param v horizontal scale factor to store - */ - void setScaleX(float v) { - this->set(kMScaleX, v); - } - - /** Sets vertical scale factor. - - @param v vertical scale factor to store - */ - void setScaleY(float v) { - this->set(kMScaleY, v); - } - - /** Sets vertical skew factor. - - @param v vertical skew factor to store - */ - void setSkewY(float v) { - this->set(kMSkewY, v); - } - - /** Sets horizontal skew factor. - - @param v horizontal skew factor to store - */ - void setSkewX(float v) { - this->set(kMSkewX, v); - } - - /** Sets horizontal translation. - - @param v horizontal translation to store - */ - void setTranslateX(float v) { - this->set(kMTransX, v); - } - - /** Sets vertical translation. - - @param v vertical translation to store - */ - void setTranslateY(float v) { - this->set(kMTransY, v); - } - - /** Sets input x-axis perspective factor, which causes mapXY() to vary input x-axis values - inversely proportional to input y-axis values. - - @param v perspective factor - */ - void setPerspX(float v) { - this->set(kMPersp0, v); - } - - /** Sets input y-axis perspective factor, which causes mapXY() to vary input y-axis values - inversely proportional to input x-axis values. - - @param v perspective factor - */ - void setPerspY(float v) { - this->set(kMPersp1, v); - } - - /** Sets all values from parameters. 
Sets matrix to: - - | scaleX skewX transX | - | skewY scaleY transY | - | persp0 persp1 persp2 | - - @param scaleX horizontal scale factor to store - @param skewX horizontal skew factor to store - @param transX horizontal translation to store - @param skewY vertical skew factor to store - @param scaleY vertical scale factor to store - @param transY vertical translation to store - @param persp0 input x-axis values perspective factor to store - @param persp1 input y-axis values perspective factor to store - @param persp2 perspective scale factor to store - */ - void setAll(float scaleX, float skewX, float transX, float skewY, float scaleY, float transY, float persp0, - float persp1, float persp2) { - fMat[kMScaleX] = scaleX; - fMat[kMSkewX] = skewX; - fMat[kMTransX] = transX; - fMat[kMSkewY] = skewY; - fMat[kMScaleY] = scaleY; - fMat[kMTransY] = transY; - fMat[kMPersp0] = persp0; - fMat[kMPersp1] = persp1; - fMat[kMPersp2] = persp2; - this->setTypeMask(kUnknown_Mask); - } - - /** Copies nine scalar values contained by Matrix into buffer, in member value - ascending order: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, - kMPersp0, kMPersp1, kMPersp2. - - @param buffer storage for nine scalar values - */ - void get9(float buffer[9]) const { - memcpy(buffer, fMat, 9 * sizeof(float)); - } - - /** Sets Matrix to nine scalar values in buffer, in member value ascending order: - kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, kMPersp0, kMPersp1, - kMPersp2. - - Sets matrix to: - - | buffer[0] buffer[1] buffer[2] | - | buffer[3] buffer[4] buffer[5] | - | buffer[6] buffer[7] buffer[8] | - - In the future, set9 followed by get9 may not return the same values. Since Matrix - maps non-homogeneous coordinates, scaling all nine values produces an equivalent - transformation, possibly improving precision. - - @param buffer nine scalar values - */ - void set9(const float buffer[9]); - - /** Sets Matrix to identity; which has no effect on mapped Point. 
Sets Matrix to: - - | 1 0 0 | - | 0 1 0 | - | 0 0 1 | - - Also called setIdentity(); use the one that provides better inline - documentation. - */ - void reset(); - - /** Sets Matrix to identity; which has no effect on mapped Point. Sets Matrix to: - - | 1 0 0 | - | 0 1 0 | - | 0 0 1 | - - Also called reset(); use the one that provides better inline - documentation. - */ - void setIdentity() { - this->reset(); - } - - /** Sets Matrix to translate by (dx, dy). - - @param dx horizontal translation - @param dy vertical translation - */ - void setTranslate(float dx, float dy); - - /** Sets Matrix to scale by sx and sy, about a pivot point at (px, py). - The pivot point is unchanged when mapped with Matrix. - - @param sx horizontal scale factor - @param sy vertical scale factor - @param px pivot x - @param py pivot y - */ - void setScale(float sx, float sy, float px, float py); - - /** Sets Matrix to scale by sx and sy about at pivot point at (0, 0). - - @param sx horizontal scale factor - @param sy vertical scale factor - */ - void setScale(float sx, float sy); - - /** Sets Matrix to rotate by degrees about a pivot point at (px, py). - The pivot point is unchanged when mapped with Matrix. - - Positive degrees rotates clockwise. - - @param degrees angle of axes relative to upright axes - @param px pivot x - @param py pivot y - */ - void setRotate(float degrees, float px, float py); - - /** Sets Matrix to rotate by degrees about a pivot point at (0, 0). - Positive degrees rotates clockwise. - - @param degrees angle of axes relative to upright axes - */ - void setRotate(float degrees); - - /** Sets Matrix to rotate by sinValue and cosValue, about a pivot point at (px, py). - The pivot point is unchanged when mapped with Matrix. - - Vector (sinValue, cosValue) describes the angle of rotation relative to (0, 1). - Vector length specifies scale. 
- - @param sinValue rotation vector x-axis component - @param cosValue rotation vector y-axis component - @param px pivot x-axis - @param py pivot y-axis - */ - void setSinCos(float sinValue, float cosValue, float px, float py); - - /** Sets Matrix to rotate by sinValue and cosValue, about a pivot point at (0, 0). - - Vector (sinValue, cosValue) describes the angle of rotation relative to (0, 1). - Vector length specifies scale. - - @param sinValue rotation vector x-axis component - @param cosValue rotation vector y-axis component - */ - void setSinCos(float sinValue, float cosValue); - - /** Sets Matrix to skew by kx and ky, about a pivot point at (px, py). - The pivot point is unchanged when mapped with Matrix. - - @param kx horizontal skew factor - @param ky vertical skew factor - @param px pivot x - @param py pivot y - */ - void setSkew(float kx, float ky, float px, float py); - - /** Sets Matrix to skew by kx and ky, about a pivot point at (0, 0). - - @param kx horizontal skew factor - @param ky vertical skew factor - */ - void setSkew(float kx, float ky); - - /** Sets Matrix to Matrix a multiplied by Matrix b. Either a or b may be this. - - Given: - - | A B C | | J K L | - a = | D E F |, b = | M N O | - | G H I | | P Q R | - - sets Matrix to: - - | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR | - a * b = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR | - | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR | - - @param a Matrix on left side of multiply expression - @param b Matrix on right side of multiply expression - */ - void setConcat(const Matrix& a, const Matrix& b); - - /** Sets Matrix to Matrix multiplied by Matrix constructed from translation (dx, dy). - This can be thought of as moving the point to be mapped before applying Matrix. 
- - Given: - - | A B C | | 1 0 dx | - Matrix = | D E F |, T(dx, dy) = | 0 1 dy | - | G H I | | 0 0 1 | - - sets Matrix to: - - | A B C | | 1 0 dx | | A B A*dx+B*dy+C | - Matrix * T(dx, dy) = | D E F | | 0 1 dy | = | D E D*dx+E*dy+F | - | G H I | | 0 0 1 | | G H G*dx+H*dy+I | - - @param dx x-axis translation before applying Matrix - @param dy y-axis translation before applying Matrix - */ - void preTranslate(float dx, float dy); - - /** Sets Matrix to Matrix multiplied by Matrix constructed from scaling by (sx, sy) - about pivot point (px, py). - This can be thought of as scaling about a pivot point before applying Matrix. - - Given: - - | A B C | | sx 0 dx | - Matrix = | D E F |, S(sx, sy, px, py) = | 0 sy dy | - | G H I | | 0 0 1 | - - where - - dx = px - sx * px - dy = py - sy * py - - sets Matrix to: - - | A B C | | sx 0 dx | | A*sx B*sy A*dx+B*dy+C | - Matrix * S(sx, sy, px, py) = | D E F | | 0 sy dy | = | D*sx E*sy D*dx+E*dy+F | - | G H I | | 0 0 1 | | G*sx H*sy G*dx+H*dy+I | - - @param sx horizontal scale factor - @param sy vertical scale factor - @param px pivot x - @param py pivot y - */ - void preScale(float sx, float sy, float px, float py); - - /** Sets Matrix to Matrix multiplied by Matrix constructed from scaling by (sx, sy) - about pivot point (0, 0). - This can be thought of as scaling about the origin before applying Matrix. - - Given: - - | A B C | | sx 0 0 | - Matrix = | D E F |, S(sx, sy) = | 0 sy 0 | - | G H I | | 0 0 1 | - - sets Matrix to: - - | A B C | | sx 0 0 | | A*sx B*sy C | - Matrix * S(sx, sy) = | D E F | | 0 sy 0 | = | D*sx E*sy F | - | G H I | | 0 0 1 | | G*sx H*sy I | - - @param sx horizontal scale factor - @param sy vertical scale factor - */ - void preScale(float sx, float sy); - - /** Sets Matrix to Matrix multiplied by Matrix constructed from rotating by degrees - about pivot point (px, py). - This can be thought of as rotating about a pivot point before applying Matrix. - - Positive degrees rotates clockwise. 
- - Given: - - | A B C | | c -s dx | - Matrix = | D E F |, R(degrees, px, py) = | s c dy | - | G H I | | 0 0 1 | - - where - - c = cos(degrees) - s = sin(degrees) - dx = s * py + (1 - c) * px - dy = -s * px + (1 - c) * py - - sets Matrix to: - - | A B C | | c -s dx | | Ac+Bs -As+Bc A*dx+B*dy+C | - Matrix * R(degrees, px, py) = | D E F | | s c dy | = | Dc+Es -Ds+Ec D*dx+E*dy+F | - | G H I | | 0 0 1 | | Gc+Hs -Gs+Hc G*dx+H*dy+I | - - @param degrees angle of axes relative to upright axes - @param px pivot x - @param py pivot y - */ - void preRotate(float degrees, float px, float py); - - /** Sets Matrix to Matrix multiplied by Matrix constructed from rotating by degrees - about pivot point (0, 0). - This can be thought of as rotating about the origin before applying Matrix. - - Positive degrees rotates clockwise. - - Given: - - | A B C | | c -s 0 | - Matrix = | D E F |, R(degrees, px, py) = | s c 0 | - | G H I | | 0 0 1 | - - where - - c = cos(degrees) - s = sin(degrees) - - sets Matrix to: - - | A B C | | c -s 0 | | Ac+Bs -As+Bc C | - Matrix * R(degrees, px, py) = | D E F | | s c 0 | = | Dc+Es -Ds+Ec F | - | G H I | | 0 0 1 | | Gc+Hs -Gs+Hc I | - - @param degrees angle of axes relative to upright axes - */ - void preRotate(float degrees); - - /** Sets Matrix to Matrix multiplied by Matrix constructed from skewing by (kx, ky) - about pivot point (px, py). - This can be thought of as skewing about a pivot point before applying Matrix. 
- - Given: - - | A B C | | 1 kx dx | - Matrix = | D E F |, K(kx, ky, px, py) = | ky 1 dy | - | G H I | | 0 0 1 | - - where - - dx = -kx * py - dy = -ky * px - - sets Matrix to: - - | A B C | | 1 kx dx | | A+B*ky A*kx+B A*dx+B*dy+C | - Matrix * K(kx, ky, px, py) = | D E F | | ky 1 dy | = | D+E*ky D*kx+E D*dx+E*dy+F | - | G H I | | 0 0 1 | | G+H*ky G*kx+H G*dx+H*dy+I | - - @param kx horizontal skew factor - @param ky vertical skew factor - @param px pivot x - @param py pivot y - */ - void preSkew(float kx, float ky, float px, float py); - - /** Sets Matrix to Matrix multiplied by Matrix constructed from skewing by (kx, ky) - about pivot point (0, 0). - This can be thought of as skewing about the origin before applying Matrix. - - Given: - - | A B C | | 1 kx 0 | - Matrix = | D E F |, K(kx, ky) = | ky 1 0 | - | G H I | | 0 0 1 | - - sets Matrix to: - - | A B C | | 1 kx 0 | | A+B*ky A*kx+B C | - Matrix * K(kx, ky) = | D E F | | ky 1 0 | = | D+E*ky D*kx+E F | - | G H I | | 0 0 1 | | G+H*ky G*kx+H I | - - @param kx horizontal skew factor - @param ky vertical skew factor - */ - void preSkew(float kx, float ky); - - /** Sets Matrix to Matrix multiplied by Matrix other. - This can be thought of mapping by other before applying Matrix. - - Given: - - | A B C | | J K L | - Matrix = | D E F |, other = | M N O | - | G H I | | P Q R | - - sets Matrix to: - - | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR | - Matrix * other = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR | - | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR | - - @param other Matrix on right side of multiply expression - */ - void preConcat(const Matrix& other); - - /** Sets Matrix to Matrix constructed from translation (dx, dy) multiplied by Matrix. - This can be thought of as moving the point to be mapped after applying Matrix. 
- - Given: - - | J K L | | 1 0 dx | - Matrix = | M N O |, T(dx, dy) = | 0 1 dy | - | P Q R | | 0 0 1 | - - sets Matrix to: - - | 1 0 dx | | J K L | | J+dx*P K+dx*Q L+dx*R | - T(dx, dy) * Matrix = | 0 1 dy | | M N O | = | M+dy*P N+dy*Q O+dy*R | - | 0 0 1 | | P Q R | | P Q R | - - @param dx x-axis translation after applying Matrix - @param dy y-axis translation after applying Matrix - */ - void postTranslate(float dx, float dy); - - /** Sets Matrix to Matrix constructed from scaling by (sx, sy) about pivot point - (px, py), multiplied by Matrix. - This can be thought of as scaling about a pivot point after applying Matrix. - - Given: - - | J K L | | sx 0 dx | - Matrix = | M N O |, S(sx, sy, px, py) = | 0 sy dy | - | P Q R | | 0 0 1 | - - where - - dx = px - sx * px - dy = py - sy * py - - sets Matrix to: - - | sx 0 dx | | J K L | | sx*J+dx*P sx*K+dx*Q sx*L+dx+R | - S(sx, sy, px, py) * Matrix = | 0 sy dy | | M N O | = | sy*M+dy*P sy*N+dy*Q sy*O+dy*R | - | 0 0 1 | | P Q R | | P Q R | - - @param sx horizontal scale factor - @param sy vertical scale factor - @param px pivot x - @param py pivot y - */ - void postScale(float sx, float sy, float px, float py); - - /** Sets Matrix to Matrix constructed from scaling by (sx, sy) about pivot point - (0, 0), multiplied by Matrix. - This can be thought of as scaling about the origin after applying Matrix. - - Given: - - | J K L | | sx 0 0 | - Matrix = | M N O |, S(sx, sy) = | 0 sy 0 | - | P Q R | | 0 0 1 | - - sets Matrix to: - - | sx 0 0 | | J K L | | sx*J sx*K sx*L | - S(sx, sy) * Matrix = | 0 sy 0 | | M N O | = | sy*M sy*N sy*O | - | 0 0 1 | | P Q R | | P Q R | - - @param sx horizontal scale factor - @param sy vertical scale factor - */ - void postScale(float sx, float sy); - - /** Sets Matrix to Matrix constructed from scaling by (1/divx, 1/divy) about pivot point (px, py), multiplied by - Matrix. - - Returns false if either divx or divy is zero. 
- - Given: - - | J K L | | sx 0 0 | - Matrix = | M N O |, I(divx, divy) = | 0 sy 0 | - | P Q R | | 0 0 1 | - - where - - sx = 1 / divx - sy = 1 / divy - - sets Matrix to: - - | sx 0 0 | | J K L | | sx*J sx*K sx*L | - I(divx, divy) * Matrix = | 0 sy 0 | | M N O | = | sy*M sy*N sy*O | - | 0 0 1 | | P Q R | | P Q R | - - @param divx integer divisor for inverse scale in x - @param divy integer divisor for inverse scale in y - @return true on successful scale - */ - bool postIDiv(int divx, int divy); - - /** Sets Matrix to Matrix constructed from rotating by degrees about pivot point - (px, py), multiplied by Matrix. - This can be thought of as rotating about a pivot point after applying Matrix. - - Positive degrees rotates clockwise. - - Given: - - | J K L | | c -s dx | - Matrix = | M N O |, R(degrees, px, py) = | s c dy | - | P Q R | | 0 0 1 | - - where - - c = cos(degrees) - s = sin(degrees) - dx = s * py + (1 - c) * px - dy = -s * px + (1 - c) * py - - sets Matrix to: - - |c -s dx| |J K L| |cJ-sM+dx*P cK-sN+dx*Q cL-sO+dx+R| - R(degrees, px, py) * Matrix = |s c dy| |M N O| = |sJ+cM+dy*P sK+cN+dy*Q sL+cO+dy*R| - |0 0 1| |P Q R| | P Q R| - - @param degrees angle of axes relative to upright axes - @param px pivot x - @param py pivot y - */ - void postRotate(float degrees, float px, float py); - - /** Sets Matrix to Matrix constructed from rotating by degrees about pivot point - (0, 0), multiplied by Matrix. - This can be thought of as rotating about the origin after applying Matrix. - - Positive degrees rotates clockwise. 
- - Given: - - | J K L | | c -s 0 | - Matrix = | M N O |, R(degrees, px, py) = | s c 0 | - | P Q R | | 0 0 1 | - - where - - c = cos(degrees) - s = sin(degrees) - - sets Matrix to: - - | c -s dx | | J K L | | cJ-sM cK-sN cL-sO | - R(degrees, px, py) * Matrix = | s c dy | | M N O | = | sJ+cM sK+cN sL+cO | - | 0 0 1 | | P Q R | | P Q R | - - @param degrees angle of axes relative to upright axes - */ - void postRotate(float degrees); - - /** Sets Matrix to Matrix constructed from skewing by (kx, ky) about pivot point - (px, py), multiplied by Matrix. - This can be thought of as skewing about a pivot point after applying Matrix. - - Given: - - | J K L | | 1 kx dx | - Matrix = | M N O |, K(kx, ky, px, py) = | ky 1 dy | - | P Q R | | 0 0 1 | - - where - - dx = -kx * py - dy = -ky * px - - sets Matrix to: - - | 1 kx dx| |J K L| |J+kx*M+dx*P K+kx*N+dx*Q L+kx*O+dx+R| - K(kx, ky, px, py) * Matrix = |ky 1 dy| |M N O| = |ky*J+M+dy*P ky*K+N+dy*Q ky*L+O+dy*R| - | 0 0 1| |P Q R| | P Q R| - - @param kx horizontal skew factor - @param ky vertical skew factor - @param px pivot x - @param py pivot y - */ - void postSkew(float kx, float ky, float px, float py); - - /** Sets Matrix to Matrix constructed from skewing by (kx, ky) about pivot point - (0, 0), multiplied by Matrix. - This can be thought of as skewing about the origin after applying Matrix. - - Given: - - | J K L | | 1 kx 0 | - Matrix = | M N O |, K(kx, ky) = | ky 1 0 | - | P Q R | | 0 0 1 | - - sets Matrix to: - - | 1 kx 0 | | J K L | | J+kx*M K+kx*N L+kx*O | - K(kx, ky) * Matrix = | ky 1 0 | | M N O | = | ky*J+M ky*K+N ky*L+O | - | 0 0 1 | | P Q R | | P Q R | - - @param kx horizontal skew factor - @param ky vertical skew factor - */ - void postSkew(float kx, float ky); - - /** Sets Matrix to Matrix other multiplied by Matrix. - This can be thought of mapping by other after applying Matrix. 
- - Given: - - | J K L | | A B C | - Matrix = | M N O |, other = | D E F | - | P Q R | | G H I | - - sets Matrix to: - - | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR | - other * Matrix = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR | - | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR | - - @param other Matrix on left side of multiply expression - */ - void postConcat(const Matrix& other); - - /** \enum Matrix::ScaleToFit - ScaleToFit describes how Matrix is constructed to map one Rect to another. - ScaleToFit may allow Matrix to have unequal horizontal and vertical scaling, - or may restrict Matrix to square scaling. If restricted, ScaleToFit specifies - how Matrix maps to the side or center of the destination Rect. - */ - enum ScaleToFit { - kFill_ScaleToFit, //!< scales in x and y to fill destination Rect - kStart_ScaleToFit, //!< scales and aligns to left and top - kCenter_ScaleToFit, //!< scales and aligns to center - kEnd_ScaleToFit, //!< scales and aligns to right and bottom - }; - - /** Sets Matrix to scale and translate src Rect to dst Rect. stf selects whether - mapping completely fills dst or preserves the aspect ratio, and how to align - src within dst. Returns false if src is empty, and sets Matrix to identity. - Returns true if dst is empty, and sets Matrix to: - - | 0 0 0 | - | 0 0 0 | - | 0 0 1 | - - @param src Rect to map from - @param dst Rect to map to - @param stf one of: kFill_ScaleToFit, kStart_ScaleToFit, - kCenter_ScaleToFit, kEnd_ScaleToFit - @return true if Matrix can represent Rect mapping - */ - bool setRectToRect(const Rect& src, const Rect& dst, ScaleToFit stf); - - /** Returns Matrix set to scale and translate src Rect to dst Rect. stf selects - whether mapping completely fills dst or preserves the aspect ratio, and how to - align src within dst. Returns the identity Matrix if src is empty. 
If dst is - empty, returns Matrix set to: - - | 0 0 0 | - | 0 0 0 | - | 0 0 1 | - - @param src Rect to map from - @param dst Rect to map to - @param stf one of: kFill_ScaleToFit, kStart_ScaleToFit, - kCenter_ScaleToFit, kEnd_ScaleToFit - @return Matrix mapping src to dst - */ - static Matrix MakeRectToRect(const Rect& src, const Rect& dst, ScaleToFit stf) { - Matrix m; - m.setRectToRect(src, dst, stf); - return m; - } - - /** Sets Matrix to map src to dst. count must be zero or greater, and four or less. - - If count is zero, sets Matrix to identity and returns true. - If count is one, sets Matrix to translate and returns true. - If count is two or more, sets Matrix to map Point if possible; returns false - if Matrix cannot be constructed. If count is four, Matrix may include - perspective. - - @param src Point to map from - @param dst Point to map to - @param count number of Point in src and dst - @return true if Matrix was constructed successfully - */ - bool setPolyToPoly(const Point src[], const Point dst[], int count); - - /** Sets inverse to reciprocal matrix, returning true if Matrix can be inverted. - Geometrically, if Matrix maps from source to destination, inverse Matrix - maps from destination to source. If Matrix can not be inverted, inverse is - unchanged. - - @param inverse storage for inverted Matrix; may be nullptr - @return true if Matrix can be inverted - */ - bool invert(Matrix* inverse) const { - // Allow the trivial case to be inlined. - if (this->isIdentity()) { - if (inverse) { - inverse->reset(); - } - return true; - } - return this->invertNonIdentity(inverse); - } - - /** Fills affine with identity values in column major order. - Sets affine to: - - | 1 0 0 | - | 0 1 0 | - - Affine 3x2 matrices in column major order are used by OpenGL and XPS. - - @param affine storage for 3x2 affine matrix - */ - static void SetAffineIdentity(float affine[6]); - - /** Fills affine in column major order. 
Sets affine to: - - | scale-x skew-x translate-x | - | skew-y scale-y translate-y | - - If Matrix contains perspective, returns false and leaves affine unchanged. - - @param affine storage for 3x2 affine matrix; may be nullptr - @return true if Matrix does not contain perspective - */ - bool asAffine(float affine[6]) const; - - /** Sets Matrix to affine values, passed in column major order. Given affine, - column, then row, as: - - | scale-x skew-x translate-x | - | skew-y scale-y translate-y | - - Matrix is set, row, then column, to: - - | scale-x skew-x translate-x | - | skew-y scale-y translate-y | - | 0 0 1 | - - @param affine 3x2 affine matrix - */ - void setAffine(const float affine[6]); - - /** Maps src Point array of length count to dst Point array of equal or greater - length. Point are mapped by multiplying each Point by Matrix. Given: - - | A B C | | x | - Matrix = | D E F |, pt = | y | - | G H I | | 1 | - - where - - for (i = 0; i < count; ++i) { - x = src[i].fX - y = src[i].fY - } - - each dst Point is computed as: - - |A B C| |x| Ax+By+C Dx+Ey+F - Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- - |G H I| |1| Gx+Hy+I Gx+Hy+I - - src and dst may point to the same storage. - - @param dst storage for mapped Point - @param src Point to transform - @param count number of Point to transform - */ - void mapPoints(Point dst[], const Point src[], int count) const { - MNN_ASSERT((dst && src && count > 0) || 0 == count); - // no partial overlap - MNN_ASSERT(src == dst || &dst[count] <= &src[0] || &src[count] <= &dst[0]); - this->getMapPtsProc()(*this, dst, src, count); - } - - /** Maps pts Point array of length count in place. Point are mapped by multiplying - each Point by Matrix. 
Given: - - | A B C | | x | - Matrix = | D E F |, pt = | y | - | G H I | | 1 | - - where - - for (i = 0; i < count; ++i) { - x = pts[i].fX - y = pts[i].fY - } - - each resulting pts Point is computed as: - - |A B C| |x| Ax+By+C Dx+Ey+F - Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- - |G H I| |1| Gx+Hy+I Gx+Hy+I - - @param pts storage for mapped Point - @param count number of Point to transform - */ - void mapPoints(Point pts[], int count) const { - this->mapPoints(pts, pts, count); - } - - /** Maps Point (x, y) to result. Point is mapped by multiplying by Matrix. Given: - - | A B C | | x | - Matrix = | D E F |, pt = | y | - | G H I | | 1 | - - result is computed as: - - |A B C| |x| Ax+By+C Dx+Ey+F - Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- - |G H I| |1| Gx+Hy+I Gx+Hy+I - - @param x x-axis value of Point to map - @param y y-axis value of Point to map - @param result storage for mapped Point - */ - void mapXY(float x, float y, Point* result) const { - this->getMapXYProc()(*this, x, y, result); - } - - /** Returns Point (x, y) multiplied by Matrix. Given: - - | A B C | | x | - Matrix = | D E F |, pt = | y | - | G H I | | 1 | - - result is computed as: - - |A B C| |x| Ax+By+C Dx+Ey+F - Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , ------- - |G H I| |1| Gx+Hy+I Gx+Hy+I - - @param x x-axis value of Point to map - @param y y-axis value of Point to map - @return mapped Point - */ - Point mapXY(float x, float y) const { - Point result; - this->getMapXYProc()(*this, x, y, &result); - return result; - } - - /** Sets dst to bounds of src corners mapped by Matrix. - Returns true if mapped corners are dst corners. - - Returned value is the same as calling rectStaysRect(). 
- - @param dst storage for bounds of mapped Point - @param src Rect to map - @return true if dst is equivalent to mapped src - */ - bool mapRect(Rect* dst, const Rect& src) const; - - /** Sets rect to bounds of rect corners mapped by Matrix. - Returns true if mapped corners are computed rect corners. - - Returned value is the same as calling rectStaysRect(). - - @param rect rectangle to map, and storage for bounds of mapped corners - @return true if result is equivalent to mapped src - */ - bool mapRect(Rect* rect) const { - return this->mapRect(rect, *rect); - } - - /** Returns bounds of src corners mapped by Matrix. - - @param src rectangle to map - @return mapped bounds - */ - Rect mapRect(const Rect& src) const { - Rect dst; - (void)this->mapRect(&dst, src); - return dst; - } - - /** Sets dst to bounds of src corners mapped by Matrix. If matrix contains - elements other than scale or translate: asserts if SK_DEBUG is defined; - otherwise, results are undefined. - - @param dst storage for bounds of mapped Point - @param src Rect to map - */ - void mapRectScaleTranslate(Rect* dst, const Rect& src) const; - - /** Returns true if Matrix equals m, using an efficient comparison. - - Returns false when the sign of zero values is the different; when one - matrix has positive zero value and the other has negative zero value. - - Returns true even when both Matrix contain NaN. - - NaN never equals any value, including itself. To improve performance, NaN values - are treated as bit patterns that are equal if their bit patterns are equal. - - @param m Matrix to compare - @return true if m and Matrix are represented by identical bit patterns - */ - bool cheapEqualTo(const Matrix& m) const { - return 0 == memcmp(fMat, m.fMat, sizeof(fMat)); - } - - /** Compares a and b; returns true if a and b are numerically equal. Returns true - even if sign of zero values are different. Returns false if either Matrix - contains NaN, even if the other Matrix also contains NaN. 
- - @param a Matrix to compare - @param b Matrix to compare - @return true if Matrix a and Matrix b are numerically equal - */ - friend MNN_PUBLIC bool operator==(const Matrix& a, const Matrix& b); - - /** Compares a and b; returns true if a and b are not numerically equal. Returns false - even if sign of zero values are different. Returns true if either Matrix - contains NaN, even if the other Matrix also contains NaN. - - @param a Matrix to compare - @param b Matrix to compare - @return true if Matrix a and Matrix b are numerically not equal - */ - friend MNN_PUBLIC bool operator!=(const Matrix& a, const Matrix& b) { - return !(a == b); - } - - /** Writes text representation of Matrix to standard output. Floating point values - are written with limited precision; it may not be possible to reconstruct - original Matrix from output. - */ - void dump() const; - - /** Returns the minimum scaling factor of Matrix by decomposing the scaling and - skewing elements. - Returns -1 if scale factor overflows or Matrix contains perspective. - - @return minimum scale factor - */ - float getMinScale() const; - - /** Returns the maximum scaling factor of Matrix by decomposing the scaling and - skewing elements. - Returns -1 if scale factor overflows or Matrix contains perspective. - - @return maximum scale factor - */ - float getMaxScale() const; - - /** Sets scaleFactors[0] to the minimum scaling factor, and scaleFactors[1] to the - maximum scaling factor. Scaling factors are computed by decomposing - the Matrix scaling and skewing elements. - - Returns true if scaleFactors are found; otherwise, returns false and sets - scaleFactors to undefined values. - - @param scaleFactors storage for minimum and maximum scale factors - @return true if scale factors were computed correctly - */ - bool getMinMaxScales(float scaleFactors[2]) const; - - /** Returns reference to const identity Matrix. 
Returned Matrix is set to: - - | 1 0 0 | - | 0 1 0 | - | 0 0 1 | - - @return const identity Matrix - */ - static const Matrix& I(); - - /** Returns reference to a const Matrix with invalid values. Returned Matrix is set - to: - - | SK_ScalarMax SK_ScalarMax SK_ScalarMax | - | SK_ScalarMax SK_ScalarMax SK_ScalarMax | - | SK_ScalarMax SK_ScalarMax SK_ScalarMax | - - @return const invalid Matrix - */ - static const Matrix& InvalidMatrix(); - - /** Returns Matrix a multiplied by Matrix b. - - Given: - - | A B C | | J K L | - a = | D E F |, b = | M N O | - | G H I | | P Q R | - - sets Matrix to: - - | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR | - a * b = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR | - | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR | - - @param a Matrix on left side of multiply expression - @param b Matrix on right side of multiply expression - @return Matrix computed from a times b - */ - static Matrix Concat(const Matrix& a, const Matrix& b) { - Matrix result; - result.setConcat(a, b); - return result; - } - - /** Sets internal cache to unknown state. Use to force update after repeated - modifications to Matrix element reference returned by operator[](int index). - */ - void dirtyMatrixTypeCache() { - this->setTypeMask(kUnknown_Mask); - } - - /** Initializes Matrix with scale and translate elements. 
- - | sx 0 tx | - | 0 sy ty | - | 0 0 1 | - - @param sx horizontal scale factor to store - @param sy vertical scale factor to store - @param tx horizontal translation to store - @param ty vertical translation to store - */ - void setScaleTranslate(float sx, float sy, float tx, float ty) { - fMat[kMScaleX] = sx; - fMat[kMSkewX] = 0; - fMat[kMTransX] = tx; - - fMat[kMSkewY] = 0; - fMat[kMScaleY] = sy; - fMat[kMTransY] = ty; - - fMat[kMPersp0] = 0; - fMat[kMPersp1] = 0; - fMat[kMPersp2] = 1; - - unsigned mask = 0; - if (sx != 1 || sy != 1) { - mask |= kScale_Mask; - } - if (tx || ty) { - mask |= kTranslate_Mask; - } - this->setTypeMask(mask | kRectStaysRect_Mask); - } - - /** Returns true if all elements of the matrix are finite. Returns false if any - element is infinity, or NaN. - - @return true if matrix has only finite elements - */ - -private: - /** Set if the matrix will map a rectangle to another rectangle. This - can be true if the matrix is scale-only, or rotates a multiple of - 90 degrees. - - This bit will be set on identity matrices - */ - static constexpr int kRectStaysRect_Mask = 0x10; - - /** Set if the perspective bit is valid even though the rest of - the matrix is Unknown. 
- */ - static constexpr int kOnlyPerspectiveValid_Mask = 0x40; - - static constexpr int kUnknown_Mask = 0x80; - - static constexpr int kORableMasks = kTranslate_Mask | kScale_Mask | kAffine_Mask | kPerspective_Mask; - - static constexpr int kAllMasks = - kTranslate_Mask | kScale_Mask | kAffine_Mask | kPerspective_Mask | kRectStaysRect_Mask; - - float fMat[9]; - mutable uint32_t fTypeMask; - - static void ComputeInv(float dst[9], const float src[9], double invDet, bool isPersp); - - uint8_t computeTypeMask() const; - uint8_t computePerspectiveTypeMask() const; - - void setTypeMask(int mask) { - // allow kUnknown or a valid mask - MNN_ASSERT(kUnknown_Mask == mask || (mask & kAllMasks) == mask || - ((kUnknown_Mask | kOnlyPerspectiveValid_Mask) & mask) == - (kUnknown_Mask | kOnlyPerspectiveValid_Mask)); - fTypeMask = (uint8_t)(mask); - } - - void orTypeMask(int mask) { - MNN_ASSERT((mask & kORableMasks) == mask); - fTypeMask = (uint8_t)(fTypeMask | mask); - } - - void clearTypeMask(int mask) { - // only allow a valid mask - MNN_ASSERT((mask & kAllMasks) == mask); - fTypeMask = fTypeMask & ~mask; - } - - TypeMask getPerspectiveTypeMaskOnly() const { - if ((fTypeMask & kUnknown_Mask) && !(fTypeMask & kOnlyPerspectiveValid_Mask)) { - fTypeMask = this->computePerspectiveTypeMask(); - } - return (TypeMask)(fTypeMask & 0xF); - } - - /** Returns true if we already know that the matrix is identity; - false otherwise. 
- */ - bool isTriviallyIdentity() const { - if (fTypeMask & kUnknown_Mask) { - return false; - } - return ((fTypeMask & 0xF) == 0); - } - - inline void updateTranslateMask() { - if ((fMat[kMTransX] != 0) | (fMat[kMTransY] != 0)) { - fTypeMask |= kTranslate_Mask; - } else { - fTypeMask &= ~kTranslate_Mask; - } - } - - typedef void (*MapXYProc)(const Matrix& mat, float x, float y, Point* result); - - static MapXYProc GetMapXYProc(TypeMask mask) { - MNN_ASSERT((mask & ~kAllMasks) == 0); - return gMapXYProcs[mask & kAllMasks]; - } - - MapXYProc getMapXYProc() const { - return GetMapXYProc(this->getType()); - } - - typedef void (*MapPtsProc)(const Matrix& mat, Point dst[], const Point src[], int count); - - static MapPtsProc GetMapPtsProc(TypeMask mask) { - MNN_ASSERT((mask & ~kAllMasks) == 0); - return gMapPtsProcs[mask & kAllMasks]; - } - - MapPtsProc getMapPtsProc() const { - return GetMapPtsProc(this->getType()); - } - - bool invertNonIdentity(Matrix* inverse) const; - - static void Identity_xy(const Matrix&, float, float, Point*); - static void Trans_xy(const Matrix&, float, float, Point*); - static void Scale_xy(const Matrix&, float, float, Point*); - static void ScaleTrans_xy(const Matrix&, float, float, Point*); - static void Rot_xy(const Matrix&, float, float, Point*); - static void RotTrans_xy(const Matrix&, float, float, Point*); - static void Persp_xy(const Matrix&, float, float, Point*); - - static const MapXYProc gMapXYProcs[]; - - static void Identity_pts(const Matrix&, Point[], const Point[], int); - static void Trans_pts(const Matrix&, Point dst[], const Point[], int); - static void Scale_pts(const Matrix&, Point dst[], const Point[], int); - static void ScaleTrans_pts(const Matrix&, Point dst[], const Point[], int count); - static void Persp_pts(const Matrix&, Point dst[], const Point[], int); - - static void Affine_vpts(const Matrix&, Point dst[], const Point[], int); - - static const MapPtsProc gMapPtsProcs[]; - static bool Poly2Proc(const Point 
srcPt[], Matrix* dst); - static bool Poly3Proc(const Point srcPt[], Matrix* dst); - static bool Poly4Proc(const Point srcPt[], Matrix* dst); -}; -} // namespace CV -} // namespace MNN -#endif diff --git a/MNN/Rect.h b/MNN/Rect.h deleted file mode 100644 index 91c4950f..00000000 --- a/MNN/Rect.h +++ /dev/null @@ -1,580 +0,0 @@ -// -// Rect.h -// MNN -// -// Modified by jiangxiaotang on 2018/09/19. -// Copyright © 2018, Alibaba Group Holding Limited -// - -/* - * Copyright 2006 The Android Open Source Project - * - * Use of this source code is governed by a BSD-style license that can be - * found in the LICENSE file. - */ - -/* Generated by tools/bookmaker from include/core/Rect.h and docs/SkRect_Reference.bmh - on 2018-07-13 08:15:11. Additional documentation and examples can be found at: - https://skia.org/user/api/SkRect_Reference - - You may edit either file directly. Structural changes to public interfaces require - editing both files. After editing docs/SkRect_Reference.bmh, run: - bookmaker -b docs -i include/core/Rect.h -p - to create an updated version of this file. - */ - -#ifndef SkRect_DEFINED -#define SkRect_DEFINED - -#include -#include -#include -#include - -namespace MNN { -namespace CV { - -struct Point { - float fX; - float fY; - - void set(float x, float y) { - fX = x; - fY = y; - } -}; - -/** \struct Rect - Rect holds four float coordinates describing the upper and - lower bounds of a rectangle. Rect may be created from outer bounds or - from position, width, and height. Rect describes an area; if its right - is less than or equal to its left, or if its bottom is less than or equal to - its top, it is considered empty. -*/ -struct MNN_PUBLIC Rect { - float fLeft; //!< smaller x-axis bounds - float fTop; //!< smaller y-axis bounds - float fRight; //!< larger x-axis bounds - float fBottom; //!< larger y-axis bounds - - /** Returns constructed Rect set to (0, 0, 0, 0). 
- Many other rectangles are empty; if left is equal to or greater than right, - or if top is equal to or greater than bottom. Setting all members to zero - is a convenience, but does not designate a special empty rectangle. - - @return bounds (0, 0, 0, 0) - */ - static constexpr Rect MakeEmpty() { - return Rect{0, 0, 0, 0}; - } - -#ifdef SK_SUPPORT_LEGACY_RECTMAKELARGEST - /** Deprecated. - */ - static Rect MakeLargest() { - return {SK_ScalarMin, SK_ScalarMin, SK_ScalarMax, SK_ScalarMax}; - } -#endif - - /** Returns constructed Rect set to float values (0, 0, w, h). Does not - validate input; w or h may be negative. - - Passing integer values may generate a compiler warning since Rect cannot - represent 32-bit integers exactly. Use SkIRect for an exact integer rectangle. - - @param w float width of constructed Rect - @param h float height of constructed Rect - @return bounds (0, 0, w, h) - */ - static constexpr Rect MakeWH(float w, float h) { - return Rect{0, 0, w, h}; - } - - /** Returns constructed Rect set to integer values (0, 0, w, h). Does not validate - input; w or h may be negative. - - Use to avoid a compiler warning that input may lose precision when stored. - Use SkIRect for an exact integer rectangle. - - @param w integer width of constructed Rect - @param h integer height of constructed Rect - @return bounds (0, 0, w, h) - */ - static Rect MakeIWH(int w, int h) { - Rect r; - r.set(0, 0, (float)(w), (float)(h)); - return r; - } - - /** Returns constructed Rect set to (l, t, r, b). Does not sort input; Rect may - result in fLeft greater than fRight, or fTop greater than fBottom. - - @param l float stored in fLeft - @param t float stored in fTop - @param r float stored in fRight - @param b float stored in fBottom - @return bounds (l, t, r, b) - */ - static constexpr Rect MakeLTRB(float l, float t, float r, float b) { - return Rect{l, t, r, b}; - } - - /** Returns constructed Rect set to (x, y, x + w, y + h). 
Does not validate input; - w or h may be negative. - - @param x stored in fLeft - @param y stored in fTop - @param w added to x and stored in fRight - @param h added to y and stored in fBottom - @return bounds at (x, y) with width w and height h - */ - static constexpr Rect MakeXYWH(float x, float y, float w, float h) { - return Rect{x, y, x + w, y + h}; - } - - /** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal - to or greater than fBottom. Call sort() to reverse rectangles with negative - width() or height(). - - @return true if width() or height() are zero or negative - */ - bool isEmpty() const { - // We write it as the NOT of a non-empty rect, so we will return true if any values - // are NaN. - return !(fLeft < fRight && fTop < fBottom); - } - - /** Returns true if fLeft is equal to or less than fRight, or if fTop is equal - to or less than fBottom. Call sort() to reverse rectangles with negative - width() or height(). - - @return true if width() or height() are zero or positive - */ - bool isSorted() const { - return fLeft <= fRight && fTop <= fBottom; - } - - /** Returns left edge of Rect, if sorted. Call isSorted() to see if Rect is valid. - Call sort() to reverse fLeft and fRight if needed. - - @return fLeft - */ - float x() const { - return fLeft; - } - - /** Returns top edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid, - and sort() to reverse fTop and fBottom if needed. - - @return fTop - */ - float y() const { - return fTop; - } - - /** Returns left edge of Rect, if sorted. Call isSorted() to see if Rect is valid. - Call sort() to reverse fLeft and fRight if needed. - - @return fLeft - */ - float left() const { - return fLeft; - } - - /** Returns top edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid, - and sort() to reverse fTop and fBottom if needed. - - @return fTop - */ - float top() const { - return fTop; - } - - /** Returns right edge of Rect, if sorted. 
Call isSorted() to see if Rect is valid. - Call sort() to reverse fLeft and fRight if needed. - - @return fRight - */ - float right() const { - return fRight; - } - - /** Returns bottom edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid, - and sort() to reverse fTop and fBottom if needed. - - @return fBottom - */ - float bottom() const { - return fBottom; - } - - /** Returns span on the x-axis. This does not check if Rect is sorted, or if - result fits in 32-bit float; result may be negative or infinity. - - @return fRight minus fLeft - */ - float width() const { - return fRight - fLeft; - } - - /** Returns span on the y-axis. This does not check if Rect is sorted, or if - result fits in 32-bit float; result may be negative or infinity. - - @return fBottom minus fTop - */ - float height() const { - return fBottom - fTop; - } - - /** Returns average of left edge and right edge. Result does not change if Rect - is sorted. Result may overflow to infinity if Rect is far from the origin. - - @return midpoint in x - */ - float centerX() const { - // don't use floatHalf(fLeft + fBottom) as that might overflow before the 0.5 - return 0.5f * (fLeft) + 0.5f * (fRight); - } - - /** Returns average of top edge and bottom edge. Result does not change if Rect - is sorted. - - @return midpoint in y - */ - float centerY() const { - // don't use floatHalf(fTop + fBottom) as that might overflow before the 0.5 - return 0.5f * (fTop) + 0.5f * (fBottom); - } - - /** Sets Rect to (0, 0, 0, 0). - - Many other rectangles are empty; if left is equal to or greater than right, - or if top is equal to or greater than bottom. Setting all members to zero - is a convenience, but does not designate a special empty rectangle. - */ - void setEmpty() { - *this = MakeEmpty(); - } - - /** Sets Rect to (left, top, right, bottom). - left and right are not sorted; left is not necessarily less than right. - top and bottom are not sorted; top is not necessarily less than bottom. 
- - @param left stored in fLeft - @param top stored in fTop - @param right stored in fRight - @param bottom stored in fBottom - */ - void set(float left, float top, float right, float bottom) { - fLeft = left; - fTop = top; - fRight = right; - fBottom = bottom; - } - - /** Sets Rect to (left, top, right, bottom). - left and right are not sorted; left is not necessarily less than right. - top and bottom are not sorted; top is not necessarily less than bottom. - - @param left stored in fLeft - @param top stored in fTop - @param right stored in fRight - @param bottom stored in fBottom - */ - void setLTRB(float left, float top, float right, float bottom) { - this->set(left, top, right, bottom); - } - - /** Sets Rect to (left, top, right, bottom). - All parameters are promoted from integer to scalar. - left and right are not sorted; left is not necessarily less than right. - top and bottom are not sorted; top is not necessarily less than bottom. - - @param left promoted to float and stored in fLeft - @param top promoted to float and stored in fTop - @param right promoted to float and stored in fRight - @param bottom promoted to float and stored in fBottom - */ - void iset(int left, int top, int right, int bottom) { - fLeft = (float)(left); - fTop = (float)(top); - fRight = (float)(right); - fBottom = (float)(bottom); - } - - /** Sets Rect to (0, 0, width, height). - width and height may be zero or negative. width and height are promoted from - integer to float, large values may lose precision. - - @param width promoted to float and stored in fRight - @param height promoted to float and stored in fBottom - */ - void isetWH(int width, int height) { - fLeft = fTop = 0; - fRight = (float)(width); - fBottom = (float)(height); - } - - /** Sets Rect to (x, y, x + width, y + height). Does not validate input; - width or height may be negative. 
- - @param x stored in fLeft - @param y stored in fTop - @param width added to x and stored in fRight - @param height added to y and stored in fBottom - */ - void setXYWH(float x, float y, float width, float height) { - fLeft = x; - fTop = y; - fRight = x + width; - fBottom = y + height; - } - - /** Sets Rect to (0, 0, width, height). Does not validate input; - width or height may be negative. - - @param width stored in fRight - @param height stored in fBottom - */ - void setWH(float width, float height) { - fLeft = 0; - fTop = 0; - fRight = width; - fBottom = height; - } - - /** Returns Rect offset by (dx, dy). - - If dx is negative, Rect returned is moved to the left. - If dx is positive, Rect returned is moved to the right. - If dy is negative, Rect returned is moved upward. - If dy is positive, Rect returned is moved downward. - - @param dx added to fLeft and fRight - @param dy added to fTop and fBottom - @return Rect offset on axes, with original width and height - */ - Rect makeOffset(float dx, float dy) const { - return MakeLTRB(fLeft + dx, fTop + dy, fRight + dx, fBottom + dy); - } - - /** Returns Rect, inset by (dx, dy). - - If dx is negative, Rect returned is wider. - If dx is positive, Rect returned is narrower. - If dy is negative, Rect returned is taller. - If dy is positive, Rect returned is shorter. - - @param dx added to fLeft and subtracted from fRight - @param dy added to fTop and subtracted from fBottom - @return Rect inset symmetrically left and right, top and bottom - */ - Rect makeInset(float dx, float dy) const { - return MakeLTRB(fLeft + dx, fTop + dy, fRight - dx, fBottom - dy); - } - - /** Returns Rect, outset by (dx, dy). - - If dx is negative, Rect returned is narrower. - If dx is positive, Rect returned is wider. - If dy is negative, Rect returned is shorter. - If dy is positive, Rect returned is taller. 
- - @param dx subtracted to fLeft and added from fRight - @param dy subtracted to fTop and added from fBottom - @return Rect outset symmetrically left and right, top and bottom - */ - Rect makeOutset(float dx, float dy) const { - return MakeLTRB(fLeft - dx, fTop - dy, fRight + dx, fBottom + dy); - } - - /** Offsets Rect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom. - - If dx is negative, moves Rect to the left. - If dx is positive, moves Rect to the right. - If dy is negative, moves Rect upward. - If dy is positive, moves Rect downward. - - @param dx offset added to fLeft and fRight - @param dy offset added to fTop and fBottom - */ - void offset(float dx, float dy) { - fLeft += dx; - fTop += dy; - fRight += dx; - fBottom += dy; - } - - /** Offsets Rect so that fLeft equals newX, and fTop equals newY. width and height - are unchanged. - - @param newX stored in fLeft, preserving width() - @param newY stored in fTop, preserving height() - */ - void offsetTo(float newX, float newY) { - fRight += newX - fLeft; - fBottom += newY - fTop; - fLeft = newX; - fTop = newY; - } - - /** Insets Rect by (dx, dy). - - If dx is positive, makes Rect narrower. - If dx is negative, makes Rect wider. - If dy is positive, makes Rect shorter. - If dy is negative, makes Rect taller. - - @param dx added to fLeft and subtracted from fRight - @param dy added to fTop and subtracted from fBottom - */ - void inset(float dx, float dy) { - fLeft += dx; - fTop += dy; - fRight -= dx; - fBottom -= dy; - } - - /** Outsets Rect by (dx, dy). - - If dx is positive, makes Rect wider. - If dx is negative, makes Rect narrower. - If dy is positive, makes Rect taller. - If dy is negative, makes Rect shorter. 
- - @param dx subtracted to fLeft and added from fRight - @param dy subtracted to fTop and added from fBottom - */ - void outset(float dx, float dy) { - this->inset(-dx, -dy); - } - -private: - static bool Intersects(float al, float at, float ar, float ab, float bl, float bt, float br, float bb) { - float L = std::max(al, bl); - float R = std::min(ar, br); - float T = std::max(at, bt); - float B = std::min(ab, bb); - return L < R && T < B; - } - -public: - /** Constructs Rect to intersect from (left, top, right, bottom). Does not sort - construction. - - Returns true if Rect intersects construction. - Returns false if either construction or Rect is empty, or do not intersect. - - @param left x-axis minimum of constructed Rect - @param top y-axis minimum of constructed Rect - @param right x-axis maximum of constructed Rect - @param bottom y-axis maximum of constructed Rect - @return true if construction and Rect have area in common - */ - bool intersects(float left, float top, float right, float bottom) const { - return Intersects(fLeft, fTop, fRight, fBottom, left, top, right, bottom); - } - - /** Returns true if Rect intersects r. - Returns false if either r or Rect is empty, or do not intersect. - - @param r Rect to intersect - @return true if r and Rect have area in common - */ - bool intersects(const Rect& r) const { - return Intersects(fLeft, fTop, fRight, fBottom, r.fLeft, r.fTop, r.fRight, r.fBottom); - } - - /** Returns true if a intersects b. - Returns false if either a or b is empty, or do not intersect. - - @param a Rect to intersect - @param b Rect to intersect - @return true if a and b have area in common - */ - static bool Intersects(const Rect& a, const Rect& b) { - return Intersects(a.fLeft, a.fTop, a.fRight, a.fBottom, b.fLeft, b.fTop, b.fRight, b.fBottom); - } - - /** Sets Rect to the union of itself and r. - - Asserts if r is empty and SK_DEBUG is defined. - If Rect is empty, sets Rect to r. - - May produce incorrect results if r is empty. 
- - @param r expansion Rect - */ - void joinNonEmptyArg(const Rect& r) { - MNN_ASSERT(!r.isEmpty()); - // if we are empty, just assign - if (fLeft >= fRight || fTop >= fBottom) { - *this = r; - } else { - this->joinPossiblyEmptyRect(r); - } - } - - /** Sets Rect to the union of itself and the construction. - - May produce incorrect results if Rect or r is empty. - - @param r expansion Rect - */ - void joinPossiblyEmptyRect(const Rect& r) { - fLeft = std::min(fLeft, r.left()); - fTop = std::min(fTop, r.top()); - fRight = std::max(fRight, r.right()); - fBottom = std::max(fBottom, r.bottom()); - } - - /** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom. - Returns false if Rect is empty. - - @param x test Point x-coordinate - @param y test Point y-coordinate - @return true if (x, y) is inside Rect - */ - bool contains(float x, float y) const { - return x >= fLeft && x < fRight && y >= fTop && y < fBottom; - } - - /** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps - fTop and fBottom if fTop is greater than fBottom. Result may be empty; - and width() and height() will be zero or positive. - */ - void sort() { - using std::swap; - if (fLeft > fRight) { - swap(fLeft, fRight); - } - - if (fTop > fBottom) { - swap(fTop, fBottom); - } - } - - /** Returns Rect with fLeft and fRight swapped if fLeft is greater than fRight; and - with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty; - and width() and height() will be zero or positive. - - @return sorted Rect - */ - Rect makeSorted() const { - return MakeLTRB(std::min(fLeft, fRight), std::min(fTop, fBottom), std::max(fLeft, fRight), - std::max(fTop, fBottom)); - } - - /** Returns pointer to first scalar in Rect, to treat it as an array with four - entries. 
- - @return pointer to fLeft - */ - const float* asScalars() const { - return &fLeft; - } -}; - -} // namespace CV -} // namespace MNN -#endif diff --git a/MNN/Tensor.hpp b/MNN/Tensor.hpp deleted file mode 100644 index fd633a62..00000000 --- a/MNN/Tensor.hpp +++ /dev/null @@ -1,292 +0,0 @@ -// -// Tensor.hpp -// MNN -// -// Created by MNN on 2018/08/14. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef Tensor_hpp -#define Tensor_hpp - -#include -#include -#include -#define MNN_MAX_TENSOR_DIM 6 - -namespace MNN { - -/** - * data container. - * data for host tensor is saved in `host` field. its memory is allocated malloc directly. - * data for device tensor is saved in `deviceId` field. its memory is allocated by session's backend. - * usually, device tensors are created by engine (like net, session). - * meanwhile, host tensors could be created by engine or user. - */ -class MNN_PUBLIC Tensor { -public: - struct InsideDescribe; - - /** dimension type used to create tensor */ - enum DimensionType { - /** for tensorflow net type. uses NHWC as data format. */ - TENSORFLOW, - /** for caffe net type. uses NCHW as data format. */ - CAFFE, - /** for caffe net type. uses NC4HW4 as data format. */ - CAFFE_C4 - }; - - /** handle type */ - enum HandleDataType { - /** default handle type */ - HANDLE_NONE = 0, - /** string handle type */ - HANDLE_STRING = 1 - }; - - /** Tensor map type : Read or Write*/ - enum MapType { - /** map Tensor for writing data*/ - MAP_TENSOR_WRITE = 0, - MAP_TENSOR_READ = 1 - }; - -public: - /** - * @brief create a tensor with dimension size and type without acquire memory for data. - * @param dimSize dimension size. - * @param type dimension type. - */ - Tensor(int dimSize = 4, DimensionType type = CAFFE); - - /** - * @brief create a tensor with same shape as given tensor. - * @param tensor shape provider. - * @param type dimension type. - * @param allocMemory acquire memory for data or not. - * @warning tensor data won't be copied. 
- */ - Tensor(const Tensor* tensor, DimensionType type = CAFFE, bool allocMemory = true); - - /** deinitializer */ - ~Tensor(); - -private: - // remove all assignment operator - Tensor(const Tensor& tensor) = delete; - Tensor(const Tensor&& tensor) = delete; - Tensor& operator=(const Tensor&) = delete; - Tensor& operator=(const Tensor&&) = delete; - -public: - /** - * @brief create tensor with shape, data type and dimension type. - * @param shape tensor shape. - * @param type data type. - * @param dimType dimension type. - * @return created tensor. - * @warning memory for data won't be acquired. call backend's onAcquireBuffer to get memory ready. - */ - static Tensor* createDevice(const std::vector& shape, halide_type_t type, DimensionType dimType = TENSORFLOW); - - /** - * @brief create tensor with shape and dimension type. data type is represented by `T`. - * @param shape tensor shape. - * @param dimType dimension type. - * @return created tensor. - * @warning memory for data won't be acquired. call backend's onAcquireBuffer to get memory ready. - */ - template - static Tensor* createDevice(const std::vector& shape, DimensionType dimType = TENSORFLOW) { - return createDevice(shape, halide_type_of(), dimType); - } - - /** - * @brief create tensor with shape, data type, data and dimension type. - * @param shape tensor shape. - * @param type data type. - * @param data data to save. - * @param dimType dimension type. - * @return created tensor. - */ - static Tensor* create(const std::vector& shape, halide_type_t type, void* data = NULL, - DimensionType dimType = TENSORFLOW); - - /** - * @brief create tensor with shape, data and dimension type. data type is represented by `T`. - * @param shape tensor shape. - * @param data data to save. - * @param dimType dimension type. - * @return created tensor. 
- */ - template - static Tensor* create(const std::vector& shape, void* data = NULL, DimensionType dimType = TENSORFLOW) { - return create(shape, halide_type_of(), data, dimType); - } - -public: - /** - * @brief for DEVICE tensor, copy data from given host tensor. - * @param hostTensor host tensor, the data provider. - * @return true for DEVICE tensor, and false for HOST tensor. - */ - bool copyFromHostTensor(const Tensor* hostTensor); - - /** - * @brief for DEVICE tensor, copy data to given host tensor. - * @param hostTensor host tensor, the data consumer. - * @return true for DEVICE tensor, and false for HOST tensor. - */ - bool copyToHostTensor(Tensor* hostTensor) const; - - /** - * @brief create HOST tensor from DEVICE tensor, with or without data copying. - * @param deviceTensor given device tensor. - * @param copyData copy data or not. - * @return created host tensor. - */ - static Tensor* createHostTensorFromDevice(const Tensor* deviceTensor, bool copyData = true); - -public: - const halide_buffer_t& buffer() const { - return mBuffer; - } - halide_buffer_t& buffer() { - return mBuffer; - } - - /** - * @brief get dimension type. - * @return dimension type. - */ - DimensionType getDimensionType() const; - - /** - * @brief handle data type. used when data type code is halide_type_handle. - * @return handle data type. - */ - HandleDataType getHandleDataType() const; - - /** - * @brief set data type. - * @param type data type defined in 'Type_generated.h'. - */ - void setType(int type); - - /** - * @brief get data type. - * @return data type. - */ - inline halide_type_t getType() const { - return mBuffer.type; - } - - /** - * @brief visit host memory, data type is represented by `T`. - * @return data point in `T` type. - */ - template - T* host() const { - return (T*)mBuffer.host; - } - - /** - * @brief visit device memory. - * @return device data ID. what the ID means varies between backends. 
- */ - uint64_t deviceId() const { - return mBuffer.device; - } - -public: - int dimensions() const { - return mBuffer.dimensions; - } - - /** - * @brief get all dimensions' extent. - * @return dimensions' extent. - */ - std::vector shape() const; - - /** - * @brief calculate number of bytes needed to store data taking reordering flag into account. - * @return bytes needed to store data - */ - int size() const; - - /** - * @brief calculate number of elements needed to store data taking reordering flag into account. - * @return elements needed to store data - */ - inline int elementSize() const { - return size() / mBuffer.type.bytes(); - } - -public: - inline int width() const { - if (getDimensionType() == TENSORFLOW) { - return mBuffer.dim[2].extent; - } - - return mBuffer.dim[3].extent; - } - inline int height() const { - if (getDimensionType() == TENSORFLOW) { - return mBuffer.dim[1].extent; - } - return mBuffer.dim[2].extent; - } - inline int channel() const { - if (getDimensionType() == TENSORFLOW) { - return mBuffer.dim[3].extent; - } - return mBuffer.dim[1].extent; - } - inline int batch() const { - return mBuffer.dim[0].extent; - } - - // visit dimension's extent & stride - inline int stride(int index) const { - return mBuffer.dim[index].stride; - } - inline int length(int index) const { - return mBuffer.dim[index].extent; - } - inline void setStride(int index, int stride) { - mBuffer.dim[index].stride = stride; - } - inline void setLength(int index, int length) { - mBuffer.dim[index].extent = length; - } - -public: - /** - * @brief print tensor data. for DEBUG use only. 
- */ - void print() const; - - /** - *@brief print tensor shape - */ - void printShape() const; - -public: - /** - * @brief map/umap GPU Tensor, to get host ptr - */ - void* map(MapType mtype, DimensionType dtype); - void unmap(MapType mtype, DimensionType dtype, void* mapPtr); - -private: - halide_buffer_t mBuffer; - struct InsideDescribe* mDescribe; - -private: - friend class TensorUtils; -}; -} // namespace MNN - -#endif /* Tensor_hpp */ diff --git a/MNN/expr/Executor.hpp b/MNN/expr/Executor.hpp deleted file mode 100644 index a6cd3837..00000000 --- a/MNN/expr/Executor.hpp +++ /dev/null @@ -1,111 +0,0 @@ -// -// Executor.hpp -// MNN -// -// Created by MNN on 2019/07/25. -// Copyright © 2018, Alibaba Group Holding Limited -// -#ifndef Executor_hpp -#define Executor_hpp -#include -#include -#include -#include -#include -#include -#include -#include -namespace MNN { -class Backend; -class Execution; -class Runtime; -struct Op; -namespace Express { -class MNN_PUBLIC Executor { -public: - class ComputeCache; - struct Unit; - static void setShapeDirty(ComputeCache* cache); - static void setContentDirty(ComputeCache* cache); - static Tensor* getOutput(ComputeCache* cache, int offset); - static void* mapOutput(ComputeCache* cache, int offset, Tensor* dest); - struct Requirement { - std::vector contentNeedContent; - std::vector shapeNeedContent; - }; - ~Executor(); - Requirement getRequirement(Expr* expr) const; - ErrorCode computeInfo(Expr* expr); - void makeCache(const std::vector& expr, bool forceCPU = false); - ErrorCode runCache(std::shared_ptr cache); - void setGlobalExecutorConfig(MNNForwardType type, const BackendConfig& config, int numberThread); - enum GCFlag { - FULL, - PART - }; - void gc(GCFlag flag = FULL); - static std::shared_ptr getGlobalExecutor(); - - static std::shared_ptr newExecutor(MNNForwardType type, - const BackendConfig& config, - int numberThread); - void resetProfile(); - void dumpProfile(); - void addOpCostTime(int op, float costTime); - void 
addOpCostTime(const std::string& type, float costTime); - void addOpFlops(const std::string& type, float flops); - class Profiler; - static RuntimeInfo getRuntime(); - - struct Cache; - class RuntimeManager { - public: - RuntimeManager(std::vector &configs); - ~RuntimeManager() {}; - - /** - * @param configs: schedule configs. - * @param cacheName: full path for cache file. Note: should choose location for reading and writing. - */ - static RuntimeManager* createRuntimeManager(std::vector &configs); - - /** - * @brief set cache file. when file not exist -- create it, when file exist -- load it. - * When should use : When choose GPU backend or use AUTO backend. - * Calling Position: calling after createRuntimeManager. - */ - void setCache(std::string cacheName); - - /** - * @brief update cache file - * When should use : Together with setCache API. calling for first inference and when input shape is changed. - * Calling Position : calling after inference done. - */ - void updateCache(); - std::vector isBackendSupport(const std::vector type); - RuntimeInfo getRuntimeInfo() { - return mRuntime; - } - private: - RuntimeInfo mRuntime; - std::shared_ptr mInfo; - std::shared_ptr mCache; - - }; - - -private: - void _makeCache(const std::vector& outputs, bool forceCPU); - void _create(const std::vector& outputs, std::set>&& inputCaches, std::set>&& inputNode, bool forceCPU); - - void _visit(EXPRP expr, std::set>& inputCaches, std::set>& inputNode); - - Executor(std::shared_ptr backend, MNNForwardType type); - std::pair, MNNForwardType> mRuntime; - std::pair, MNNForwardType> mBackupRuntime; - std::mutex mMutex; - std::shared_ptr mProfiler; -}; -} // namespace Express -} // namespace MNN -#endif diff --git a/MNN/expr/ExecutorScope.hpp b/MNN/expr/ExecutorScope.hpp deleted file mode 100644 index d7764f74..00000000 --- a/MNN/expr/ExecutorScope.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// -// ExecutorScope.hpp -// MNN -// -// Created by MNN on 2020/10/26. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNN_EXPR_EXECUTOR_SCOPE_HPP_ -#define MNN_EXPR_EXECUTOR_SCOPE_HPP_ - -#include - -namespace MNN { -namespace Express { - -struct MNN_PUBLIC ExecutorScope final { -public: - ExecutorScope() = delete; - explicit ExecutorScope(const ExecutorScope&) = delete; - explicit ExecutorScope(const std::shared_ptr& current); - - explicit ExecutorScope(const std::string& scope_name, - const std::shared_ptr& current); - - virtual ~ExecutorScope(); - - static const std::shared_ptr Current(); -}; - -} // namespace MNN -} // namespace Express -#endif // MNN_EXPR_EXECUTOR_SCOPE_HPP_ diff --git a/MNN/expr/Expr.hpp b/MNN/expr/Expr.hpp deleted file mode 100644 index 094819af..00000000 --- a/MNN/expr/Expr.hpp +++ /dev/null @@ -1,261 +0,0 @@ -// -// Expr.hpp -// MNN -// -// Created by MNN on 2019/06/10. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef Expr_hpp -#define Expr_hpp - -#include -#include -#include -#include -#include -#include -#include - -namespace MNN { -struct OpT; -struct Op; -struct NetT; -class Tensor; -namespace Express { -class Variable; -class Expr; -class Executor; -typedef std::shared_ptr EXPRP; -typedef std::weak_ptr WeakEXPRP; -typedef std::vector INTS; -enum Dimensionformat { NHWC, NC4HW4, NCHW }; -class MNN_PUBLIC VARP { -public: - VARP() { - // Do nothing - } - VARP(std::shared_ptr c) { - mContent = std::move(c); - } - VARP(Variable* c) { - mContent.reset(c); - } - Variable* get() const { - return mContent.get(); - } - ~ VARP() { - // Do nothing - } - VARP(const VARP& var) { - mContent = var.mContent; - } - VARP(VARP&& var) { - mContent = std::move(var.mContent); - } - VARP operator+(VARP var) const; - VARP operator-(VARP var) const; - VARP operator*(VARP var) const; - VARP operator/(VARP var) const; - VARP mean(INTS dims) const; - VARP sum(INTS dims) const; - - bool operator==(const VARP& var) const { - return var.mContent == mContent; - } - bool operator<(const VARP& var) 
const { - return mContent < var.mContent; - } - bool operator<=(const VARP& var) const { - return mContent <= var.mContent; - } - VARP& operator=(const VARP& var) { - mContent = var.mContent; - return *this; - } - VARP& operator=(Variable* var) { - mContent.reset(var); - return *this; - } - Variable* operator->() const { - return mContent.get(); - } - enum InputType { - INPUT = 0, - CONSTANT = 1, - TRAINABLE = 2, - }; - bool fix(InputType type) const; -private: - friend class Variable; - std::shared_ptr mContent; -}; -inline bool operator==(Variable* src, VARP dst) { - return src == dst.get(); -} -inline bool operator!=(Variable* src, VARP dst) { - return src != dst.get(); -} -// inline bool operator<(VARP src, VARP dst) { -// return src.get() < dst.get(); -// } -typedef std::vector VARPS; - -class MNN_PUBLIC Variable { -public: - struct Info { - Dimensionformat order = NHWC; - INTS dim; - halide_type_t type; - int size; - void syncSize(); - }; - const std::string& name() const; - void setName(const std::string& name); - std::pair expr() const { - return std::make_pair(mFrom, mFromIndex); - } - // If compute info error, return nullptr - const Info* getInfo(); - bool resize(INTS dims); - template - const T* readMap() { - return (const T*)readInternal(); - } - - template - T* writeMap() { - return (T*)writeInternal(); - } - - //Depecerate - void unMap(); - - bool input(VARP src); - static void replace(VARP dst, VARP src); - - static VARP create(EXPRP expr, int index = 0); - - static std::vector load(const char* fileName); - static std::map loadMap(const char* fileName); - static std::vector load(const uint8_t* buffer, size_t length); - static std::map loadMap(const uint8_t* buffer, size_t length); - static std::pair, std::map> getInputAndOutput(const std::map& allVariable); - static std::vector mapToSequence(const std::map& source); - static std::vector getExecuteOrder(const std::vector& output); - static void save(const std::vector& vars, const char* fileName); - 
static void save(const std::vector& vars, NetT* dest); - - // Pack a few Variable to compute in one pipeline - static void prepareCompute(const std::vector& vars, bool forceCPU = false); - static void compute(const std::vector& vars, bool forceCPU = false); - - size_t linkNumber() const; - const std::vector& toExprs() const; - void setExpr(EXPRP expr, int index) { - mFrom = expr; - mFromIndex = index; - } -private: - Variable(EXPRP expr, int index) { - mFrom = expr; - mFromIndex = index; - } - - void* readInternal(bool forShape = false); - void* writeInternal(bool inform=true); - void informDirty(); - - friend class Expr; - EXPRP mFrom; - int mFromIndex; -}; -struct BufferStorage; -class MNN_PUBLIC Expr { -public: - struct Inside; - enum MemoryType { - COPY, - MOVE, - REF - }; - static EXPRP create(Tensor* tensor, bool own = false); - - static EXPRP create(Variable::Info&& info, const void* ptr, VARP::InputType type, MemoryType copy = COPY); - static EXPRP create(const OpT* op, std::vector inputs, int outputSize = 1); - static EXPRP create(std::shared_ptr extra, std::vector&& inputs, int outputSize = 1); - static EXPRP create(std::unique_ptr&& op, std::vector inputs, int outputSize = 1) { - return create(op.get(), inputs, outputSize); - } - void setName(const std::string& name); - - const Op* get() const { - return mOp; - } - const std::vector& inputs() const { - return mInputs; - } - int outputSize() const { - return (int)mOutputNames.size(); - } - static void replace(EXPRP oldExpr, EXPRP newExpr); - bool requireInfo(); - void visitOutputs(const std::function& visit); - static void visit(EXPRP expr, const std::function& before, const std::function& after); - - const std::vector& outputs() const { - return mTo; - } - ~Expr(); - - bool visited() const { - return mVisited; - } - void setVisited(bool visited) { - mVisited = visited; - } - const std::string& name() const { - return mName; - } - const std::string& outputName(int index) { - return mOutputNames[index]; - 
} - - VARP::InputType inputType() const {return mType;} - Variable::Info* outputInfo(int index) const; - std::shared_ptr extra() const { - return mStorage; - } - bool setInfoDirty(); - std::shared_ptr inside() const { - return mInside; - } - bool valid() const { - return mValid; - } - -private: - static void _addLinkForInputs(EXPRP expr); - - Expr(int outputSize); - Expr(Tensor* tensor, bool own = false); - - friend class Variable; - friend class VARP; - VARP::InputType mType; - const Op* mOp; - std::vector mInputs; - std::vector mOutputNames; - - bool mValid = true; - std::shared_ptr mStorage; - std::string mName; - std::shared_ptr mInside = nullptr; - bool mVisited = false; - std::vector mTo; - -}; -} // namespace Express -} // namespace MNN - -#endif /* Expr_hpp */ diff --git a/MNN/expr/ExprCreator.hpp b/MNN/expr/ExprCreator.hpp deleted file mode 100644 index 0a896011..00000000 --- a/MNN/expr/ExprCreator.hpp +++ /dev/null @@ -1,16 +0,0 @@ -// -// ExprCreator.hpp -// MNN -// -// Created by MNN on 2019/06/27. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef ExprCreator_hpp -#define ExprCreator_hpp - -#include -#include -#include - -#endif diff --git a/MNN/expr/MathOp.hpp b/MNN/expr/MathOp.hpp deleted file mode 100644 index 34d4a1fb..00000000 --- a/MNN/expr/MathOp.hpp +++ /dev/null @@ -1,129 +0,0 @@ -// -// MathOp.hpp -// MNN -// -// Created by MNN on 2019/06/27. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MathOp_HPP -#define MathOp_HPP - -namespace MNN { -namespace Express { -//BinaryOPs -MNN_PUBLIC VARP _Add(VARP x, VARP y); -MNN_PUBLIC VARP _Subtract(VARP x, VARP y); -MNN_PUBLIC VARP _Multiply(VARP x, VARP y); -MNN_PUBLIC VARP _Divide(VARP x, VARP y); -MNN_PUBLIC VARP _Pow(VARP x, VARP y); -MNN_PUBLIC VARP _Minimum(VARP x, VARP y); -MNN_PUBLIC VARP _Maximum(VARP x, VARP y); -MNN_PUBLIC VARP _BiasAdd(VARP value, VARP bias); -MNN_PUBLIC VARP _Greater(VARP x, VARP y); -MNN_PUBLIC VARP _GreaterEqual(VARP x, VARP y); -MNN_PUBLIC VARP _Less(VARP x, VARP y); -MNN_PUBLIC VARP _FloorDiv(VARP x, VARP y); -MNN_PUBLIC VARP _SquaredDifference(VARP x, VARP y); -MNN_PUBLIC VARP _Equal(VARP x, VARP y); -MNN_PUBLIC VARP _LessEqual(VARP x, VARP y); -MNN_PUBLIC VARP _FloorMod(VARP x, VARP y); -MNN_PUBLIC VARP _Atan2(VARP x, VARP y); -MNN_PUBLIC VARP _LogicalOr(VARP x, VARP y); -MNN_PUBLIC VARP _NotEqual(VARP x, VARP y); - -//UnaryOPs -MNN_PUBLIC VARP _Sign(VARP a); -MNN_PUBLIC VARP _Abs(VARP x); -MNN_PUBLIC VARP _Negative(VARP x); -MNN_PUBLIC VARP _Floor(VARP x); -MNN_PUBLIC VARP _Round(VARP x); -MNN_PUBLIC VARP _Ceil(VARP x); -MNN_PUBLIC VARP _Square(VARP x); -MNN_PUBLIC VARP _Sqrt(VARP x); -MNN_PUBLIC VARP _Rsqrt(VARP x); -MNN_PUBLIC VARP _Exp(VARP x); -MNN_PUBLIC VARP _Log(VARP x); -MNN_PUBLIC VARP _Sin(VARP x); -MNN_PUBLIC VARP _Sinh(VARP x); -MNN_PUBLIC VARP _Cos(VARP x); -MNN_PUBLIC VARP _Cosh(VARP x); -MNN_PUBLIC VARP _Tan(VARP x); -MNN_PUBLIC VARP _Asin(VARP x); -MNN_PUBLIC VARP _Asinh(VARP x); -MNN_PUBLIC VARP _Acos(VARP x); -MNN_PUBLIC VARP _Acosh(VARP x); -MNN_PUBLIC VARP _Atan(VARP x); -MNN_PUBLIC VARP _Atanh(VARP x); -MNN_PUBLIC VARP _Reciprocal(VARP x); -MNN_PUBLIC VARP _Log1p(VARP x); -MNN_PUBLIC VARP _Gelu(VARP x); -//Only one but not in UnaryOPs -MNN_PUBLIC VARP _Tanh(VARP x); -MNN_PUBLIC VARP _Sigmoid(VARP x); -MNN_PUBLIC VARP _Erf(VARP x); -MNN_PUBLIC VARP _Erfc(VARP x); -MNN_PUBLIC VARP 
_Erfinv(VARP x); -MNN_PUBLIC VARP _Expm1(VARP x); - - -//ReduceOPs -MNN_PUBLIC VARP _ReduceSum(VARP input_variable, INTS axis = {}, bool keepDims = false); -MNN_PUBLIC VARP _ReduceMean(VARP input_variable, INTS axis = {}, bool keepDims = false); -MNN_PUBLIC VARP _ReduceMax(VARP input_variable, INTS axis = {}, bool keepDims = false); -MNN_PUBLIC VARP _ReduceMin(VARP input_variable, INTS axis = {}, bool keepDims = false); -MNN_PUBLIC VARP _ReduceProd(VARP input_variable, INTS axis = {}, bool keepDims = false); -MNN_PUBLIC VARP _ReduceAny(VARP input_variable, INTS axis = {}, bool keepDims = false); -MNN_PUBLIC VARP _ReduceAll(VARP input_variable, INTS axis = {}, bool keepDims = false); - -MNN_PUBLIC VARP _ReduceSumMutable(VARP input_variable, VARP axis, bool keepDims = false); -MNN_PUBLIC VARP _ReduceMeanMutable(VARP input_variable, VARP axis, bool keepDims = false); -MNN_PUBLIC VARP _ReduceMaxMutable(VARP input_variable, VARP axis, bool keepDims = false); -MNN_PUBLIC VARP _ReduceMinMutable(VARP input_variable, VARP axis, bool keepDims = false); -MNN_PUBLIC VARP _ReduceProdMutable(VARP input_variable, VARP axis, bool keepDims = false); -MNN_PUBLIC VARP _ReduceAnyMutable(VARP input_variable, VARP axis, bool keepDims = false); -MNN_PUBLIC VARP _ReduceAllMutable(VARP input_variable, VARP axis, bool keepDims = false); - -//EltwiseOPs -MNN_PUBLIC VARP _Prod(VARP a, VARP b, std::vector coeff); -MNN_PUBLIC VARP _Sum(VARP a, VARP b, std::vector coeff); -MNN_PUBLIC VARP _Max(VARP a, VARP b, std::vector coeff); -MNN_PUBLIC VARP _Sub(VARP a, VARP b, std::vector coeff); -MNN_PUBLIC VARP _EltwiseProdInt8(VARP x, VARP y, - std::vector x_weight, std::vector x_bias, std::vector x_scale, std::vector x_tensorScale, - std::vector y_weight, std::vector y_bias, std::vector y_scale, std::vector y_tensorScale, - std::vector output_weight, std::vector output_bias, std::vector output_scale, std::vector output_tensorScale); -MNN_PUBLIC VARP _EltwiseSumInt8(VARP x, VARP y, - std::vector 
x_weight, std::vector x_bias, std::vector x_scale, std::vector x_tensorScale, - std::vector y_weight, std::vector y_bias, std::vector y_scale, std::vector y_tensorScale, - std::vector output_weight, std::vector output_bias, std::vector output_scale, std::vector output_tensorScale); -MNN_PUBLIC VARP _EltwiseSubInt8(VARP x, VARP y, - std::vector x_weight, std::vector x_bias, std::vector x_scale, std::vector x_tensorScale, - std::vector y_weight, std::vector y_bias, std::vector y_scale, std::vector y_tensorScale, - std::vector output_weight, std::vector output_bias, std::vector output_scale, std::vector output_tensorScale); -MNN_PUBLIC VARP _EltwiseMaxInt8(VARP x, VARP y, - std::vector x_weight, std::vector x_bias, std::vector x_scale, std::vector x_tensorScale, - std::vector y_weight, std::vector y_bias, std::vector y_scale, std::vector y_tensorScale, - std::vector output_weight, std::vector output_bias, std::vector output_scale, std::vector output_tensorScale); - - -//OtherOPs -template -VARP _Cast(VARP x) { - return _Cast(x, halide_type_of()); -} -MNN_PUBLIC VARP _Cast(VARP x, halide_type_t dtype); -MNN_PUBLIC VARP _MatMul(VARP a, VARP b, bool tranposeA = false, bool tranposeB = false); -MNN_PUBLIC VARP _Normalize(VARP x, int32_t acrossSpatial, int32_t channelShared, float eps, std::vector scale); -MNN_PUBLIC VARP _ArgMax(VARP input, int axis = 0); -MNN_PUBLIC VARP _ArgMin(VARP input, int axis = 0); -MNN_PUBLIC VARP _BatchMatMul(VARP x, VARP y, bool adj_x = false, bool adj_y = false); -MNN_PUBLIC VARP _UnravelIndex(VARP indices, VARP dims); -MNN_PUBLIC VARP _ScatterNd(VARP indices, VARP updates, VARP shape); -MNN_PUBLIC VARP _OneHot(VARP indices, VARP depth, VARP onValue, VARP offValue, int axis = -1); -MNN_PUBLIC VARP _BroadcastTo(VARP a, VARP shape); -MNN_PUBLIC VARP _LinSpace(VARP start, VARP stop, VARP num); -}; // namespace Express -}; // namespace MNN - -#endif /* MathOp_HPP */ diff --git a/MNN/expr/Module.hpp b/MNN/expr/Module.hpp deleted file mode 100644 
index 7188c8f6..00000000 --- a/MNN/expr/Module.hpp +++ /dev/null @@ -1,127 +0,0 @@ -// -// Module.hpp -// MNN -// -// Created by MNN on 2019/11/25. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNN_Train_Module_hpp -#define MNN_Train_Module_hpp - -#include -#include - -#include -#include -#include - -namespace MNN { -namespace Express { -struct SubGraph; -class MNN_PUBLIC Module { -public: - Module() = default; - virtual ~Module() = default; - virtual std::vector onForward(const std::vector& inputs) = 0; - Express::VARP forward(Express::VARP input); - std::vector parameters() const; - bool loadParameters(const std::vector& parameters); - void setIsTraining(const bool isTraining); - bool getIsTraining(); - void clearCache(); - - const std::string& name() const { - return mName; - }; - void setName(std::string name) { - mName = std::move(name); - } - const std::string type() const { - return mType; - } - void setType(std::string type) { - mType = std::move(type); - } - // Return the parameter index - int addParameter(Express::VARP parameter); - - void setParameter(Express::VARP parameter, int index); - static Module* createEmpty(const std::vector& parameters); - - struct BackendInfo { - MNNForwardType type = MNN_FORWARD_CPU; - BackendConfig* config = nullptr; - }; - - struct Config { - // Load module as dynamic, default static - bool dynamic = false; - - // for static mode, if the shape is mutable, set true, otherwise set false to avoid resizeSession freqencily - bool shapeMutable = true; - // Pre-rearrange weights or not. Disabled by default. - // The weights will be rearranged in a general way, so the best implementation - // may not be adopted if `rearrange` is enabled. 
- bool rearrange = false; - - BackendInfo* backend = nullptr; - }; - static Module* load(const std::vector& inputs, const std::vector& outputs, const uint8_t* buffer, size_t length, const Config* config = nullptr); - static Module* load(const std::vector& inputs, const std::vector& outputs, const char* fileName, const Config* config = nullptr); - // Shared RuntimeManager - static Module* load(const std::vector& inputs, const std::vector& outputs, const char* fileName, const std::shared_ptr rtMgr, const Config* config = nullptr); - static Module* load(const std::vector& inputs, const std::vector& outputs, const uint8_t* buffer, size_t length, const std::shared_ptr rtMgr, const Config* config = nullptr); - - static Module* extract(std::vector inputs, std::vector outputs, bool fortrain, const std::map& subGraph = {}); - - static Module* clone(const Module* module, const bool shareParams = false); - - class CloneContext { - public: - CloneContext() = default; - explicit CloneContext(const bool shareParams) - : mShareParams(shareParams) {} - virtual ~CloneContext() = default; - - const bool shareParams() const { return mShareParams; } - - EXPRP getOrClone(const EXPRP expr); - VARP getOrClone(const VARP var); - - private: - bool mShareParams = false; - std::unordered_map mExprMap; - std::unordered_map mVarMap; - }; - - virtual Module* clone(CloneContext* ctx) const { - return nullptr; - } - -protected: - void registerModel(const std::vector>& children); - virtual void onClearCache() { - } - - Module* cloneBaseTo(CloneContext* ctx, Module* module) const; - -private: - void _collectParameters(std::vector& result) const; - std::vector> mChildren; - std::vector mParameters; - bool mIsTraining = true; - std::string mName; - std::string mType; -}; - -struct SubGraph { - std::vector inputs; - std::vector outputs; - std::shared_ptr m; -}; - -} // namespace Train -} // namespace MNN - -#endif diff --git a/MNN/expr/NeuralNetWorkOp.hpp b/MNN/expr/NeuralNetWorkOp.hpp deleted file 
mode 100644 index 24120a1c..00000000 --- a/MNN/expr/NeuralNetWorkOp.hpp +++ /dev/null @@ -1,159 +0,0 @@ -// -// NeuralNetWorkOp.hpp -// MNN -// -// Created by MNN on 2019/06/27. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef NeuralNetWorkOp_HPP -#define NeuralNetWorkOp_HPP - -namespace MNN { -namespace Express { -enum PaddingMode {CAFFE, VALID, SAME}; -enum PoolingMode {MAXPOOL, AVEPOOL}; -enum PadValueMode {CONSTANT, REFLECT, SYMMETRIC}; -MNN_PUBLIC VARP _Input(INTS shape = {}, Dimensionformat data_format = NC4HW4, halide_type_t dtype = halide_type_of()) ; -MNN_PUBLIC VARP _Clone(VARP source, bool deepCopy = false); - -MNN_PUBLIC VARP _Scalar(const void* ptr, halide_type_t type); - -template -VARP _Scalar(T value) { - return _Scalar(&value, halide_type_of()); -} - - -MNN_PUBLIC VARP _Const(float value, INTS shape = {}, Dimensionformat format = NHWC); -MNN_PUBLIC VARP _Const(const void* ptr, INTS shape = {}, Dimensionformat format = NHWC, - halide_type_t type = halide_type_of()); -MNN_PUBLIC VARP _TrainableParam(float value, INTS dims, Dimensionformat format); -MNN_PUBLIC VARP _TrainableParam(const void* ptr, INTS dims, Dimensionformat format, - halide_type_t type = halide_type_of()); -MNN_PUBLIC VARP _InnerProduct(std::vector&& weight, std::vector&& bias, VARP x, INTS outputShape); -MNN_PUBLIC VARP _Conv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1}, - INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); - -MNN_PUBLIC VARP _Conv(float weight, float bias, VARP x, INTS channel, INTS kernelSize, PaddingMode pad = VALID, - INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1); -MNN_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, VARP x, INTS channel, INTS kernelSize, - PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}, bool relu = false, bool relu6 = false, int nbits = 8); -MNN_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, VARP 
x, INTS channel, INTS kernelSize, - PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}, bool relu = false, bool relu6 = false); -MNN_PUBLIC VARP _Deconv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1}, - INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); - -MNN_PUBLIC VARP _Deconv(std::vector&& weight, std::vector&& bias, VARP x, INTS channel, INTS kernelSize, -PaddingMode pad, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}, bool relu = false, bool relu6 = false); - -MNN_PUBLIC VARP _MaxPool(VARP x, INTS kernel, INTS stride = {1, 1}, PaddingMode pad = VALID, INTS pads= {0, 0}); -MNN_PUBLIC VARP _AvePool(VARP x, INTS kernel, INTS stride = {1, 1}, PaddingMode pad = VALID, INTS pads= {0, 0}); -MNN_PUBLIC VARP _Reshape(VARP x, INTS shape, Dimensionformat original_format = NCHW); -MNN_PUBLIC VARP _Reshape(VARP x, VARP shape); -MNN_PUBLIC VARP _Scale(VARP x, int channels, std::vector&& scales, std::vector&& bias); - -MNN_PUBLIC VARP _Relu(VARP x, float slope = 0.0f); -MNN_PUBLIC VARP _Relu6(VARP x, float minValue = 0.0f, float maxValue = 6.0f); -MNN_PUBLIC VARP _PRelu(VARP x, std::vector &&slopes); -MNN_PUBLIC VARP _Softmax(VARP logits, int axis = -1); -MNN_PUBLIC VARP _Softplus(VARP features); -MNN_PUBLIC VARP _Softsign(VARP features); -MNN_PUBLIC std::vector _Split(VARP value, INTS size_splits, int axis = 0); -MNN_PUBLIC VARP _Slice(VARP x, VARP starts, VARP sizes); -MNN_PUBLIC VARP _StridedSlice(VARP input, VARP begin, VARP end, VARP strided, - int32_t beginMask, int32_t endMask, int32_t ellipsisMask, - int32_t newAxisMask, int32_t shrinkAxisMask); -MNN_PUBLIC VARP _Concat(VARPS values, int axis); -MNN_PUBLIC VARP _Convert(VARP input, Dimensionformat format); -MNN_PUBLIC VARP _Transpose(VARP x, INTS perm); -MNN_PUBLIC VARP _Transpose(VARP x, VARP perm); -MNN_PUBLIC VARP _ChannelShuffle(VARP x, int group); -MNN_PUBLIC VARP _ChangeInputFormat(VARP 
input, Dimensionformat format); -MNN_PUBLIC VARP _Conv2DBackPropFilter(VARP input, VARP inputGrad, INTS kernelSize, PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); -MNN_PUBLIC VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, INTS stride, PoolingMode type, PaddingMode pad = VALID, INTS pads= {0, 0}); -// FIXME: move the api to Array Ops -MNN_PUBLIC VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim); -// FIXME: move the api to Image Ops -MNN_PUBLIC VARP _Crop(VARP images, VARP size, int axis, INTS offset); -MNN_PUBLIC VARP _Resize(VARP images, float xScale, float yScale); -MNN_PUBLIC VARP _Pad(VARP x, VARP paddings, PadValueMode mode = CONSTANT); -MNN_PUBLIC VARP _ExpandDims(VARP input, int axis); -MNN_PUBLIC VARP _ExpandDims(VARP input, VARP axis); - -MNN_PUBLIC VARP _Shape(VARP input, bool nchw = false); -MNN_PUBLIC VARP _Stack(VARPS values, int axis=0); -enum InterpolationMethod {BILINEAR, NEAREST}; -MNN_PUBLIC VARP _CropAndResize(VARP image, VARP boxes, VARP box_ind, VARP crop_size, - InterpolationMethod method, float extrapolation_value = 0.0); -MNN_PUBLIC VARP _Fill(VARP dims, VARP value); -MNN_PUBLIC VARP _Tile(VARP input, VARP multiples); -MNN_PUBLIC VARP _Gather(VARP params, VARP indices); -MNN_PUBLIC VARP _GatherV2(VARP params, VARP indices, VARP axis = nullptr); -MNN_PUBLIC VARP _Squeeze(VARP input, INTS axis = {}); -MNN_PUBLIC VARP _Unsqueeze(VARP input, INTS axis = {}); -MNN_PUBLIC VARP _BatchToSpaceND(VARP input, VARP block_shape, VARP crops); -MNN_PUBLIC VARP _GatherND(VARP params, VARP indices); -MNN_PUBLIC VARP _Selu(VARP features, float scale, float alpha); -MNN_PUBLIC VARP _Size(VARP input); -MNN_PUBLIC VARP _Elu(VARP features, float alpha=1.0); -MNN_PUBLIC VARP _Threshold(VARP features, float alpha=1.0); -MNN_PUBLIC VARP _MatrixBandPart(VARP input, VARP num_lower, VARP num_upper); -MNN_PUBLIC std::vector _Moments(VARP x, INTS axis, VARP shift, 
bool keepDims); -MNN_PUBLIC VARP _SetDiff1D(VARP x, VARP y); -MNN_PUBLIC VARP _SpaceToDepth(VARP input, int block_size); -MNN_PUBLIC VARP _SpaceToBatchND(VARP input, VARP block_shape, VARP paddings); -MNN_PUBLIC VARP _ZerosLike(VARP input); -MNN_PUBLIC std::vector _Unstack(VARP value, int axis=0); -MNN_PUBLIC VARP _Rank(VARP input); -MNN_PUBLIC VARP _Range(VARP start, VARP limit, VARP delta); -MNN_PUBLIC VARP _DepthToSpace(VARP input, int block_size); -MNN_PUBLIC VARP _PriorBox(VARP feature, VARP image, - std::vector min_size, std::vector max_size, std::vectoraspect_ratio, - bool flip, bool clip, std::vectorvariance, - unsigned int img_h, unsigned int img_w, float step_h, float step_w, float offset = 0.5); -MNN_PUBLIC VARP _Permute(VARP input, INTS dims); -MNN_PUBLIC VARP _DetectionOutput(VARP location, VARP confidence, VARP priorbox, - unsigned int num_classes, bool share_location, int background_label_id, - float nms_threshhold, int nms_topk, int code_type, - bool variance_encoded_in_target, - int keep_top_k, float confidence_threshold, float visualize_threshold); -MNN_PUBLIC std::vector _DetectionPostProcess(VARP encode_boxes, VARP class_predictions, VARP anchors, - int num_classes, int max_detections, - int max_class_per_detection, int detections_per_class, - float nms_threshold, float iou_threshold, - bool use_regular_nms, std::vector centersize_encoding); -MNN_PUBLIC VARP _Interp(VARPS xs, float widthScale, float heightScale, int outputWidth, int outputHeight, int resizeType, bool alignCorners); - -MNN_PUBLIC VARP _ZeroGrad(VARP x); - -// Int8 Inference -MNN_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, std::vector&& scale, VARP x, INTS channel, INTS kernelSize, - PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu, int nbits = 8); -MNN_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, std::vector&& scale, - VARP x, INTS channel, INTS kernelSize, - PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, 
bool relu, - int8_t inputZeroPoint, int8_t outputZeroPoint, - int8_t minValue, int8_t maxValue, bool accumulateToInt16); -MNN_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, std::vector&& weightScale, - VARP x, INTS channel, INTS kernelSize, - PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu, - float scaleIn, float scaleOut, - int8_t inputZeroPoint, int8_t outputZeroPoint, - int8_t minValue, int8_t maxValue, float weightClampValue, bool accumulateToInt16); -MNN_PUBLIC VARP _CosineSimilarity(VARP input0, VARP input1, VARP inputDim); - -enum GridSamplePaddingMode {GRID_SAMPLE_PADDING_ZEROS, GRID_SAMPLE_PADDING_BORDER, GRID_SAMPLE_PADDING_REFLECTION}; -MNN_PUBLIC VARP _GridSample(VARP input, VARP grid, InterpolationMethod mode=BILINEAR, GridSamplePaddingMode paddingMode=GRID_SAMPLE_PADDING_ZEROS, bool alignCorners=false); -MNN_PUBLIC VARP _FloatToInt8(VARP x, VARP scale, char minValue, char maxValue); -MNN_PUBLIC VARP _FloatToInt8(VARP x, VARP scale, int8_t minValue, int8_t maxValue, int8_t zeroPoint); -MNN_PUBLIC VARP _Int8ToFloat(VARP x, VARP scale); -MNN_PUBLIC VARP _Int8ToFloat(VARP x, VARP scale, int8_t zeroPoint); - -MNN_PUBLIC VARP _Select(VARP select, VARP input0, VARP input1); -MNN_PUBLIC std::vector _TopKV2(VARP input0, VARP input1); - -} // namespace Express -} // namespace MNN - -#endif /* NeuralNetWorkOp_HPP */ diff --git a/MNN/expr/Optimizer.hpp b/MNN/expr/Optimizer.hpp deleted file mode 100644 index b1304c8d..00000000 --- a/MNN/expr/Optimizer.hpp +++ /dev/null @@ -1,64 +0,0 @@ -// -// Optimizer.hpp -// MNN -// -// Created by MNN on 2019/08/20. 
-// Copyright © 2018, Alibaba Group Holding Limited -// -#ifndef Optimizer_hpp -#define Optimizer_hpp -#include -#include - -namespace MNN { -namespace Express { -class MNN_PUBLIC Optimizer { -public: - enum Device { - CPU = 0, - GPU = 1, - OTHER = 2, - AUTO = 3 - }; - struct Config { - Device device = CPU; - MNNForwardType forwardType = MNN_FORWARD_ALL; - int numThread = 4; - }; - static std::shared_ptr create(Config config); - struct Cost { - float compute; // MFlops - float memory; // MB - }; - class Parameters { - public: - Parameters(int n); - virtual ~Parameters(); - - float* get() const { - return mValue; - } - int size() const { - return mSize; - } - - private: - float* mValue; - int mSize; - }; - virtual std::shared_ptr onGetParameters(const std::vector& outputs) { - return nullptr; - } - - //Given paramters and measure cost, the parameters must be the same as onGetParameters - virtual Cost onMeasure(const std::vector& outputs, std::shared_ptr parameters = nullptr) = 0; - - //Modify the output directly, the parameters must be the same as onGetParameters - virtual bool onExecute(const std::vector& outputs, std::shared_ptr parameters = nullptr) = 0; - - Optimizer() = default; - virtual ~Optimizer() = default; -}; -} // namespace Express -} // namespace MNN -#endif diff --git a/MNN/expr/Scope.hpp b/MNN/expr/Scope.hpp deleted file mode 100644 index 10366fe7..00000000 --- a/MNN/expr/Scope.hpp +++ /dev/null @@ -1,112 +0,0 @@ -// -// RuntimeScope.hpp -// MNN -// -// Created by MNN on 2020/10/26. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNN_EXPR_SCOPE_HPP_ -#define MNN_EXPR_SCOPE_HPP_ - -#include -#include -#include -#include - -#include - -namespace MNN { -namespace Express { - -template -class Scope { -public: - Scope(); - virtual ~Scope() = default; - - struct ScopedContent { - std::string scope_name; - T content; - }; - void EnterScope(const ScopedContent& current); - void EnterScope(const T& current); - void EnterScope(const std::string& scope_name, const T& current); - - void ExitScope(); - - const ScopedContent& Current() const; - const T Content() const; - - int ScopedLevel() const { return scoped_level_; } - -private: - std::string MakeScopeName(const std::string& prefix, int level) const; - - mutable std::mutex mutex_; - int scoped_level_ = 0; - std::vector scoped_contents_; -}; - -template -Scope::Scope() : scoped_level_(0) { -} - -template -void Scope::EnterScope(const ScopedContent& current) { - std::lock_guard lock(mutex_); - ++scoped_level_; - scoped_contents_.push_back(current); -} - -template -void Scope::EnterScope(const T& current) { - EnterScope("scope", current); -} - -template -void Scope::EnterScope(const std::string& scope_name, - const T& current) { - std::lock_guard lock(mutex_); - int scoped_level = ScopedLevel(); - std::string name = MakeScopeName(scope_name, scoped_level++); - ScopedContent content{name, current}; - ++scoped_level_; - scoped_contents_.push_back(content); -} - -template -void Scope::ExitScope() { - std::lock_guard lock(mutex_); - --scoped_level_; - scoped_contents_.resize(scoped_level_); -} - -template -const typename Scope::ScopedContent& Scope::Current() const { - std::lock_guard lock(mutex_); - MNN_CHECK(scoped_contents_.size() > 0, "Scope level should not be 0."); - return scoped_contents_.back(); -} - -template -const T Scope::Content() const { - std::lock_guard lock(mutex_); - if (scoped_contents_.empty()) { - return nullptr; - } - return scoped_contents_.back().content; -} - 
-template -std::string Scope::MakeScopeName(const std::string& prefix, - int level) const { - char s[16]; - snprintf(s, 16, "%d", level); - return prefix + "/" + std::string(s); -} - -} // namespace Express -} // namespace MNN - -#endif // MNN_EXPR_SCOPE_HPP_ diff --git a/MNN/plugin/PluginContext.hpp b/MNN/plugin/PluginContext.hpp deleted file mode 100644 index 665a3eb0..00000000 --- a/MNN/plugin/PluginContext.hpp +++ /dev/null @@ -1,139 +0,0 @@ -// -// ShapeInference.h -// MNN -// -// Created by MNN on 2020/04/05. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNN_PLUGIN_PLUGIN_CONTEXT_HPP_ -#define MNN_PLUGIN_PLUGIN_CONTEXT_HPP_ - -#include -#include - -#include // Backend -#include -#include "Tensor_generated.h" - -namespace MNN { -namespace plugin { - -class MNN_PUBLIC PluginContext { -public: - PluginContext() = delete; - PluginContext(const std::vector& inputs, // NOLINT - const std::vector& outputs); - - virtual ~PluginContext() = default; - - const std::vector& inputs() const { - return inputs_; - } - const std::vector& outputs() const { - return outputs_; - } - - const Tensor* input(const int index) const; - const Tensor* output(const int index) const; - - Tensor* output(const int index); - - bool hasAttr(const std::string& name) const; - - bool setAttr(const std::string& name, const Attribute* attr); - - void setAttrs(const std::unordered_map& attrs); - - const Attribute* getAttr(const std::string& name) const; - - const std::unordered_map& getAttrs() const; - -protected: - const std::vector& inputs_; - const std::vector& outputs_; - std::unordered_map attrs_; -}; - -class MNN_PUBLIC InferShapeContext : public PluginContext { -public: - InferShapeContext() = delete; - InferShapeContext(const std::vector& inputs, // NOLINT - const std::vector& outputs); - - virtual ~InferShapeContext() = default; -}; - -class MNN_PUBLIC CPUKernelContext : public PluginContext { -public: - CPUKernelContext() = delete; - CPUKernelContext(const 
std::string& op_type, // NOLINT - Backend* backend, // NOLINT - const std::vector& inputs, // NOLINT - const std::vector& outputs); - - virtual ~CPUKernelContext() = default; - - Backend* backend() const { - return backend_; - } - - const std::string& op_type() const { - return op_type_; - } - -private: - const std::string op_type_ = ""; - Backend* backend_ = nullptr; -}; - -inline PluginContext::PluginContext(const std::vector& inputs, // NOLINT - const std::vector& outputs) // NOLINT - : inputs_(inputs), outputs_(outputs) { -} - -inline const Tensor* PluginContext::input(const int index) const { - MNN_ASSERT(index < inputs_.size()); - return inputs_.at(index); -} - -inline const Tensor* PluginContext::output(const int index) const { - MNN_ASSERT(index < outputs_.size()); - return outputs_.at(index); -} - -inline Tensor* PluginContext::output(const int index) { - MNN_ASSERT(index < outputs_.size()); - return outputs_.at(index); -} - -inline bool PluginContext::hasAttr(const std::string& name) const { - return attrs_.count(name) > 0; -} - -inline bool PluginContext::setAttr(const std::string& name, // NOLINT - const Attribute* attr) { - return attrs_.emplace(name, attr).second; -} - -inline void PluginContext::setAttrs( // NOLINT - const std::unordered_map& attrs) { - attrs_ = attrs; -} - -inline const Attribute* PluginContext::getAttr(const std::string& name) const { - const auto& it = attrs_.find(name); - MNN_ASSERT(it != attrs_.end()); - return it->second; -} - -inline const std::unordered_map& // NOLINT -PluginContext::getAttrs() const { - return attrs_; -} - -} // namespace plugin -} // namespace MNN - -#endif // MNN_PLUGIN_PLUGIN_CONTEXT_HPP_ diff --git a/MNN/plugin/PluginKernel.hpp b/MNN/plugin/PluginKernel.hpp deleted file mode 100644 index f93d6823..00000000 --- a/MNN/plugin/PluginKernel.hpp +++ /dev/null @@ -1,69 +0,0 @@ -// -// ShapeInference.h -// MNN -// -// Created by MNN on 2020/04/05. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNN_PLUGIN_PLUGIN_KERNEL_HPP_ -#define MNN_PLUGIN_PLUGIN_KERNEL_HPP_ - -#include -#include -#include - -#include - -namespace MNN { -namespace plugin { - -template -class MNN_PUBLIC ComputeKernel { -public: - ComputeKernel() = default; - virtual ~ComputeKernel() = default; - virtual bool compute(KernelContextT* ctx) = 0; -}; - -class MNN_PUBLIC CPUComputeKernel : public ComputeKernel { -public: - using ContextT = CPUKernelContext; - using KernelT = CPUComputeKernel; - - CPUComputeKernel() = default; - virtual ~CPUComputeKernel() = default; - virtual bool init(CPUKernelContext* ctx) = 0; - virtual bool compute(CPUKernelContext* ctx) = 0; -}; - -template -class MNN_PUBLIC ComputeKernelRegistry { -public: - typedef std::function Factory; - static std::unordered_map* getFactoryMap(); - - static bool add(const std::string& name, Factory factory); - - static PluginKernelT* get(const std::string& name); -}; - -template -struct ComputeKernelRegistrar { - ComputeKernelRegistrar(const std::string& name) { - ComputeKernelRegistry::add(name, []() { // NOLINT - return new PluginKernelT; // NOLINT - }); - } -}; - -#define REGISTER_PLUGIN_COMPUTE_KERNEL(name, computeKernel) \ - namespace { \ - static auto _plugin_compute_kernel_##name##_ __attribute__((unused)) = \ - ComputeKernelRegistrar(#name); \ - } // namespace - -} // namespace plugin -} // namespace MNN - -#endif // MNN_PLUGIN_PLUGIN_KERNEL_HPP_ diff --git a/MNN/plugin/PluginShapeInference.hpp b/MNN/plugin/PluginShapeInference.hpp deleted file mode 100644 index fe272ed5..00000000 --- a/MNN/plugin/PluginShapeInference.hpp +++ /dev/null @@ -1,56 +0,0 @@ -// -// ShapeInference.h -// MNN -// -// Created by MNN on 2020/04/05. 
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef MNN_PLUGIN_PLUGIN_SHAPE_INFERENCE_HPP_ -#define MNN_PLUGIN_PLUGIN_SHAPE_INFERENCE_HPP_ - -#include -#include -#include - -#include - -namespace MNN { -namespace plugin { - -class MNN_PUBLIC InferShapeKernel { -public: - virtual ~InferShapeKernel() = default; - virtual bool compute(InferShapeContext* ctx) = 0; -}; - -class MNN_PUBLIC InferShapeKernelRegister { -public: - // typedef InferShapeKernel* (*Factory)(); - typedef std::function Factory; - static std::unordered_map* getFactoryMap(); - - static bool add(const std::string& name, Factory factory); - - static InferShapeKernel* get(const std::string& name); -}; - -template -struct InferShapeKernelRegistrar { - InferShapeKernelRegistrar(const std::string& name) { - InferShapeKernelRegister::add(name, []() { // NOLINT - return new PluginKernel; // NOLINT - }); - } -}; - -#define REGISTER_PLUGIN_OP(name, inferShapeKernel) \ - namespace { \ - static auto _plugin_infer_shape_##name##_ __attribute__((unused)) = \ - InferShapeKernelRegistrar(#name); \ - } // namespace - -} // namespace plugin -} // namespace MNN - -#endif // MNN_PLUGIN_PLUGIN_SHAPE_INFERENCE_HPP_ diff --git a/README.md b/README.md index ab93a832..8eee4a88 100644 --- a/README.md +++ b/README.md @@ -8,10 +8,10 @@ ![logo-v3](https://github.com/DefTruth/lite.ai.toolkit/assets/31974251/f99f5300-ece6-4572-8c4b-56b90e6e4d74)
- + - - + +
@@ -36,20 +36,8 @@ -
- - - - - - - - -

English | 中文文档 | MacOS | Linux | Windows

- - ## Features 👏👋
@@ -57,12 +45,6 @@ * **Minimum Dependencies.** Only **OpenCV** and **ONNXRuntime** are required by default, see [build](#lite.ai.toolkit-Build-Lite.AI.ToolKit). * **Lots of Algorithm Modules.** Contains almost **[300+](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md)** C++ re-implementations and **[500+](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md)** weights. -## Others 🌟🌟 - -|🛠[lite.ai.toolkit](https://github.com/DefTruth/lite.ai.toolkit) | 💎[torchlm](https://github.com/DefTruth/torchlm) | 📒[statistic-learning-R-note](https://github.com/DefTruth/statistic-learning-R-note) | 🎉[cuda-learn-note](https://github.com/DefTruth/cuda-learn-note) | 📖[Awesome-LLM-Inference](https://github.com/DefTruth/Awesome-LLM-Inference) | -|:---:|:---:|:---:|:---:|:---:| -|![](https://img.shields.io/github/stars/DefTruth/lite.ai.toolkit.svg?style=social) ![](https://img.shields.io/github/downloads/DefTruth/lite.ai.toolkit/total?color=ccf&label=downloads&logo=github&logoColor=lightgrey)| ![](https://img.shields.io/github/stars/DefTruth/torchlm.svg?style=social) ![](https://static.pepy.tech/personalized-badge/torchlm?period=total&units=international_system&left_color=grey&right_color=blue&left_text=downloads)| ![](https://img.shields.io/github/stars/DefTruth/statistic-learning-R-note.svg?style=social) ![](https://img.shields.io/github/downloads/DefTruth/statistic-learning-R-note/total?color=ccf&label=downloads&logo=github&logoColor=lightgrey) |![](https://img.shields.io/github/stars/DefTruth/cuda-learn-note.svg?style=social) ![](https://img.shields.io/github/issues/DefTruth/cuda-learn-note?color=9cc)| ![](https://img.shields.io/github/stars/DefTruth/Awesome-LLM-Inference.svg?style=social) ![](https://img.shields.io/github/downloads/DefTruth/Awesome-LLM-Inference/total?color=ccf&label=downloads&logo=github&logoColor=lightgrey)| - ## Citations 🎉🎉 ```BibTeX @misc{lite.ai.toolkit@2021, @@ -74,55 +56,26 @@ 
} ``` -## Downloads & RoadMap ✅ - -
- -![](https://github.com/DefTruth/lite.ai.toolkit/assets/31974251/f521540d-41d5-4d1c-8d5b-219ca96b5d2d) - -Some prebuilt lite.ai.toolkit libs for MacOS(x64) and Linux(x64) are available, you can download the libs from the release links. Further, prebuilt libs for Windows(x64) and Android will be coming soon ~ Please, see [issues#48](https://github.com/DefTruth/lite.ai.toolkit/issues/48) for more details of the prebuilt plan and refer to [releases](https://github.com/DefTruth/lite.ai.toolkit/releases) for more available prebuilt libs. - -* [x] [lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.8.1.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.8.1.zip) -* [x] [lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.9.0.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.9.0.zip) -* [x] [lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.10.0.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.10.0.zip) -* [x] [lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.8.1.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.8.1.zip) -* [x] [lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.9.0.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.9.0.zip) -* [x] [lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.10.0.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.10.0.zip) - -In Linux, in order to link the prebuilt libs, you need to export `lite.ai.toolkit/lib` to LD_LIBRARY_PATH first. 
-```shell -export LD_LIBRARY_PATH=YOUR-PATH-TO/lite.ai.toolkit/lib:$LD_LIBRARY_PATH -export LIBRARY_PATH=YOUR-PATH-TO/lite.ai.toolkit/lib:$LIBRARY_PATH # (may need) -``` -## Quick Setup 👀 - -To quickly setup `lite.ai.toolkit`, you can follow the `CMakeLists.txt` listed as belows. 👇👀 - -```cmake -set(LITE_AI_DIR ${CMAKE_SOURCE_DIR}/lite.ai.toolkit) -include_directories(${LITE_AI_DIR}/include) -link_directories(${LITE_AI_DIR}/lib}) -set(TOOLKIT_LIBS lite.ai.toolkit onnxruntime) -set(OpenCV_LIBS opencv_core opencv_imgcodecs opencv_imgproc opencv_video opencv_videoio) - -add_executable(lite_yolov5 examples/test_lite_yolov5.cpp) -target_link_libraries(lite_yolov5 ${TOOLKIT_LIBS} ${OpenCV_LIBS}) -``` - ## Contents 📖💡 * [Core Features](#lite.ai.toolkit-Core-Features) * [Quick Start](#lite.ai.toolkit-Quick-Start) -* [RoadMap](#lite.ai.toolkit-RoadMap) -* [Important Updates](#lite.ai.toolkit-Important-Updates) * [Supported Models Matrix](#lite.ai.toolkit-Supported-Models-Matrix) -* [Build Docs](#lite.ai.toolkit-Build-Lite.AI.ToolKit) +* [Build from source](#lite.ai.toolkit-Build) * [Model Zoo](#lite.ai.toolkit-Model-Zoo) * [Examples](#lite.ai.toolkit-Examples-for-Lite.AI.ToolKit) * [License](#lite.ai.toolkit-License) * [References](#lite.ai.toolkit-References) * [Contribute](#lite.ai.toolkit-Contribute) -## 1. Quick Start 🌟🌟 +## Build from source +
+ +```shell +git clone --depth=1 https://github.com/DefTruth/lite.ai.toolkit.git # latest +cd lite.ai.toolkit && sh ./build.sh +``` + +## Quick Start 🌟🌟
#### Example0: Object Detection using [YOLOv5](https://github.com/ultralytics/yolov5). Download model from Model-Zoo[2](#lite.ai.toolkit-2). @@ -131,9 +84,9 @@ target_link_libraries(lite_yolov5 ${TOOLKIT_LIBS} ${OpenCV_LIBS}) static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5s.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_1.jpg"; auto *yolov5 = new lite::cv::detection::YoloV5(onnx_path); std::vector detected_boxes; @@ -147,30 +100,20 @@ static void test_default() } ``` -## 2. Important Updates 🆕 -
- -
- Click here to see details of Important Updates! - -| Date | Model | C++ | Paper | Code | Awesome | Type | -|:------------:|:------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------:|:-------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------:|:------------:| -| 【2022/04/03】 | [MODNet](https://github.com/ZHKKKe/MODNet) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_modnet.cpp) | [AAAI 2022](https://arxiv.org/pdf/2011.11961.pdf) | [code](https://github.com/ZHKKKe/MODNet) | ![](https://img.shields.io/github/stars/ZHKKKe/MODNet.svg?style=social) | matting | -| 【2022/03/23】 | [PIPNtet](https://github.com/jhb86253817/PIPNet) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pipnet98.cpp) | [CVPR 2021](https://arxiv.org/abs/2003.03771) | [code](https://github.com/jhb86253817/PIPNet) | ![](https://img.shields.io/github/stars/jhb86253817/PIPNet.svg?style=social) | face::align | -| 【2022/01/19】 | [YOLO5Face](https://github.com/deepcam-cn/yolov5-face) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolo5face.cpp) | [arXiv 2021](https://arxiv.org/abs/2105.12931) | [code](https://github.com/deepcam-cn/yolov5-face) | ![](https://img.shields.io/github/stars/deepcam-cn/yolov5-face.svg?style=social) | face::detect | -| 【2022/01/07】 | [SCRFD](https://github.com/deepinsight/insightface/blob/master/detection/scrfd/) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_scrfd.cpp) | [CVPR 2021](https://arxiv.org/abs/2105.04714) | [code](https://github.com/deepinsight/insightface/blob/master/detection/scrfd/) | 
![](https://img.shields.io/github/stars/deepinsight/insightface.svg?style=social) | face::detect | -| 【2021/12/27】 | [NanoDetPlus](https://github.com/RangiLyu/nanodet) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet_plus.cpp) | [blog](https://zhuanlan.zhihu.com/p/449912627) | [code](https://github.com/RangiLyu/nanodet) | ![](https://img.shields.io/github/stars/RangiLyu/nanodet.svg?style=social) | detection | -| 【2021/12/08】 | [MGMatting](https://github.com/yucornetto/MGMatting) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mg_matting.cpp) | [CVPR 2021](https://arxiv.org/abs/2012.06722) | [code](https://github.com/yucornetto/MGMatting) | ![](https://img.shields.io/github/stars/yucornetto/MGMatting.svg?style=social) | matting | -| 【2021/11/11】 | [YoloV5_V_6_0](https://github.com/ultralytics/yolov5/releases/tag/v6.0) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov5_v6.0.cpp) | [doi](https://zenodo.org/record/5563715#.YbXffH1Bzfs) | [code](https://github.com/ultralytics/yolov5/releases/tag/v6.0) | ![](https://img.shields.io/github/stars/ultralytics/yolov5.svg?style=social) | detection | -| 【2021/10/26】 | [YoloX_V_0_1_1](https://github.com/Megvii-BaseDetection/YOLOX/releases/tag/0.1.1rc0) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolox_v0.1.1.cpp) | [arXiv 2021](https://arxiv.org/abs/2107.08430) | [code](https://github.com/Megvii-BaseDetection/YOLOX) | ![](https://img.shields.io/github/stars/Megvii-BaseDetection/YOLOX.svg?style=social) | detection | -| 【2021/10/02】 | [NanoDet](https://github.com/RangiLyu/nanodet) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet.cpp) | [blog](https://zhuanlan.zhihu.com/p/306530300) | [code](https://github.com/RangiLyu/nanodet) | ![](https://img.shields.io/github/stars/RangiLyu/nanodet.svg?style=social) 
| detection | -| 【2021/09/20】 | [RobustVideoMatting](https://github.com/PeterL1n/RobustVideoMatting) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_rvm.cpp) | [WACV 2022](https://arxiv.org/abs/2108.11515) | [code](https://github.com/PeterL1n/RobustVideoMatting) | ![](https://img.shields.io/github/stars/PeterL1n/RobustVideoMatting.svg?style=social) | matting | -| 【2021/09/02】 | [YOLOP](https://github.com/hustvl/YOLOP) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolop.cpp) | [arXiv 2021](https://arxiv.org/abs/2108.11250) | [code](https://github.com/hustvl/YOLOP) | ![](https://img.shields.io/github/stars/hustvl/YOLOP.svg?style=social) | detection | +## Quick Setup 👀 -
+To quickly setup `lite.ai.toolkit`, you can follow the `CMakeLists.txt` listed as belows. 👇👀 +```cmake +set(LITE_AI_DIR YOUR-PATH-TO/lite.ai.toolkit) +find_package(lite.ai.toolkit REQUIRED PATHS ${LITE_AI_DIR}) +add_executable(lite_yolov5 examples/test_lite_yolov5.cpp) +target_link_libraries(lite_yolov5 ${lite.ai.toolkit_LIBS}) +``` +
+ 🔑️ Supported Models Matrix!Click here! -## 3. Supported Models Matrix +## Supported Models Matrix
* / = not supported now. @@ -282,187 +225,13 @@ static void test_default() | [FaceParsingBiSeNet](https://github.com/zllrunning/face-parsing.PyTorch) | 50M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_face_parsing_bisenet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | | [FaceParsingBiSeNetDyn](https://github.com/zllrunning/face-parsing.PyTorch) | 50M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_face_parsing_bisenet_dyn.cpp) | ✅ | / | / | / | / | ✔️ | ✔️ | ❔ | - -## 4. Build Docs. -
-
- -* MacOS: Build the shared lib of **Lite.Ai.ToolKit** for **MacOS** from sources. Note that Lite.Ai.ToolKit uses **onnxruntime** as default backend, for the reason that onnxruntime supports the most of onnx's operators. - -```shell - git clone --depth=1 https://github.com/DefTruth/lite.ai.toolkit.git # latest - cd lite.ai.toolkit && sh ./build.sh # On MacOS, you can use the built OpenCV, ONNXRuntime, MNN, NCNN and TNN libs in this repo. -``` - -
-
- -
-💡 Linux and Windows. - -### Linux and Windows. - -⚠️ **Lite.Ai.ToolKit** is not directly support Linux and Windows now. For Linux and Windows, you need to build or download(if have official builts) the shared libs of **OpenCV**、**ONNXRuntime** and any other Engines(like MNN, NCNN, TNN) firstly, then put the headers into the specific directories or just let these directories unchange(use the headers offer by this repo, the header file of the dependent library of this project is directly copied from the corresponding official library). However, the dynamic libraries under different operating systems need to be recompiled or downloaded. MacOS users can directly use the dynamic libraries of each dependent library provided by this project: -* **lite.ai.toolkit/opencv2** - ```shell - cp -r you-path-to-downloaded-or-built-opencv/include/opencv4/opencv2 lite.ai.toolkit/opencv2 - ``` -* **lite.ai.toolkit/onnxruntime** - ```shell - cp -r you-path-to-downloaded-or-built-onnxruntime/include/onnxruntime lite.ai.toolkit/onnxruntime - ``` -* **lite.ai.toolkit/MNN** - ```shell - cp -r you-path-to-downloaded-or-built-MNN/include/MNN lite.ai.toolkit/MNN - ``` -* **lite.ai.toolkit/ncnn** - ```shell - cp -r you-path-to-downloaded-or-built-ncnn/include/ncnn lite.ai.toolkit/ncnn - ``` -* **lite.ai.toolkit/tnn** - ```shell - cp -r you-path-to-downloaded-or-built-TNN/include/tnn lite.ai.toolkit/tnn - ``` - -and put the libs into **lite.ai.toolkit/lib/(linux|windows)** directory. Please reference the build-docs[1](#lite.ai.toolkit-1) for **third_party**. 
-* **lite.ai.toolkit/lib/(linux|windows)** - ```shell - cp you-path-to-downloaded-or-built-opencv/lib/*opencv* lite.ai.toolkit/lib/(linux|windows)/ - cp you-path-to-downloaded-or-built-onnxruntime/lib/*onnxruntime* lite.ai.toolkit/lib/(linux|windows)/ - cp you-path-to-downloaded-or-built-MNN/lib/*MNN* lite.ai.toolkit/lib/(linux|windows)/ - cp you-path-to-downloaded-or-built-ncnn/lib/*ncnn* lite.ai.toolkit/lib/(linux|windows)/ - cp you-path-to-downloaded-or-built-TNN/lib/*TNN* lite.ai.toolkit/lib/(linux|windows)/ - ``` - -Note, your also need to install ffmpeg(<=4.2.2) in Linux to support the opencv videoio module. See [issue#203](https://github.com/DefTruth/lite.ai.toolkit/issues/6). In MacOS, ffmpeg4.2.2 was been package into lite.ai.toolkit, thus, no installation need in OSX. In Windows, ffmpeg was been package into opencv dll prebuilt by the team of opencv. Please make sure -DWITH_FFMPEG=ON and check the configuration info when building opencv. -* first, build ffmpeg(<=4.2.2) from source. -```shell -git clone --depth=1 https://git.ffmpeg.org/ffmpeg.git -b n4.2.2 -cd ffmpeg -./configure --enable-shared --disable-x86asm --prefix=/usr/local/opt/ffmpeg --disable-static -make -j8 -make install -``` -* then, build opencv with -DWITH_FFMPEG=ON, just like -```shell -#!/bin/bash - -mkdir build -cd build - -cmake .. \ - -D CMAKE_BUILD_TYPE=Release \ - -D CMAKE_INSTALL_PREFIX=your-path-to-custom-dir \ - -D BUILD_TESTS=OFF \ - -D BUILD_PERF_TESTS=OFF \ - -D BUILD_opencv_python3=OFF \ - -D BUILD_opencv_python2=OFF \ - -D BUILD_SHARED_LIBS=ON \ - -D BUILD_opencv_apps=OFF \ - -D WITH_FFMPEG=ON - -make -j8 -make install -cd .. -``` -after built opencv, you can follow the steps to build lite.ai.toolkit. - -* Windows: You can reference to [issue#6](https://github.com/DefTruth/lite.ai.toolkit/issues/6) -* Linux: The Docs and Docker image for Linux will be coming soon ~ [issue#2](https://github.com/DefTruth/lite.ai.toolkit/issues/2) -* Happy News !!! 
: 🚀 You can download the latest **ONNXRuntime** official built libs of Windows, Linux, MacOS and Arm !!! Both CPU and GPU versions are available. No more attentions needed pay to build it from source. Download the official built libs from [v1.8.1](https://github.com/microsoft/onnxruntime/releases). I have used version 1.7.0 for Lite.Ai.ToolKit now, you can download it from [v1.7.0](https://github.com/microsoft/onnxruntime/releases/tag/v1.7.0), but version 1.8.1 should also work, I guess ~ 🙃🤪🍀. For **OpenCV**, try to build from source(Linux) or down load the official built(Windows) from [OpenCV 4.5.3](https://github.com/opencv/opencv/releases). Then put the includes and libs into specific directory of Lite.Ai.ToolKit. - -* GPU Compatibility for Windows: See [issue#10](https://github.com/DefTruth/lite.ai.toolkit/issues/10). -* GPU Compatibility for Linux: See [issue#97](https://github.com/DefTruth/lite.ai.toolkit/issues/97). - -
+
-🔑️ How to link Lite.Ai.ToolKit? -* To link Lite.Ai.ToolKit, you can follow the CMakeLists.txt listed belows. - -```cmake -cmake_minimum_required(VERSION 3.10) -project(lite.ai.toolkit.demo) - -set(CMAKE_CXX_STANDARD 11) - -# setting up lite.ai.toolkit -set(LITE_AI_DIR ${CMAKE_SOURCE_DIR}/lite.ai.toolkit) -set(LITE_AI_INCLUDE_DIR ${LITE_AI_DIR}/include) -set(LITE_AI_LIBRARY_DIR ${LITE_AI_DIR}/lib) -include_directories(${LITE_AI_INCLUDE_DIR}) -link_directories(${LITE_AI_LIBRARY_DIR}) - -set(OpenCV_LIBS - opencv_highgui - opencv_core - opencv_imgcodecs - opencv_imgproc - opencv_video - opencv_videoio - ) -# add your executable -set(EXECUTABLE_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/examples/build) - -add_executable(lite_rvm examples/test_lite_rvm.cpp) -target_link_libraries(lite_rvm - lite.ai.toolkit - onnxruntime - MNN # need, if built lite.ai.toolkit with ENABLE_MNN=ON, default OFF - ncnn # need, if built lite.ai.toolkit with ENABLE_NCNN=ON, default OFF - TNN # need, if built lite.ai.toolkit with ENABLE_TNN=ON, default OFF - ${OpenCV_LIBS}) # link lite.ai.toolkit & other libs. -``` - -```shell -cd ./build/lite.ai.toolkit/lib && otool -L liblite.ai.toolkit.0.0.1.dylib -liblite.ai.toolkit.0.0.1.dylib: - @rpath/liblite.ai.toolkit.0.0.1.dylib (compatibility version 0.0.1, current version 0.0.1) - @rpath/libopencv_highgui.4.5.dylib (compatibility version 4.5.0, current version 4.5.2) - @rpath/libonnxruntime.1.7.0.dylib (compatibility version 0.0.0, current version 1.7.0) - ... -``` - - -```shell -cd ../ && tree . -├── bin -├── include -│   ├── lite -│   │   ├── backend.h -│   │   ├── config.h -│   │   └── lite.h -│   └── ort -└── lib - └── liblite.ai.toolkit.0.0.1.dylib -``` -* Run the built examples: -```shell -cd ./build/lite.ai.toolkit/bin && ls -lh | grep lite --rwxr-xr-x 1 root staff 301K Jun 26 23:10 liblite.ai.toolkit.0.0.1.dylib -... --rwxr-xr-x 1 root staff 196K Jun 26 23:10 lite_yolov4 --rwxr-xr-x 1 root staff 196K Jun 26 23:10 lite_yolov5 -... 
-``` + 🔑️ Model Zoo!Click here! -```shell -./lite_yolov5 -LITEORT_DEBUG LogId: ../../../hub/onnx/cv/yolov5s.onnx -=============== Input-Dims ============== -... -detected num_anchors: 25200 -generate_bboxes num: 66 -Default Version Detected Boxes Num: 5 -``` - -To link `lite.ai.toolkit` shared lib. You need to make sure that `OpenCV` and `onnxruntime` are linked correctly. A minimum example to show you how to link the shared lib of Lite.AI.ToolKit correctly for your own project can be found at [CMakeLists.txt](https://github.com/DefTruth/RobustVideoMatting-ncnn-mnn-tnn-onnxruntime/blob/main/CMakeLists.txt). - -
- - -## 5. Model Zoo. +## Model Zoo.
@@ -487,66 +256,7 @@ To link `lite.ai.toolkit` shared lib. You need to make sure that `OpenCV` and `o docker pull qyjdefdocker/lite.ai.toolkit-tnn-hub:v0.1.22.02.02 # (217M) + YOLO5Face ``` -
- ❇️ Lite.Ai.ToolKit modules. - -### Namespace and Lite.Ai.ToolKit modules. - -| Namespace | Details | -|:---------------------------|:----------------------------------------------------------------------------------------| -| *lite::cv::detection* | Object Detection. one-stage and anchor-free detectors, YoloV5, YoloV4, SSD, etc. ✅ | -| *lite::cv::classification* | Image Classification. DensNet, ShuffleNet, ResNet, IBNNet, GhostNet, etc. ✅ | -| *lite::cv::faceid* | Face Recognition. ArcFace, CosFace, CurricularFace, etc. ❇️ | -| *lite::cv::face* | Face Analysis. *detect*, *align*, *pose*, *attr*, etc. ❇️ | -| *lite::cv::face::detect* | Face Detection. UltraFace, RetinaFace, FaceBoxes, PyramidBox, etc. ❇️ | -| *lite::cv::face::align* | Face Alignment. PFLD(106), FaceLandmark1000(1000 landmarks), PRNet, etc. ❇️ | -| *lite::cv::face::align3d* | 3D Face Alignment. FaceMesh(468 3D landmarks), IrisLandmark(71+5 3D landmarks), etc. ❇️ | -| *lite::cv::face::pose* | Head Pose Estimation. FSANet, etc. ❇️ | -| *lite::cv::face::attr* | Face Attributes. Emotion, Age, Gender. EmotionFerPlus, VGG16Age, etc. ❇️ | -| *lite::cv::segmentation* | Object Segmentation. Such as FCN, DeepLabV3, etc. ❇️ ️ | -| *lite::cv::style* | Style Transfer. Contains neural style transfer now, such as FastStyleTransfer. ⚠️ | -| *lite::cv::matting* | Image Matting. Object and Human matting. ❇️ ️ | -| *lite::cv::colorization* | Colorization. Make Gray image become RGB. ⚠️ | -| *lite::cv::resolution* | Super Resolution. ⚠️ | - - -### Lite.Ai.ToolKit's Classes and Pretrained Files. - -Correspondence between the classes in **Lite.AI.ToolKit** and pretrained model files can be found at [lite.ai.toolkit.hub.onnx.md](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md). For examples, the pretrained model files for *lite::cv::detection::YoloV5* and *lite::cv::detection::YoloX* are listed as follows. 
- - -| Class | Pretrained ONNX Files | Rename or Converted From (Repo) | Size | -|:-----------------------------:|:---------------------:|:----------------------------------------------------------------:|:-----:| -| *lite::cv::detection::YoloV5* | yolov5l.onnx | [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) | 188Mb | -| *lite::cv::detection::YoloV5* | yolov5m.onnx | [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) | 85Mb | -| *lite::cv::detection::YoloV5* | yolov5s.onnx | [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) | 29Mb | -| *lite::cv::detection::YoloV5* | yolov5x.onnx | [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) | 351Mb | -| *lite::cv::detection::YoloX* | yolox_x.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 378Mb | -| *lite::cv::detection::YoloX* | yolox_l.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 207Mb | -| *lite::cv::detection::YoloX* | yolox_m.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 97Mb | -| *lite::cv::detection::YoloX* | yolox_s.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 34Mb | -| *lite::cv::detection::YoloX* | yolox_tiny.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 19Mb | -| *lite::cv::detection::YoloX* | yolox_nano.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 3.5Mb | - -It means that you can load the the any one `yolov5*.onnx` and `yolox_*.onnx` according to your application through the same Lite.AI.ToolKit's classes, such as *YoloV5*, *YoloX*, etc. 
- -```c++ -auto *yolov5 = new lite::cv::detection::YoloV5("yolov5x.onnx"); // for server -auto *yolov5 = new lite::cv::detection::YoloV5("yolov5l.onnx"); -auto *yolov5 = new lite::cv::detection::YoloV5("yolov5m.onnx"); -auto *yolov5 = new lite::cv::detection::YoloV5("yolov5s.onnx"); // for mobile device -auto *yolox = new lite::cv::detection::YoloX("yolox_x.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_l.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_m.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_s.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_tiny.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_nano.onnx"); // 3.5Mb only ! -``` - -
- -
- 🔑️ How to download Model Zoo from Docker Hub? +### 🔑️ How to download Model Zoo from Docker Hub? * Firstly, pull the image from docker hub. ```shell @@ -582,13 +292,14 @@ auto *yolox = new lite::cv::detection::YoloX("yolox_nano.onnx"); // 3.5Mb only cp -rf mnn/cv share/ ``` -
### Model Hubs The pretrained and converted ONNX files provide by lite.ai.toolkit are listed as follows. Also, see [Model Zoo](#lite.ai.toolkit-Model-Zoo) and [ONNX Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md), [MNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.mnn.md), [TNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.tnn.md), [NCNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.ncnn.md) for more details. + + -## 6. Examples. +## Examples.
@@ -602,9 +313,9 @@ More examples can be found at [examples](https://github.com/DefTruth/lite.ai.too static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5s.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_1.jpg"; auto *yolov5 = new lite::cv::detection::YoloV5(onnx_path); std::vector detected_boxes; @@ -663,9 +374,9 @@ auto *detector = new lite::cv::detection::YOLOv6(onnx_path); // Newest 2022 YOL static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/rvm_mobilenetv3_fp32.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/rvm_mobilenetv3_fp32.onnx"; std::string video_path = "../../../examples/lite/resources/test_lite_rvm_0.mp4"; - std::string output_path = "../../../logs/test_lite_rvm_0.mp4"; + std::string output_path = "../../../examples/logs/test_lite_rvm_0.mp4"; std::string background_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; auto *rvm = new lite::cv::matting::RobustVideoMatting(onnx_path, 16); // 16 threads @@ -715,9 +426,9 @@ auto *matting = new lite::cv::matting::MobileHumanMatting(onnx_path); // 3Mb onl static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/FaceLandmark1000.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/FaceLandmark1000.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks_0.png"; - std::string save_img_path = "../../../logs/test_lite_face_landmarks_1000.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_landmarks_1000.jpg"; auto *face_landmarks_1000 = new lite::cv::face::align::FaceLandmark1000(onnx_path); @@ -762,9 +473,9 @@ auto *align = new lite::cv::face::align::PIPNet19(onnx_path); // 19 
landmarks, static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/eccv16-colorizer.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/eccv16-colorizer.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_colorizer_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_eccv16_colorizer_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_eccv16_colorizer_1.jpg"; auto *colorizer = new lite::cv::colorization::Colorizer(onnx_path); @@ -804,7 +515,7 @@ auto *colorizer = new lite::cv::colorization::Colorizer(onnx_path); static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ms1mv3_arcface_r100.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ms1mv3_arcface_r100.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; std::string test_img_path2 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -871,9 +582,9 @@ auto *recognition = new lite::cv::faceid::MobileSEFocalFace(onnx_path); // 4.5Mb static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/scrfd_2.5g_bnkps_shape640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/scrfd_2.5g_bnkps_shape640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_scrfd.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_scrfd.jpg"; auto *scrfd = new lite::cv::face::detect::SCRFD(onnx_path); @@ -915,9 +626,9 @@ auto *detector = new lite::face::detect::YOLOv5BlazeFace(onnx_path); // 2021, S static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/deeplabv3_resnet101_coco.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/deeplabv3_resnet101_coco.onnx"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_deeplabv3_resnet101.png"; - std::string save_img_path = "../../../logs/test_lite_deeplabv3_resnet101.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_deeplabv3_resnet101.jpg"; auto *deeplabv3_resnet101 = new lite::cv::segmentation::DeepLabV3ResNet101(onnx_path, 16); // 16 threads @@ -964,9 +675,9 @@ auto *segment = new lite::cv::segmentation::DeepLabV3ResNet101(onnx_path); static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ssrnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ssrnet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssrnet.jpg"; - std::string save_img_path = "../../../logs/test_lite_ssrnet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_ssrnet.jpg"; auto *ssrnet = new lite::cv::face::attr::SSRNet(onnx_path); @@ -1010,7 +721,7 @@ auto *attribute = new lite::cv::face::attr::SSRNet(onnx_path); // age estimation static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/densenet121.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/densenet121.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_densenet.jpg"; auto *densenet = new lite::cv::classification::DenseNet(onnx_path); @@ -1064,9 +775,9 @@ auto *classifier = new lite::cv::classification::ResNeXt(onnx_path); static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/fsanet-var.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/fsanet-var.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fsanet.jpg"; - std::string save_img_path = "../../../logs/test_lite_fsanet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_fsanet.jpg"; auto *fsanet = new lite::cv::face::pose::FSANet(onnx_path); cv::Mat img_bgr = cv::imread(test_img_path); @@ -1105,9 +816,9 @@ auto *pose = new lite::cv::face::pose::FSANet(onnx_path); // 
1.2Mb only! static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/style-candy-8.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/style-candy-8.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fast_style_transfer.jpg"; - std::string save_img_path = "../../../logs/test_lite_fast_style_transfer_candy.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_fast_style_transfer_candy.jpg"; auto *fast_style_transfer = new lite::cv::style::FastStyleTransfer(onnx_path); @@ -1144,9 +855,9 @@ auto *transfer = new lite::cv::style::FastStyleTransfer(onnx_path); // 6.4Mb onl static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/minivision_head_seg.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg.png"; - std::string save_img_path = "../../../logs/test_lite_head_seg.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_head_seg.jpg"; auto *head_seg = new lite::cv::segmentation::HeadSeg(onnx_path, 4); // 4 threads @@ -1186,11 +897,11 @@ auto *segment = new lite::cv::segmentation::MobileHairSeg(onnx_path); // 14M static void test_default() { - std::string head_seg_onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; - std::string cartoon_onnx_path = "../../../hub/onnx/cv/minivision_female_photo2cartoon.onnx"; + std::string head_seg_onnx_path = "../../../examples/hub/onnx/cv/minivision_head_seg.onnx"; + std::string cartoon_onnx_path = "../../../examples/hub/onnx/cv/minivision_female_photo2cartoon.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_female_photo2cartoon.jpg"; - std::string save_mask_path = "../../../logs/test_lite_female_photo2cartoon_seg.jpg"; - std::string save_cartoon_path = "../../../logs/test_lite_female_photo2cartoon_cartoon.jpg"; + std::string save_mask_path = 
"../../../examples/logs/test_lite_female_photo2cartoon_seg.jpg"; + std::string save_cartoon_path = "../../../examples/logs/test_lite_female_photo2cartoon_cartoon.jpg"; auto *head_seg = new lite::cv::segmentation::HeadSeg(head_seg_onnx_path, 4); // 4 threads auto *female_photo2cartoon = new lite::cv::style::FemalePhoto2Cartoon(cartoon_onnx_path, 4); // 4 threads @@ -1236,9 +947,9 @@ auto *transfer = new lite::cv::style::FemalePhoto2Cartoon(onnx_path); static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face_parsing_512x512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_parsing_512x512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_parsing_bisenet.jpg"; auto *face_parsing_bisenet = new lite::cv::segmentation::FaceParsingBiSeNet(onnx_path, 8); // 8 threads @@ -1267,86 +978,18 @@ auto *segment = new lite::cv::segmentation::FaceParsingBiSeNet(onnx_path); // 50 auto *segment = new lite::cv::segmentation::FaceParsingBiSeNetDyn(onnx_path); // Dynamic Shape Inference. ``` -## 7. License. +## License
The code of [Lite.Ai.ToolKit](#lite.ai.toolkit-Introduction) is released under the GPL-3.0 License. - -## 8. References. - -
- -Many thanks to these following projects. All the Lite.AI.ToolKit's models are sourced from these repos. - -* [RobustVideoMatting](https://github.com/PeterL1n/RobustVideoMatting) (🔥🔥🔥new!!↑) -* [nanodet](https://github.com/RangiLyu/nanodet) (🔥🔥🔥↑) -* [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥🔥new!!↑) -* [YOLOP](https://github.com/hustvl/YOLOP) (🔥🔥new!!↑) -* [YOLOR](https://github.com/WongKinYiu/yolor) (🔥🔥new!!↑) -* [ScaledYOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4) (🔥🔥🔥↑) -* [insightface](https://github.com/deepinsight/insightface) (🔥🔥🔥↑) -* [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) -* [TFace](https://github.com/Tencent/TFace) (🔥🔥↑) -* [YOLOv4-pytorch](https://github.com/argusswift/YOLOv4-pytorch) (🔥🔥🔥↑) -* [Ultra-Light-Fast-Generic-Face-Detector-1MB](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB) (🔥🔥🔥↑) - -
- Expand for More References. - -* [headpose-fsanet-pytorch](https://github.com/omasaht/headpose-fsanet-pytorch) (🔥↑) -* [pfld_106_face_landmarks](https://github.com/Hsintao/pfld_106_face_landmarks) (🔥🔥↑) -* [onnx-models](https://github.com/onnx/models) (🔥🔥🔥↑) -* [SSR_Net_Pytorch](https://github.com/oukohou/SSR_Net_Pytorch) (🔥↑) -* [colorization](https://github.com/richzhang/colorization) (🔥🔥🔥↑) -* [SUB_PIXEL_CNN](https://github.com/niazwazir/SUB_PIXEL_CNN) (🔥↑) -* [torchvision](https://github.com/pytorch/vision) (🔥🔥🔥↑) -* [facenet-pytorch](https://github.com/timesler/facenet-pytorch) (🔥↑) -* [face.evoLVe.PyTorch](https://github.com/ZhaoJ9014/face.evoLVe.PyTorch) (🔥🔥🔥↑) -* [center-loss.pytorch](https://github.com/louis-she/center-loss.pytorch) (🔥🔥↑) -* [sphereface_pytorch](https://github.com/clcarwin/sphereface_pytorch) (🔥🔥↑) -* [DREAM](https://github.com/penincillin/DREAM) (🔥🔥↑) -* [MobileFaceNet_Pytorch](https://github.com/Xiaoccer/MobileFaceNet_Pytorch) (🔥🔥↑) -* [cavaface.pytorch](https://github.com/cavalleria/cavaface.pytorch) (🔥🔥↑) -* [CurricularFace](https://github.com/HuangYG123/CurricularFace) (🔥🔥↑) -* [face-emotion-recognition](https://github.com/HSE-asavchenko/face-emotion-recognition) (🔥↑) -* [face_recognition.pytorch](https://github.com/grib0ed0v/face_recognition.pytorch) (🔥🔥↑) -* [PFLD-pytorch](https://github.com/polarisZhao/PFLD-pytorch) (🔥🔥↑) -* [pytorch_face_landmark](https://github.com/cunjian/pytorch_face_landmark) (🔥🔥↑) -* [FaceLandmark1000](https://github.com/Single430/FaceLandmark1000) (🔥🔥↑) -* [Pytorch_Retinaface](https://github.com/biubug6/Pytorch_Retinaface) (🔥🔥🔥↑) -* [FaceBoxes](https://github.com/zisianw/FaceBoxes.PyTorch) (🔥🔥↑) - -
- - -## 9. Compilation Options. - -In addition, [MNN](https://github.com/alibaba/MNN), [NCNN](https://github.com/Tencent/ncnn) and [TNN](https://github.com/Tencent/TNN) support for some models will be added in the future, but due to operator compatibility and some other reasons, it is impossible to ensure that all models supported by [ONNXRuntime C++](https://github.com/microsoft/onnxruntime) can run through [MNN](https://github.com/alibaba/MNN), [NCNN](https://github.com/Tencent/ncnn) and [TNN](https://github.com/Tencent/TNN). So, if you want to use all the models supported by this repo and don't care about the performance gap of *1~2ms*, just let [ONNXRuntime](https://github.com/microsoft/onnxruntime) as default inference engine for this repo. However, you can follow the steps below if you want to build with [MNN](https://github.com/alibaba/MNN), [NCNN](https://github.com/Tencent/ncnn) or [TNN](https://github.com/Tencent/TNN) support. - -* change the `build.sh` with `DENABLE_MNN=ON`,`DENABLE_NCNN=ON` or `DENABLE_TNN=ON`, such as -```shell -cd build && cmake \ - -DCMAKE_BUILD_TYPE=MinSizeRel \ - -DINCLUDE_OPENCV=ON \ # Whether to package OpenCV into lite.ai.toolkit, default ON; otherwise, you need to setup OpenCV yourself. - -DENABLE_MNN=ON \ # Whether to build with MNN, default OFF, only some models are supported now. - -DENABLE_NCNN=OFF \ # Whether to build with NCNN, default OFF, only some models are supported now. - -DENABLE_TNN=OFF \ # Whether to build with TNN, default OFF, only some models are supported now. - .. && make -j8 -``` -* use the MNN, NCNN or TNN version interface, see [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet.cpp), such as -```C++ -auto *nanodet = new lite::mnn::cv::detection::NanoDet(mnn_path); -auto *nanodet = new lite::tnn::cv::detection::NanoDet(proto_path, model_path); -auto *nanodet = new lite::ncnn::cv::detection::NanoDet(param_path, bin_path); -``` -## 10. Contribute +## Contribute
How to add your own models and become a contributor? See [CONTRIBUTING.zh.md](https://github.com/DefTruth/lite.ai.toolkit/issues/191). -## 11. Many Thanks !!! 🤗🎉🎉 +## Many Thanks !!! 🤗🎉🎉 * [Windows10 VS2019 CUDA 11.1 Build Docs](https://github.com/DefTruth/lite.ai.toolkit/issues/207) ([@zhanghongyong123456](https://github.com/zhanghongyong123456)) * [Linux Build Docs](https://github.com/DefTruth/lite.ai.toolkit/blob/main/docs/build/Linux.zh.md) ([@lee1221ee](https://github.com/lee1221ee)) * [Some Windows10 Bugs Fixed](https://github.com/DefTruth/lite.ai.toolkit/pull/105) ([@ysc3839](https://github.com/ysc3839), [@AvenSun](https://github.com/AvenSun)) diff --git a/README.zh.md b/README.zh.md deleted file mode 100644 index 68fa83aa..00000000 --- a/README.zh.md +++ /dev/null @@ -1,1380 +0,0 @@ - - - -
- - -![logo-v3](docs/resources/logo-v3.png) - -
- - - - - - - -
- -🛠**Lite.Ai.ToolKit**: 一个轻量级的`C++` AI模型工具箱,用户友好(还行吧),开箱即用。已经包括 **[100+](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md)** 流行的开源模型。这是一个根据个人兴趣整理的C++工具箱,, 涵盖[目标检测](#lite.ai.toolkit-object-detection)、[人脸检测](#lite.ai.toolkit-face-detection)、[人脸识别](#lite.ai.toolkit-face-recognition)、[语义分割](#lite.ai.toolkit-segmentation)、[抠图](#lite.ai.toolkit-matting)等领域。详见 [Model Zoo](#lite.ai.toolkit-Model-Zoo) 和 [ONNX Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md) 、[MNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.mnn.md) 、[TNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.tnn.md) 、[NCNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.ncnn.md). [若是有用,❤️不妨给个⭐️🌟支持一下吧,感谢支持~] - -
- - - - - - - - -
- - - - - - - - -
- - - - - - - - -
- -

English | 中文文档 | MacOS | Linux | Windows

- - -## 重要通知 (Important Note) !!! 🔥🔥🔥 - -中文简体:本项目将不再频繁更新,更优的部署体验请尝试[⚡️PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy) : ⚡️一款简单易用的推理部署工具箱。覆盖业界主流优质预训练模型并提供开箱即用的开发体验,包括图像分类、目标检测、图像分割、人脸检测、人体关键点识别、文字识别等多任务,满足开发者多场景,多硬件、多平台的快速部署需求,并同时支持 **C++** 和 **Python** 两种语言。**lite.ai.toolkit** 中的核心模型未来将会以contrib的方式集成到[⚡️PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy) 中。欢迎同学们使用 ✋👉[⚡️PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy). - ----- - -English: This project will no longer be updated frequently. For a better deployment experience, please try [⚡️PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy): ⚡️An Easy-to-use and Fast Deep Learning Model Deployment Toolkit. Covering the industry's mainstream high-quality pre-training models and providing out-of-the-box development experience, including image classification, object detection, image segmentation, face detection, human key point detection, text recognition and so on, to meet the needs of developers in multiple scenarios and multiple hardware, multi-platform deployment requirements. Furthermore, FastDeploy supports both **C++** and **Python** languages. The core models in **lite.ai.toolkit** will be integrated into [⚡️PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy) in a contrib way in the future. Welcome to use ✋👉[⚡️PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy). 
- ----- - -- [⚡️PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy) 近期更新 🔥🔥🔥 - - - 🔥 **2022.8.18:发布FastDeploy [release/v0.2.0](https://github.com/PaddlePaddle/FastDeploy/)** - - **服务端全新升级:一套SDK,覆盖全量模型** - - 发布基于x86 CPU、NVIDIA GPU的易用、高性能推理引擎SDK,推理速度大幅提升 - - 支持ONNXRuntime、Paddle Inference、TensorRT推理引擎 - - 支持YOLOv7、YOLOv6、YOLOv5、PP-YOLOE等目标检测最优模型及[Demo示例](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/vision/detection/) - - 支持人脸检测、人脸识别、实时人像抠图、图像分割等40+重点模型及[Demo示例](https://github.com/PaddlePaddle/FastDeploy/tree/develop/examples/vision/) - - 支持Python API 和 C++ API - - 开发AI模型部署代码量减少~60% - - **端侧继ARM CPU后,延伸至瑞芯微、晶晨、恩智浦等NPU能力** - - 发布轻量化目标检测 [Picodet-NPU部署Demo](https://github.com/PaddlePaddle/Paddle-Lite-Demo/tree/develop/object_detection/linux/picodet_detection) ,提供低门槛INT8全量化能力 - - **同时支持Python和C++两种语言** - - 支持 `pip install` 一行命令安装,Linux/Mac/Windows 快速使用 - - 提供 Linux/Mac/Windows,CPU/GPU 等多平台多硬件支持的C++预编译库 -- 更多详情请跳转✋👉[⚡️PaddlePaddle/FastDeploy](https://github.com/PaddlePaddle/FastDeploy) 🎉🎉 ----- - - -## 核心特征👏👋 -
- -* **用户友好,开箱即用。** 使用简单一致的调用语法,如**lite::cv::Type::Class**,详见[examples](#lite.ai.toolkit-Examples-for-Lite.AI.ToolKit). -* **少量依赖,构建容易。** 目前, 默认只依赖 **OpenCV** 和 **ONNXRuntime**,详见[build](#lite.ai.toolkit-Build-Lite.AI.ToolKit)。 -* **众多的算法模块,且持续更新。** 目前,包括将近 **[300+](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md)** C++实现以及 **[500+](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md)** 权重文件 - -## 引用 🎉🎉 - -如果您在自己的项目中使用了**Lite.Ai.ToolKit**,可考虑按以下方式进行引用。 -```BibTeX -@misc{lite.ai.toolkit2021, - title={lite.ai.toolkit: A lite C++ toolkit of awesome AI models.}, - url={https://github.com/DefTruth/lite.ai.toolkit}, - note={Open-source software available at https://github.com/DefTruth/lite.ai.toolkit}, - author={Yan Jun}, - year={2021} -} -``` - -## 关于训练 🤓👀 -一个用于人脸关键点检测的训练和评估的工具箱已经开源,可通过pip一键安装,地址在[torchlm](https://github.com/DefTruth/torchlm). - -## 预编译库 和 技术规划 ✅ -![](docs/resources/lite.ai.toolkit-roadmap-v0.1.png) - -目前,有一些预编译的MacOS(x64)和Linux(x64)下的lite.ai.toolkit动态库,可以直接从以下链接进行下载。Windows(x64)和Android下的预编译库,也会在最近发布出来。更多详情请参考[issues#48](https://github.com/DefTruth/lite.ai.toolkit/issues/48) . 
更多可下载的预编译库,请跳转到[releases](https://github.com/DefTruth/lite.ai.toolkit/releases) 查看。
-
-* [x] [lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.8.1.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.8.1.zip)
-* [x] [lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.9.0.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.9.0.zip)
-* [x] [lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.10.0.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-osx10.15.x-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.10.0.zip)
-* [x] [lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.8.1.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.8.1.zip)
-* [x] [lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.9.0.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.9.0.zip)
-* [x] [lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.10.0.zip](https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.1.1/lite0.1.1-ubuntu18.04-ocv4.5.2-ffmpeg4.2.2-onnxruntime1.10.0.zip)
-
-在Linux下,为了链接到预编译库,使用前,需要先将`lite.ai.toolkit/lib`的路径添加到LD_LIBRARY_PATH. 
-```shell
-export LD_LIBRARY_PATH=YOUR-PATH-TO/lite.ai.toolkit/lib:$LD_LIBRARY_PATH
-export LIBRARY_PATH=YOUR-PATH-TO/lite.ai.toolkit/lib:$LIBRARY_PATH # (may need)
-```
-## 快速配置 👀
-
-可以参考以下的CMakeLists.txt,快速配置lite.ai.toolkit。👇👀
-
-```cmake
-set(LITE_AI_DIR ${CMAKE_SOURCE_DIR}/lite.ai.toolkit)
-include_directories(${LITE_AI_DIR}/include)
-link_directories(${LITE_AI_DIR}/lib)
-set(TOOLKIT_LIBS lite.ai.toolkit onnxruntime)
-set(OpenCV_LIBS opencv_core opencv_imgcodecs opencv_imgproc opencv_video opencv_videoio)
-
-add_executable(lite_yolov5 examples/test_lite_yolov5.cpp)
-target_link_libraries(lite_yolov5 ${TOOLKIT_LIBS} ${OpenCV_LIBS})
-```
-
-## 目录 📖💡
-* [核心特征](#lite.ai.toolkit-Core-Features)
-* [快速开始](#lite.ai.toolkit-Quick-Start)
-* [技术规划](#lite.ai.toolkit-RoadMap)
-* [重要更新](#lite.ai.toolkit-Important-Updates)
-* [模型支持矩阵](#lite.ai.toolkit-Supported-Models-Matrix)
-* [编译文档](#lite.ai.toolkit-Build-Lite.AI.ToolKit)
-* [模型下载](#lite.ai.toolkit-Model-Zoo)
-* [应用案例](#lite.ai.toolkit-Examples-for-Lite.AI.ToolKit)
-* [开源协议](#lite.ai.toolkit-License)
-* [引用参考](#lite.ai.toolkit-References)
-* [如何添加您的模型](#lite.ai.toolkit-Contribute)
-
-## 1. 快速开始 🌟🌟
-
-
-#### 案例0: 使用[YOLOv5](https://github.com/ultralytics/yolov5) 进行目标检测。请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。
-```c++
-#include "lite/lite.h"
-
-static void test_default()
-{
-  std::string onnx_path = "../../../hub/onnx/cv/yolov5s.onnx";
-  std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_1.jpg";
-  std::string save_img_path = "../../../logs/test_lite_yolov5_1.jpg";
-
-  auto *yolov5 = new lite::cv::detection::YoloV5(onnx_path);
-  std::vector<lite::types::Boxf> detected_boxes;
-  cv::Mat img_bgr = cv::imread(test_img_path);
-  yolov5->detect(img_bgr, detected_boxes);
-
-  lite::utils::draw_boxes_inplace(img_bgr, detected_boxes);
-  cv::imwrite(save_img_path, img_bgr);
-
-  delete yolov5;
-}
-```
-
-## 2. 重要更新 🆕
-
- -
- Click here to see details of Important Updates!
-
-| Date | Model | C++ | Paper | Code | Awesome | Type |
-|:------------:|:------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------:|:-------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------:|:------------:|
-| 【2022/04/03】 | [MODNet](https://github.com/ZHKKKe/MODNet) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_modnet.cpp) | [AAAI 2022](https://arxiv.org/pdf/2011.11961.pdf) | [code](https://github.com/ZHKKKe/MODNet) | ![](https://img.shields.io/github/stars/ZHKKKe/MODNet.svg?style=social) | matting |
-| 【2022/03/23】 | [PIPNet](https://github.com/jhb86253817/PIPNet) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pipnet98.cpp) | [CVPR 2021](https://arxiv.org/abs/2003.03771) | [code](https://github.com/jhb86253817/PIPNet) | ![](https://img.shields.io/github/stars/jhb86253817/PIPNet.svg?style=social) | face::align |
-| 【2022/01/19】 | [YOLO5Face](https://github.com/deepcam-cn/yolov5-face) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolo5face.cpp) | [arXiv 2021](https://arxiv.org/abs/2105.12931) | [code](https://github.com/deepcam-cn/yolov5-face) | ![](https://img.shields.io/github/stars/deepcam-cn/yolov5-face.svg?style=social) | face::detect |
-| 【2022/01/07】 | [SCRFD](https://github.com/deepinsight/insightface/blob/master/detection/scrfd/) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_scrfd.cpp) | [CVPR 2021](https://arxiv.org/abs/2105.04714) | [code](https://github.com/deepinsight/insightface/blob/master/detection/scrfd/) | 
![](https://img.shields.io/github/stars/deepinsight/insightface.svg?style=social) | face::detect | -| 【2021/12/27】 | [NanoDetPlus](https://github.com/RangiLyu/nanodet) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet_plus.cpp) | [blog](https://zhuanlan.zhihu.com/p/449912627) | [code](https://github.com/RangiLyu/nanodet) | ![](https://img.shields.io/github/stars/RangiLyu/nanodet.svg?style=social) | detection | -| 【2021/12/08】 | [MGMatting](https://github.com/yucornetto/MGMatting) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mg_matting.cpp) | [CVPR 2021](https://arxiv.org/abs/2012.06722) | [code](https://github.com/yucornetto/MGMatting) | ![](https://img.shields.io/github/stars/yucornetto/MGMatting.svg?style=social) | matting | -| 【2021/11/11】 | [YoloV5_V_6_0](https://github.com/ultralytics/yolov5/releases/tag/v6.0) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov5_v6.0.cpp) | [doi](https://zenodo.org/record/5563715#.YbXffH1Bzfs) | [code](https://github.com/ultralytics/yolov5/releases/tag/v6.0) | ![](https://img.shields.io/github/stars/ultralytics/yolov5.svg?style=social) | detection | -| 【2021/10/26】 | [YoloX_V_0_1_1](https://github.com/Megvii-BaseDetection/YOLOX/releases/tag/0.1.1rc0) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolox_v0.1.1.cpp) | [arXiv 2021](https://arxiv.org/abs/2107.08430) | [code](https://github.com/Megvii-BaseDetection/YOLOX) | ![](https://img.shields.io/github/stars/Megvii-BaseDetection/YOLOX.svg?style=social) | detection | -| 【2021/10/02】 | [NanoDet](https://github.com/RangiLyu/nanodet) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet.cpp) | [blog](https://zhuanlan.zhihu.com/p/306530300) | [code](https://github.com/RangiLyu/nanodet) | ![](https://img.shields.io/github/stars/RangiLyu/nanodet.svg?style=social) 
| detection | -| 【2021/09/20】 | [RobustVideoMatting](https://github.com/PeterL1n/RobustVideoMatting) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_rvm.cpp) | [WACV 2022](https://arxiv.org/abs/2108.11515) | [code](https://github.com/PeterL1n/RobustVideoMatting) | ![](https://img.shields.io/github/stars/PeterL1n/RobustVideoMatting.svg?style=social) | matting | -| 【2021/09/02】 | [YOLOP](https://github.com/hustvl/YOLOP) | [link](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolop.cpp) | [arXiv 2021](https://arxiv.org/abs/2108.11250) | [code](https://github.com/hustvl/YOLOP) | ![](https://img.shields.io/github/stars/hustvl/YOLOP.svg?style=social) | detection | - - - -
- -## 3. 模型支持矩阵 -
- -* / = 暂不支持. -* ✅ = 可以运行,且官方支持. -* ✔️ = 可以运行,但非官方支持. -* ❔ = 计划中,但不会很快实现,也许几个月后. - -| Class | Size | Type | Demo | ONNXRuntime | MNN | NCNN | TNN | MacOS | Linux | Windows | Android | -|:-----------------------------------------------------------------------------------------------------------------:|:-----:|:----------------:|:----------------------------------------------------------------------------------------------------------------------:|:-----------:|:---:|:----:|:---:|:-----:|:-----:|:-------:|:-------:| -| [YoloV5](https://github.com/ultralytics/yolov5) | 28M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov5.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [YoloV3](https://github.com/onnx/models/blob/master/vision/object_detection_segmentation/yolov3) | 236M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov3.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [TinyYoloV3](https://github.com/onnx/models/blob/master/vision/object_detection_segmentation/tiny-yolov3) | 33M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_tiny_yolov3.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [YoloV4](https://github.com/argusswift/YOLOv4-pytorch) | 176M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov4.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [SSD](https://github.com/onnx/models/blob/master/vision/object_detection_segmentation/ssd) | 76M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_ssd.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [SSDMobileNetV1](https://github.com/onnx/models/blob/master/vision/object_detection_segmentation/ssd-mobilenetv1) | 27M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_ssd_mobilenetv1.cpp) | ✅ | / | / | / | ✅ | ✔️ 
| ✔️ | / | -| [YoloX](https://github.com/Megvii-BaseDetection/YOLOX) | 3.5M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolox.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [TinyYoloV4VOC](https://github.com/bubbliiiing/yolov4-tiny-pytorch) | 22M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_tiny_yolov4_voc.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [TinyYoloV4COCO](https://github.com/bubbliiiing/yolov4-tiny-pytorch) | 22M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_tiny_yolov4_coco.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [YoloR](https://github.com/WongKinYiu/yolor) | 39M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolor.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [ScaledYoloV4](https://github.com/WongKinYiu/ScaledYOLOv4) | 270M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_scaled_yolov4.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [EfficientDet](https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch) | 15M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_efficientdet.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [EfficientDetD7](https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch) | 220M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_efficientdet_d7.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [EfficientDetD8](https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch) | 322M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_efficientdet_d8.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [YOLOP](https://github.com/hustvl/YOLOP) | 30M | *detection* | 
[demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolop.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [NanoDet](https://github.com/RangiLyu/nanodet) | 1.1M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [NanoDetPlus](https://github.com/RangiLyu/nanodet) | 4.5M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet_plus.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [NanoDetEffi...](https://github.com/RangiLyu/nanodet) | 12M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet_efficientnet_lite.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [YoloX_V_0_1_1](https://github.com/Megvii-BaseDetection/YOLOX) | 3.5M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolox_v0.1.1.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [YoloV5_V_6_0](https://github.com/ultralytics/yolov5) | 7.5M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov5_v6.0.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [GlintArcFace](https://github.com/deepinsight/insightface/tree/master/recognition/arcface_torch) | 92M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_glint_arcface.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [GlintCosFace](https://github.com/deepinsight/insightface/tree/master/recognition/arcface_torch) | 92M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_glint_cosface.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [GlintPartialFC](https://github.com/deepinsight/insightface/tree/master/recognition/partial_fc) | 170M | *faceid* | 
[demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_glint_partial_fc.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [FaceNet](https://github.com/timesler/facenet-pytorch) | 89M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_facenet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [FocalArcFace](https://github.com/ZhaoJ9014/face.evoLVe.PyTorch) | 166M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_focal_arcface.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [FocalAsiaArcFace](https://github.com/ZhaoJ9014/face.evoLVe.PyTorch) | 166M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_focal_asia_arcface.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [TencentCurricularFace](https://github.com/Tencent/TFace/tree/master/tasks/distfc) | 249M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_tencent_curricular_face.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [TencentCifpFace](https://github.com/Tencent/TFace/tree/master/tasks/cifp) | 130M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_tencent_cifp_face.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [CenterLossFace](https://github.com/louis-she/center-loss.pytorch) | 280M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_center_loss_face.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [SphereFace](https://github.com/clcarwin/sphereface_pytorch) | 80M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_sphere_face.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [PoseRobustFace](https://github.com/penincillin/DREAM) | 92M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pose_robust_face.cpp) | ✅ | / | / | / | 
✅ | ✔️ | ✔️ | / | -| [NaivePoseRobustFace](https://github.com/penincillin/DREAM) | 43M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_naive_pose_robust_face.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [MobileFaceNet](https://github.com/Xiaoccer/MobileFaceNet_Pytorch) | 3.8M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobile_facenet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [CavaGhostArcFace](https://github.com/cavalleria/cavaface.pytorch) | 15M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_cava_ghost_arcface.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [CavaCombinedFace](https://github.com/cavalleria/cavaface.pytorch) | 250M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_cava_combined_face.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [MobileSEFocalFace](https://github.com/grib0ed0v/face_recognition.pytorch) | 4.5M | *faceid* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobilese_focal_face.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [RobustVideoMatting](https://github.com/PeterL1n/RobustVideoMatting) | 14M | *matting* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_rvm.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [MGMatting](https://github.com/yucornetto/MGMatting) | 113M | *matting* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mg_matting.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | / | -| [MODNet](https://github.com/ZHKKKe/MODNet) | 24M | *matting* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_modnet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [MODNetDyn](https://github.com/ZHKKKe/MODNet) | 24M | *matting* | 
[demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_modnet_dyn.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [BackgroundMattingV2](https://github.com/PeterL1n/BackgroundMattingV2) | 20M | *matting* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_backgroundmattingv2.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | / | -| [BackgroundMattingV2Dyn](https://github.com/PeterL1n/BackgroundMattingV2) | 20M | *matting* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_backgroundmattingv2_dyn.cpp) | ✅ | / | / | / | ✅ | ✔️ | ✔️ | / | -| [UltraFace](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB) | 1.1M | *face::detect* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_ultraface.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [RetinaFace](https://github.com/biubug6/Pytorch_Retinaface) | 1.6M | *face::detect* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_retinaface.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [FaceBoxes](https://github.com/zisianw/FaceBoxes.PyTorch) | 3.8M | *face::detect* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_faceboxes.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [FaceBoxesV2](https://github.com/jhb86253817/FaceBoxesV2) | 3.8M | *face::detect* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_faceboxesv2.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [SCRFD](https://github.com/deepinsight/insightface/blob/master/detection/scrfd/) | 2.5M | *face::detect* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_scrfd.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [YOLO5Face](https://github.com/deepcam-cn/yolov5-face) | 4.8M | *face::detect* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolo5face.cpp) 
| ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [PFLD](https://github.com/Hsintao/pfld_106_face_landmarks) | 1.0M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pfld.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [PFLD98](https://github.com/polarisZhao/PFLD-pytorch) | 4.8M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pfld98.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [MobileNetV268](https://github.com/cunjian/pytorch_face_landmark) | 9.4M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobilenetv2_68.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [MobileNetV2SE68](https://github.com/cunjian/pytorch_face_landmark) | 11M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobilenetv2_se_68.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [PFLD68](https://github.com/cunjian/pytorch_face_landmark) | 2.8M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pfld68.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [FaceLandmark1000](https://github.com/Single430/FaceLandmark1000) | 2.0M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_face_landmarks_1000.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [PIPNet98](https://github.com/jhb86253817/PIPNet) | 44.0M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pipnet98.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [PIPNet68](https://github.com/jhb86253817/PIPNet) | 44.0M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pipnet68.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [PIPNet29](https://github.com/jhb86253817/PIPNet) | 44.0M | *face::align* | 
[demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pipnet29.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [PIPNet19](https://github.com/jhb86253817/PIPNet) | 44.0M | *face::align* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_pipnet19.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [FSANet](https://github.com/omasaht/headpose-fsanet-pytorch) | 1.2M | *face::pose* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_fsanet.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [AgeGoogleNet](https://github.com/onnx/models/tree/master/vision/body_analysis/age_gender) | 23M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_age_googlenet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [GenderGoogleNet](https://github.com/onnx/models/tree/master/vision/body_analysis/age_gender) | 23M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_gender_googlenet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [EmotionFerPlus](https://github.com/onnx/models/blob/master/vision/body_analysis/emotion_ferplus) | 33M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_emotion_ferplus.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [VGG16Age](https://github.com/onnx/models/tree/master/vision/body_analysis/age_gender) | 514M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_vgg16_age.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [VGG16Gender](https://github.com/onnx/models/tree/master/vision/body_analysis/age_gender) | 512M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_vgg16_gender.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [SSRNet](https://github.com/oukohou/SSR_Net_Pytorch) | 190K | *face::attr* | 
[demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_ssrnet.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [EfficientEmotion7](https://github.com/HSE-asavchenko/face-emotion-recognition) | 15M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_efficient_emotion7.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [EfficientEmotion8](https://github.com/HSE-asavchenko/face-emotion-recognition) | 15M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_efficient_emotion8.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [MobileEmotion7](https://github.com/HSE-asavchenko/face-emotion-recognition) | 13M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobile_emotion7.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [ReXNetEmotion7](https://github.com/HSE-asavchenko/face-emotion-recognition) | 30M | *face::attr* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_rexnet_emotion7.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | / | -| [EfficientNetLite4](https://github.com/onnx/models/blob/master/vision/classification/efficientnet-lite4) | 49M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_efficientnet_lite4.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | / | -| [ShuffleNetV2](https://github.com/onnx/models/blob/master/vision/classification/shufflenet) | 8.7M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_shufflenetv2.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [DenseNet121](https://pytorch.org/hub/pytorch_vision_densenet/) | 30.7M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_densenet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [GhostNet](https://pytorch.org/hub/pytorch_vision_ghostnet/) | 20M | 
*classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_ghostnet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [HdrDNet](https://pytorch.org/hub/pytorch_vision_hardnet//) | 13M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_hardnet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [IBNNet](https://pytorch.org/hub/pytorch_vision_ibnnet/) | 97M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_ibnnet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [MobileNetV2](https://pytorch.org/hub/pytorch_vision_mobilenet_v2/) | 13M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobilenetv2.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [ResNet](https://pytorch.org/hub/pytorch_vision_resnet/) | 44M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_resnet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [ResNeXt](https://pytorch.org/hub/pytorch_vision_resnext/) | 95M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_resnext.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [DeepLabV3ResNet101](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101/) | 232M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_deeplabv3_resnet101.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [FCNResNet101](https://pytorch.org/hub/pytorch_vision_fcn_resnet101/) | 207M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_fcn_resnet101.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | / | -| [FastStyleTransfer](https://github.com/onnx/models/blob/master/vision/style_transfer/fast_neural_style) | 6.4M | *style* | 
[demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_fast_style_transfer.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [Colorizer](https://github.com/richzhang/colorization) | 123M | *colorization* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_colorizer.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | / | -| [SubPixelCNN](https://github.com/niazwazir/SUB_PIXEL_CNN) | 234K | *resolution* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_subpixel_cnn.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [SubPixelCNN](https://github.com/niazwazir/SUB_PIXEL_CNN) | 234K | *resolution* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_subpixel_cnn.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [InsectDet](https://github.com/quarrying/quarrying-insect-id) | 27M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_insectdet.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [InsectID](https://github.com/quarrying/quarrying-insect-id) | 22M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_insectid.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ✔️ | ❔ | -| [PlantID](https://github.com/quarrying/quarrying-plant-id) | 30M | *classification* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_plantid.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ✔️ | ❔ | -| [YOLOv5BlazeFace](https://github.com/deepcam-cn/yolov5-face) | 3.4M | *face::detect* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov5_blazeface.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| [YoloV5_V_6_1](https://github.com/ultralytics/yolov5/releases/tag/v6.1) | 7.5M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov5_v6.1.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| 
[HeadSeg](https://github.com/minivision-ai/photo2cartoon) | 31M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_head_seg.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [FemalePhoto2Cartoon](https://github.com/minivision-ai/photo2cartoon) | 15M | *style* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_female_photo2cartoon.cpp) | ✅ | ✅ | / | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [FastPortraitSeg](https://github.com/YexingWan/Fast-Portrait-Segmentation) | 400k | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_fast_portrait_seg.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| [PortraitSegSINet](https://github.com/clovaai/ext_portrait_segmentation) | 380k | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_portrait_seg_sinet.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| [PortraitSegExtremeC3Net](https://github.com/clovaai/ext_portrait_segmentation) | 180k | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_portrait_seg_extremec3net.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| [FaceHairSeg](https://github.com/kampta/face-seg) | 18M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_face_hair_seg.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| [HairSeg](https://github.com/akirasosa/mobile-semantic-segmentation) | 18M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_hair_seg.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| [MobileHumanMatting](https://github.com/lizhengwei1992/mobile_phone_human_matting) | 3M | *matting* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobile_human_matting.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| 
[MobileHairSeg](https://github.com/wonbeomjang/mobile-hair-segmentation-pytorch) | 14M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_mobile_hair_seg.cpp) | ✅ | ✅ | / | / | ✅ | ✔️ | ✔️ | ❔ | -| [YOLOv6](https://github.com/meituan/YOLOv6) | 17M | *detection* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_yolov6.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [FaceParsingBiSeNet](https://github.com/zllrunning/face-parsing.PyTorch) | 50M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_face_parsing_bisenet.cpp) | ✅ | ✅ | ✅ | ✅ | ✅ | ✔️ | ✔️ | ❔ | -| [FaceParsingBiSeNetDyn](https://github.com/zllrunning/face-parsing.PyTorch) | 50M | *segmentation* | [demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_face_parsing_bisenet_dyn.cpp) | ✅ | / | / | / | / | ✔️ | ✔️ | ❔ | - - -## 4. 编译文档 -
-
- -* MacOS: 从**Lite.Ai.ToolKit** 源码编译**MacOS**下的动态库。需要注意的是**Lite.Ai.ToolKit** 使用**onnxruntime**作为默认的后端,因为onnxruntime支持大部分onnx的原生算子,具有更高的易用性。如何编译Linux和Windows版本?点击 ▶️ 查看。 -```shell - git clone --depth=1 https://github.com/DefTruth/lite.ai.toolkit.git # 最新源码 - cd lite.ai.toolkit && sh ./build.sh # 对于MacOS, 你可以直接利用本项目包含的OpenCV, ONNXRuntime, MNN, NCNN and TNN依赖库,无需重新编译 -``` - -
-
- -
-💡️ Linux 和 Windows
-
-### Linux 和 Windows
-
-⚠️ **Lite.Ai.ToolKit** 的发行版本目前不直接支持Linux和Windows,你需要下载**Lite.Ai.ToolKit**的源码进行构建。首先,你需要下载(如果有官方编译好的发行版本的话)或编译**OpenCV** 、**ONNXRuntime** 和其他你需要的推理引擎,如MNN、NCNN、TNN,然后把它们的头文件分别放入各自对应的文件夹,或者直接使用本项目提供的头文件。本项目的依赖库头文件是直接从相应的官方库拷贝而来的,但不同操作系统下的动态库需要重新编译或下载,MacOS用户可以直接使用本项目提供的各个依赖库的动态库。
-* **lite.ai.toolkit/opencv2**
-  ```shell
-  cp -r you-path-to-downloaded-or-built-opencv/include/opencv4/opencv2 lite.ai.toolkit/opencv2
-  ```
-* **lite.ai.toolkit/onnxruntime**
-  ```shell
-  cp -r you-path-to-downloaded-or-built-onnxruntime/include/onnxruntime lite.ai.toolkit/onnxruntime
-  ```
-* **lite.ai.toolkit/MNN**
-  ```shell
-  cp -r you-path-to-downloaded-or-built-MNN/include/MNN lite.ai.toolkit/MNN
-  ```
-* **lite.ai.toolkit/ncnn**
-  ```shell
-  cp -r you-path-to-downloaded-or-built-ncnn/include/ncnn lite.ai.toolkit/ncnn
-  ```
-* **lite.ai.toolkit/tnn**
-  ```shell
-  cp -r you-path-to-downloaded-or-built-TNN/include/tnn lite.ai.toolkit/tnn
-  ```
-
-然后把各个依赖库拷贝到**lite.ai.toolkit/lib/(linux|windows)** 文件夹。 请参考依赖库的编译文档[1](#lite.ai.toolkit-1)。
-* **lite.ai.toolkit/lib/(linux|windows)**
-  ```shell
-  cp you-path-to-downloaded-or-built-opencv/lib/*opencv* lite.ai.toolkit/lib/(linux|windows)/
-  cp you-path-to-downloaded-or-built-onnxruntime/lib/*onnxruntime* lite.ai.toolkit/lib/(linux|windows)/
-  cp you-path-to-downloaded-or-built-MNN/lib/*MNN* lite.ai.toolkit/lib/(linux|windows)/
-  cp you-path-to-downloaded-or-built-ncnn/lib/*ncnn* lite.ai.toolkit/lib/(linux|windows)/
-  cp you-path-to-downloaded-or-built-TNN/lib/*TNN* lite.ai.toolkit/lib/(linux|windows)/
-  ```
-注意,你还需要安装ffmpeg(<=4.2.2),因为opencv的videoio模块依赖ffmpeg进行mp4的编解码。参考[issue#203](https://github.com/DefTruth/lite.ai.toolkit/issues/6) . 在MacOS下,ffmpeg4.2.2已经作为一个自定义依赖库被我打包进lite.ai.toolkit,不需要再从HomeBrew安装为系统库,因此lite.ai.toolkit是单体的,你可以把它移植到app里面,不用担心另一台运行app的机器没有ffmpeg,MacOS版本的lite.ai.toolkit已经包含ffmpeg. 在Windows下,opencv官方团队已经提供了用于opencv的ffmpeg预编译库。在Linux下编译opencv时,请确保-DWITH_FFMPEG=ON,并检查是否链接到ffmpeg.
-* 先编译ffmpeg,注意必须是低版本的,高于4.4的,opencv会不兼容。 -```shell -git clone --depth=1 https://git.ffmpeg.org/ffmpeg.git -b n4.2.2 -cd ffmpeg -./configure --enable-shared --disable-x86asm --prefix=/usr/local/opt/ffmpeg --disable-static -make -j8 -make install -``` -* 然后,编译带ffmpeg支持的OpenCV,指定-DWITH_FFMPEG=ON -```shell -#!/bin/bash - -mkdir build -cd build - -cmake .. \ - -D CMAKE_BUILD_TYPE=Release \ - -D CMAKE_INSTALL_PREFIX=your-path-to-custom-dir \ - -D BUILD_TESTS=OFF \ - -D BUILD_PERF_TESTS=OFF \ - -D BUILD_opencv_python3=OFF \ - -D BUILD_opencv_python2=OFF \ - -D BUILD_SHARED_LIBS=ON \ - -D BUILD_opencv_apps=OFF \ - -D WITH_FFMPEG=ON - -make -j8 -make install -cd .. -``` -编译完opencv后,你就可以按照上述的步骤,继续编译lite.ai.toolkit. - -* Windows: 你可以参考[issue#6](https://github.com/DefTruth/lite.ai.toolkit/issues/6) ,讨论了常见的编译问题。 -* Linux: 参考MacOS下的编译,替换Linux版本的依赖库即可。Linux下的发行版本将会在近期添加 ~ [issue#2](https://github.com/DefTruth/lite.ai.toolkit/issues/2) -* 令人开心的消息!!! : 🚀 你可以直接下载最新的**ONNXRuntime**官方构建的动态库,包含Windows, Linux, MacOS and Arm的版本!!! CPU和GPU的版本均可获得。不需要再从源码进行编译了,nice。可以从[v1.8.1](https://github.com/microsoft/onnxruntime/releases) 下载最新的动态库. 我目前在**Lite.Ai.ToolKit**中用的是1.7.0,你可以从[v1.7.0](https://github.com/microsoft/onnxruntime/releases/tag/v1.7.0) 下载, 但1.8.1应该也是可行的。对于**OpenCV**,请尝试从源码构建(Linux) 或者 直接从[OpenCV 4.5.3](https://github.com/opencv/opencv/releases) 下载官方编译好的动态库(Windows). 然后把头文件和依赖库放入上述的文件夹中. - -* Windows GPU 兼容性: 详见[issue#10](https://github.com/DefTruth/lite.ai.toolkit/issues/10). -* Linux GPU 兼容性: 详见[issue#97](https://github.com/DefTruth/lite.ai.toolkit/issues/97). - -
- -
-🔑️ 如何链接Lite.Ai.ToolKit动态库? - -* 你可参考以下的CMakeLists.txt设置来链接动态库. - -```cmake -cmake_minimum_required(VERSION 3.17) -project(lite.ai.toolkit.demo) - -set(CMAKE_CXX_STANDARD 11) - -# setting up lite.ai.toolkit -set(LITE_AI_DIR ${CMAKE_SOURCE_DIR}/lite.ai.toolkit) -set(LITE_AI_INCLUDE_DIR ${LITE_AI_DIR}/include) -set(LITE_AI_LIBRARY_DIR ${LITE_AI_DIR}/lib) -include_directories(${LITE_AI_INCLUDE_DIR}) -link_directories(${LITE_AI_LIBRARY_DIR}) - -set(OpenCV_LIBS - opencv_highgui - opencv_core - opencv_imgcodecs - opencv_imgproc - opencv_video - opencv_videoio - ) -# add your executable -set(EXECUTABLE_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/examples/build) - -add_executable(lite_rvm examples/test_lite_rvm.cpp) -target_link_libraries(lite_rvm - lite.ai.toolkit - onnxruntime - MNN # need, if built lite.ai.toolkit with ENABLE_MNN=ON, default OFF - ncnn # need, if built lite.ai.toolkit with ENABLE_NCNN=ON, default OFF - TNN # need, if built lite.ai.toolkit with ENABLE_TNN=ON, default OFF - ${OpenCV_LIBS}) # link lite.ai.toolkit & other libs. -``` - -```shell -cd ./build/lite.ai.toolkit/lib && otool -L liblite.ai.toolkit.0.0.1.dylib -liblite.ai.toolkit.0.0.1.dylib: - @rpath/liblite.ai.toolkit.0.0.1.dylib (compatibility version 0.0.1, current version 0.0.1) - @rpath/libopencv_highgui.4.5.dylib (compatibility version 4.5.0, current version 4.5.2) - @rpath/libonnxruntime.1.7.0.dylib (compatibility version 0.0.0, current version 1.7.0) - ... -``` - - -```shell -cd ../ && tree . -├── bin -├── include -│   ├── lite -│   │   ├── backend.h -│   │   ├── config.h -│   │   └── lite.h -│   └── ort -└── lib - └── liblite.ai.toolkit.0.0.1.dylib -``` -* 运行已经编译好的examples: -```shell -cd ./build/lite.ai.toolkit/bin && ls -lh | grep lite --rwxr-xr-x 1 root staff 301K Jun 26 23:10 liblite.ai.toolkit.0.0.1.dylib -... --rwxr-xr-x 1 root staff 196K Jun 26 23:10 lite_yolov4 --rwxr-xr-x 1 root staff 196K Jun 26 23:10 lite_yolov5 -... 
-``` - -```shell -./lite_yolov5 -LITEORT_DEBUG LogId: ../../../hub/onnx/cv/yolov5s.onnx -=============== Input-Dims ============== -... -detected num_anchors: 25200 -generate_bboxes num: 66 -Default Version Detected Boxes Num: 5 -``` - -为了链接`lite.ai.toolkit`动态库,你需要确保`OpenCV` and `onnxruntime`也被正确地链接。你可以在[CMakeLists.txt](https://github.com/DefTruth/RobustVideoMatting-ncnn-mnn-tnn-onnxruntime/blob/main/CMakeLists.txt) 中找到一个简单且完整的,关于如何正确地链接Lite.AI.ToolKit动态库的应用案例。 - -
- - -## 5. 模型下载 -
-
- -**Lite.Ai.ToolKit** 目前包括将近 **[100+](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md)** 流行的开源模型以及 **[500+](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md)** 文件,大部分文件是我自己转换的。你可以通过**lite::cv::Type::Class** 语法进行调用,如 **[lite::cv::detection::YoloV5](#lite.ai.toolkit-object-detection)**。更多的细节见[Examples for Lite.Ai.ToolKit](#lite.ai.toolkit-Examples-for-Lite.AI.ToolKit)。注意,由于Google Driver(15G)的存储限制,我无法上传所有的模型文件,国内的小伙伴请使用百度云盘。 - -| File | Baidu Drive | Google Drive | Docker Hub | Hub (Docs) | -|:----:|:-------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------:| -| ONNX | [Baidu Drive](https://pan.baidu.com/s/1elUGcx7CZkkjEoYhTMwTRQ) code: 8gin | [Google Drive](https://drive.google.com/drive/folders/1p6uBcxGeyS1exc-T61vL8YRhwjYL4iD2?usp=sharing) | [ONNX Docker v0.1.22.01.08 (28G), v0.1.22.02.02 (400M)](https://hub.docker.com/r/qyjdefdocker/lite.ai.toolkit-onnx-hub/tags) | [ONNX Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md) | -| MNN | [Baidu Drive](https://pan.baidu.com/s/1KyO-bCYUv6qPq2M8BH_Okg) code: 9v63 | ❔ | [MNN Docker v0.1.22.01.08 (11G), v0.1.22.02.02 (213M)](https://hub.docker.com/r/qyjdefdocker/lite.ai.toolkit-mnn-hub/tags) | [MNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.mnn.md) | -| NCNN | [Baidu Drive](https://pan.baidu.com/s/1hlnqyNsFbMseGFWscgVhgQ) code: sc7f | ❔ | [NCNN Docker v0.1.22.01.08 (9G), v0.1.22.02.02 (197M)](https://hub.docker.com/r/qyjdefdocker/lite.ai.toolkit-ncnn-hub/tags) | [NCNN 
Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.ncnn.md) | -| TNN | [Baidu Drive](https://pan.baidu.com/s/1lvM2YKyUbEc5HKVtqITpcw) code: 6o6k | ❔ | [TNN Docker v0.1.22.01.08 (11G), v0.1.22.02.02 (217M)](https://hub.docker.com/r/qyjdefdocker/lite.ai.toolkit-tnn-hub/tags) | [TNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.tnn.md) | - -```shell - docker pull qyjdefdocker/lite.ai.toolkit-onnx-hub:v0.1.22.01.08 # (28G) - docker pull qyjdefdocker/lite.ai.toolkit-mnn-hub:v0.1.22.01.08 # (11G) - docker pull qyjdefdocker/lite.ai.toolkit-ncnn-hub:v0.1.22.01.08 # (9G) - docker pull qyjdefdocker/lite.ai.toolkit-tnn-hub:v0.1.22.01.08 # (11G) - docker pull qyjdefdocker/lite.ai.toolkit-onnx-hub:v0.1.22.02.02 # (400M) + YOLO5Face - docker pull qyjdefdocker/lite.ai.toolkit-mnn-hub:v0.1.22.02.02 # (213M) + YOLO5Face - docker pull qyjdefdocker/lite.ai.toolkit-ncnn-hub:v0.1.22.02.02 # (197M) + YOLO5Face - docker pull qyjdefdocker/lite.ai.toolkit-tnn-hub:v0.1.22.02.02 # (217M) + YOLO5Face -``` -
- ❇️ 命名空间和Lite.Ai.ToolKit算法模块的对应关系 - -### 命名空间和Lite.Ai.ToolKit算法模块的对应关系 - -| Namespace | Details | -|:---------------------------|:----------------------------------------------------------------------------------------------| -| *lite::cv::detection* | Object Detection. one-stage and anchor-free detectors, YoloV5, YoloV4, SSD, etc. ✅ | -| *lite::cv::classification* | Image Classification. DensNet, ShuffleNet, ResNet, IBNNet, GhostNet, etc. ✅ | -| *lite::cv::faceid* | Face Recognition. ArcFace, CosFace, CurricularFace, etc. ❇️ | -| *lite::cv::face* | Face Analysis. *detect*, *align*, *pose*, *attr*, etc. ❇️ | -| *lite::cv::face::detect* | Face Detection. UltraFace, RetinaFace, FaceBoxes, PyramidBox, etc. ❇️ | -| *lite::cv::face::align* | Face Alignment. PFLD(106), FaceLandmark1000(1000 landmarks), PRNet, etc. ❇️ | -| *lite::cv::face::align3d* | 3D Face Alignment. FaceMesh(468 3D landmarks), IrisLandmark(71+5 3D landmarks), etc. ❇️ | -| *lite::cv::face::pose* | Head Pose Estimation. FSANet, etc. ❇️ | -| *lite::cv::face::attr* | Face Attributes. Emotion, Age, Gender. EmotionFerPlus, VGG16Age, etc. ❇️ | -| *lite::cv::segmentation* | Object Segmentation. Such as FCN, DeepLabV3, etc. ❇️ ️ | -| *lite::cv::style* | Style Transfer. Contains neural style transfer now, such as FastStyleTransfer. ⚠️ | -| *lite::cv::matting* | Image Matting. Object and Human matting. ❇️ ️ | -| *lite::cv::colorization* | Colorization. Make Gray image become RGB. ⚠️ | -| *lite::cv::resolution* | Super Resolution. 
⚠️ | - - -### Lite.Ai.ToolKit的类与权重文件对应关系说明 - -Lite.AI.ToolKit的类与权重文件对应关系说明,可以在[lite.ai.toolkit.hub.onnx.md](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md) 中找到。比如, *lite::cv::detection::YoloV5* 和 *lite::cv::detection::YoloX* 的权重文件为: - - -| Class | Pretrained ONNX Files | Rename or Converted From (Repo) | Size | -|:-----------------------------:|:---------------------:|:----------------------------------------------------------------:|:-----:| -| *lite::cv::detection::YoloV5* | yolov5l.onnx | [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) | 188Mb | -| *lite::cv::detection::YoloV5* | yolov5m.onnx | [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) | 85Mb | -| *lite::cv::detection::YoloV5* | yolov5s.onnx | [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) | 29Mb | -| *lite::cv::detection::YoloV5* | yolov5x.onnx | [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) | 351Mb | -| *lite::cv::detection::YoloX* | yolox_x.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 378Mb | -| *lite::cv::detection::YoloX* | yolox_l.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 207Mb | -| *lite::cv::detection::YoloX* | yolox_m.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 97Mb | -| *lite::cv::detection::YoloX* | yolox_s.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 34Mb | -| *lite::cv::detection::YoloX* | yolox_tiny.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 19Mb | -| *lite::cv::detection::YoloX* | yolox_nano.onnx | [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥!!↑) | 3.5Mb | - -这意味着,你可以通过Lite.Ai.ToolKit中的同一个类,根据你的使用情况,加载任意一个`yolov5*.onnx`或`yolox_*.onnx`,如 *YoloV5*, *YoloX*等. 
- -```c++ -auto *yolov5 = new lite::cv::detection::YoloV5("yolov5x.onnx"); // for server -auto *yolov5 = new lite::cv::detection::YoloV5("yolov5l.onnx"); -auto *yolov5 = new lite::cv::detection::YoloV5("yolov5m.onnx"); -auto *yolov5 = new lite::cv::detection::YoloV5("yolov5s.onnx"); // for mobile device -auto *yolox = new lite::cv::detection::YoloX("yolox_x.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_l.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_m.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_s.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_tiny.onnx"); -auto *yolox = new lite::cv::detection::YoloX("yolox_nano.onnx"); // 3.5Mb only ! -``` - -
- -
- 🔑️ 如何从通过Docker Hub下载Model Zoo? - -* Firstly, pull the image from docker hub. - ```shell - docker pull qyjdefdocker/lite.ai.toolkit-mnn-hub:v0.1.22.01.08 # (11G) - docker pull qyjdefdocker/lite.ai.toolkit-ncnn-hub:v0.1.22.01.08 # (9G) - docker pull qyjdefdocker/lite.ai.toolkit-tnn-hub:v0.1.22.01.08 # (11G) - docker pull qyjdefdocker/lite.ai.toolkit-onnx-hub:v0.1.22.01.08 # (28G) - ``` -* Secondly, run the container with local `share` dir using `docker run -idt xxx`. A minimum example will show you as follows. - * make a `share` dir in your local device. - ```shell - mkdir share # any name is ok. - ``` - * write `run_mnn_docker_hub.sh` script like: - ```shell - #!/bin/bash - PORT1=6072 - PORT2=6084 - SERVICE_DIR=/Users/xxx/Desktop/your-path-to/share - CONRAINER_DIR=/home/hub/share - CONRAINER_NAME=mnn_docker_hub_d - - docker run -idt -p ${PORT2}:${PORT1} -v ${SERVICE_DIR}:${CONRAINER_DIR} --shm-size=16gb --name ${CONRAINER_NAME} qyjdefdocker/lite.ai.toolkit-mnn-hub:v0.1.22.01.08 - - ``` -* Finally, copy the model weights from `/home/hub/mnn/cv` to your local `share` dir. - ```shell - # activate mnn docker. - sh ./run_mnn_docker_hub.sh - docker exec -it mnn_docker_hub_d /bin/bash - # copy the models to the share dir. - cd /home/hub - cp -rf mnn/cv share/ - ``` - -
- -### Model Hubs -lite.ai.toolkit提供大量的预训练模型的ONNX文件. 同时, 更多的模型权重文件详见 [Model Zoo](#lite.ai.toolkit-Model-Zoo) and [ONNX Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.onnx.md), [MNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.mnn.md), [TNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.tnn.md), [NCNN Hub](https://github.com/DefTruth/lite.ai.toolkit/tree/main/docs/hub/lite.ai.toolkit.hub.ncnn.md). - - -## 6. 应用案例 - -
- -更多的应用案例详见[examples](https://github.com/DefTruth/lite.ai.toolkit/tree/main/examples/lite/cv) 。点击 ▶️ 可以看到该主题下更多的案例。 - -
- -#### 案例0: 使用[YOLOv5](https://github.com/ultralytics/yolov5) 进行目标检测。请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_1.jpg"; - - auto *yolov5 = new lite::cv::detection::YoloV5(onnx_path); - std::vector detected_boxes; - cv::Mat img_bgr = cv::imread(test_img_path); - yolov5->detect(img_bgr, detected_boxes); - - lite::utils::draw_boxes_inplace(img_bgr, detected_boxes); - cv::imwrite(save_img_path, img_bgr); - - delete yolov5; -} -``` - -输出的结果是: -
- - -
- -或者你可以使用最新的 🔥🔥 ! YOLO 系列检测器[YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) 或 [YoloR](https://github.com/WongKinYiu/yolor) ,它们会获得接近的结果。 - -更多可用的通用目标检测器(80类、COCO): -```c++ -auto *detector = new lite::cv::detection::YoloX(onnx_path); // Newest YOLO detector !!! 2021-07 -auto *detector = new lite::cv::detection::YoloV4(onnx_path); -auto *detector = new lite::cv::detection::YoloV3(onnx_path); -auto *detector = new lite::cv::detection::TinyYoloV3(onnx_path); -auto *detector = new lite::cv::detection::SSD(onnx_path); -auto *detector = new lite::cv::detection::YoloV5(onnx_path); -auto *detector = new lite::cv::detection::YoloR(onnx_path); // Newest YOLO detector !!! 2021-05 -auto *detector = new lite::cv::detection::TinyYoloV4VOC(onnx_path); -auto *detector = new lite::cv::detection::TinyYoloV4COCO(onnx_path); -auto *detector = new lite::cv::detection::ScaledYoloV4(onnx_path); -auto *detector = new lite::cv::detection::EfficientDet(onnx_path); -auto *detector = new lite::cv::detection::EfficientDetD7(onnx_path); -auto *detector = new lite::cv::detection::EfficientDetD8(onnx_path); -auto *detector = new lite::cv::detection::YOLOP(onnx_path); -auto *detector = new lite::cv::detection::NanoDet(onnx_path); // Super fast and tiny! -auto *detector = new lite::cv::detection::NanoDetPlus(onnx_path); // Super fast and tiny! 2021/12/25 -auto *detector = new lite::cv::detection::NanoDetEfficientNetLite(onnx_path); // Super fast and tiny! -auto *detector = new lite::cv::detection::YoloV5_V_6_0(onnx_path); -auto *detector = new lite::cv::detection::YoloV5_V_6_1(onnx_path); -auto *detector = new lite::cv::detection::YoloX_V_0_1_1(onnx_path); // Newest YOLO detector !!! 2021-07 -auto *detector = new lite::cv::detection::YOLOv6(onnx_path); // Newest 2022 YOLO detector !!! -``` - -**** - -
- -#### 案例1: 使用[RobustVideoMatting2021🔥🔥🔥](https://github.com/PeterL1n/RobustVideoMatting) 进行视频抠图。请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 - -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/rvm_mobilenetv3_fp32.onnx"; - std::string video_path = "../../../examples/lite/resources/test_lite_rvm_0.mp4"; - std::string output_path = "../../../logs/test_lite_rvm_0.mp4"; - std::string background_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - - auto *rvm = new lite::cv::matting::RobustVideoMatting(onnx_path, 16); // 16 threads - std::vector contents; - - // 1. video matting. - cv::Mat background = cv::imread(background_path); - rvm->detect_video(video_path, output_path, contents, false, 0.4f, - 20, true, true, background); - - delete rvm; -} -``` -输出的结果是: - -
- - - - -
- - - - -
-
-
-更多可用的抠图模型(图片抠图、视频抠图、trimap/mask-free、trimap/mask-based):
-```c++
-auto *matting = new lite::cv::matting::RobustVideoMatting(onnx_path); // WACV 2022.
-auto *matting = new lite::cv::matting::MGMatting(onnx_path); // CVPR 2021
-auto *matting = new lite::cv::matting::MODNet(onnx_path); // AAAI 2022
-auto *matting = new lite::cv::matting::MODNetDyn(onnx_path); // AAAI 2022 Dynamic Shape Inference.
-auto *matting = new lite::cv::matting::BackgroundMattingV2(onnx_path); // CVPR 2020
-auto *matting = new lite::cv::matting::BackgroundMattingV2Dyn(onnx_path); // CVPR 2020 Dynamic Shape Inference.
-auto *matting = new lite::cv::matting::MobileHumanMatting(onnx_path); // 3Mb only !!!
-```
-
-****
-
- -#### 案例2: 使用[FaceLandmarks1000](https://github.com/Single430/FaceLandmark1000) 进行人脸1000关键点检测。请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/FaceLandmark1000.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks_0.png"; - std::string save_img_path = "../../../logs/test_lite_face_landmarks_1000.jpg"; - - auto *face_landmarks_1000 = new lite::cv::face::align::FaceLandmark1000(onnx_path); - - lite::types::Landmarks landmarks; - cv::Mat img_bgr = cv::imread(test_img_path); - face_landmarks_1000->detect(img_bgr, landmarks); - lite::utils::draw_landmarks_inplace(img_bgr, landmarks); - cv::imwrite(save_img_path, img_bgr); - - delete face_landmarks_1000; -} -``` -输出的结果是: -
- - - -
- -更多可用的人脸关键点检测器(68点、98点、106点、1000点): -```c++ -auto *align = new lite::cv::face::align::PFLD(onnx_path); // 106 landmarks, 1.0Mb only! -auto *align = new lite::cv::face::align::PFLD98(onnx_path); // 98 landmarks, 4.8Mb only! -auto *align = new lite::cv::face::align::PFLD68(onnx_path); // 68 landmarks, 2.8Mb only! -auto *align = new lite::cv::face::align::MobileNetV268(onnx_path); // 68 landmarks, 9.4Mb only! -auto *align = new lite::cv::face::align::MobileNetV2SE68(onnx_path); // 68 landmarks, 11Mb only! -auto *align = new lite::cv::face::align::FaceLandmark1000(onnx_path); // 1000 landmarks, 2.0Mb only! -auto *align = new lite::cv::face::align::PIPNet98(onnx_path); // 98 landmarks, CVPR2021! -auto *align = new lite::cv::face::align::PIPNet68(onnx_path); // 68 landmarks, CVPR2021! -auto *align = new lite::cv::face::align::PIPNet29(onnx_path); // 29 landmarks, CVPR2021! -auto *align = new lite::cv::face::align::PIPNet19(onnx_path); // 19 landmarks, CVPR2021! -``` - -**** - -
- -#### 案例3: 使用[colorization](https://github.com/richzhang/colorization) 进行图像着色。请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/eccv16-colorizer.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_colorizer_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_eccv16_colorizer_1.jpg"; - - auto *colorizer = new lite::cv::colorization::Colorizer(onnx_path); - - cv::Mat img_bgr = cv::imread(test_img_path); - lite::types::ColorizeContent colorize_content; - colorizer->detect(img_bgr, colorize_content); - - if (colorize_content.flag) cv::imwrite(save_img_path, colorize_content.mat); - delete colorizer; -} -``` -输出的结果是: - -
- - - -
- - - -
- -更多可用的着色器模型(灰度图转彩色图): -```c++ -auto *colorizer = new lite::cv::colorization::Colorizer(onnx_path); -``` - -**** - -
-
-#### 案例4: 使用[ArcFace](https://github.com/deepinsight/insightface/tree/master/recognition/arcface_torch) 进行人脸识别。请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。
-
-```c++
-#include "lite/lite.h"
-
-static void test_default()
-{
-  std::string onnx_path = "../../../hub/onnx/cv/ms1mv3_arcface_r100.onnx";
-  std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png";
-  std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png";
-  std::string test_img_path2 = "../../../examples/lite/resources/test_lite_faceid_2.png";
-
-  auto *glint_arcface = new lite::cv::faceid::GlintArcFace(onnx_path);
-
-  lite::types::FaceContent face_content0, face_content1, face_content2;
-  cv::Mat img_bgr0 = cv::imread(test_img_path0);
-  cv::Mat img_bgr1 = cv::imread(test_img_path1);
-  cv::Mat img_bgr2 = cv::imread(test_img_path2);
-  glint_arcface->detect(img_bgr0, face_content0);
-  glint_arcface->detect(img_bgr1, face_content1);
-  glint_arcface->detect(img_bgr2, face_content2);
-
-  if (face_content0.flag && face_content1.flag && face_content2.flag)
-  {
-    float sim01 = lite::utils::math::cosine_similarity(
-        face_content0.embedding, face_content1.embedding);
-    float sim02 = lite::utils::math::cosine_similarity(
-        face_content0.embedding, face_content2.embedding);
-    std::cout << "Detected Sim01: " << sim01 << " Sim02: " << sim02 << std::endl;
-  }
-
-  delete glint_arcface;
-}
-```
-
-输出的结果是:
- - - -
- -> Detected Sim01: 0.721159 Sim02: -0.0626267 - -更多可用的人脸识别模型(人脸特征提取): -```c++ -auto *recognition = new lite::cv::faceid::GlintCosFace(onnx_path); // DeepGlint(insightface) -auto *recognition = new lite::cv::faceid::GlintArcFace(onnx_path); // DeepGlint(insightface) -auto *recognition = new lite::cv::faceid::GlintPartialFC(onnx_path); // DeepGlint(insightface) -auto *recognition = new lite::cv::faceid::FaceNet(onnx_path); -auto *recognition = new lite::cv::faceid::FocalArcFace(onnx_path); -auto *recognition = new lite::cv::faceid::FocalAsiaArcFace(onnx_path); -auto *recognition = new lite::cv::faceid::TencentCurricularFace(onnx_path); // Tencent(TFace) -auto *recognition = new lite::cv::faceid::TencentCifpFace(onnx_path); // Tencent(TFace) -auto *recognition = new lite::cv::faceid::CenterLossFace(onnx_path); -auto *recognition = new lite::cv::faceid::SphereFace(onnx_path); -auto *recognition = new lite::cv::faceid::PoseRobustFace(onnx_path); -auto *recognition = new lite::cv::faceid::NaivePoseRobustFace(onnx_path); -auto *recognition = new lite::cv::faceid::MobileFaceNet(onnx_path); // 3.8Mb only ! -auto *recognition = new lite::cv::faceid::CavaGhostArcFace(onnx_path); -auto *recognition = new lite::cv::faceid::CavaCombinedFace(onnx_path); -auto *recognition = new lite::cv::faceid::MobileSEFocalFace(onnx_path); // 4.5Mb only ! -``` - -**** - -
- -#### 案例5: 使用[SCRFD 2021](https://github.com/deepinsight/insightface/blob/master/detection/scrfd/) 进行人脸检测。请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/scrfd_2.5g_bnkps_shape640x640.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_scrfd.jpg"; - - auto *scrfd = new lite::cv::face::detect::SCRFD(onnx_path); - - std::vector detected_boxes; - cv::Mat img_bgr = cv::imread(test_img_path); - scrfd->detect(img_bgr, detected_boxes); - - lite::utils::draw_boxes_with_landmarks_inplace(img_bgr, detected_boxes); - cv::imwrite(save_img_path, img_bgr); - - delete scrfd; -} -``` -输出的结果是: -
- - - -
- -更多可用的人脸检测器(轻量级人脸检测器): -```c++ -auto *detector = new lite::face::detect::UltraFace(onnx_path); // 1.1Mb only ! -auto *detector = new lite::face::detect::FaceBoxes(onnx_path); // 3.8Mb only ! -auto *detector = new lite::face::detect::FaceBoxesv2(onnx_path); // 4.0Mb only ! -auto *detector = new lite::face::detect::RetinaFace(onnx_path); // 1.6Mb only ! CVPR2020 -auto *detector = new lite::face::detect::SCRFD(onnx_path); // 2.5Mb only ! CVPR2021, Super fast and accurate!! -auto *detector = new lite::face::detect::YOLO5Face(onnx_path); // 2021, Super fast and accurate!! -auto *detector = new lite::face::detect::YOLOv5BlazeFace(onnx_path); // 2021, Super fast and accurate!! -``` - -**** - -
- -#### 案例6: 使用 [DeepLabV3ResNet101](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101/) 进行语义分割. 请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/deeplabv3_resnet101_coco.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_deeplabv3_resnet101.png"; - std::string save_img_path = "../../../logs/test_lite_deeplabv3_resnet101.jpg"; - - auto *deeplabv3_resnet101 = new lite::cv::segmentation::DeepLabV3ResNet101(onnx_path, 16); // 16 threads - - lite::types::SegmentContent content; - cv::Mat img_bgr = cv::imread(test_img_path); - deeplabv3_resnet101->detect(img_bgr, content); - - if (content.flag) - { - cv::Mat out_img; - cv::addWeighted(img_bgr, 0.2, content.color_mat, 0.8, 0., out_img); - cv::imwrite(save_img_path, out_img); - if (!content.names_map.empty()) - { - for (auto it = content.names_map.begin(); it != content.names_map.end(); ++it) - { - std::cout << it->first << " Name: " << it->second << std::endl; - } - } - } - delete deeplabv3_resnet101; -} -``` - -输出的结果是: -
- - -
- -更多可用的语义分割模型(人像分割、实例分割): -```c++ -auto *segment = new lite::cv::segmentation::FCNResNet101(onnx_path); -auto *segment = new lite::cv::segmentation::DeepLabV3ResNet101(onnx_path); -``` - -**** - -
- -#### 案例7: 使用 [SSRNet](https://github.com/oukohou/SSR_Net_Pytorch) 进行年龄估计. 请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/ssrnet.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_ssrnet.jpg"; - std::string save_img_path = "../../../logs/test_lite_ssrnet.jpg"; - - auto *ssrnet = new lite::cv::face::attr::SSRNet(onnx_path); - - lite::types::Age age; - cv::Mat img_bgr = cv::imread(test_img_path); - ssrnet->detect(img_bgr, age); - lite::utils::draw_age_inplace(img_bgr, age); - cv::imwrite(save_img_path, img_bgr); - - delete ssrnet; -} -``` -输出的结果是: -
- - - -
- -更多可用的人脸属性识别模型(性别、年龄、情绪): -```c++ -auto *attribute = new lite::cv::face::attr::AgeGoogleNet(onnx_path); -auto *attribute = new lite::cv::face::attr::GenderGoogleNet(onnx_path); -auto *attribute = new lite::cv::face::attr::EmotionFerPlus(onnx_path); -auto *attribute = new lite::cv::face::attr::VGG16Age(onnx_path); -auto *attribute = new lite::cv::face::attr::VGG16Gender(onnx_path); -auto *attribute = new lite::cv::face::attr::EfficientEmotion7(onnx_path); // 7 emotions, 15Mb only! -auto *attribute = new lite::cv::face::attr::EfficientEmotion8(onnx_path); // 8 emotions, 15Mb only! -auto *attribute = new lite::cv::face::attr::MobileEmotion7(onnx_path); // 7 emotions, 13Mb only! -auto *attribute = new lite::cv::face::attr::ReXNetEmotion7(onnx_path); // 7 emotions -auto *attribute = new lite::cv::face::attr::SSRNet(onnx_path); // age estimation, 190kb only!!! -``` - -**** - -
- -#### 案例8: 使用 [DenseNet](https://pytorch.org/hub/pytorch_vision_densenet/) 进行图片1000分类. 请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/densenet121.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_densenet.jpg"; - - auto *densenet = new lite::cv::classification::DenseNet(onnx_path); - - lite::types::ImageNetContent content; - cv::Mat img_bgr = cv::imread(test_img_path); - densenet->detect(img_bgr, content); - if (content.flag) - { - const unsigned int top_k = content.scores.size(); - if (top_k > 0) - { - for (unsigned int i = 0; i < top_k; ++i) - std::cout << i + 1 - << ": " << content.labels.at(i) - << ": " << content.texts.at(i) - << ": " << content.scores.at(i) - << std::endl; - } - } - delete densenet; -} -``` - -输出的结果是: -
- - -
- -更多可用的图像分类模型(1000类): -```c++ -auto *classifier = new lite::cv::classification::EfficientNetLite4(onnx_path); -auto *classifier = new lite::cv::classification::ShuffleNetV2(onnx_path); // 8.7Mb only! -auto *classifier = new lite::cv::classification::GhostNet(onnx_path); -auto *classifier = new lite::cv::classification::HdrDNet(onnx_path); -auto *classifier = new lite::cv::classification::IBNNet(onnx_path); -auto *classifier = new lite::cv::classification::MobileNetV2(onnx_path); // 13Mb only! -auto *classifier = new lite::cv::classification::ResNet(onnx_path); -auto *classifier = new lite::cv::classification::ResNeXt(onnx_path); -``` - -**** - -
-
-#### 案例9: 使用 [FSANet](https://github.com/omasaht/headpose-fsanet-pytorch) 进行头部姿态识别. 请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。
-
-```c++
-#include "lite/lite.h"
-
-static void test_default()
-{
-  std::string onnx_path = "../../../hub/onnx/cv/fsanet-var.onnx";
-  std::string test_img_path = "../../../examples/lite/resources/test_lite_fsanet.jpg";
-  std::string save_img_path = "../../../logs/test_lite_fsanet.jpg";
-
-  auto *fsanet = new lite::cv::face::pose::FSANet(onnx_path);
-  cv::Mat img_bgr = cv::imread(test_img_path);
-  lite::types::EulerAngles euler_angles;
-  fsanet->detect(img_bgr, euler_angles);
-
-  if (euler_angles.flag)
-  {
-    lite::utils::draw_axis_inplace(img_bgr, euler_angles);
-    cv::imwrite(save_img_path, img_bgr);
-    std::cout << "yaw:" << euler_angles.yaw << " pitch:" << euler_angles.pitch << " roll:" << euler_angles.roll << std::endl;
-  }
-  delete fsanet;
-}
-```
-
-输出的结果是:
- - - -
- -更多可用的头部姿态识别模型(欧拉角、yaw、pitch、roll): -```c++ -auto *pose = new lite::cv::face::pose::FSANet(onnx_path); // 1.2Mb only! -``` - -**** - -
- -#### 案例10: 使用 [FastStyleTransfer](https://github.com/onnx/models/tree/master/vision/style_transfer/fast_neural_style) 进行风格迁移. 请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/style-candy-8.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_fast_style_transfer.jpg"; - std::string save_img_path = "../../../logs/test_lite_fast_style_transfer_candy.jpg"; - - auto *fast_style_transfer = new lite::cv::style::FastStyleTransfer(onnx_path); - - lite::types::StyleContent style_content; - cv::Mat img_bgr = cv::imread(test_img_path); - fast_style_transfer->detect(img_bgr, style_content); - - if (style_content.flag) cv::imwrite(save_img_path, style_content.mat); - delete fast_style_transfer; -} -``` -输出的结果是: - -
- - - -
- - - -
- -更多可用的风格迁移模型(自然风格迁移、其他): -```c++ -auto *transfer = new lite::cv::style::FastStyleTransfer(onnx_path); // 6.4Mb only -``` - -**** - -#### 案例11: 使用 [HeadSeg](https://github.com/minivision-ai/photo2cartoon) 进行人像头部分割. 请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg.png"; - std::string save_img_path = "../../../logs/test_lite_head_seg.jpg"; - - auto *head_seg = new lite::cv::segmentation::HeadSeg(onnx_path, 4); // 4 threads - - lite::types::HeadSegContent content; - cv::Mat img_bgr = cv::imread(test_img_path); - head_seg->detect(img_bgr, content); - if (content.flag) cv::imwrite(save_img_path, content.mask * 255.f); - - delete head_seg; -} -``` -输出的结果是: - -
- - - - -
- -更多可用的人像分割模型(头部分割、肖像分割、头发分割) -```c++ -auto *segment = new lite::cv::segmentation::HeadSeg(onnx_path); // 31Mb -auto *segment = new lite::cv::segmentation::FastPortraitSeg(onnx_path); // <= 400Kb !!! -auto *segment = new lite::cv::segmentation::PortraitSegSINet(onnx_path); // <= 380Kb !!! -auto *segment = new lite::cv::segmentation::PortraitSegExtremeC3Net(onnx_path); // <= 180Kb !!! Extreme Tiny !!! -auto *segment = new lite::cv::segmentation::FaceHairSeg(onnx_path); // 18M -auto *segment = new lite::cv::segmentation::HairSeg(onnx_path); // 18M -auto *segment = new lite::cv::segmentation::MobileHairSeg(onnx_path); // 14M -``` - -**** - -#### Example12: 使用 [Photo2Cartoon](https://github.com/minivision-ai/photo2cartoon) 进行人像卡通风格化。请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string head_seg_onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; - std::string cartoon_onnx_path = "../../../hub/onnx/cv/minivision_female_photo2cartoon.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_female_photo2cartoon.jpg"; - std::string save_mask_path = "../../../logs/test_lite_female_photo2cartoon_seg.jpg"; - std::string save_cartoon_path = "../../../logs/test_lite_female_photo2cartoon_cartoon.jpg"; - - auto *head_seg = new lite::cv::segmentation::HeadSeg(head_seg_onnx_path, 4); // 4 threads - auto *female_photo2cartoon = new lite::cv::style::FemalePhoto2Cartoon(cartoon_onnx_path, 4); // 4 threads - - lite::types::HeadSegContent head_seg_content; - cv::Mat img_bgr = cv::imread(test_img_path); - head_seg->detect(img_bgr, head_seg_content); - - if (head_seg_content.flag && !head_seg_content.mask.empty()) - { - cv::imwrite(save_mask_path, head_seg_content.mask * 255.f); - // Female Photo2Cartoon Style Transfer - lite::types::FemalePhoto2CartoonContent female_cartoon_content; - female_photo2cartoon->detect(img_bgr, head_seg_content.mask, female_cartoon_content); - - if 
(female_cartoon_content.flag && !female_cartoon_content.cartoon.empty()) - cv::imwrite(save_cartoon_path, female_cartoon_content.cartoon); - } - - delete head_seg; - delete female_photo2cartoon; -} -``` -输出的结果是: - -
- - - - -
- -更多的人像风格化模型: -```c++ -auto *transfer = new lite::cv::style::FemalePhoto2Cartoon(onnx_path); -``` - -**** - -#### Example13: 使用 [FaceParsing](https://github.com/zllrunning/face-parsing.PyTorch) 进行五官分割. 请从Model-Zoo[2](#lite.ai.toolkit-2) 下载模型文件。 -```c++ -#include "lite/lite.h" - -static void test_default() -{ - std::string onnx_path = "../../../hub/onnx/cv/face_parsing_512x512.onnx"; - std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet.jpg"; - - auto *face_parsing_bisenet = new lite::cv::segmentation::FaceParsingBiSeNet(onnx_path, 8); // 8 threads - - lite::types::FaceParsingContent content; - cv::Mat img_bgr = cv::imread(test_img_path); - face_parsing_bisenet->detect(img_bgr, content); - - if (content.flag && !content.merge.empty()) - cv::imwrite(save_img_path, content.merge); - - delete face_parsing_bisenet; -} -``` -输出的结果是: - -
- - - - -
- -更多的进行五官分割的模型 (hair, eyes, nose, mouth, others): -```c++ -auto *segment = new lite::cv::segmentation::FaceParsingBiSeNet(onnx_path); // 50Mb -auto *segment = new lite::cv::segmentation::FaceParsingBiSeNetDyn(onnx_path); // Dynamic Shape Inference. -``` - - - -## 7. 开源协议 - -
- -[Lite.Ai.ToolKit](#lite.ai.toolkit-Introduction) 的代码采用GPL-3.0协议。 - - -## 8. 引用参考 - -
- -本项目参考了以下开源项目。 - -* [RobustVideoMatting](https://github.com/PeterL1n/RobustVideoMatting) (🔥🔥🔥new!!↑) -* [nanodet](https://github.com/RangiLyu/nanodet) (🔥🔥🔥↑) -* [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) (🔥🔥🔥new!!↑) -* [YOLOP](https://github.com/hustvl/YOLOP) (🔥🔥new!!↑) -* [YOLOR](https://github.com/WongKinYiu/yolor) (🔥🔥new!!↑) -* [ScaledYOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4) (🔥🔥🔥↑) -* [insightface](https://github.com/deepinsight/insightface) (🔥🔥🔥↑) -* [yolov5](https://github.com/ultralytics/yolov5) (🔥🔥💥↑) -* [TFace](https://github.com/Tencent/TFace) (🔥🔥↑) -* [YOLOv4-pytorch](https://github.com/argusswift/YOLOv4-pytorch) (🔥🔥🔥↑) -* [Ultra-Light-Fast-Generic-Face-Detector-1MB](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB) (🔥🔥🔥↑) - -
- 展开更多引用参考 - -* [headpose-fsanet-pytorch](https://github.com/omasaht/headpose-fsanet-pytorch) (🔥↑) -* [pfld_106_face_landmarks](https://github.com/Hsintao/pfld_106_face_landmarks) (🔥🔥↑) -* [onnx-models](https://github.com/onnx/models) (🔥🔥🔥↑) -* [SSR_Net_Pytorch](https://github.com/oukohou/SSR_Net_Pytorch) (🔥↑) -* [colorization](https://github.com/richzhang/colorization) (🔥🔥🔥↑) -* [SUB_PIXEL_CNN](https://github.com/niazwazir/SUB_PIXEL_CNN) (🔥↑) -* [torchvision](https://github.com/pytorch/vision) (🔥🔥🔥↑) -* [facenet-pytorch](https://github.com/timesler/facenet-pytorch) (🔥↑) -* [face.evoLVe.PyTorch](https://github.com/ZhaoJ9014/face.evoLVe.PyTorch) (🔥🔥🔥↑) -* [center-loss.pytorch](https://github.com/louis-she/center-loss.pytorch) (🔥🔥↑) -* [sphereface_pytorch](https://github.com/clcarwin/sphereface_pytorch) (🔥🔥↑) -* [DREAM](https://github.com/penincillin/DREAM) (🔥🔥↑) -* [MobileFaceNet_Pytorch](https://github.com/Xiaoccer/MobileFaceNet_Pytorch) (🔥🔥↑) -* [cavaface.pytorch](https://github.com/cavalleria/cavaface.pytorch) (🔥🔥↑) -* [CurricularFace](https://github.com/HuangYG123/CurricularFace) (🔥🔥↑) -* [face-emotion-recognition](https://github.com/HSE-asavchenko/face-emotion-recognition) (🔥↑) -* [face_recognition.pytorch](https://github.com/grib0ed0v/face_recognition.pytorch) (🔥🔥↑) -* [PFLD-pytorch](https://github.com/polarisZhao/PFLD-pytorch) (🔥🔥↑) -* [pytorch_face_landmark](https://github.com/cunjian/pytorch_face_landmark) (🔥🔥↑) -* [FaceLandmark1000](https://github.com/Single430/FaceLandmark1000) (🔥🔥↑) -* [Pytorch_Retinaface](https://github.com/biubug6/Pytorch_Retinaface) (🔥🔥🔥↑) -* [FaceBoxes](https://github.com/zisianw/FaceBoxes.PyTorch) (🔥🔥↑) - -
- - -## 9. 编译选项 -未来会增加一些模型的[MNN](https://github.com/alibaba/MNN) 、[NCNN](https://github.com/Tencent/ncnn) 和 [TNN](https://github.com/Tencent/TNN) 支持,但由于算子兼容等原因,也无法确保所有被[ONNXRuntime C++](https://github.com/microsoft/onnxruntime) 支持的模型能够在[MNN](https://github.com/alibaba/MNN) 、[NCNN](https://github.com/Tencent/ncnn) 和 [TNN](https://github.com/Tencent/TNN) 下跑通。所以,如果您想使用本项目支持的所有模型,并且不在意*1~2ms*的性能差距的话,请使用ONNXRuntime版本的实现。[ONNXRuntime](https://github.com/microsoft/onnxruntime) 是本仓库默认的推理引擎。但是如果你确实希望编译支持[MNN](https://github.com/alibaba/MNN) 、[NCNN](https://github.com/Tencent/ncnn) 和 [TNN](https://github.com/Tencent/TNN) 支持的Lite.Ai.ToolKit动态库,你可以按照以下的步骤进行设置。 - -* 在`build.sh`中添加`DENABLE_MNN=ON` 、`DENABLE_NCNN=ON` 或 `DENABLE_TNN=ON`,比如 -```shell -cd build && cmake \ - -DCMAKE_BUILD_TYPE=MinSizeRel \ - -DINCLUDE_OPENCV=ON \ # 是否打包OpenCV进lite.ai.toolkit,默认ON;否则,你需要单独设置OpenCV - -DENABLE_MNN=ON \ # 是否编译MNN版本的模型, 默认OFF,目前只支持部分模型 - -DENABLE_NCNN=OFF \ # 是否编译NCNN版本的模型,默认OFF,目前只支持部分模型 - -DENABLE_TNN=OFF \ # 是否编译TNN版本的模型, 默认OFF,目前只支持部分模型 - .. && make -j8 -``` -* 使用MNN、NCNN或TNN版本的接口,详见案例[demo](https://github.com/DefTruth/lite.ai.toolkit/blob/main/examples/lite/cv/test_lite_nanodet.cpp) ,比如 -```C++ -auto *nanodet = new lite::mnn::cv::detection::NanoDet(mnn_path); -auto *nanodet = new lite::tnn::cv::detection::NanoDet(proto_path, model_path); -auto *nanodet = new lite::ncnn::cv::detection::NanoDet(param_path, bin_path); -``` - -## 10. 如何添加您的模型 -
- -如何添加您自己的模型以及成为贡献者?具体步骤请参考 [CONTRIBUTING.zh.md](https://github.com/DefTruth/lite.ai.toolkit/issues/191) . - -## 11. 感谢 !! 🤗🤗 -非常感谢以下的贡献者: -* [Windows10 VS2019 CUDA 11.1 Build Docs](https://github.com/DefTruth/lite.ai.toolkit/issues/207) ([@zhanghongyong123456](https://github.com/zhanghongyong123456)) -* [Linux Build Docs](https://github.com/DefTruth/lite.ai.toolkit/blob/main/docs/build/Linux.zh.md) ([@lee1221ee](https://github.com/lee1221ee)) -* [Some Windows10 Bugs Fixed](https://github.com/DefTruth/lite.ai.toolkit/pull/105) ([@ysc3839](https://github.com/ysc3839), [@AvenSun](https://github.com/AvenSun)) - diff --git a/build.sh b/build.sh old mode 100644 new mode 100755 index 13ed9398..7b912af3 --- a/build.sh +++ b/build.sh @@ -9,10 +9,7 @@ else echo "build dir: ${BUILD_DIR} directory exist! ..." fi -cd "${BUILD_DIR}" && pwd && cmake .. \ - -DCMAKE_BUILD_TYPE=MinSizeRel \ - -DINCLUDE_OPENCV=ON \ - -DENABLE_MNN=OFF \ - -DENABLE_NCNN=OFF \ - -DENABLE_TNN=OFF && - make -j8 +cd "${BUILD_DIR}" && pwd +cmake .. 
-DCMAKE_BUILD_TYPE=MinSizeRel -DCMAKE_INSTALL_PREFIX=./install +make -j8 +make install diff --git a/build/.gitignore b/build/.gitignore deleted file mode 100644 index c4d497a2..00000000 --- a/build/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -litehub/bin -litehub/include -cmake_install.cmake -CMakeCache.txt -Makefile -CMakeFiles -examples -lite.ai/bin -lite.ai/include - -lite.ai.toolkit/bin -lite.ai.toolkit/include -lite.ai.toolkit/lib - diff --git a/cmake/MNN.cmake b/cmake/MNN.cmake index e97a3c07..1565dcf0 100644 --- a/cmake/MNN.cmake +++ b/cmake/MNN.cmake @@ -1,4 +1,21 @@ -############################## Source Files of LiteHub Based on MNN ################################# +set(MNN_Version "2.8.2" CACHE STRING "MNN version" FORCE) +set(MNN_DIR ${THIRD_PARTY_PATH}/MNN) +# download from github if MNN library is not exists +if (NOT EXISTS ${MNN_DIR}) + set(MNN_Filename "MNN-${MNN_Version}-linux-cpu-x86_64.tgz") + set(MNN_URL https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.2.0-rc0/${MNN_Filename}) + message("[Lite.AI.Toolkit][I] Downloading MNN library: ${MNN_URL}") + download_and_decompress(${MNN_URL} ${MNN_Filename} ${MNN_DIR}) +else() + message("[Lite.AI.Toolkit][I] Found local MNN library: ${MNN_DIR}") +endif() +if(NOT EXISTS ${MNN_DIR}) + message(FATAL_ERROR "[Lite.AI.Toolkit][E] ${MNN_DIR} is not exists!") +endif() + +include_directories(${MNN_DIR}/include) +link_directories(${MNN_DIR}/lib) + # 1. glob sources files file(GLOB MNN_CORE_SRCS ${CMAKE_SOURCE_DIR}/lite/mnn/core/*.cpp) file(GLOB MNN_CV_SRCS ${CMAKE_SOURCE_DIR}/lite/mnn/cv/*.cpp) @@ -10,17 +27,12 @@ file(GLOB MNN_CV_HEAD ${CMAKE_SOURCE_DIR}/lite/mnn/cv/*.h) file(GLOB MNN_NLP_HEAD ${CMAKE_SOURCE_DIR}/lite/mnn/nlp/*.h) file(GLOB MNN_ASR_HEAD ${CMAKE_SOURCE_DIR}/lite/mnn/asr/*.h) -set(MNN_SRCS - ${MNN_CV_SRCS} - ${MNN_NLP_SRCS} - ${MNN_ASR_SRCS} - ${MNN_CORE_SRCS}) - +set(MNN_SRCS ${MNN_CV_SRCS} ${MNN_NLP_SRCS} ${MNN_ASR_SRCS} ${MNN_CORE_SRCS}) # 3. 
copy -message("Installing Lite.AI.ToolKit Headers for MNN Backend ...") +message("[Lite.AI.Toolkit][I] Installing Lite.AI.ToolKit Headers for MNN Backend ...") # "INSTALL" can copy all files from the list to the specified path. # "COPY" only copies one file to a specified path -file(INSTALL ${MNN_CORE_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/mnn/core) -file(INSTALL ${MNN_CV_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/mnn/cv) -file(INSTALL ${MNN_ASR_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/mnn/asr) -file(INSTALL ${MNN_NLP_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/mnn/nlp) +file(INSTALL ${MNN_CORE_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/mnn/core) +file(INSTALL ${MNN_CV_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/mnn/cv) +file(INSTALL ${MNN_ASR_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/mnn/asr) +file(INSTALL ${MNN_NLP_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/mnn/nlp) diff --git a/cmake/TNN.cmake b/cmake/TNN.cmake index e3575df7..08a246a6 100644 --- a/cmake/TNN.cmake +++ b/cmake/TNN.cmake @@ -1,4 +1,10 @@ -############################## Source Files of LiteHub Based on TNN ################################# +set(TNN_DIR ${THIRD_PARTY_PATH}/TNN) +if(NOT EXISTS ${TNN_DIR}) + message(FATAL_ERROR "[Lite.AI.Toolkit][E] ${TNN_DIR} is not exists!") +endif() +include_directories(${TNN_DIR}/include) +link_directories(${TNN_DIR}/lib) + # 1. glob sources files file(GLOB TNN_CORE_SRCS ${CMAKE_SOURCE_DIR}/lite/tnn/core/*.cpp) file(GLOB TNN_CV_SRCS ${CMAKE_SOURCE_DIR}/lite/tnn/cv/*.cpp) @@ -10,17 +16,12 @@ file(GLOB TNN_CV_HEAD ${CMAKE_SOURCE_DIR}/lite/tnn/cv/*.h) file(GLOB TNN_NLP_HEAD ${CMAKE_SOURCE_DIR}/lite/tnn/nlp/*.h) file(GLOB TNN_ASR_HEAD ${CMAKE_SOURCE_DIR}/lite/tnn/asr/*.h) -set(TNN_SRCS - ${TNN_CV_SRCS} - ${TNN_NLP_SRCS} - ${TNN_ASR_SRCS} - ${TNN_CORE_SRCS}) - +set(TNN_SRCS ${TNN_CV_SRCS} ${TNN_NLP_SRCS} ${TNN_ASR_SRCS} ${TNN_CORE_SRCS}) # 3. 
copy -message("Installing Lite.AI.ToolKit Headers for TNN Backend ...") +message("[Lite.AI.Toolkit][I] Installing Lite.AI.ToolKit Headers for TNN Backend ...") # "INSTALL" can copy all files from the list to the specified path. # "COPY" only copies one file to a specified path -file(INSTALL ${TNN_CORE_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/tnn/core) -file(INSTALL ${TNN_CV_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/tnn/cv) -file(INSTALL ${TNN_ASR_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/tnn/asr) -file(INSTALL ${TNN_NLP_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/tnn/nlp) +file(INSTALL ${TNN_CORE_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/tnn/core) +file(INSTALL ${TNN_CV_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/tnn/cv) +file(INSTALL ${TNN_ASR_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/tnn/asr) +file(INSTALL ${TNN_NLP_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/tnn/nlp) diff --git a/cmake/command.cmake b/cmake/command.cmake deleted file mode 100644 index d9d4d668..00000000 --- a/cmake/command.cmake +++ /dev/null @@ -1,182 +0,0 @@ -# config lite.ai shared lib. -function(add_lite_ai_toolkit_shared_library version soversion) - configure_file( - "${CMAKE_SOURCE_DIR}/lite/config.h.in" - "${CMAKE_SOURCE_DIR}/lite/config.h" - ) - - # 2. glob headers files - file(GLOB LITE_HEAD ${CMAKE_SOURCE_DIR}/lite/*.h) - - # 3. 
glob sources files - file(GLOB LITE_SRCS ${CMAKE_SOURCE_DIR}/lite/*.cpp) - set(LITE_DEPENDENCIES ${OpenCV_LIBS}) - - if (ENABLE_ONNXRUNTIME) - include(cmake/onnxruntime.cmake) - set(LITE_SRCS ${LITE_SRCS} ${ORT_SRCS}) - set(LITE_DEPENDENCIES ${LITE_DEPENDENCIES} onnxruntime) - endif () - - if (ENABLE_MNN) - include(cmake/MNN.cmake) - set(LITE_SRCS ${LITE_SRCS} ${MNN_SRCS}) - set(LITE_DEPENDENCIES ${LITE_DEPENDENCIES} MNN) - endif () - - if (ENABLE_NCNN) - include(cmake/ncnn.cmake) - set(LITE_SRCS ${LITE_SRCS} ${NCNN_SRCS}) - set(LITE_DEPENDENCIES ${LITE_DEPENDENCIES} ncnn) - endif () - - if (ENABLE_TNN) - include(cmake/TNN.cmake) - set(LITE_SRCS ${LITE_SRCS} ${TNN_SRCS}) - set(LITE_DEPENDENCIES ${LITE_DEPENDENCIES} TNN) - endif () - - # 4. shared library - add_library(lite.ai.toolkit SHARED ${LITE_SRCS}) - target_link_libraries(lite.ai.toolkit ${LITE_DEPENDENCIES}) - set_target_properties(lite.ai.toolkit PROPERTIES VERSION ${version} SOVERSION ${soversion}) - - message("Installing Lite.AI.ToolKit Headers ...") - file(INSTALL ${LITE_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite) - - message(">>>> Added Shared Library: lite.ai.toolkit !") - -endfunction() - -# add custom command for lite.ai shared lib. -function(add_lite_ai_toolkit_engines_headers_command) - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${EXECUTABLE_OUTPUT_PATH} - COMMAND ${CMAKE_COMMAND} -E make_directory ${LIBRARY_OUTPUT_PATH} - COMMAND ${CMAKE_COMMAND} -E echo "Preparing ${LIBRARY_OUTPUT_PATH} ... done!" - COMMAND ${CMAKE_COMMAND} -E echo "Preparing ${EXECUTABLE_OUTPUT_PATH} ... done!" 
- ) - - # copy opencv2 headers - if (INCLUDE_OPENCV) - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${BUILD_LITE_AI_DIR}/include/opencv2 - ) - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${LITE_AI_ROOT_DIR}/opencv2 ${BUILD_LITE_AI_DIR}/include/opencv2 - COMMAND ${CMAKE_COMMAND} -E echo "Installing opencv2 headers to ${BUILD_LITE_AI_DIR}/opencv2 ... done!" - ) - endif () - - if (ENABLE_ONNXRUNTIME) - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${BUILD_LITE_AI_DIR}/include/onnxruntime - ) - # copy onnxruntime headers - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${LITE_AI_ROOT_DIR}/onnxruntime ${BUILD_LITE_AI_DIR}/include/onnxruntime - COMMAND ${CMAKE_COMMAND} -E echo "Installing onnxruntime headers to ${BUILD_LITE_AI_DIR}/include/onnxruntime ... done!" - ) - - endif () - - if (ENABLE_MNN) - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${BUILD_LITE_AI_DIR}/include/MNN - ) - # copy MNN headers - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${LITE_AI_ROOT_DIR}/MNN ${BUILD_LITE_AI_DIR}/include/MNN - COMMAND ${CMAKE_COMMAND} -E echo "Installing MNN headers to ${BUILD_LITE_AI_DIR}/include/MNN ... done!" - ) - - endif () - - if (ENABLE_NCNN) - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${BUILD_LITE_AI_DIR}/include/ncnn - ) - # copy ncnn headers - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${LITE_AI_ROOT_DIR}/ncnn ${BUILD_LITE_AI_DIR}/include/ncnn - COMMAND ${CMAKE_COMMAND} -E echo "Installing NCNN headers to ${BUILD_LITE_AI_DIR}/include/ncnn ... done!" 
- ) - endif () - - if (ENABLE_TNN) - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${BUILD_LITE_AI_DIR}/include/tnn - ) - # copy TNN headers - add_custom_command( - TARGET lite.ai.toolkit PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${LITE_AI_ROOT_DIR}/tnn ${BUILD_LITE_AI_DIR}/include/tnn - COMMAND ${CMAKE_COMMAND} -E echo "Installing TNN headers to ${BUILD_LITE_AI_DIR}/include/tnn ... done!" - ) - endif () - -endfunction() - -function(add_lite_ai_toolkit_engines_libs_command) - # copy opencv libs - if (INCLUDE_OPENCV) - message("Installing OpenCV libs -> INCLUDE_OPENCV: ${INCLUDE_OPENCV} ...") - file(GLOB ALL_OpenCV_LIBS ${LITE_AI_ROOT_DIR}/lib/${PLATFORM_NAME}/*opencv*) - file(GLOB FFMPEG_AV_LIBS ${LITE_AI_ROOT_DIR}/lib/${PLATFORM_NAME}/*av*) - file(GLOB FFMPEG_SW_LIBS ${LITE_AI_ROOT_DIR}/lib/${PLATFORM_NAME}/*sw*) - file(INSTALL ${ALL_OpenCV_LIBS} DESTINATION ${LIBRARY_OUTPUT_PATH}) - file(INSTALL ${FFMPEG_AV_LIBS} DESTINATION ${LIBRARY_OUTPUT_PATH}) - file(INSTALL ${FFMPEG_SW_LIBS} DESTINATION ${LIBRARY_OUTPUT_PATH}) - endif () - # copy onnxruntime libs - if (ENABLE_ONNXRUNTIME) - message("Installing ONNXRuntime libs -> ENABLE_ONNXRUNTIME: ${ENABLE_ONNXRUNTIME} ...") - file(GLOB ALL_ONNXRUNTIME_LIBS ${LITE_AI_ROOT_DIR}/lib/${PLATFORM_NAME}/*onnxruntime*) - file(INSTALL ${ALL_ONNXRUNTIME_LIBS} DESTINATION ${LIBRARY_OUTPUT_PATH}) - endif () - # copy MNN libs - if (ENABLE_MNN) - message("Installing MNN libs -> ENABLE_MNN: ${ENABLE_MNN} ...") - file(GLOB ALL_MNN_LIBS ${LITE_AI_ROOT_DIR}/lib/${PLATFORM_NAME}/*MNN*) - file(INSTALL ${ALL_MNN_LIBS} DESTINATION ${LIBRARY_OUTPUT_PATH}) - endif () - # copy NCNN libs - if (ENABLE_NCNN) - message("Installing NCNN libs -> ENABLE_NCNN: ${ENABLE_NCNN} ...") - file(GLOB ALL_NCNN_LIBS ${LITE_AI_ROOT_DIR}/lib/${PLATFORM_NAME}/*ncnn*) - file(INSTALL ${ALL_NCNN_LIBS} DESTINATION ${LIBRARY_OUTPUT_PATH}) - endif () - # copy TNN libs - if (ENABLE_TNN) - 
message("Installing TNN libs -> ENABLE_TNN: ${ENABLE_TNN} ...") - file(GLOB ALL_TNN_LIBS ${LITE_AI_ROOT_DIR}/lib/${PLATFORM_NAME}/*TNN*) - file(INSTALL ${ALL_TNN_LIBS} DESTINATION ${LIBRARY_OUTPUT_PATH}) - endif () -endfunction() - -function(add_lite_ai_toolkit_test_custom_command) - if (LITE_AI_BUILD_TEST) - # copy opencv & lite.ai.toolkit & engines libs to bin directory - add_custom_command( - TARGET lite.ai.toolkit POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBRARY_OUTPUT_PATH} ${EXECUTABLE_OUTPUT_PATH} - COMMAND ${CMAKE_COMMAND} -E echo "Installing all lite.ai.toolkit libs to ${EXECUTABLE_OUTPUT_PATH} ... done!" - ) - endif () -endfunction() - -function(add_lite_executable executable_name field) - add_executable(${executable_name} ${field}/test_${executable_name}.cpp) - target_link_libraries(${executable_name} lite.ai.toolkit) # link lite.ai.toolkit - message(">>>> Added Lite Executable: ${executable_name} !") -endfunction() diff --git a/cmake/lite.ai.toolkit-config.cmake b/cmake/lite.ai.toolkit-config.cmake new file mode 100644 index 00000000..88c8c619 --- /dev/null +++ b/cmake/lite.ai.toolkit-config.cmake @@ -0,0 +1,10 @@ +# This file will define the following variables for find_package method: +# - lite.ai.toolkit_LIBS : The list of libraries to link against. +# - lite.ai.toolkit_INCLUDE_DIRS : The lite.ai.toolkit include directories. 
+# - lite.ai.toolkit_Found : The status of lite.ai.toolkit + +include(${CMAKE_CURRENT_LIST_DIR}/lite.ai.toolkit.cmake) +# setup lite.ai.toolkit cmake variables +set(lite.ai.toolkit_LIBS ${Lite_AI_LIBS}) +set(lite.ai.toolkit_INCLUDE_DIRS ${Lite_AI_INCLUDE_DIRS}) +set(lite.ai.toolkit_Found TRUE) diff --git a/cmake/lite.ai.toolkit.cmake b/cmake/lite.ai.toolkit.cmake index 9dabf090..46f93cfe 100644 --- a/cmake/lite.ai.toolkit.cmake +++ b/cmake/lite.ai.toolkit.cmake @@ -1,16 +1,49 @@ -add_lite_ai_toolkit_shared_library(${VERSION_STRING} ${SOVERSION_STRING}) -add_lite_ai_toolkit_engines_headers_command() - -if(${PLATFORM_NAME} MATCHES macos OR ${PLATFORM_NAME} MATCHES linux) - add_lite_ai_toolkit_engines_libs_command() +CMAKE_MINIMUM_REQUIRED(VERSION 3.8) + +set(ENABLE_ONNXRUNTIME ON) +set(ENABLE_MNN OFF) +set(ENABLE_NCNN OFF) +set(ENABLE_TNN OFF) + +set(LITE_AI_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) +set(THIRD_PARTY_PATH ${CMAKE_CURRENT_SOURCE_DIR}/include/third_party) +set(LITE_AI_LIB_DIR ${CMAKE_CURRENT_SOURCE_DIR}/lib) + +set(TOOLKIT_LIBS lite.ai.toolkit onnxruntime) +if ((UNIX AND NOT APPLE)) + set(OpenCV_LIBS opencv_core opencv_imgcodecs opencv_imgproc opencv_video opencv_videoio) +else() + message(FATAL_ERROR "lite.ai.toolkit>=0.2 not support for windows/mac now!") endif() -if(${PLATFORM_NAME} MATCHES macos OR ${PLATFORM_NAME} MATCHES linux) - add_lite_ai_toolkit_test_custom_command() -endif() +set(Lite_AI_INCLUDE_DIRS ${LITE_AI_INCLUDE_DIR}) +set(Lite_AI_LIBS ${TOOLKIT_LIBS} ${OpenCV_LIBS}) +include_directories(${LITE_AI_INCLUDE_DIR}) +include_directories(${THIRD_PARTY_PATH}/opencv/include/opencv4) +include_directories(${THIRD_PARTY_PATH}/onnxruntime/include) +link_directories(${LITE_AI_LIB_DIR}) +link_directories(${THIRD_PARTY_PATH}/opencv/lib) +link_directories(${THIRD_PARTY_PATH}/onnxruntime/lib) + +if (ENABLE_MNN) + list(APPEND Lite_AI_LIBS MNN) + include_directories(${THIRD_PARTY_PATH}/MNN/include) + link_directories(${THIRD_PARTY_PATH}/MNN/lib) 
+endif() +if (ENABLE_TNN) + list(APPEND Lite_AI_LIBS TNN) + include_directories(${THIRD_PARTY_PATH}/TNN/include) + link_directories(${THIRD_PARTY_PATH}/TNN/lib) +endif() + +if (ENABLE_NCNN) + list(APPEND Lite_AI_LIBS ncnn) + include_directories(${THIRD_PARTY_PATH}/ncnn/include) + link_directories(${THIRD_PARTY_PATH}/ncnn/lib) +endif() diff --git a/cmake/lite.ai.toolkit.cmake.in b/cmake/lite.ai.toolkit.cmake.in new file mode 100644 index 00000000..b55a401a --- /dev/null +++ b/cmake/lite.ai.toolkit.cmake.in @@ -0,0 +1,60 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.8) + +set(ENABLE_ONNXRUNTIME @ENABLE_ONNXRUNTIME@) +set(ENABLE_MNN @ENABLE_MNN@) +set(ENABLE_NCNN @ENABLE_NCNN@) +set(ENABLE_TNN @ENABLE_TNN@) + +set(LITE_AI_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) +set(THIRD_PARTY_PATH ${CMAKE_CURRENT_SOURCE_DIR}/include/third_party) +set(LITE_AI_LIB_DIR ${CMAKE_CURRENT_SOURCE_DIR}/lib) + +set(TOOLKIT_LIBS lite.ai.toolkit onnxruntime) +if ((UNIX AND NOT APPLE)) + set(OpenCV_LIBS opencv_core opencv_imgcodecs opencv_imgproc opencv_video opencv_videoio) +else() + message(FATAL_ERROR "lite.ai.toolkit>=0.2 not support for windows/mac now!") +endif() + +set(Lite_AI_INCLUDE_DIRS ${LITE_AI_INCLUDE_DIR}) +set(Lite_AI_LIBS ${TOOLKIT_LIBS} ${OpenCV_LIBS}) + +include_directories(${LITE_AI_INCLUDE_DIR}) +include_directories(${THIRD_PARTY_PATH}/opencv/include/opencv4) +include_directories(${THIRD_PARTY_PATH}/onnxruntime/include) + +link_directories(${LITE_AI_LIB_DIR}) +link_directories(${THIRD_PARTY_PATH}/opencv/lib) +link_directories(${THIRD_PARTY_PATH}/onnxruntime/lib) + +if (ENABLE_MNN) + list(APPEND Lite_AI_LIBS MNN) + include_directories(${THIRD_PARTY_PATH}/MNN/include) + link_directories(${THIRD_PARTY_PATH}/MNN/lib) +endif() + +if (ENABLE_TNN) + list(APPEND Lite_AI_LIBS TNN) + include_directories(${THIRD_PARTY_PATH}/TNN/include) + link_directories(${THIRD_PARTY_PATH}/TNN/lib) +endif() + +if (ENABLE_NCNN) + list(APPEND Lite_AI_LIBS ncnn) + 
include_directories(${THIRD_PARTY_PATH}/ncnn/include) + link_directories(${THIRD_PARTY_PATH}/ncnn/lib) +endif() + + + + + + + + + + + + + + diff --git a/cmake/models.cmake b/cmake/models.cmake deleted file mode 100644 index add3f992..00000000 --- a/cmake/models.cmake +++ /dev/null @@ -1 +0,0 @@ -# Model compile options (for future use) \ No newline at end of file diff --git a/cmake/ncnn.cmake b/cmake/ncnn.cmake index 72d99b49..61800e83 100644 --- a/cmake/ncnn.cmake +++ b/cmake/ncnn.cmake @@ -1,4 +1,10 @@ -############################## Source Files of LiteHub Based on NCNN ################################# +set(NCNN_DIR ${THIRD_PARTY_PATH}/ncnn) +if(NOT EXISTS ${NCNN_DIR}) + message(FATAL_ERROR "[Lite.AI.Toolkit][E] ${NCNN_DIR} is not exists!") +endif() +include_directories(${NCNN_DIR}/include) +link_directories(${NCNN_DIR}/lib) + # 1. glob sources files file(GLOB NCNN_CORE_SRCS ${CMAKE_SOURCE_DIR}/lite/ncnn/core/*.cpp) file(GLOB NCNN_CV_SRCS ${CMAKE_SOURCE_DIR}/lite/ncnn/cv/*.cpp) @@ -10,17 +16,12 @@ file(GLOB NCNN_CV_HEAD ${CMAKE_SOURCE_DIR}/lite/ncnn/cv/*.h) file(GLOB NCNN_NLP_HEAD ${CMAKE_SOURCE_DIR}/lite/ncnn/nlp/*.h) file(GLOB NCNN_ASR_HEAD ${CMAKE_SOURCE_DIR}/lite/ncnn/asr/*.h) -set(NCNN_SRCS - ${NCNN_CV_SRCS} - ${NCNN_NLP_SRCS} - ${NCNN_ASR_SRCS} - ${NCNN_CORE_SRCS}) - +set(NCNN_SRCS ${NCNN_CV_SRCS} ${NCNN_NLP_SRCS} ${NCNN_ASR_SRCS} ${NCNN_CORE_SRCS}) # 3. copy -message("Installing Lite.AI.ToolKit Headers for NCNN Backend ...") +message("[Lite.AI.Toolkit][I] Installing Lite.AI.ToolKit Headers for NCNN Backend ...") # "INSTALL" can copy all files from the list to the specified path. 
# "COPY" only copies one file to a specified path -file(INSTALL ${NCNN_CORE_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/ncnn/core) -file(INSTALL ${NCNN_CV_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/ncnn/cv) -file(INSTALL ${NCNN_ASR_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/ncnn/asr) -file(INSTALL ${NCNN_NLP_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/ncnn/nlp) +file(INSTALL ${NCNN_CORE_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/ncnn/core) +file(INSTALL ${NCNN_CV_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/ncnn/cv) +file(INSTALL ${NCNN_ASR_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/ncnn/asr) +file(INSTALL ${NCNN_NLP_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/ncnn/nlp) diff --git a/cmake/onnxruntime.cmake b/cmake/onnxruntime.cmake index f8af8928..02b0bb41 100644 --- a/cmake/onnxruntime.cmake +++ b/cmake/onnxruntime.cmake @@ -1,4 +1,21 @@ -############################## Source Files of LiteHub Based on ONNXRuntime ################################# +set(OnnxRuntime_Version "1.17.1" CACHE STRING "OnnxRuntime version" FORCE) +set(OnnxRuntime_DIR ${THIRD_PARTY_PATH}/onnxruntime) +# download from github if OnnxRuntime library is not exists +if (NOT EXISTS ${OnnxRuntime_DIR}) + set(OnnxRuntime_Filename "onnxruntime-linux-x64-${OnnxRuntime_Version}.tgz") + set(OnnxRuntime_URL https://github.com/microsoft/onnxruntime/releases/download/v1.17.1/${OnnxRuntime_Filename}) + message("[Lite.AI.Toolkit][I] Downloading onnxruntimelibrary: ${OnnxRuntime_URL}") + download_and_decompress(${OnnxRuntime_URL} ${OnnxRuntime_Filename} ${OnnxRuntime_DIR}) +else() + message("[Lite.AI.Toolkit][I] Found local onnxruntime library: ${OnnxRuntime_DIR}") +endif() + +if(NOT EXISTS ${OnnxRuntime_DIR}) + message(FATAL_ERROR "[Lite.AI.Toolkit][E] ${OnnxRuntime_DIR} is not exists!") +endif() +include_directories(${OnnxRuntime_DIR}/include) +link_directories(${OnnxRuntime_DIR}/lib) + # 1. 
glob sources files file(GLOB ONNXRUNTIME_CORE_SRCS ${CMAKE_SOURCE_DIR}/lite/ort/core/*.cpp) file(GLOB ONNXRUNTIME_CV_SRCS ${CMAKE_SOURCE_DIR}/lite/ort/cv/*.cpp) @@ -10,17 +27,12 @@ file(GLOB ONNXRUNTIME_CV_HEAD ${CMAKE_SOURCE_DIR}/lite/ort/cv/*.h) file(GLOB ONNXRUNTIME_NLP_HEAD ${CMAKE_SOURCE_DIR}/lite/ort/nlp/*.h) file(GLOB ONNXRUNTIME_ASR_HEAD ${CMAKE_SOURCE_DIR}/lite/ort/asr/*.h) -set(ORT_SRCS - ${ONNXRUNTIME_CV_SRCS} - ${ONNXRUNTIME_NLP_SRCS} - ${ONNXRUNTIME_ASR_SRCS} - ${ONNXRUNTIME_CORE_SRCS}) - +set(ORT_SRCS ${ONNXRUNTIME_CV_SRCS} ${ONNXRUNTIME_NLP_SRCS} ${ONNXRUNTIME_ASR_SRCS} ${ONNXRUNTIME_CORE_SRCS}) # 3. copy -message("Installing Lite.AI.ToolKit Headers for ONNXRuntime Backend ...") +message("[Lite.AI.Toolkit][I] Installing Lite.AI.ToolKit Headers for ONNXRuntime Backend ...") # "INSTALL" can copy all files from the list to the specified path. # "COPY" only copies one file to a specified path -file(INSTALL ${ONNXRUNTIME_CORE_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/ort/core) -file(INSTALL ${ONNXRUNTIME_CV_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/ort/cv) -file(INSTALL ${ONNXRUNTIME_ASR_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/ort/asr) -file(INSTALL ${ONNXRUNTIME_NLP_HEAD} DESTINATION ${BUILD_LITE_AI_DIR}/include/lite/ort/nlp) +file(INSTALL ${ONNXRUNTIME_CORE_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/ort/core) +file(INSTALL ${ONNXRUNTIME_CV_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/ort/cv) +file(INSTALL ${ONNXRUNTIME_ASR_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/ort/asr) +file(INSTALL ${ONNXRUNTIME_NLP_HEAD} DESTINATION ${CMAKE_INSTALL_PREFIX}/include/lite/ort/nlp) diff --git a/cmake/opencv.cmake b/cmake/opencv.cmake index ec4405de..55b2f539 100644 --- a/cmake/opencv.cmake +++ b/cmake/opencv.cmake @@ -1,26 +1,28 @@ -# 1. 
setup 3rd-party dependencies -message(STATUS "Setting up OpenCV libs for: ${CMAKE_CURRENT_SOURCE_DIR}") +set(OpenCV_Version "4.9.0-ffmpeg4.2.2" CACHE STRING "OpenCV version" FORCE) +set(OpenCV_DIR ${THIRD_PARTY_PATH}/opencv) +# download from github if opencv library is not exists +if (NOT EXISTS ${OpenCV_DIR}) + set(OpenCV_Filename "opencv-${OpenCV_Version}-linux-x86_64.tgz") + set(OpenCV_URL https://github.com/DefTruth/lite.ai.toolkit/releases/download/v0.2.0-rc0/${OpenCV_Filename}) + message("[Lite.AI.Toolkit][I] Downloading library: ${OpenCV_URL}") + download_and_decompress(${OpenCV_URL} ${OpenCV_Filename} ${OpenCV_DIR}) + create_ffmpeg_syslinks_if_not_found(${OpenCV_DIR}/lib) +else() + message("[Lite.AI.Toolkit][I] Found local OpenCV library: ${OpenCV_DIR}") +endif() +if(NOT EXISTS ${OpenCV_DIR}) + message(FATAL_ERROR "[Lite.AI.Toolkit][E] ${OpenCV_DIR} is not exists!") +endif() -if (NOT WIN32) - if (ENABLE_OPENCV_VIDEOIO OR LITE_AI_BUILD_TEST) - set( - OpenCV_LIBS - opencv_core - opencv_imgproc - opencv_imgcodecs - opencv_video - opencv_videoio - ) - else () - set( - OpenCV_LIBS - opencv_core - opencv_imgproc - opencv_imgcodecs - ) # no videoio, video module - endif () -else () - set(OpenCV_LIBS opencv_world452) -endif () +include_directories(${OpenCV_DIR}/include/opencv4) +link_directories(${OpenCV_DIR}/lib) -message(STATUS "Setting up OpenCV libs done! 
OpenCV_LIBS:+[${OpenCV_LIBS}]") +if(NOT WIN32) + if(ENABLE_OPENCV_VIDEOIO OR LITE_AI_BUILD_TEST) + set(OpenCV_LIBS opencv_core opencv_imgproc opencv_imgcodecs opencv_video opencv_videoio) + else() + set(OpenCV_LIBS opencv_core opencv_imgproc opencv_imgcodecs) # no videoio, video module + endif() +else() + set(OpenCV_LIBS opencv_world490) +endif() diff --git a/cmake/platform.cmake b/cmake/platform.cmake deleted file mode 100644 index 9341fb79..00000000 --- a/cmake/platform.cmake +++ /dev/null @@ -1,13 +0,0 @@ -message(STATUS "Checking Build Platform for: ${CMAKE_CURRENT_SOURCE_DIR}") - -if(WIN32) - set(PLATFORM_NAME windows) -elseif(APPLE) - set(PLATFORM_NAME macos) -elseif(UNIX AND NOT APPLE) - set(PLATFORM_NAME linux) -else() - message(FATAL_ERROR "Not support for ${CMAKE_SYSTEM_NAME} now!") -endif() - -message(STATUS "Checking Build Platform Done!") diff --git a/cmake/utils.cmake b/cmake/utils.cmake new file mode 100644 index 00000000..ab0a3736 --- /dev/null +++ b/cmake/utils.cmake @@ -0,0 +1,83 @@ +function(download_and_decompress url filename decompress_dir) + if(NOT EXISTS ${filename}) + message("[Lite.AI.Toolkit][I] Downloading file from ${url} to ${filename} ...") + file(DOWNLOAD ${url} ${CMAKE_CURRENT_BINARY_DIR}/${filename}.tmp SHOW_PROGRESS) + if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${filename}.tmp) + message(FATAL_ERROR "Can not found ${filename}.tmp!") + endif() + file(RENAME ${CMAKE_CURRENT_BINARY_DIR}/${filename}.tmp ${CMAKE_CURRENT_BINARY_DIR}/${filename}) + endif() + if(NOT EXISTS ${decompress_dir}) + file(MAKE_DIRECTORY ${decompress_dir}) + endif() + message("[Lite.AI.Toolkit][I] Decompress file ${filename} ...") + execute_process(COMMAND ${CMAKE_COMMAND} -E tar -xf ${CMAKE_CURRENT_BINARY_DIR}/${filename} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + string(REGEX REPLACE ".tgz|.zip|.tar.gz|.tar" "" strip_filename ${filename}) + file(RENAME ${CMAKE_CURRENT_BINARY_DIR}/${strip_filename} ${decompress_dir}) +endfunction() + 
+function(_create_syslink_if_not_found lib_dir src_lib dest_lib) + if (NOT EXISTS ${lib_dir}/${dest_lib}) + if (EXISTS ${lib_dir}/${src_lib}) + message("[Lite.AI.Toolkit][I] CREATE_LINK ${lib_dir}/${src_lib} -> ${lib_dir}/${dest_lib}") + file(CREATE_LINK ${lib_dir}/${src_lib} ${lib_dir}/${dest_lib}) + endif() + endif() +endfunction() + +function(create_ffmpeg_syslinks_if_not_found lib_dir) + _create_syslink_if_not_found(${lib_dir} libavcodec.so libavcodec.so.58) + _create_syslink_if_not_found(${lib_dir} libavformat.so libavformat.so.58) + _create_syslink_if_not_found(${lib_dir} libavutil.so libavutil.so.56) + _create_syslink_if_not_found(${lib_dir} libswscale.so libswscale.so.5) + _create_syslink_if_not_found(${lib_dir} libswresample.so libswresample.so.3) +endfunction() + +# config lite.ai shared lib. +function(add_lite_ai_toolkit_shared_library version soversion) + configure_file( + "${CMAKE_SOURCE_DIR}/lite/config.h.in" + "${CMAKE_SOURCE_DIR}/lite/config.h" + ) + file(GLOB LITE_SRCS ${CMAKE_SOURCE_DIR}/lite/*.cpp) + set(LITE_DEPENDENCIES ${OpenCV_LIBS}) + + if (ENABLE_ONNXRUNTIME) + include(cmake/onnxruntime.cmake) + set(LITE_SRCS ${LITE_SRCS} ${ORT_SRCS}) + set(LITE_DEPENDENCIES ${LITE_DEPENDENCIES} onnxruntime) + endif () + + if (ENABLE_MNN) + include(cmake/MNN.cmake) + set(LITE_SRCS ${LITE_SRCS} ${MNN_SRCS}) + set(LITE_DEPENDENCIES ${LITE_DEPENDENCIES} MNN) + endif () + + if (ENABLE_NCNN) + include(cmake/ncnn.cmake) + set(LITE_SRCS ${LITE_SRCS} ${NCNN_SRCS}) + set(LITE_DEPENDENCIES ${LITE_DEPENDENCIES} ncnn) + endif () + + if (ENABLE_TNN) + include(cmake/TNN.cmake) + set(LITE_SRCS ${LITE_SRCS} ${TNN_SRCS}) + set(LITE_DEPENDENCIES ${LITE_DEPENDENCIES} TNN) + endif () + + # 4. 
shared library + add_library(lite.ai.toolkit SHARED ${LITE_SRCS}) + target_link_libraries(lite.ai.toolkit ${LITE_DEPENDENCIES}) + set_target_properties(lite.ai.toolkit PROPERTIES VERSION ${version} SOVERSION ${soversion}) + + message("[Lite.AI.Toolkit][I] Added Shared Library: lite.ai.toolkit !") + +endfunction() + +function(add_lite_executable executable_name field) + add_executable(${executable_name} ${field}/test_${executable_name}.cpp) + target_link_libraries(${executable_name} lite.ai.toolkit) # link lite.ai.toolkit + message("[Lite.AI.Toolkit][I] Added Lite Executable: ${executable_name} !") +endfunction() + diff --git a/docs/build/Windows.zh.md b/docs/build/Windows.zh.md deleted file mode 100644 index 41d96a53..00000000 --- a/docs/build/Windows.zh.md +++ /dev/null @@ -1 +0,0 @@ -* TODO \ No newline at end of file diff --git a/hub/mnn/cv/.gitignore b/examples/hub/mnn/cv/.gitignore similarity index 100% rename from hub/mnn/cv/.gitignore rename to examples/hub/mnn/cv/.gitignore diff --git a/hub/ncnn/cv/.gitignore b/examples/hub/ncnn/cv/.gitignore similarity index 100% rename from hub/ncnn/cv/.gitignore rename to examples/hub/ncnn/cv/.gitignore diff --git a/hub/onnx/cv/.gitignore b/examples/hub/onnx/cv/.gitignore similarity index 100% rename from hub/onnx/cv/.gitignore rename to examples/hub/onnx/cv/.gitignore diff --git a/hub/tnn/cv/.gitignore b/examples/hub/tnn/cv/.gitignore similarity index 100% rename from hub/tnn/cv/.gitignore rename to examples/hub/tnn/cv/.gitignore diff --git a/examples/lite/cv/test_lite_age_googlenet.cpp b/examples/lite/cv/test_lite_age_googlenet.cpp index 00e118c5..0bee0c7e 100644 --- a/examples/lite/cv/test_lite_age_googlenet.cpp +++ b/examples/lite/cv/test_lite_age_googlenet.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/age_googlenet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/age_googlenet.onnx"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_age_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_lite_age_googlenet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_age_googlenet.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::face::attr::AgeGoogleNet *age_googlenet = new lite::cv::face::attr::AgeGoogleNet(onnx_path); @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/age_googlenet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/age_googlenet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_age_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_lite_age_googlenet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_age_googlenet.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::face::attr::AgeGoogleNet *onnx_age_googlenet = @@ -52,9 +52,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/age_googlenet.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/age_googlenet.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_age_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_lite_age_googlenet_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_age_googlenet_mnn.jpg"; lite::mnn::cv::face::attr::AgeGoogleNet *age_googlenet = new lite::mnn::cv::face::attr::AgeGoogleNet(mnn_path); @@ -77,10 +77,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/age_googlenet.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/age_googlenet.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/age_googlenet.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/age_googlenet.opt.bin"; std::string 
test_img_path = "../../../examples/lite/resources/test_lite_age_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_lite_age_googlenet_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_age_googlenet_ncnn.jpg"; lite::ncnn::cv::face::attr::AgeGoogleNet *age_googlenet = new lite::ncnn::cv::face::attr::AgeGoogleNet(param_path, bin_path); @@ -103,10 +103,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/age_googlenet.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/age_googlenet.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/age_googlenet.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/age_googlenet.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_age_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_lite_age_googlenet_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_age_googlenet_tnn.jpg"; lite::tnn::cv::face::attr::AgeGoogleNet *age_googlenet = new lite::tnn::cv::face::attr::AgeGoogleNet(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_backgroundmattingv2.cpp b/examples/lite/cv/test_lite_backgroundmattingv2.cpp index 62a937cd..299c4233 100644 --- a/examples/lite/cv/test_lite_backgroundmattingv2.cpp +++ b/examples/lite/cv/test_lite_backgroundmattingv2.cpp @@ -6,12 +6,12 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/BGMv2_mobilenetv2-512x512-full.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/BGMv2_mobilenetv2-512x512-full.onnx"; std::string test_src_path = "../../../examples/lite/resources/test_lite_bgmv2_src.png"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_bgmv2_bgr.png"; - std::string save_fgr_path = "../../../logs/test_lite_bgmv2_fgr.jpg"; - std::string save_pha_path = "../../../logs/test_lite_bgmv2_pha.jpg"; - std::string 
save_merge_path = "../../../logs/test_lite_bgmv2_merge.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_bgmv2_fgr.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_bgmv2_pha.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_bgmv2_merge.jpg"; lite::cv::matting::BackgroundMattingV2 *bgmv2 = new lite::cv::matting::BackgroundMattingV2(onnx_path, 16); // 16 threads @@ -37,12 +37,12 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/BGMv2_mobilenetv2-512x512-full.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/BGMv2_mobilenetv2-512x512-full.onnx"; std::string test_src_path = "../../../examples/lite/resources/test_lite_bgmv2_src.png"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_bgmv2_bgr.png"; - std::string save_fgr_path = "../../../logs/test_lite_bgmv2_fgr_onnx.jpg"; - std::string save_pha_path = "../../../logs/test_lite_bgmv2_pha_onnx.jpg"; - std::string save_merge_path = "../../../logs/test_lite_bgmv2_merge_onnx.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_bgmv2_fgr_onnx.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_bgmv2_pha_onnx.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_bgmv2_merge_onnx.jpg"; lite::onnxruntime::cv::matting::BackgroundMattingV2 *bgmv2 = new lite::onnxruntime::cv::matting::BackgroundMattingV2(onnx_path, 16); // 16 threads @@ -69,12 +69,12 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/BGMv2_mobilenetv2-512x512-full.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/BGMv2_mobilenetv2-512x512-full.mnn"; std::string test_src_path = "../../../examples/lite/resources/test_lite_bgmv2_src.png"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_bgmv2_bgr.png"; - std::string save_fgr_path 
= "../../../logs/test_lite_bgmv2_fgr_mnn.jpg"; - std::string save_pha_path = "../../../logs/test_lite_bgmv2_pha_mnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_bgmv2_merge_mnn.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_bgmv2_fgr_mnn.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_bgmv2_pha_mnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_bgmv2_merge_mnn.jpg"; lite::mnn::cv::matting::BackgroundMattingV2 *bgmv2 = new lite::mnn::cv::matting::BackgroundMattingV2(mnn_path, 16); // 16 threads @@ -107,13 +107,13 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/BGMv2_mobilenetv2-512x512-full.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/BGMv2_mobilenetv2-512x512-full.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/BGMv2_mobilenetv2-512x512-full.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/BGMv2_mobilenetv2-512x512-full.opt.tnnmodel"; std::string test_src_path = "../../../examples/lite/resources/test_lite_bgmv2_src.png"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_bgmv2_bgr.png"; - std::string save_fgr_path = "../../../logs/test_lite_bgmv2_fgr_tnn.jpg"; - std::string save_pha_path = "../../../logs/test_lite_bgmv2_pha_tnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_bgmv2_merge_tnn.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_bgmv2_fgr_tnn.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_bgmv2_pha_tnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_bgmv2_merge_tnn.jpg"; lite::tnn::cv::matting::BackgroundMattingV2 *bgmv2 = new lite::tnn::cv::matting::BackgroundMattingV2(proto_path, model_path, 16); // 16 threads diff --git a/examples/lite/cv/test_lite_backgroundmattingv2_dyn.cpp b/examples/lite/cv/test_lite_backgroundmattingv2_dyn.cpp 
index d895f86f..eea1692f 100644 --- a/examples/lite/cv/test_lite_backgroundmattingv2_dyn.cpp +++ b/examples/lite/cv/test_lite_backgroundmattingv2_dyn.cpp @@ -6,12 +6,12 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/BGMv2_mobilenetv2_hd_dynamic.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/BGMv2_mobilenetv2_hd_dynamic.onnx"; std::string test_src_path = "../../../examples/lite/resources/test_lite_bgmv2_src.png"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_bgmv2_bgr.png"; - std::string save_fgr_path = "../../../logs/test_lite_bgmv2_dyn_fgr.jpg"; - std::string save_pha_path = "../../../logs/test_lite_bgmv2_dyn_pha.jpg"; - std::string save_merge_path = "../../../logs/test_lite_bgmv2_dyn_merge.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_bgmv2_dyn_fgr.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_bgmv2_dyn_pha.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_bgmv2_dyn_merge.jpg"; lite::cv::matting::BackgroundMattingV2Dyn *bgmv2_dyn = new lite::cv::matting::BackgroundMattingV2Dyn(onnx_path, 16); // 16 threads @@ -37,12 +37,12 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/BGMv2_mobilenetv2_hd_dynamic.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/BGMv2_mobilenetv2_hd_dynamic.onnx"; std::string test_src_path = "../../../examples/lite/resources/test_lite_bgmv2_src.png"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_bgmv2_bgr.png"; - std::string save_fgr_path = "../../../logs/test_lite_bgmv2_dyn_fgr_onnx.jpg"; - std::string save_pha_path = "../../../logs/test_lite_bgmv2_dyn_pha_onnx.jpg"; - std::string save_merge_path = "../../../logs/test_lite_bgmv2_dyn_merge_onnx.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_bgmv2_dyn_fgr_onnx.jpg"; + std::string save_pha_path = 
"../../../examples/logs/test_lite_bgmv2_dyn_pha_onnx.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_bgmv2_dyn_merge_onnx.jpg"; lite::onnxruntime::cv::matting::BackgroundMattingV2Dyn *bgmv2_dyn = new lite::onnxruntime::cv::matting::BackgroundMattingV2Dyn(onnx_path, 16); // 16 threads diff --git a/examples/lite/cv/test_lite_cava_combined_face.cpp b/examples/lite/cv/test_lite_cava_combined_face.cpp index 80114608..258b51ec 100644 --- a/examples/lite/cv/test_lite_cava_combined_face.cpp +++ b/examples/lite/cv/test_lite_cava_combined_face.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/cavaface_IR_SE_100_Combined_Epoch_24.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/cavaface_IR_SE_100_Combined_Epoch_24.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -32,7 +32,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/cavaface_IR_SE_100_Combined_Epoch_24.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/cavaface_IR_SE_100_Combined_Epoch_24.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -59,7 +59,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -86,8 +86,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - 
std::string param_path = "../../../hub/ncnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -114,8 +114,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/cavaface_IR_SE_100_Combined_Epoch_24.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_cava_ghost_arcface.cpp b/examples/lite/cv/test_lite_cava_ghost_arcface.cpp index de468a4b..463cb818 100644 --- a/examples/lite/cv/test_lite_cava_ghost_arcface.cpp +++ b/examples/lite/cv/test_lite_cava_ghost_arcface.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -32,7 +32,7 @@ static void test_default() static void 
test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -59,7 +59,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -86,8 +86,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -114,8 +114,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.opt.tnnproto"; + std::string model_path = 
"../../../examples/hub/tnn/cv/cavaface_GhostNet_x1.3_Arcface_Epoch_24.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_center_loss_face.cpp b/examples/lite/cv/test_lite_center_loss_face.cpp index 6e6fcda0..b4a7ab66 100644 --- a/examples/lite/cv/test_lite_center_loss_face.cpp +++ b/examples/lite/cv/test_lite_center_loss_face.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/CenterLossFace_epoch_100.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/CenterLossFace_epoch_100.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/CenterLossFace_epoch_100.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/CenterLossFace_epoch_100.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/CenterLossFace_epoch_100.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/CenterLossFace_epoch_100.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/CenterLossFace_epoch_100.opt.param"; - std::string bin_path = 
"../../../hub/ncnn/cv/CenterLossFace_epoch_100.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/CenterLossFace_epoch_100.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/CenterLossFace_epoch_100.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/CenterLossFace_epoch_100.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/CenterLossFace_epoch_100.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/CenterLossFace_epoch_100.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/CenterLossFace_epoch_100.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_colorizer.cpp b/examples/lite/cv/test_lite_colorizer.cpp index d3401f5e..7679d0a2 100644 --- a/examples/lite/cv/test_lite_colorizer.cpp +++ b/examples/lite/cv/test_lite_colorizer.cpp @@ -6,20 +6,20 @@ static void test_default() { - std::string eccv16_onnx_path = "../../../hub/onnx/cv/eccv16-colorizer.onnx"; - std::string siggraph17_onnx_path = "../../../hub/onnx/cv/siggraph17-colorizer.onnx"; + std::string eccv16_onnx_path = "../../../examples/hub/onnx/cv/eccv16-colorizer.onnx"; + std::string siggraph17_onnx_path = "../../../examples/hub/onnx/cv/siggraph17-colorizer.onnx"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_colorizer_1.jpg"; std::string test_img_path2 = "../../../examples/lite/resources/test_lite_colorizer_2.jpg"; std::string test_img_path3 = "../../../examples/lite/resources/test_lite_colorizer_3.jpg"; std::string test_img_path4 = 
"../../../examples/lite/resources/test_lite_colorizer_one_piece_0.png"; - std::string save_eccv_img_path1 = "../../../logs/test_lite_eccv16_colorizer_1.jpg"; - std::string save_eccv_img_path2 = "../../../logs/test_lite_eccv16_colorizer_2.jpg"; - std::string save_eccv_img_path3 = "../../../logs/test_lite_eccv16_colorizer_3.jpg"; - std::string save_eccv_img_path4 = "../../../logs/test_lite_eccv16_colorizer_one_piece_0.jpg"; - std::string save_siggraph_img_path1 = "../../../logs/test_lite_siggraph17_colorizer_1.jpg"; - std::string save_siggraph_img_path2 = "../../../logs/test_lite_siggraph17_colorizer_2.jpg"; - std::string save_siggraph_img_path3 = "../../../logs/test_lite_siggraph17_colorizer_3.jpg"; - std::string save_siggraph_img_path4 = "../../../logs/test_lite_siggraph17_colorizer_one_piece_0.jpg"; + std::string save_eccv_img_path1 = "../../../examples/logs/test_lite_eccv16_colorizer_1.jpg"; + std::string save_eccv_img_path2 = "../../../examples/logs/test_lite_eccv16_colorizer_2.jpg"; + std::string save_eccv_img_path3 = "../../../examples/logs/test_lite_eccv16_colorizer_3.jpg"; + std::string save_eccv_img_path4 = "../../../examples/logs/test_lite_eccv16_colorizer_one_piece_0.jpg"; + std::string save_siggraph_img_path1 = "../../../examples/logs/test_lite_siggraph17_colorizer_1.jpg"; + std::string save_siggraph_img_path2 = "../../../examples/logs/test_lite_siggraph17_colorizer_2.jpg"; + std::string save_siggraph_img_path3 = "../../../examples/logs/test_lite_siggraph17_colorizer_3.jpg"; + std::string save_siggraph_img_path4 = "../../../examples/logs/test_lite_siggraph17_colorizer_one_piece_0.jpg"; lite::cv::colorization::Colorizer *eccv16_colorizer = new lite::cv::colorization::Colorizer(eccv16_onnx_path); @@ -70,20 +70,20 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string eccv16_onnx_path = "../../../hub/onnx/cv/eccv16-colorizer.onnx"; - std::string siggraph17_onnx_path = 
"../../../hub/onnx/cv/siggraph17-colorizer.onnx"; + std::string eccv16_onnx_path = "../../../examples/hub/onnx/cv/eccv16-colorizer.onnx"; + std::string siggraph17_onnx_path = "../../../examples/hub/onnx/cv/siggraph17-colorizer.onnx"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_colorizer_1.jpg"; std::string test_img_path2 = "../../../examples/lite/resources/test_lite_colorizer_2.jpg"; std::string test_img_path3 = "../../../examples/lite/resources/test_lite_colorizer_3.jpg"; std::string test_img_path4 = "../../../examples/lite/resources/test_lite_colorizer_one_piece_0.png"; - std::string save_eccv_img_path1 = "../../../logs/test_onnx_eccv16_colorizer_1.jpg"; - std::string save_eccv_img_path2 = "../../../logs/test_onnx_eccv16_colorizer_2.jpg"; - std::string save_eccv_img_path3 = "../../../logs/test_onnx_eccv16_colorizer_3.jpg"; - std::string save_eccv_img_path4 = "../../../logs/test_onnx_eccv16_colorizer_one_piece_0.jpg"; - std::string save_siggraph_img_path1 = "../../../logs/test_onnx_siggraph17_colorizer_1.jpg"; - std::string save_siggraph_img_path2 = "../../../logs/test_onnx_siggraph17_colorizer_2.jpg"; - std::string save_siggraph_img_path3 = "../../../logs/test_onnx_siggraph17_colorizer_3.jpg"; - std::string save_siggraph_img_path4 = "../../../logs/test_onnx_siggraph17_colorizer_one_piece_0.jpg"; + std::string save_eccv_img_path1 = "../../../examples/logs/test_onnx_eccv16_colorizer_1.jpg"; + std::string save_eccv_img_path2 = "../../../examples/logs/test_onnx_eccv16_colorizer_2.jpg"; + std::string save_eccv_img_path3 = "../../../examples/logs/test_onnx_eccv16_colorizer_3.jpg"; + std::string save_eccv_img_path4 = "../../../examples/logs/test_onnx_eccv16_colorizer_one_piece_0.jpg"; + std::string save_siggraph_img_path1 = "../../../examples/logs/test_onnx_siggraph17_colorizer_1.jpg"; + std::string save_siggraph_img_path2 = "../../../examples/logs/test_onnx_siggraph17_colorizer_2.jpg"; + std::string save_siggraph_img_path3 = 
"../../../examples/logs/test_onnx_siggraph17_colorizer_3.jpg"; + std::string save_siggraph_img_path4 = "../../../examples/logs/test_onnx_siggraph17_colorizer_one_piece_0.jpg"; lite::onnxruntime::cv::colorization::Colorizer *eccv16_colorizer = new lite::onnxruntime::cv::colorization::Colorizer(eccv16_onnx_path); @@ -135,10 +135,10 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/eccv16-colorizer.mnn"; - // std::string mnn_path = "../../../hub/mnn/cv/eccv16-colorizer.opt.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/eccv16-colorizer.mnn"; + // std::string mnn_path = "../../../examples/hub/mnn/cv/eccv16-colorizer.opt.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_colorizer_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_colorizer_1_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_colorizer_1_mnn.jpg"; lite::mnn::cv::colorization::Colorizer *eccv16_colorizer = new lite::mnn::cv::colorization::Colorizer(mnn_path, 8); @@ -160,10 +160,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/eccv16-colorizer.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/eccv16-colorizer.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/eccv16-colorizer.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/eccv16-colorizer.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_colorizer_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_colorizer_1_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_colorizer_1_ncnn.jpg"; lite::ncnn::cv::colorization::Colorizer *eccv16_colorizer = new lite::ncnn::cv::colorization::Colorizer(param_path, bin_path, 1); @@ -185,10 +185,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = 
"../../../hub/tnn/cv/eccv16-colorizer.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/eccv16-colorizer.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/eccv16-colorizer.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/eccv16-colorizer.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_colorizer_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_colorizer_1_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_colorizer_1_tnn.jpg"; lite::tnn::cv::colorization::Colorizer *eccv16_colorizer = new lite::tnn::cv::colorization::Colorizer(proto_path, model_path, 1); diff --git a/examples/lite/cv/test_lite_deeplabv3_resnet101.cpp b/examples/lite/cv/test_lite_deeplabv3_resnet101.cpp index f077f74a..58d32e9b 100644 --- a/examples/lite/cv/test_lite_deeplabv3_resnet101.cpp +++ b/examples/lite/cv/test_lite_deeplabv3_resnet101.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/deeplabv3_resnet101_coco.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/deeplabv3_resnet101_coco.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_deeplabv3_resnet101.png"; - std::string save_img_path = "../../../logs/test_lite_deeplabv3_resnet101.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_deeplabv3_resnet101.jpg"; lite::cv::segmentation::DeepLabV3ResNet101 *deeplabv3_resnet101 = new lite::cv::segmentation::DeepLabV3ResNet101(onnx_path, 16); // 16 threads @@ -38,9 +38,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/deeplabv3_resnet101_coco.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/deeplabv3_resnet101_coco.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_deeplabv3_resnet101.png"; - std::string save_img_path = 
"../../../logs/test_onnx_deeplabv3_resnet101.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_deeplabv3_resnet101.jpg"; lite::onnxruntime::cv::segmentation::DeepLabV3ResNet101 *deeplabv3_resnet101 = new lite::onnxruntime::cv::segmentation::DeepLabV3ResNet101(onnx_path, 16); // 16 threads @@ -71,10 +71,10 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/deeplabv3_resnet101_coco.mnn"; - // std::string mnn_path = "../../../hub/onnx/cv/deeplabv3_resnet101_coco.opt.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/deeplabv3_resnet101_coco.mnn"; + // std::string mnn_path = "../../../examples/hub/onnx/cv/deeplabv3_resnet101_coco.opt.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_deeplabv3_resnet101.png"; - std::string save_img_path = "../../../logs/test_deeplabv3_resnet101_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_deeplabv3_resnet101_mnn.jpg"; lite::mnn::cv::segmentation::DeepLabV3ResNet101 *deeplabv3_resnet101 = new lite::mnn::cv::segmentation::DeepLabV3ResNet101(mnn_path, 16); // 16 threads @@ -105,10 +105,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/deeplabv3_resnet101_coco.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/deeplabv3_resnet101_coco.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/deeplabv3_resnet101_coco.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/deeplabv3_resnet101_coco.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_deeplabv3_resnet101.png"; - std::string save_img_path = "../../../logs/test_deeplabv3_resnet101_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_deeplabv3_resnet101_ncnn.jpg"; lite::ncnn::cv::segmentation::DeepLabV3ResNet101 *deeplabv3_resnet101 = new 
lite::ncnn::cv::segmentation::DeepLabV3ResNet101(param_path, bin_path, 16); // 16 threads @@ -139,10 +139,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/deeplabv3_resnet101_coco.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/deeplabv3_resnet101_coco.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/deeplabv3_resnet101_coco.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/deeplabv3_resnet101_coco.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_deeplabv3_resnet101.png"; - std::string save_img_path = "../../../logs/test_deeplabv3_resnet101_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_deeplabv3_resnet101_tnn.jpg"; lite::tnn::cv::segmentation::DeepLabV3ResNet101 *deeplabv3_resnet101 = new lite::tnn::cv::segmentation::DeepLabV3ResNet101(proto_path, model_path, 16); // 16 threads diff --git a/examples/lite/cv/test_lite_densenet.cpp b/examples/lite/cv/test_lite_densenet.cpp index 4bf71086..bb9956c5 100644 --- a/examples/lite/cv/test_lite_densenet.cpp +++ b/examples/lite/cv/test_lite_densenet.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/densenet121.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/densenet121.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_densenet.jpg"; lite::cv::classification::DenseNet *densenet = new lite::cv::classification::DenseNet(onnx_path); @@ -36,7 +36,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/densenet121.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/densenet121.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_densenet.jpg"; lite::onnxruntime::cv::classification::DenseNet *densenet = @@ -68,7 +68,7 @@ static void 
test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/densenet121.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/densenet121.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_densenet.jpg"; lite::mnn::cv::classification::DenseNet *densenet = @@ -100,8 +100,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/densenet121.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/densenet121.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/densenet121.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/densenet121.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_densenet.jpg"; lite::ncnn::cv::classification::DenseNet *densenet = @@ -133,8 +133,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/densenet121.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/densenet121.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/densenet121.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/densenet121.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_densenet.jpg"; lite::tnn::cv::classification::DenseNet *densenet = diff --git a/examples/lite/cv/test_lite_efficient_emotion7.cpp b/examples/lite/cv/test_lite_efficient_emotion7.cpp index ee0ca277..ff622822 100644 --- a/examples/lite/cv/test_lite_efficient_emotion7.cpp +++ b/examples/lite/cv/test_lite_efficient_emotion7.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face-emotion-recognition-enet_b0_7.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face-emotion-recognition-enet_b0_7.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - 
std::string save_img_path = "../../../logs/test_lite_emotion7.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion7.jpg"; lite::cv::face::attr::EfficientEmotion7 *efficient_emotion7 = new lite::cv::face::attr::EfficientEmotion7(onnx_path); @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/face-emotion-recognition-enet_b0_7.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face-emotion-recognition-enet_b0_7.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion7.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion7.jpg"; lite::onnxruntime::cv::face::attr::EfficientEmotion7 *efficient_emotion7 = new lite::onnxruntime::cv::face::attr::EfficientEmotion7(onnx_path); @@ -56,9 +56,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/face-emotion-recognition-enet_b0_7.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/face-emotion-recognition-enet_b0_7.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion7_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion7_mnn.jpg"; lite::mnn::cv::face::attr::EfficientEmotion7 *efficient_emotion7 = new lite::mnn::cv::face::attr::EfficientEmotion7(mnn_path); @@ -81,10 +81,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/face-emotion-recognition-enet_b0_7.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/face-emotion-recognition-enet_b0_7.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/face-emotion-recognition-enet_b0_7.opt.param"; + std::string bin_path = 
"../../../examples/hub/ncnn/cv/face-emotion-recognition-enet_b0_7.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion7_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion7_ncnn.jpg"; lite::ncnn::cv::face::attr::EfficientEmotion7 *efficient_emotion7 = new lite::ncnn::cv::face::attr::EfficientEmotion7(param_path, bin_path); @@ -107,10 +107,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/face-emotion-recognition-enet_b0_7.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/face-emotion-recognition-enet_b0_7.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/face-emotion-recognition-enet_b0_7.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/face-emotion-recognition-enet_b0_7.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion7_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion7_tnn.jpg"; lite::tnn::cv::face::attr::EfficientEmotion7 *efficient_emotion7 = new lite::tnn::cv::face::attr::EfficientEmotion7(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_efficient_emotion8.cpp b/examples/lite/cv/test_lite_efficient_emotion8.cpp index 8d6ec215..f37828bb 100644 --- a/examples/lite/cv/test_lite_efficient_emotion8.cpp +++ b/examples/lite/cv/test_lite_efficient_emotion8.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face-emotion-recognition-enet_b0_8_best_afew.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face-emotion-recognition-enet_b0_8_best_afew.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = 
"../../../logs/test_lite_emotion8.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion8.jpg"; lite::cv::face::attr::EfficientEmotion8 *efficient_emotion8 = new lite::cv::face::attr::EfficientEmotion8(onnx_path); @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/face-emotion-recognition-enet_b0_8_best_afew.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face-emotion-recognition-enet_b0_8_best_afew.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion8.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion8.jpg"; lite::onnxruntime::cv::face::attr::EfficientEmotion8 *efficient_emotion8 = new lite::onnxruntime::cv::face::attr::EfficientEmotion8(onnx_path); @@ -56,9 +56,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/face-emotion-recognition-enet_b0_8_best_afew.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/face-emotion-recognition-enet_b0_8_best_afew.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion8_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion8_mnn.jpg"; lite::mnn::cv::face::attr::EfficientEmotion8 *efficient_emotion8 = new lite::mnn::cv::face::attr::EfficientEmotion8(mnn_path); @@ -81,10 +81,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/face-emotion-recognition-enet_b0_8_best_afew.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/face-emotion-recognition-enet_b0_8_best_afew.opt.bin"; + std::string param_path = 
"../../../examples/hub/ncnn/cv/face-emotion-recognition-enet_b0_8_best_afew.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/face-emotion-recognition-enet_b0_8_best_afew.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion8_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion8_ncnn.jpg"; lite::ncnn::cv::face::attr::EfficientEmotion8 *efficient_emotion8 = new lite::ncnn::cv::face::attr::EfficientEmotion8(param_path, bin_path); @@ -107,10 +107,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/face-emotion-recognition-enet_b0_8_best_afew.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/face-emotion-recognition-enet_b0_8_best_afew.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/face-emotion-recognition-enet_b0_8_best_afew.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/face-emotion-recognition-enet_b0_8_best_afew.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion8_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion8_tnn.jpg"; lite::tnn::cv::face::attr::EfficientEmotion8 *efficient_emotion8 = new lite::tnn::cv::face::attr::EfficientEmotion8(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_efficientdet.cpp b/examples/lite/cv/test_lite_efficientdet.cpp index 237d83f4..f5589c1a 100644 --- a/examples/lite/cv/test_lite_efficientdet.cpp +++ b/examples/lite/cv/test_lite_efficientdet.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/efficientdet-d5.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/efficientdet-d5.onnx"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_efficientdet_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_efficientdet_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::EfficientDet *efficientdet = @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/efficientdet-d5.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/efficientdet-d5.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_onnx_efficientdet_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_efficientdet_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::EfficientDet *efficientdet = diff --git a/examples/lite/cv/test_lite_efficientdet_d7.cpp b/examples/lite/cv/test_lite_efficientdet_d7.cpp index 7eded579..f5ff28d2 100644 --- a/examples/lite/cv/test_lite_efficientdet_d7.cpp +++ b/examples/lite/cv/test_lite_efficientdet_d7.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/efficientdet-d7.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/efficientdet-d7.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_efficientdet_d7_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_efficientdet_d7_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::EfficientDetD7 *efficientdet_d7 = @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/efficientdet-d7.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/efficientdet-d7.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_onnx_efficientdet_d7_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_efficientdet_d7_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::EfficientDetD7 *efficientdet_d7 = diff --git a/examples/lite/cv/test_lite_efficientdet_d8.cpp b/examples/lite/cv/test_lite_efficientdet_d8.cpp index d8c53dae..e931f168 100644 --- a/examples/lite/cv/test_lite_efficientdet_d8.cpp +++ b/examples/lite/cv/test_lite_efficientdet_d8.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/efficientdet-d8.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/efficientdet-d8.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_efficientdet_d8_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_efficientdet_d8_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::EfficientDetD8 *efficientdet_d8 = @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/efficientdet-d8.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/efficientdet-d8.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_onnx_efficientdet_d8_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_efficientdet_d8_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::EfficientDetD8 *efficientdet_d8 = diff --git a/examples/lite/cv/test_lite_efficientnet_lite4.cpp b/examples/lite/cv/test_lite_efficientnet_lite4.cpp index cace250d..ae712700 100644 --- a/examples/lite/cv/test_lite_efficientnet_lite4.cpp +++ b/examples/lite/cv/test_lite_efficientnet_lite4.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/efficientnet-lite4-11.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/efficientnet-lite4-11.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_efficientnet_lite4.jpg"; lite::cv::classification::EfficientNetLite4 *efficientnet_lite4 = @@ -37,7 +37,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/efficientnet-lite4-11.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/efficientnet-lite4-11.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_efficientnet_lite4.jpg"; lite::onnxruntime::cv::classification::EfficientNetLite4 *efficientnet_lite4 = @@ -69,7 +69,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/efficientnet-lite4-11.mnn"; + std::string mnn_path = 
"../../../examples/hub/mnn/cv/efficientnet-lite4-11.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_efficientnet_lite4.jpg"; lite::mnn::cv::classification::EfficientNetLite4 *efficientnet_lite4 = @@ -101,8 +101,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/efficientnet-lite4-11.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/efficientnet-lite4-11.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/efficientnet-lite4-11.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/efficientnet-lite4-11.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_efficientnet_lite4.jpg"; lite::ncnn::cv::classification::EfficientNetLite4 *efficientnet_lite4 = @@ -134,8 +134,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/efficientnet-lite4-11.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/efficientnet-lite4-11.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/efficientnet-lite4-11.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/efficientnet-lite4-11.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_efficientnet_lite4.jpg"; lite::tnn::cv::classification::EfficientNetLite4 *efficientnet_lite4 = diff --git a/examples/lite/cv/test_lite_emotion_ferplus.cpp b/examples/lite/cv/test_lite_emotion_ferplus.cpp index eda805a7..8fb44bae 100644 --- a/examples/lite/cv/test_lite_emotion_ferplus.cpp +++ b/examples/lite/cv/test_lite_emotion_ferplus.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/emotion-ferplus-8.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/emotion-ferplus-8.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion_ferplus.jpg"; - std::string 
save_img_path = "../../../logs/test_lite_emotion_ferplus.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion_ferplus.jpg"; lite::cv::face::attr::EmotionFerPlus *emotion_ferplus = new lite::cv::face::attr::EmotionFerPlus(onnx_path); @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/emotion-ferplus-8.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/emotion-ferplus-8.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion_ferplus.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion_ferplus.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion_ferplus.jpg"; lite::onnxruntime::cv::face::attr::EmotionFerPlus *emotion_ferplus = new lite::onnxruntime::cv::face::attr::EmotionFerPlus(onnx_path); @@ -54,9 +54,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/emotion-ferplus-8.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/emotion-ferplus-8.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion_ferplus.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion_ferplus_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion_ferplus_mnn.jpg"; lite::mnn::cv::face::attr::EmotionFerPlus *emotion_ferplus = new lite::mnn::cv::face::attr::EmotionFerPlus(mnn_path); @@ -78,10 +78,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/emotion-ferplus-8.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/emotion-ferplus-8.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/emotion-ferplus-8.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/emotion-ferplus-8.opt.bin"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_emotion_ferplus.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion_ferplus_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion_ferplus_ncnn.jpg"; lite::ncnn::cv::face::attr::EmotionFerPlus *emotion_ferplus = new lite::ncnn::cv::face::attr::EmotionFerPlus(param_path, bin_path); @@ -103,10 +103,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/emotion-ferplus-8.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/emotion-ferplus-8.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/emotion-ferplus-8.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/emotion-ferplus-8.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion_ferplus.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion_ferplus_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion_ferplus_tnn.jpg"; lite::tnn::cv::face::attr::EmotionFerPlus *emotion_ferplus = new lite::tnn::cv::face::attr::EmotionFerPlus(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_face_hair_seg.cpp b/examples/lite/cv/test_lite_face_hair_seg.cpp index 29d7f46e..c1338257 100644 --- a/examples/lite/cv/test_lite_face_hair_seg.cpp +++ b/examples/lite/cv/test_lite_face_hair_seg.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face_hair_seg_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_hair_seg_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_face_hair_seg.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_hair_seg.jpg"; lite::cv::segmentation::FaceHairSeg *face_hair_seg = new lite::cv::segmentation::FaceHairSeg(onnx_path, 
4); // 4 threads @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/face_hair_seg_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_hair_seg_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_face_hair_seg_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_hair_seg_onnx.jpg"; lite::onnxruntime::cv::segmentation::FaceHairSeg *face_hair_seg = new lite::onnxruntime::cv::segmentation::FaceHairSeg(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/face_hair_seg_224x224.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/face_hair_seg_224x224.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_face_hair_seg_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_hair_seg_mnn.jpg"; lite::mnn::cv::segmentation::FaceHairSeg *face_hair_seg = new lite::mnn::cv::segmentation::FaceHairSeg(mnn_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_face_landmarks_1000.cpp b/examples/lite/cv/test_lite_face_landmarks_1000.cpp index b43f2d6b..3861034d 100644 --- a/examples/lite/cv/test_lite_face_landmarks_1000.cpp +++ b/examples/lite/cv/test_lite_face_landmarks_1000.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/FaceLandmark1000.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/FaceLandmark1000.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks_0.png"; - std::string save_img_path = "../../../logs/test_lite_face_landmarks_1000.jpg"; + std::string save_img_path = 
"../../../examples/logs/test_lite_face_landmarks_1000.jpg"; lite::cv::face::align::FaceLandmark1000 *face_landmarks_1000 = new lite::cv::face::align::FaceLandmark1000(onnx_path); @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/FaceLandmark1000.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/FaceLandmark1000.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks_0.png"; - std::string save_img_path = "../../../logs/test_face_landmarks_1000_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_face_landmarks_1000_onnx.jpg"; lite::onnxruntime::cv::face::align::FaceLandmark1000 *face_landmarks_1000 = new lite::onnxruntime::cv::face::align::FaceLandmark1000(onnx_path); @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/FaceLandmark1000.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/FaceLandmark1000.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks_0.png"; - std::string save_img_path = "../../../logs/test_face_landmarks_1000_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_face_landmarks_1000_mnn.jpg"; lite::mnn::cv::face::align::FaceLandmark1000 *face_landmarks_1000 = new lite::mnn::cv::face::align::FaceLandmark1000(mnn_path); @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/FaceLandmark1000.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/FaceLandmark1000.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/FaceLandmark1000.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/FaceLandmark1000.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks_0.png"; - std::string 
save_img_path = "../../../logs/test_face_landmarks_1000_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_face_landmarks_1000_ncnn.jpg"; lite::ncnn::cv::face::align::FaceLandmark1000 *face_landmarks_1000 = new lite::ncnn::cv::face::align::FaceLandmark1000(param_path, bin_path); @@ -106,10 +106,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/FaceLandmark1000.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/FaceLandmark1000.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/FaceLandmark1000.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/FaceLandmark1000.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks_0.png"; - std::string save_img_path = "../../../logs/test_face_landmarks_1000_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_face_landmarks_1000_tnn.jpg"; lite::tnn::cv::face::align::FaceLandmark1000 *face_landmarks_1000 = new lite::tnn::cv::face::align::FaceLandmark1000(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_face_parsing_bisenet.cpp b/examples/lite/cv/test_lite_face_parsing_bisenet.cpp index 8ef0e77c..7e85b130 100644 --- a/examples/lite/cv/test_lite_face_parsing_bisenet.cpp +++ b/examples/lite/cv/test_lite_face_parsing_bisenet.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face_parsing_512x512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_parsing_512x512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_parsing_bisenet.jpg"; lite::cv::segmentation::FaceParsingBiSeNet *face_parsing_bisenet = new lite::cv::segmentation::FaceParsingBiSeNet(onnx_path, 4); 
// 4 threads @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/face_parsing_512x512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_parsing_512x512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_parsing_bisenet_onnx.jpg"; lite::onnxruntime::cv::segmentation::FaceParsingBiSeNet *face_parsing_bisenet = new lite::onnxruntime::cv::segmentation::FaceParsingBiSeNet(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/face_parsing_512x512.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/face_parsing_512x512.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_parsing_bisenet_mnn.jpg"; lite::mnn::cv::segmentation::FaceParsingBiSeNet *face_parsing_bisenet = new lite::mnn::cv::segmentation::FaceParsingBiSeNet(mnn_path, 4); // 4 threads @@ -77,10 +77,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string proto_path = "../../../hub/ncnn/cv/face_parsing_512x512.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/face_parsing_512x512.opt.bin"; + std::string proto_path = "../../../examples/hub/ncnn/cv/face_parsing_512x512.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/face_parsing_512x512.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet_ncnn.jpg"; + 
std::string save_img_path = "../../../examples/logs/test_lite_face_parsing_bisenet_ncnn.jpg"; lite::ncnn::cv::segmentation::FaceParsingBiSeNet *face_parsing_bisenet = new lite::ncnn::cv::segmentation::FaceParsingBiSeNet( @@ -103,10 +103,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/face_parsing_512x512.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/face_parsing_512x512.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/face_parsing_512x512.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/face_parsing_512x512.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_parsing_bisenet_tnn.jpg"; lite::tnn::cv::segmentation::FaceParsingBiSeNet *face_parsing_bisenet = new lite::tnn::cv::segmentation::FaceParsingBiSeNet( diff --git a/examples/lite/cv/test_lite_face_parsing_bisenet_dyn.cpp b/examples/lite/cv/test_lite_face_parsing_bisenet_dyn.cpp index 52a06d44..b86df6e9 100644 --- a/examples/lite/cv/test_lite_face_parsing_bisenet_dyn.cpp +++ b/examples/lite/cv/test_lite_face_parsing_bisenet_dyn.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face_parsing_dynamic.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_parsing_dynamic.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet_dyn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_parsing_bisenet_dyn.jpg"; lite::cv::segmentation::FaceParsingBiSeNetDyn *face_parsing_bisenet_dyn = new lite::cv::segmentation::FaceParsingBiSeNetDyn(onnx_path, 4); // 4 threads @@ -29,9 +29,9 @@ static 
void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/face_parsing_dynamic.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_parsing_dynamic.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_parsing.png"; - std::string save_img_path = "../../../logs/test_lite_face_parsing_bisenet_dyn_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_face_parsing_bisenet_dyn_onnx.jpg"; lite::onnxruntime::cv::segmentation::FaceParsingBiSeNetDyn *face_parsing_bisenet_dyn = new lite::onnxruntime::cv::segmentation::FaceParsingBiSeNetDyn(onnx_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_faceboxes.cpp b/examples/lite/cv/test_lite_faceboxes.cpp index ae515633..61273f0e 100644 --- a/examples/lite/cv/test_lite_faceboxes.cpp +++ b/examples/lite/cv/test_lite_faceboxes.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/FaceBoxes.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/FaceBoxes.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_faceboxes.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_faceboxes.jpg"; lite::cv::face::detect::FaceBoxes *faceboxes = new lite::cv::face::detect::FaceBoxes(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/FaceBoxes.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/FaceBoxes.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_faceboxes_onnx_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_faceboxes_onnx_2.jpg"; lite::onnxruntime::cv::face::detect::FaceBoxes *faceboxes = 
new lite::onnxruntime::cv::face::detect::FaceBoxes(onnx_path); @@ -52,9 +52,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/FaceBoxes.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/FaceBoxes.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_faceboxes_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_faceboxes_mnn_2.jpg"; lite::mnn::cv::face::detect::FaceBoxes *faceboxes = new lite::mnn::cv::face::detect::FaceBoxes(mnn_path); @@ -76,10 +76,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/FaceBoxes.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/FaceBoxes.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/FaceBoxes.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/FaceBoxes.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_faceboxes_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_faceboxes_ncnn_2.jpg"; lite::ncnn::cv::face::detect::FaceBoxes *faceboxes = new lite::ncnn::cv::face::detect::FaceBoxes(param_path, bin_path); @@ -101,10 +101,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/FaceBoxes.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/FaceBoxes.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/FaceBoxes.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/FaceBoxes.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_faceboxes_tnn_2.jpg"; + std::string save_img_path = 
"../../../examples/logs/test_faceboxes_tnn_2.jpg"; lite::tnn::cv::face::detect::FaceBoxes *faceboxes = new lite::tnn::cv::face::detect::FaceBoxes(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_faceboxesv2.cpp b/examples/lite/cv/test_lite_faceboxesv2.cpp index 99400740..5749eb63 100644 --- a/examples/lite/cv/test_lite_faceboxesv2.cpp +++ b/examples/lite/cv/test_lite_faceboxesv2.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/faceboxesv2-640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/faceboxesv2-640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_faceboxesv2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_faceboxesv2.jpg"; lite::cv::face::detect::FaceBoxesV2 *faceboxesv2 = new lite::cv::face::detect::FaceBoxesV2(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/faceboxesv2-640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/faceboxesv2-640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_faceboxesv2_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_faceboxesv2_onnx.jpg"; lite::onnxruntime::cv::face::detect::FaceBoxesV2 *faceboxesv2 = new lite::onnxruntime::cv::face::detect::FaceBoxesV2(onnx_path); @@ -52,9 +52,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/faceboxesv2-640x640.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/faceboxesv2-640x640.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = 
"../../../logs/test_lite_faceboxesv2_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_faceboxesv2_mnn.jpg"; lite::mnn::cv::face::detect::FaceBoxesV2 *faceboxesv2 = new lite::mnn::cv::face::detect::FaceBoxesV2(mnn_path); @@ -76,10 +76,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/faceboxesv2-640x640.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/faceboxesv2-640x640.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/faceboxesv2-640x640.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/faceboxesv2-640x640.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_faceboxesv2_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_faceboxesv2_ncnn.jpg"; lite::ncnn::cv::face::detect::FaceBoxesV2 *faceboxesv2 = new lite::ncnn::cv::face::detect::FaceBoxesV2(param_path, bin_path); @@ -101,10 +101,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/faceboxesv2-640x640.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/faceboxesv2-640x640.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/faceboxesv2-640x640.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/faceboxesv2-640x640.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_faceboxesv2_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_faceboxesv2_tnn.jpg"; lite::tnn::cv::face::detect::FaceBoxesV2 *faceboxesv2 = new lite::tnn::cv::face::detect::FaceBoxesV2(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_facenet.cpp b/examples/lite/cv/test_lite_facenet.cpp index ec5a68f6..60ba1f48 100644 --- 
a/examples/lite/cv/test_lite_facenet.cpp +++ b/examples/lite/cv/test_lite_facenet.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/facenet_casia-webface_resnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/facenet_casia-webface_resnet.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_facenet_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_facenet_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/facenet_casia-webface_resnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/facenet_casia-webface_resnet.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_facenet_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_facenet_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/facenet_casia-webface_resnet.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/facenet_casia-webface_resnet.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_facenet_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_facenet_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/facenet_casia-webface_resnet.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/facenet_casia-webface_resnet.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/facenet_casia-webface_resnet.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/facenet_casia-webface_resnet.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_facenet_0.png"; std::string test_img_path1 = 
"../../../examples/lite/resources/test_lite_facenet_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/facenet_casia-webface_resnet.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/facenet_casia-webface_resnet.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/facenet_casia-webface_resnet.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/facenet_casia-webface_resnet.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_facenet_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_facenet_2.png"; diff --git a/examples/lite/cv/test_lite_fast_portrait_seg.cpp b/examples/lite/cv/test_lite_fast_portrait_seg.cpp index 0e513338..7f217dfe 100644 --- a/examples/lite/cv/test_lite_fast_portrait_seg.cpp +++ b/examples/lite/cv/test_lite_fast_portrait_seg.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/fast_portrait_seg_SINet_bi_320_256.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/fast_portrait_seg_SINet_bi_320_256.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg.png"; - std::string save_img_path = "../../../logs/test_lite_fast_portrait_seg.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_fast_portrait_seg.jpg"; lite::cv::segmentation::FastPortraitSeg *fast_portrait_seg = new lite::cv::segmentation::FastPortraitSeg(onnx_path, 4); // 4 threads @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/fast_portrait_seg_SINet_bi_320_256.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/fast_portrait_seg_SINet_bi_320_256.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = 
"../../../logs/test_lite_fast_portrait_seg_1_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_fast_portrait_seg_1_onnx.jpg"; lite::onnxruntime::cv::segmentation::FastPortraitSeg *fast_portrait_seg = new lite::onnxruntime::cv::segmentation::FastPortraitSeg(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/fast_portrait_seg_SINet_bi_320_256.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/fast_portrait_seg_SINet_bi_320_256.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = "../../../logs/test_lite_fast_portrait_seg_1_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_fast_portrait_seg_1_mnn.jpg"; lite::mnn::cv::segmentation::FastPortraitSeg *fast_portrait_seg = new lite::mnn::cv::segmentation::FastPortraitSeg(mnn_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_fast_style_transfer.cpp b/examples/lite/cv/test_lite_fast_style_transfer.cpp index 69f6663f..71c29c8a 100644 --- a/examples/lite/cv/test_lite_fast_style_transfer.cpp +++ b/examples/lite/cv/test_lite_fast_style_transfer.cpp @@ -6,17 +6,17 @@ static void test_default() { - std::string candy_onnx_path = "../../../hub/onnx/cv/style-candy-8.onnx"; - std::string mosaic_onnx_path = "../../../hub/onnx/cv/style-mosaic-8.onnx"; - std::string pointilism_onnx_path = "../../../hub/onnx/cv/style-pointilism-8.onnx"; - std::string rain_princess_onnx_path = "../../../hub/onnx/cv/style-rain-princess-8.onnx"; - std::string udnie_onnx_path = "../../../hub/onnx/cv/style-udnie-8.onnx"; + std::string candy_onnx_path = "../../../examples/hub/onnx/cv/style-candy-8.onnx"; + std::string mosaic_onnx_path = "../../../examples/hub/onnx/cv/style-mosaic-8.onnx"; + std::string pointilism_onnx_path = "../../../examples/hub/onnx/cv/style-pointilism-8.onnx"; + std::string 
rain_princess_onnx_path = "../../../examples/hub/onnx/cv/style-rain-princess-8.onnx"; + std::string udnie_onnx_path = "../../../examples/hub/onnx/cv/style-udnie-8.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fast_style_transfer.jpg"; - std::string save_candy_path = "../../../logs/test_lite_fast_style_transfer_candy.jpg"; - std::string save_mosaic_path = "../../../logs/test_lite_fast_style_transfer_mosaic.jpg"; - std::string save_pointilism_path = "../../../logs/test_lite_fast_style_transfer_pointilism.jpg"; - std::string save_rain_princess_path = "../../../logs/test_lite_fast_style_transfer_rain_princes.jpg"; - std::string save_udnie_path = "../../../logs/test_lite_fast_style_transfer_udnie.jpg"; + std::string save_candy_path = "../../../examples/logs/test_lite_fast_style_transfer_candy.jpg"; + std::string save_mosaic_path = "../../../examples/logs/test_lite_fast_style_transfer_mosaic.jpg"; + std::string save_pointilism_path = "../../../examples/logs/test_lite_fast_style_transfer_pointilism.jpg"; + std::string save_rain_princess_path = "../../../examples/logs/test_lite_fast_style_transfer_rain_princes.jpg"; + std::string save_udnie_path = "../../../examples/logs/test_lite_fast_style_transfer_udnie.jpg"; lite::cv::style::FastStyleTransfer *candy_fast_style_transfer = new lite::cv::style::FastStyleTransfer(candy_onnx_path); @@ -62,17 +62,17 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string candy_onnx_path = "../../../hub/onnx/cv/style-candy-8.onnx"; - std::string mosaic_onnx_path = "../../../hub/onnx/cv/style-mosaic-8.onnx"; - std::string pointilism_onnx_path = "../../../hub/onnx/cv/style-pointilism-8.onnx"; - std::string rain_princess_onnx_path = "../../../hub/onnx/cv/style-rain-princess-8.onnx"; - std::string udnie_onnx_path = "../../../hub/onnx/cv/style-udnie-8.onnx"; + std::string candy_onnx_path = "../../../examples/hub/onnx/cv/style-candy-8.onnx"; + std::string 
mosaic_onnx_path = "../../../examples/hub/onnx/cv/style-mosaic-8.onnx"; + std::string pointilism_onnx_path = "../../../examples/hub/onnx/cv/style-pointilism-8.onnx"; + std::string rain_princess_onnx_path = "../../../examples/hub/onnx/cv/style-rain-princess-8.onnx"; + std::string udnie_onnx_path = "../../../examples/hub/onnx/cv/style-udnie-8.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fast_style_transfer.jpg"; - std::string save_candy_path = "../../../logs/test_onnx_fast_style_transfer_candy.jpg"; - std::string save_mosaic_path = "../../../logs/test_onnx_fast_style_transfer_mosaic.jpg"; - std::string save_pointilism_path = "../../../logs/test_onnx_fast_style_transfer_pointilism.jpg"; - std::string save_rain_princess_path = "../../../logs/test_onnx_fast_style_transfer_rain_princes.jpg"; - std::string save_udnie_path = "../../../logs/test_onnx_fast_style_transfer_udnie.jpg"; + std::string save_candy_path = "../../../examples/logs/test_onnx_fast_style_transfer_candy.jpg"; + std::string save_mosaic_path = "../../../examples/logs/test_onnx_fast_style_transfer_mosaic.jpg"; + std::string save_pointilism_path = "../../../examples/logs/test_onnx_fast_style_transfer_pointilism.jpg"; + std::string save_rain_princess_path = "../../../examples/logs/test_onnx_fast_style_transfer_rain_princes.jpg"; + std::string save_udnie_path = "../../../examples/logs/test_onnx_fast_style_transfer_udnie.jpg"; lite::onnxruntime::cv::style::FastStyleTransfer *candy_fast_style_transfer = new lite::onnxruntime::cv::style::FastStyleTransfer(candy_onnx_path); @@ -118,9 +118,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/style-candy-8.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/style-candy-8.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fast_style_transfer.jpg"; - std::string save_path = "../../../logs/test_fast_style_transfer_candy_mnn.jpg"; 
+ std::string save_path = "../../../examples/logs/test_fast_style_transfer_candy_mnn.jpg"; lite::mnn::cv::style::FastStyleTransfer *candy_fast_style_transfer = new lite::mnn::cv::style::FastStyleTransfer(mnn_path); @@ -143,10 +143,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/style-candy-8.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/style-candy-8.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/style-candy-8.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/style-candy-8.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fast_style_transfer.jpg"; - std::string save_path = "../../../logs/test_fast_style_transfer_candy_ncnn.jpg"; + std::string save_path = "../../../examples/logs/test_fast_style_transfer_candy_ncnn.jpg"; lite::ncnn::cv::style::FastStyleTransfer *candy_fast_style_transfer = new lite::ncnn::cv::style::FastStyleTransfer(param_path, bin_path); @@ -170,10 +170,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/style-candy-8.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/style-candy-8.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/style-candy-8.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/style-candy-8.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fast_style_transfer.jpg"; - std::string save_path = "../../../logs/test_fast_style_transfer_candy_tnn.jpg"; + std::string save_path = "../../../examples/logs/test_fast_style_transfer_candy_tnn.jpg"; lite::tnn::cv::style::FastStyleTransfer *candy_fast_style_transfer = new lite::tnn::cv::style::FastStyleTransfer(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_fcn_resnet101.cpp b/examples/lite/cv/test_lite_fcn_resnet101.cpp index 0badd640..139e4884 100644 --- 
a/examples/lite/cv/test_lite_fcn_resnet101.cpp +++ b/examples/lite/cv/test_lite_fcn_resnet101.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/fcn_resnet101.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/fcn_resnet101.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fcn_resnet101.png"; - std::string save_img_path = "../../../logs/test_lite_fcn_resnet101.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_fcn_resnet101.jpg"; lite::cv::segmentation::FCNResNet101 *fcn_resnet101 = new lite::cv::segmentation::FCNResNet101(onnx_path, 16); @@ -38,9 +38,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/fcn_resnet101.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/fcn_resnet101.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fcn_resnet101.png"; - std::string save_img_path = "../../../logs/test_onnx_fcn_resnet101.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_fcn_resnet101.jpg"; lite::onnxruntime::cv::segmentation::FCNResNet101 *fcn_resnet101 = new lite::onnxruntime::cv::segmentation::FCNResNet101(onnx_path, 16); @@ -71,10 +71,10 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/fcn_resnet101.mnn"; - // std::string mnn_path = "../../../hub/mnn/cv/fcn_resnet101.opt.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/fcn_resnet101.mnn"; + // std::string mnn_path = "../../../examples/hub/mnn/cv/fcn_resnet101.opt.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fcn_resnet101.png"; - std::string save_img_path = "../../../logs/test_fcn_resnet101_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_fcn_resnet101_mnn.jpg"; lite::mnn::cv::segmentation::FCNResNet101 *fcn_resnet101 = new 
lite::mnn::cv::segmentation::FCNResNet101(mnn_path, 16); @@ -105,10 +105,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/fcn_resnet101.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/fcn_resnet101.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/fcn_resnet101.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/fcn_resnet101.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fcn_resnet101.png"; - std::string save_img_path = "../../../logs/test_fcn_resnet101_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_fcn_resnet101_ncnn.jpg"; lite::ncnn::cv::segmentation::FCNResNet101 *fcn_resnet101 = new lite::ncnn::cv::segmentation::FCNResNet101(param_path, bin_path, 16); @@ -139,10 +139,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/fcn_resnet101.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/fcn_resnet101.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/fcn_resnet101.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/fcn_resnet101.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fcn_resnet101.png"; - std::string save_img_path = "../../../logs/test_fcn_resnet101_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_fcn_resnet101_tnn.jpg"; lite::tnn::cv::segmentation::FCNResNet101 *fcn_resnet101 = new lite::tnn::cv::segmentation::FCNResNet101(proto_path, model_path, 16); diff --git a/examples/lite/cv/test_lite_female_photo2cartoon.cpp b/examples/lite/cv/test_lite_female_photo2cartoon.cpp index 7b46d5a2..d9cac78a 100644 --- a/examples/lite/cv/test_lite_female_photo2cartoon.cpp +++ b/examples/lite/cv/test_lite_female_photo2cartoon.cpp @@ -6,11 +6,11 @@ static void test_default() { - std::string head_seg_onnx_path = 
"../../../hub/onnx/cv/minivision_head_seg.onnx"; - std::string cartoon_onnx_path = "../../../hub/onnx/cv/minivision_female_photo2cartoon.onnx"; + std::string head_seg_onnx_path = "../../../examples/hub/onnx/cv/minivision_head_seg.onnx"; + std::string cartoon_onnx_path = "../../../examples/hub/onnx/cv/minivision_female_photo2cartoon.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg.png"; - std::string save_mask_path = "../../../logs/test_lite_female_photo2cartoon_seg.jpg"; - std::string save_cartoon_path = "../../../logs/test_lite_female_photo2cartoon_cartoon.jpg"; + std::string save_mask_path = "../../../examples/logs/test_lite_female_photo2cartoon_seg.jpg"; + std::string save_cartoon_path = "../../../examples/logs/test_lite_female_photo2cartoon_cartoon.jpg"; lite::cv::segmentation::HeadSeg *head_seg = new lite::cv::segmentation::HeadSeg(head_seg_onnx_path, 4); // 4 threads @@ -43,11 +43,11 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string head_seg_onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; - std::string cartoon_onnx_path = "../../../hub/onnx/cv/minivision_female_photo2cartoon.onnx"; + std::string head_seg_onnx_path = "../../../examples/hub/onnx/cv/minivision_head_seg.onnx"; + std::string cartoon_onnx_path = "../../../examples/hub/onnx/cv/minivision_female_photo2cartoon.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_female_photo2cartoon.jpg"; - std::string save_mask_path = "../../../logs/test_lite_female_photo2cartoon_seg_1_onnx.jpg"; - std::string save_cartoon_path = "../../../logs/test_lite_female_photo2cartoon_cartoon_1_onnx.jpg"; + std::string save_mask_path = "../../../examples/logs/test_lite_female_photo2cartoon_seg_1_onnx.jpg"; + std::string save_cartoon_path = "../../../examples/logs/test_lite_female_photo2cartoon_cartoon_1_onnx.jpg"; lite::onnxruntime::cv::segmentation::HeadSeg *head_seg = new 
lite::onnxruntime::cv::segmentation::HeadSeg(head_seg_onnx_path, 4); // 4 threads @@ -81,11 +81,11 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string head_seg_mnn_path = "../../../hub/mnn/cv/minivision_head_seg.mnn"; - std::string cartoon_mnn_path = "../../../hub/mnn/cv/minivision_female_photo2cartoon.mnn"; + std::string head_seg_mnn_path = "../../../examples/hub/mnn/cv/minivision_head_seg.mnn"; + std::string cartoon_mnn_path = "../../../examples/hub/mnn/cv/minivision_female_photo2cartoon.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_female_photo2cartoon.jpg"; - std::string save_mask_path = "../../../logs/test_lite_female_photo2cartoon_seg_1_mnn.jpg"; - std::string save_cartoon_path = "../../../logs/test_lite_female_photo2cartoon_cartoon_1_mnn.jpg"; + std::string save_mask_path = "../../../examples/logs/test_lite_female_photo2cartoon_seg_1_mnn.jpg"; + std::string save_cartoon_path = "../../../examples/logs/test_lite_female_photo2cartoon_cartoon_1_mnn.jpg"; lite::mnn::cv::segmentation::HeadSeg *head_seg = new lite::mnn::cv::segmentation::HeadSeg(head_seg_mnn_path, 4); // 4 threads @@ -120,12 +120,12 @@ static void test_ncnn() { #ifdef ENABLE_NCNN // WARN: TEST FAILED !!! 
- std::string head_seg_onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; // helper - std::string cartoon_param_path = "../../../hub/ncnn/cv/minivision_female_photo2cartoon.opt.param"; - std::string cartoon_bin_path = "../../../hub/ncnn/cv/minivision_female_photo2cartoon.opt.bin"; + std::string head_seg_onnx_path = "../../../examples/hub/onnx/cv/minivision_head_seg.onnx"; // helper + std::string cartoon_param_path = "../../../examples/hub/ncnn/cv/minivision_female_photo2cartoon.opt.param"; + std::string cartoon_bin_path = "../../../examples/hub/ncnn/cv/minivision_female_photo2cartoon.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_female_photo2cartoon.jpg"; - std::string save_mask_path = "../../../logs/test_lite_female_photo2cartoon_seg_1_ncnn.jpg"; - std::string save_cartoon_path = "../../../logs/test_lite_female_photo2cartoon_cartoon_1_ncnn.jpg"; + std::string save_mask_path = "../../../examples/logs/test_lite_female_photo2cartoon_seg_1_ncnn.jpg"; + std::string save_cartoon_path = "../../../examples/logs/test_lite_female_photo2cartoon_cartoon_1_ncnn.jpg"; lite::cv::segmentation::HeadSeg *head_seg = new lite::cv::segmentation::HeadSeg(head_seg_onnx_path, 4); // 4 threads @@ -160,12 +160,12 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string head_seg_onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; // helper - std::string cartoon_proto_path = "../../../hub/tnn/cv/minivision_female_photo2cartoon.opt.tnnproto"; - std::string cartoon_model_path = "../../../hub/tnn/cv/minivision_female_photo2cartoon.opt.tnnmodel"; + std::string head_seg_onnx_path = "../../../examples/hub/onnx/cv/minivision_head_seg.onnx"; // helper + std::string cartoon_proto_path = "../../../examples/hub/tnn/cv/minivision_female_photo2cartoon.opt.tnnproto"; + std::string cartoon_model_path = "../../../examples/hub/tnn/cv/minivision_female_photo2cartoon.opt.tnnmodel"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_female_photo2cartoon.jpg"; - std::string save_mask_path = "../../../logs/test_lite_female_photo2cartoon_seg_1_tnn.jpg"; - std::string save_cartoon_path = "../../../logs/test_lite_female_photo2cartoon_cartoon_1_tnn.jpg"; + std::string save_mask_path = "../../../examples/logs/test_lite_female_photo2cartoon_seg_1_tnn.jpg"; + std::string save_cartoon_path = "../../../examples/logs/test_lite_female_photo2cartoon_cartoon_1_tnn.jpg"; lite::cv::segmentation::HeadSeg *head_seg = new lite::cv::segmentation::HeadSeg(head_seg_onnx_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_focal_arcface.cpp b/examples/lite/cv/test_lite_focal_arcface.cpp index 06e035de..0b01d63b 100644 --- a/examples/lite/cv/test_lite_focal_arcface.cpp +++ b/examples/lite/cv/test_lite_focal_arcface.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/focal-arcface-ms1m-ir50-epoch120.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/focal-arcface-ms1m-ir50-epoch120.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_arcface_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/focal-arcface-ms1m-ir50-epoch120.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/focal-arcface-ms1m-ir50-epoch120.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_arcface_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/focal-arcface-ms1m-ir50-epoch120.mnn"; + std::string mnn_path = 
"../../../examples/hub/mnn/cv/focal-arcface-ms1m-ir50-epoch120.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_arcface_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/focal-arcface-ms1m-ir50-epoch120.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/focal-arcface-ms1m-ir50-epoch120.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/focal-arcface-ms1m-ir50-epoch120.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/focal-arcface-ms1m-ir50-epoch120.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_arcface_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/focal-arcface-ms1m-ir50-epoch120.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/focal-arcface-ms1m-ir50-epoch120.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/focal-arcface-ms1m-ir50-epoch120.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/focal-arcface-ms1m-ir50-epoch120.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_arcface_2.png"; diff --git a/examples/lite/cv/test_lite_focal_asia_arcface.cpp b/examples/lite/cv/test_lite_focal_asia_arcface.cpp index f080c021..9fdd56d0 100644 --- a/examples/lite/cv/test_lite_focal_asia_arcface.cpp +++ b/examples/lite/cv/test_lite_focal_asia_arcface.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/focal-arcface-bh-ir50-asia.onnx"; + std::string 
onnx_path = "../../../examples/hub/onnx/cv/focal-arcface-bh-ir50-asia.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/focal-arcface-bh-ir50-asia.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/focal-arcface-bh-ir50-asia.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/focal-arcface-bh-ir50-asia.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/focal-arcface-bh-ir50-asia.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/focal-arcface-bh-ir50-asia.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/focal-arcface-bh-ir50-asia.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/focal-arcface-bh-ir50-asia.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/focal-arcface-bh-ir50-asia.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = 
"../../../hub/tnn/cv/focal-arcface-bh-ir50-asia.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/focal-arcface-bh-ir50-asia.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/focal-arcface-bh-ir50-asia.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/focal-arcface-bh-ir50-asia.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_focal_asia_arcface_2.png"; diff --git a/examples/lite/cv/test_lite_fsanet.cpp b/examples/lite/cv/test_lite_fsanet.cpp index 7bf5f708..3f862a5f 100644 --- a/examples/lite/cv/test_lite_fsanet.cpp +++ b/examples/lite/cv/test_lite_fsanet.cpp @@ -6,10 +6,10 @@ static void test_default() { - std::string var_onnx_path = "../../../hub/onnx/cv/fsanet-var.onnx"; - std::string conv_onnx_path = "../../../hub/onnx/cv/fsanet-1x1.onnx"; + std::string var_onnx_path = "../../../examples/hub/onnx/cv/fsanet-var.onnx"; + std::string conv_onnx_path = "../../../examples/hub/onnx/cv/fsanet-1x1.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fsanet.jpg"; - std::string save_img_path = "../../../logs/test_lite_fsanet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_fsanet.jpg"; lite::cv::face::pose::FSANet *var_fsanet = new lite::cv::face::pose::FSANet(var_onnx_path); lite::cv::face::pose::FSANet *conv_fsanet = new lite::cv::face::pose::FSANet(conv_onnx_path); @@ -47,10 +47,10 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string var_onnx_path = "../../../hub/onnx/cv/fsanet-var.onnx"; - std::string conv_onnx_path = "../../../hub/onnx/cv/fsanet-1x1.onnx"; + std::string var_onnx_path = "../../../examples/hub/onnx/cv/fsanet-var.onnx"; + std::string conv_onnx_path = "../../../examples/hub/onnx/cv/fsanet-1x1.onnx"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_fsanet.jpg"; - std::string save_img_path = "../../../logs/test_fsanet_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_fsanet_onnx.jpg"; lite::onnxruntime::cv::face::pose::FSANet *var_fsanet = new lite::onnxruntime::cv::face::pose::FSANet(var_onnx_path); @@ -89,10 +89,10 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string var_mnn_path = "../../../hub/mnn/cv/fsanet-var.mnn"; - std::string conv_mnn_path = "../../../hub/mnn/cv/fsanet-1x1.mnn"; + std::string var_mnn_path = "../../../examples/hub/mnn/cv/fsanet-var.mnn"; + std::string conv_mnn_path = "../../../examples/hub/mnn/cv/fsanet-1x1.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fsanet.jpg"; - std::string save_img_path = "../../../logs/test_fsanet_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_fsanet_mnn.jpg"; lite::mnn::cv::face::pose::FSANet *var_fsanet = new lite::mnn::cv::face::pose::FSANet(var_mnn_path); @@ -137,12 +137,12 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string var_proto_path = "../../../hub/tnn/cv/fsanet-var.opt.tnnproto"; - std::string var_model_path = "../../../hub/tnn/cv/fsanet-var.opt.tnnmodel"; - std::string conv_proto_path = "../../../hub/tnn/cv/fsanet-1x1.opt.tnnproto"; - std::string conv_model_path = "../../../hub/tnn/cv/fsanet-1x1.opt.tnnmodel"; + std::string var_proto_path = "../../../examples/hub/tnn/cv/fsanet-var.opt.tnnproto"; + std::string var_model_path = "../../../examples/hub/tnn/cv/fsanet-var.opt.tnnmodel"; + std::string conv_proto_path = "../../../examples/hub/tnn/cv/fsanet-1x1.opt.tnnproto"; + std::string conv_model_path = "../../../examples/hub/tnn/cv/fsanet-1x1.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_fsanet.jpg"; - std::string save_img_path = "../../../logs/test_fsanet_tnn.jpg"; + std::string save_img_path = 
"../../../examples/logs/test_fsanet_tnn.jpg"; lite::tnn::cv::face::pose::FSANet *var_fsanet = new lite::tnn::cv::face::pose::FSANet(var_proto_path, var_model_path); diff --git a/examples/lite/cv/test_lite_gender_googlenet.cpp b/examples/lite/cv/test_lite_gender_googlenet.cpp index ff40dfd5..f83f75e3 100644 --- a/examples/lite/cv/test_lite_gender_googlenet.cpp +++ b/examples/lite/cv/test_lite_gender_googlenet.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/gender_googlenet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/gender_googlenet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_gender_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_lite_gender_googlenet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_gender_googlenet.jpg"; lite::cv::face::attr::GenderGoogleNet *gender_googlenet = new lite::cv::face::attr::GenderGoogleNet(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/gender_googlenet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/gender_googlenet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_gender_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_onnx_gender_googlenet.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_gender_googlenet.jpg"; lite::onnxruntime::cv::face::attr::GenderGoogleNet *gender_googlenet = new lite::onnxruntime::cv::face::attr::GenderGoogleNet(onnx_path); @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/gender_googlenet.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/gender_googlenet.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_gender_googlenet.jpg"; - std::string 
save_img_path = "../../../logs/test_gender_googlenet_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_gender_googlenet_mnn.jpg"; lite::mnn::cv::face::attr::GenderGoogleNet *gender_googlenet = new lite::mnn::cv::face::attr::GenderGoogleNet(mnn_path); @@ -77,10 +77,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/gender_googlenet.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/gender_googlenet.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/gender_googlenet.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/gender_googlenet.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_gender_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_gender_googlenet_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_gender_googlenet_ncnn.jpg"; lite::ncnn::cv::face::attr::GenderGoogleNet *gender_googlenet = new lite::ncnn::cv::face::attr::GenderGoogleNet(param_path, bin_path); @@ -102,10 +102,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/gender_googlenet.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/gender_googlenet.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/gender_googlenet.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/gender_googlenet.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_gender_googlenet.jpg"; - std::string save_img_path = "../../../logs/test_gender_googlenet_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_gender_googlenet_tnn.jpg"; lite::tnn::cv::face::attr::GenderGoogleNet *gender_googlenet = new lite::tnn::cv::face::attr::GenderGoogleNet(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_ghostnet.cpp b/examples/lite/cv/test_lite_ghostnet.cpp index 
a40a9f10..5f9063dd 100644 --- a/examples/lite/cv/test_lite_ghostnet.cpp +++ b/examples/lite/cv/test_lite_ghostnet.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ghostnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ghostnet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ghostnet.jpg"; lite::cv::classification::GhostNet *ghostnet = new lite::cv::classification::GhostNet(onnx_path); @@ -36,7 +36,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ghostnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ghostnet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ghostnet.jpg"; lite::onnxruntime::cv::classification::GhostNet *ghostnet = @@ -68,7 +68,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/ghostnet.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/ghostnet.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ghostnet.jpg"; lite::mnn::cv::classification::GhostNet *ghostnet = @@ -100,8 +100,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/ghostnet.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/ghostnet.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/ghostnet.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/ghostnet.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ghostnet.jpg"; lite::ncnn::cv::classification::GhostNet *ghostnet = @@ -133,8 +133,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/ghostnet.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/ghostnet.opt.tnnmodel"; + std::string 
proto_path = "../../../examples/hub/tnn/cv/ghostnet.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/ghostnet.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ghostnet.jpg"; lite::tnn::cv::classification::GhostNet *ghostnet = diff --git a/examples/lite/cv/test_lite_glint_arcface.cpp b/examples/lite/cv/test_lite_glint_arcface.cpp index ab3cae44..f00da3ca 100644 --- a/examples/lite/cv/test_lite_glint_arcface.cpp +++ b/examples/lite/cv/test_lite_glint_arcface.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ms1mv3_arcface_r100.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ms1mv3_arcface_r100.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ms1mv3_arcface_r100.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ms1mv3_arcface_r100.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/ms1mv3_arcface_r100.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/ms1mv3_arcface_r100.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/ms1mv3_arcface_r100.opt.param"; - std::string bin_path = 
"../../../hub/ncnn/cv/ms1mv3_arcface_r100.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/ms1mv3_arcface_r100.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/ms1mv3_arcface_r100.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/ms1mv3_arcface_r100.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/ms1mv3_arcface_r100.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/ms1mv3_arcface_r100.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/ms1mv3_arcface_r100.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_glint_cosface.cpp b/examples/lite/cv/test_lite_glint_cosface.cpp index f95b919d..3d4675b9 100644 --- a/examples/lite/cv/test_lite_glint_cosface.cpp +++ b/examples/lite/cv/test_lite_glint_cosface.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/glint360k_cosface_r100.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/glint360k_cosface_r100.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_glint_cosface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_glint_cosface_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/glint360k_cosface_r100.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/glint360k_cosface_r100.onnx"; std::string test_img_path0 = 
"../../../examples/lite/resources/test_lite_glint_cosface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_glint_cosface_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/glint360k_cosface_r100.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/glint360k_cosface_r100.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_glint_cosface_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_glint_cosface_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/glint360k_cosface_r100.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/glint360k_cosface_r100.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/glint360k_cosface_r100.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/glint360k_cosface_r100.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/glint360k_cosface_r100.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/glint360k_cosface_r100.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/glint360k_cosface_r100.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/glint360k_cosface_r100.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_glint_partial_fc.cpp b/examples/lite/cv/test_lite_glint_partial_fc.cpp index bbf3f5e5..8e2e985d 100644 --- 
a/examples/lite/cv/test_lite_glint_partial_fc.cpp +++ b/examples/lite/cv/test_lite_glint_partial_fc.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/partial_fc_glint360k_r50.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/partial_fc_glint360k_r50.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_glint_partial_fc_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_glint_partial_fc_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/partial_fc_glint360k_r50.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/partial_fc_glint360k_r50.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_glint_partial_fc_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_glint_partial_fc_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/partial_fc_glint360k_r50.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/partial_fc_glint360k_r50.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_glint_partial_fc_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_glint_partial_fc_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/partial_fc_glint360k_r50.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/partial_fc_glint360k_r50.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/partial_fc_glint360k_r50.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/partial_fc_glint360k_r50.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = 
"../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/partial_fc_glint360k_r50.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/partial_fc_glint360k_r50.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/partial_fc_glint360k_r50.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/partial_fc_glint360k_r50.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_hair_seg.cpp b/examples/lite/cv/test_lite_hair_seg.cpp index 82ed2f08..6e83d097 100644 --- a/examples/lite/cv/test_lite_hair_seg.cpp +++ b/examples/lite/cv/test_lite_hair_seg.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/hairseg_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/hairseg_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_hair_seg.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_hair_seg.jpg"; lite::cv::segmentation::HairSeg *hair_seg = new lite::cv::segmentation::HairSeg(onnx_path, 4); // 4 threads @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/hairseg_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/hairseg_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_hair_seg_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_hair_seg_onnx.jpg"; lite::onnxruntime::cv::segmentation::HairSeg 
*hair_seg = new lite::onnxruntime::cv::segmentation::HairSeg(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/hairseg_224x224.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/hairseg_224x224.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_hair_seg_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_hair_seg_mnn.jpg"; lite::mnn::cv::segmentation::HairSeg *hair_seg = new lite::mnn::cv::segmentation::HairSeg(mnn_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_hardnet.cpp b/examples/lite/cv/test_lite_hardnet.cpp index 70e19f58..0cc7d3c8 100644 --- a/examples/lite/cv/test_lite_hardnet.cpp +++ b/examples/lite/cv/test_lite_hardnet.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/hardnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/hardnet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_hardnet.jpg"; lite::cv::classification::HdrDNet *hardnet = new lite::cv::classification::HdrDNet(onnx_path); @@ -36,7 +36,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/hardnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/hardnet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_hardnet.jpg"; lite::onnxruntime::cv::classification::HdrDNet *hardnet = @@ -68,7 +68,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/hardnet.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/hardnet.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_hardnet.jpg"; lite::mnn::cv::classification::HdrDNet 
*hardnet = @@ -100,8 +100,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/hardnet.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/hardnet.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/hardnet.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/hardnet.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_hardnet.jpg"; lite::ncnn::cv::classification::HdrDNet *hardnet = @@ -134,8 +134,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/hardnet.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/hardnet.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/hardnet.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/hardnet.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_hardnet.jpg"; lite::tnn::cv::classification::HdrDNet *hardnet = diff --git a/examples/lite/cv/test_lite_head_seg.cpp b/examples/lite/cv/test_lite_head_seg.cpp index 8ee8998e..04ac3a03 100644 --- a/examples/lite/cv/test_lite_head_seg.cpp +++ b/examples/lite/cv/test_lite_head_seg.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/minivision_head_seg.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg.png"; - std::string save_img_path = "../../../logs/test_lite_head_seg.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_head_seg.jpg"; lite::cv::segmentation::HeadSeg *head_seg = new lite::cv::segmentation::HeadSeg(onnx_path, 4); // 4 threads @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/minivision_head_seg.onnx"; + 
std::string onnx_path = "../../../examples/hub/onnx/cv/minivision_head_seg.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = "../../../logs/test_lite_head_seg_1_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_head_seg_1_onnx.jpg"; lite::onnxruntime::cv::segmentation::HeadSeg *head_seg = new lite::onnxruntime::cv::segmentation::HeadSeg(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/minivision_head_seg.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/minivision_head_seg.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = "../../../logs/test_lite_head_seg_1_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_head_seg_1_mnn.jpg"; lite::mnn::cv::segmentation::HeadSeg *head_seg = new lite::mnn::cv::segmentation::HeadSeg(mnn_path, 4); // 4 threads @@ -84,10 +84,10 @@ static void test_tnn() { #ifdef ENABLE_TNN // WARN: BAD RESULTS !!!! 
- std::string proto_path = "../../../hub/tnn/cv/minivision_head_seg.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/minivision_head_seg.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/minivision_head_seg.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/minivision_head_seg.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = "../../../logs/test_lite_head_seg_1_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_head_seg_1_tnn.jpg"; lite::tnn::cv::segmentation::HeadSeg *head_seg = new lite::tnn::cv::segmentation::HeadSeg(proto_path, model_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_ibnnet.cpp b/examples/lite/cv/test_lite_ibnnet.cpp index 1b25c192..63d65f48 100644 --- a/examples/lite/cv/test_lite_ibnnet.cpp +++ b/examples/lite/cv/test_lite_ibnnet.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ibnnet18.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ibnnet18.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ibnnet.jpg"; lite::cv::classification::IBNNet *ibnnet = new lite::cv::classification::IBNNet(onnx_path); @@ -36,7 +36,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ibnnet18.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ibnnet18.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ibnnet.jpg"; lite::onnxruntime::cv::classification::IBNNet *ibnnet = @@ -68,7 +68,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/ibnnet18.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/ibnnet18.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ibnnet.jpg"; 
lite::mnn::cv::classification::IBNNet *ibnnet = @@ -100,8 +100,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/ibnnet18.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/ibnnet18.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/ibnnet18.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/ibnnet18.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ibnnet.jpg"; lite::ncnn::cv::classification::IBNNet *ibnnet = @@ -133,8 +133,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/ibnnet18.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/ibnnet18.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/ibnnet18.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/ibnnet18.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ibnnet.jpg"; lite::tnn::cv::classification::IBNNet *ibnnet = diff --git a/examples/lite/cv/test_lite_insectdet.cpp b/examples/lite/cv/test_lite_insectdet.cpp index 42298cb7..3c503202 100644 --- a/examples/lite/cv/test_lite_insectdet.cpp +++ b/examples/lite/cv/test_lite_insectdet.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/quarrying_insect_detector.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/quarrying_insect_detector.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insect.jpg"; - std::string save_img_path = "../../../logs/test_lite_insectdet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_insectdet.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::InsectDet *insectdet = new lite::cv::detection::InsectDet(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/quarrying_insect_detector.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/quarrying_insect_detector.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insect.jpg"; - std::string save_img_path = "../../../logs/test_lite_insectdet_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_insectdet_onnx.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::InsectDet *insectdet = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/quarrying_insect_detector.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/quarrying_insect_detector.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insect.jpg"; - std::string save_img_path = "../../../logs/test_lite_insectdet_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_insectdet_mnn.jpg"; // 3. 
Test Specific Engine MNN lite::mnn::cv::detection::InsectDet *insectdet = @@ -86,10 +86,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/quarrying_insect_detector.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/quarrying_insect_detector.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/quarrying_insect_detector.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/quarrying_insect_detector.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insect.jpg"; - std::string save_img_path = "../../../logs/test_lite_insectdet_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_insectdet_tnn.jpg"; // 5. Test Specific Engine TNN lite::tnn::cv::detection::InsectDet *insectdet = diff --git a/examples/lite/cv/test_lite_insectid.cpp b/examples/lite/cv/test_lite_insectid.cpp index c46baeb3..3f79e5ae 100644 --- a/examples/lite/cv/test_lite_insectid.cpp +++ b/examples/lite/cv/test_lite_insectid.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/quarrying_insect_identifier.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/quarrying_insect_identifier.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insectid.jpg"; lite::cv::classification::InsectID *insectid = new lite::cv::classification::InsectID(onnx_path); @@ -36,7 +36,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/quarrying_insect_identifier.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/quarrying_insect_identifier.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insectid.jpg"; lite::onnxruntime::cv::classification::InsectID *insectid = @@ -68,7 +68,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string 
mnn_path = "../../../hub/mnn/cv/quarrying_insect_identifier.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/quarrying_insect_identifier.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insectid.jpg"; lite::mnn::cv::classification::InsectID *insectid = @@ -100,8 +100,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/quarrying_insect_identifier.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/quarrying_insect_identifier.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/quarrying_insect_identifier.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/quarrying_insect_identifier.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insectid.jpg"; lite::ncnn::cv::classification::InsectID *insectid = @@ -133,8 +133,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/quarrying_insect_identifier.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/quarrying_insect_identifier.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/quarrying_insect_identifier.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/quarrying_insect_identifier.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_insectid.jpg"; lite::tnn::cv::classification::InsectID *insectid = diff --git a/examples/lite/cv/test_lite_mg_matting.cpp b/examples/lite/cv/test_lite_mg_matting.cpp index 105e0b60..6bf892f7 100644 --- a/examples/lite/cv/test_lite_mg_matting.cpp +++ b/examples/lite/cv/test_lite_mg_matting.cpp @@ -6,12 +6,12 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/MGMatting-DIM-100k.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/MGMatting-DIM-100k.onnx"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_mg_matting_input.jpg"; std::string test_mask_path = "../../../examples/lite/resources/test_lite_mg_matting_mask.png"; - std::string save_fgr_path = "../../../logs/test_lite_mg_matting_fgr.jpg"; - std::string save_pha_path = "../../../logs/test_lite_mg_matting_pha.jpg"; - std::string save_merge_path = "../../../logs/test_lite_mg_matting_merge.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_mg_matting_fgr.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_mg_matting_pha.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_mg_matting_merge.jpg"; lite::cv::matting::MGMatting *mgmatting = new lite::cv::matting::MGMatting(onnx_path, 16); // 16 threads @@ -37,12 +37,12 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/MGMatting-DIM-100k.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/MGMatting-DIM-100k.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_mg_matting_input.jpg"; std::string test_mask_path = "../../../examples/lite/resources/test_lite_mg_matting_mask.png"; - std::string save_fgr_path = "../../../logs/test_lite_mg_matting_fgr_onnx.jpg"; - std::string save_pha_path = "../../../logs/test_lite_mg_matting_pha_onnx.jpg"; - std::string save_merge_path = "../../../logs/test_lite_mg_matting_merge_onnx.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_mg_matting_fgr_onnx.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_mg_matting_pha_onnx.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_mg_matting_merge_onnx.jpg"; lite::onnxruntime::cv::matting::MGMatting *mgmatting = new lite::onnxruntime::cv::matting::MGMatting(onnx_path, 16); // 16 threads @@ -69,12 +69,12 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = 
"../../../hub/mnn/cv/MGMatting-DIM-100k.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/MGMatting-DIM-100k.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_mg_matting_input.jpg"; std::string test_mask_path = "../../../examples/lite/resources/test_lite_mg_matting_mask.png"; - std::string save_fgr_path = "../../../logs/test_lite_mg_matting_fgr_mnn.jpg"; - std::string save_pha_path = "../../../logs/test_lite_mg_matting_pha_mnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_mg_matting_merge_mnn.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_mg_matting_fgr_mnn.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_mg_matting_pha_mnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_mg_matting_merge_mnn.jpg"; lite::mnn::cv::matting::MGMatting *mgmatting = new lite::mnn::cv::matting::MGMatting(mnn_path, 16); // 16 threads @@ -107,13 +107,13 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/MGMatting-DIM-100k.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/MGMatting-DIM-100k.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/MGMatting-DIM-100k.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/MGMatting-DIM-100k.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_mg_matting_input.jpg"; std::string test_mask_path = "../../../examples/lite/resources/test_lite_mg_matting_mask.png"; - std::string save_fgr_path = "../../../logs/test_lite_mg_matting_fgr_tnn.jpg"; - std::string save_pha_path = "../../../logs/test_lite_mg_matting_pha_tnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_mg_matting_merge_tnn.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_mg_matting_fgr_tnn.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_mg_matting_pha_tnn.jpg"; + 
std::string save_merge_path = "../../../examples/logs/test_lite_mg_matting_merge_tnn.jpg"; lite::tnn::cv::matting::MGMatting *mgmatting = new lite::tnn::cv::matting::MGMatting(proto_path, model_path, 16); // 16 threads diff --git a/examples/lite/cv/test_lite_mobile_emotion7.cpp b/examples/lite/cv/test_lite_mobile_emotion7.cpp index e891077e..762369c6 100644 --- a/examples/lite/cv/test_lite_mobile_emotion7.cpp +++ b/examples/lite/cv/test_lite_mobile_emotion7.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face-emotion-recognition-mobilenet_7.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face-emotion-recognition-mobilenet_7.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_mobile_emotion7.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_emotion7.jpg"; lite::cv::face::attr::MobileEmotion7 *mobile_emotion7 = new lite::cv::face::attr::MobileEmotion7(onnx_path); @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/face-emotion-recognition-mobilenet_7.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face-emotion-recognition-mobilenet_7.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_mobile_emotion7.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_emotion7.jpg"; lite::onnxruntime::cv::face::attr::MobileEmotion7 *mobile_emotion7 = new lite::onnxruntime::cv::face::attr::MobileEmotion7(onnx_path); @@ -56,9 +56,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/face-emotion-recognition-mobilenet_7.mnn"; + std::string mnn_path = 
"../../../examples/hub/mnn/cv/face-emotion-recognition-mobilenet_7.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_mobile_emotion7_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_emotion7_mnn.jpg"; lite::mnn::cv::face::attr::MobileEmotion7 *mobile_emotion7 = new lite::mnn::cv::face::attr::MobileEmotion7(mnn_path); @@ -81,10 +81,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/face-emotion-recognition-mobilenet_7.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/face-emotion-recognition-mobilenet_7.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/face-emotion-recognition-mobilenet_7.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/face-emotion-recognition-mobilenet_7.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_mobile_emotion7_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_emotion7_ncnn.jpg"; lite::ncnn::cv::face::attr::MobileEmotion7 *mobile_emotion7 = new lite::ncnn::cv::face::attr::MobileEmotion7(param_path, bin_path); @@ -107,10 +107,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/face-emotion-recognition-mobilenet_7.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/face-emotion-recognition-mobilenet_7.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/face-emotion-recognition-mobilenet_7.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/face-emotion-recognition-mobilenet_7.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = 
"../../../logs/test_lite_mobile_emotion7_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_emotion7_tnn.jpg"; lite::tnn::cv::face::attr::MobileEmotion7 *mobile_emotion7 = new lite::tnn::cv::face::attr::MobileEmotion7(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_mobile_facenet.cpp b/examples/lite/cv/test_lite_mobile_facenet.cpp index 6e3f5abe..f2b41536 100644 --- a/examples/lite/cv/test_lite_mobile_facenet.cpp +++ b/examples/lite/cv/test_lite_mobile_facenet.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/MobileFaceNet_Pytorch_068.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/MobileFaceNet_Pytorch_068.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/MobileFaceNet_Pytorch_068.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/MobileFaceNet_Pytorch_068.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/MobileFaceNet_Pytorch_068.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/MobileFaceNet_Pytorch_068.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/MobileFaceNet_Pytorch_068.opt.param"; - std::string bin_path = 
"../../../hub/ncnn/cv/MobileFaceNet_Pytorch_068.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/MobileFaceNet_Pytorch_068.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/MobileFaceNet_Pytorch_068.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/MobileFaceNet_Pytorch_068.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/MobileFaceNet_Pytorch_068.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/MobileFaceNet_Pytorch_068.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/MobileFaceNet_Pytorch_068.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_mobile_hair_seg.cpp b/examples/lite/cv/test_lite_mobile_hair_seg.cpp index 32012ae2..9728bf00 100644 --- a/examples/lite/cv/test_lite_mobile_hair_seg.cpp +++ b/examples/lite/cv/test_lite_mobile_hair_seg.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/mobile_hair_seg_hairmattenetv1_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/mobile_hair_seg_hairmattenetv1_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_mobile_hair_seg.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_hair_seg.jpg"; lite::cv::segmentation::MobileHairSeg *mobile_hair_seg = new lite::cv::segmentation::MobileHairSeg(onnx_path, 4); // 4 threads @@ -29,9 +29,9 @@ static void test_default() static void 
test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/mobile_hair_seg_hairmattenetv1_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/mobile_hair_seg_hairmattenetv1_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_mobile_hair_seg_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_hair_seg_onnx.jpg"; lite::onnxruntime::cv::segmentation::MobileHairSeg *mobile_hair_seg = new lite::onnxruntime::cv::segmentation::MobileHairSeg(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/mobile_hair_seg_hairmattenetv1_224x224.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/mobile_hair_seg_hairmattenetv1_224x224.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_mobile_hair_seg_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_hair_seg_mnn.jpg"; lite::mnn::cv::segmentation::MobileHairSeg *mobile_hair_seg = new lite::mnn::cv::segmentation::MobileHairSeg(mnn_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_mobile_human_matting.cpp b/examples/lite/cv/test_lite_mobile_human_matting.cpp index ecd18860..020a12e8 100644 --- a/examples/lite/cv/test_lite_mobile_human_matting.cpp +++ b/examples/lite/cv/test_lite_mobile_human_matting.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/mobile_human_matting_256x256.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/mobile_human_matting_256x256.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_mobile_human_matting.jpg"; + 
std::string save_img_path = "../../../examples/logs/test_lite_mobile_human_matting.jpg"; lite::cv::matting::MobileHumanMatting *mobile_human_matting = new lite::cv::matting::MobileHumanMatting(onnx_path, 4); // 4 threads @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/mobile_human_matting_256x256.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/mobile_human_matting_256x256.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_mobile_human_matting_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_human_matting_onnx.jpg"; lite::onnxruntime::cv::matting::MobileHumanMatting *mobile_human_matting = new lite::onnxruntime::cv::matting::MobileHumanMatting(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/mobile_human_matting_256x256.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/mobile_human_matting_256x256.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_hair.png"; - std::string save_img_path = "../../../logs/test_lite_mobile_human_matting_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobile_human_matting_mnn.jpg"; lite::mnn::cv::matting::MobileHumanMatting *mobile_human_matting = new lite::mnn::cv::matting::MobileHumanMatting(mnn_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_mobilenetv2.cpp b/examples/lite/cv/test_lite_mobilenetv2.cpp index ef9d9052..f18da66c 100644 --- a/examples/lite/cv/test_lite_mobilenetv2.cpp +++ b/examples/lite/cv/test_lite_mobilenetv2.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/mobilenetv2.onnx"; + std::string onnx_path = 
"../../../examples/hub/onnx/cv/mobilenetv2.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_mobilenetv2.jpg"; lite::cv::classification::MobileNetV2 *mobilenetv2 = new lite::cv::classification::MobileNetV2(onnx_path); @@ -36,7 +36,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/mobilenetv2.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/mobilenetv2.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_mobilenetv2.jpg"; lite::onnxruntime::cv::classification::MobileNetV2 *mobilenetv2 = @@ -68,7 +68,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/mobilenetv2.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/mobilenetv2.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_mobilenetv2.jpg"; lite::mnn::cv::classification::MobileNetV2 *mobilenetv2 = @@ -100,8 +100,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/mobilenetv2.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/mobilenetv2.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/mobilenetv2.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/mobilenetv2.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_mobilenetv2.jpg"; lite::ncnn::cv::classification::MobileNetV2 *mobilenetv2 = @@ -133,8 +133,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/mobilenetv2.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/mobilenetv2.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/mobilenetv2.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/mobilenetv2.opt.tnnmodel"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_mobilenetv2.jpg"; lite::tnn::cv::classification::MobileNetV2 *mobilenetv2 = diff --git a/examples/lite/cv/test_lite_mobilenetv2_68.cpp b/examples/lite/cv/test_lite_mobilenetv2_68.cpp index fdc626b1..f882ab5c 100644 --- a/examples/lite/cv/test_lite_mobilenetv2_68.cpp +++ b/examples/lite/cv/test_lite_mobilenetv2_68.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/pytorch_face_landmarks_landmark_detection_56.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pytorch_face_landmarks_landmark_detection_56.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_lite_mobilenetv2_68.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobilenetv2_68.jpg"; lite::cv::face::align::MobileNetV268 *mobilenetv2_68 = new lite::cv::face::align::MobileNetV268(onnx_path); @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/pytorch_face_landmarks_landmark_detection_56.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pytorch_face_landmarks_landmark_detection_56.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_mobilenetv2_68_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_mobilenetv2_68_onnx.jpg"; lite::onnxruntime::cv::face::align::MobileNetV268 *mobilenetv2_68 = new lite::onnxruntime::cv::face::align::MobileNetV268(onnx_path); @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/pytorch_face_landmarks_landmark_detection_56.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/pytorch_face_landmarks_landmark_detection_56.mnn"; std::string 
test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_mobilenetv2_68_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_mobilenetv2_68_mnn.jpg"; lite::mnn::cv::face::align::MobileNetV268 *mobilenetv2_68 = new lite::mnn::cv::face::align::MobileNetV268(mnn_path); @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/pytorch_face_landmarks_landmark_detection_56.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/pytorch_face_landmarks_landmark_detection_56.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/pytorch_face_landmarks_landmark_detection_56.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/pytorch_face_landmarks_landmark_detection_56.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_mobilenetv2_68_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_mobilenetv2_68_ncnn.jpg"; lite::ncnn::cv::face::align::MobileNetV268 *mobilenetv2_68 = new lite::ncnn::cv::face::align::MobileNetV268(param_path, bin_path); @@ -106,10 +106,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/pytorch_face_landmarks_landmark_detection_56.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/pytorch_face_landmarks_landmark_detection_56.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/pytorch_face_landmarks_landmark_detection_56.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/pytorch_face_landmarks_landmark_detection_56.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_mobilenetv2_68_tnn.jpg"; + std::string save_img_path 
= "../../../examples/logs/test_mobilenetv2_68_tnn.jpg"; lite::tnn::cv::face::align::MobileNetV268 *mobilenetv2_68 = new lite::tnn::cv::face::align::MobileNetV268(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_mobilenetv2_se_68.cpp b/examples/lite/cv/test_lite_mobilenetv2_se_68.cpp index 35ccad56..3022be9c 100644 --- a/examples/lite/cv/test_lite_mobilenetv2_se_68.cpp +++ b/examples/lite/cv/test_lite_mobilenetv2_se_68.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/pytorch_face_landmarks_landmark_detection_56_se_external.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pytorch_face_landmarks_landmark_detection_56_se_external.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_lite_mobilenetv2_se_68.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_mobilenetv2_se_68.jpg"; lite::cv::face::align::MobileNetV2SE68 *mobilenetv2_se_68 = new lite::cv::face::align::MobileNetV2SE68(onnx_path); @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/pytorch_face_landmarks_landmark_detection_56_se_external.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pytorch_face_landmarks_landmark_detection_56_se_external.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_mobilenetv2_se_68_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_mobilenetv2_se_68_onnx.jpg"; lite::onnxruntime::cv::face::align::MobileNetV2SE68 *mobilenetv2_se_68 = new lite::onnxruntime::cv::face::align::MobileNetV2SE68(onnx_path); @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = 
"../../../hub/mnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_mobilenetv2_se_68_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_mobilenetv2_se_68_mnn.jpg"; lite::mnn::cv::face::align::MobileNetV2SE68 *mobilenetv2_se_68 = new lite::mnn::cv::face::align::MobileNetV2SE68(mnn_path); @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_mobilenetv2_se_68_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_mobilenetv2_se_68_ncnn.jpg"; lite::ncnn::cv::face::align::MobileNetV2SE68 *mobilenetv2_se_68 = new lite::ncnn::cv::face::align::MobileNetV2SE68(param_path, bin_path); @@ -106,10 +106,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.opt.tnnmodel"; + std::string proto_path = 
"../../../examples/hub/tnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/pytorch_face_landmarks_landmark_detection_56_se_external.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_mobilenetv2_68_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_mobilenetv2_68_tnn.jpg"; lite::tnn::cv::face::align::MobileNetV2SE68 *mobilenetv2_se_68 = new lite::tnn::cv::face::align::MobileNetV2SE68(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_mobilese_focal_face.cpp b/examples/lite/cv/test_lite_mobilese_focal_face.cpp index 3dadae29..8c730f99 100644 --- a/examples/lite/cv/test_lite_mobilese_focal_face.cpp +++ b/examples/lite/cv/test_lite_mobilese_focal_face.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -32,7 +32,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -59,7 +59,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = 
"../../../hub/mnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -86,8 +86,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -114,8 +114,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/face_recognition.pytorch_Mobilenet_se_focal_121000.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_modnet.cpp b/examples/lite/cv/test_lite_modnet.cpp index 181a65b5..4eafc33d 100644 --- a/examples/lite/cv/test_lite_modnet.cpp +++ 
b/examples/lite/cv/test_lite_modnet.cpp @@ -6,13 +6,13 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/modnet_photographic_portrait_matting-512x512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/modnet_photographic_portrait_matting-512x512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_matting_input.jpg"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string save_fgr_path = "../../../logs/test_lite_modnet_fgr.jpg"; - std::string save_pha_path = "../../../logs/test_lite_modnet_pha.jpg"; - std::string save_merge_path = "../../../logs/test_lite_modnet_merge.jpg"; - std::string save_swap_path = "../../../logs/test_lite_modnet_swap.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_modnet_fgr.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_modnet_pha.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_modnet_merge.jpg"; + std::string save_swap_path = "../../../examples/logs/test_lite_modnet_swap.jpg"; lite::cv::matting::MODNet *modnet = new lite::cv::matting::MODNet(onnx_path, 16); // 16 threads @@ -51,13 +51,13 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/modnet_photographic_portrait_matting-512x512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/modnet_photographic_portrait_matting-512x512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_matting_input.jpg"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string save_fgr_path = "../../../logs/test_lite_modnet_fgr_onnx.jpg"; - std::string save_pha_path = "../../../logs/test_lite_modnet_pha_onnx.jpg"; - std::string save_merge_path = "../../../logs/test_lite_modnet_merge_onnx.jpg"; - std::string save_swap_path = 
"../../../logs/test_lite_modnet_swap_onnx.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_modnet_fgr_onnx.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_modnet_pha_onnx.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_modnet_merge_onnx.jpg"; + std::string save_swap_path = "../../../examples/logs/test_lite_modnet_swap_onnx.jpg"; lite::onnxruntime::cv::matting::MODNet *modnet = new lite::onnxruntime::cv::matting::MODNet(onnx_path, 16); // 16 threads @@ -97,13 +97,13 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/modnet_photographic_portrait_matting-512x512.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/modnet_photographic_portrait_matting-512x512.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_matting_input.jpg"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string save_fgr_path = "../../../logs/test_lite_modnet_fgr_mnn.jpg"; - std::string save_pha_path = "../../../logs/test_lite_modnet_pha_mnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_modnet_merge_mnn.jpg"; - std::string save_swap_path = "../../../logs/test_lite_modnet_swap_mnn.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_modnet_fgr_mnn.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_modnet_pha_mnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_modnet_merge_mnn.jpg"; + std::string save_swap_path = "../../../examples/logs/test_lite_modnet_swap_mnn.jpg"; lite::mnn::cv::matting::MODNet *modnet = new lite::mnn::cv::matting::MODNet(mnn_path, 16); // 16 threads @@ -142,14 +142,14 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string proto_path = "../../../hub/ncnn/cv/modnet_photographic_portrait_matting-512x512.opt.param"; - std::string bin_path = 
"../../../hub/ncnn/cv/modnet_photographic_portrait_matting-512x512.opt.bin"; + std::string proto_path = "../../../examples/hub/ncnn/cv/modnet_photographic_portrait_matting-512x512.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/modnet_photographic_portrait_matting-512x512.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_matting_input.jpg"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string save_fgr_path = "../../../logs/test_lite_modnet_fgr_ncnn.jpg"; - std::string save_pha_path = "../../../logs/test_lite_modnet_pha_ncnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_modnet_merge_ncnn.jpg"; - std::string save_swap_path = "../../../logs/test_lite_modnet_swap_ncnn.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_modnet_fgr_ncnn.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_modnet_pha_ncnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_modnet_merge_ncnn.jpg"; + std::string save_swap_path = "../../../examples/logs/test_lite_modnet_swap_ncnn.jpg"; lite::ncnn::cv::matting::MODNet *modnet = new lite::ncnn::cv::matting::MODNet(proto_path, bin_path, 16, 512, 512); // 16 threads @@ -188,14 +188,14 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/modnet_photographic_portrait_matting-512x512.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/modnet_photographic_portrait_matting-512x512.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/modnet_photographic_portrait_matting-512x512.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/modnet_photographic_portrait_matting-512x512.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_matting_input.jpg"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string 
save_fgr_path = "../../../logs/test_lite_modnet_fgr_tnn.jpg"; - std::string save_pha_path = "../../../logs/test_lite_modnet_pha_tnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_modnet_merge_tnn.jpg"; - std::string save_swap_path = "../../../logs/test_lite_modnet_swap_tnn.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_modnet_fgr_tnn.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_modnet_pha_tnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_modnet_merge_tnn.jpg"; + std::string save_swap_path = "../../../examples/logs/test_lite_modnet_swap_tnn.jpg"; lite::tnn::cv::matting::MODNet *modnet = new lite::tnn::cv::matting::MODNet(proto_path, model_path, 16); // 16 threads diff --git a/examples/lite/cv/test_lite_modnet_dyn.cpp b/examples/lite/cv/test_lite_modnet_dyn.cpp index d6f1ba25..70f5f97f 100644 --- a/examples/lite/cv/test_lite_modnet_dyn.cpp +++ b/examples/lite/cv/test_lite_modnet_dyn.cpp @@ -6,13 +6,13 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/modnet_photographic_portrait_matting.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/modnet_photographic_portrait_matting.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_matting_input.jpg"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string save_fgr_path = "../../../logs/test_lite_modnet_dyn_fgr.jpg"; - std::string save_pha_path = "../../../logs/test_lite_modnet_dyn_pha.jpg"; - std::string save_merge_path = "../../../logs/test_lite_modnet_dyn_merge.jpg"; - std::string save_swap_path = "../../../logs/test_lite_modnet_dyn_swap.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_modnet_dyn_fgr.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_modnet_dyn_pha.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_modnet_dyn_merge.jpg"; + std::string 
save_swap_path = "../../../examples/logs/test_lite_modnet_dyn_swap.jpg"; lite::cv::matting::MODNetDyn *modnet_dyn = new lite::cv::matting::MODNetDyn(onnx_path, 4); // 4 threads @@ -51,13 +51,13 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/modnet_photographic_portrait_matting.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/modnet_photographic_portrait_matting.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_matting_input.jpg"; std::string test_bgr_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string save_fgr_path = "../../../logs/test_lite_modnet_dyn_fgr_onnx.jpg"; - std::string save_pha_path = "../../../logs/test_lite_modnet_dyn_pha_onnx.jpg"; - std::string save_merge_path = "../../../logs/test_lite_modnet_dyn_merge_onnx.jpg"; - std::string save_swap_path = "../../../logs/test_lite_modnet_dyn_swap_onnx.jpg"; + std::string save_fgr_path = "../../../examples/logs/test_lite_modnet_dyn_fgr_onnx.jpg"; + std::string save_pha_path = "../../../examples/logs/test_lite_modnet_dyn_pha_onnx.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_modnet_dyn_merge_onnx.jpg"; + std::string save_swap_path = "../../../examples/logs/test_lite_modnet_dyn_swap_onnx.jpg"; lite::onnxruntime::cv::matting::MODNetDyn *modnet_dyn = new lite::onnxruntime::cv::matting::MODNetDyn(onnx_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_naive_pose_robust_face.cpp b/examples/lite/cv/test_lite_naive_pose_robust_face.cpp index 517033b9..affa1a9f 100644 --- a/examples/lite/cv/test_lite_naive_pose_robust_face.cpp +++ b/examples/lite/cv/test_lite_naive_pose_robust_face.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/dream_ijba_res18_naive.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/dream_ijba_res18_naive.onnx"; std::string test_img_path0 = 
"../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -32,7 +32,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/dream_ijba_res18_naive.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/dream_ijba_res18_naive.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_nanodet.cpp b/examples/lite/cv/test_lite_nanodet.cpp index aee65e15..31ab2add 100644 --- a/examples/lite/cv/test_lite_nanodet.cpp +++ b/examples/lite/cv/test_lite_nanodet.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/nanodet_m.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet_m.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::NanoDet *nanodet = new lite::cv::detection::NanoDet(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/nanodet_m.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet_m.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_2.jpg"; // 2. 
Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::NanoDet *nanodet = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/nanodet_m.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/nanodet_m.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_mnn_2.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::NanoDet *nanodet = @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/nanodet_m-opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/nanodet_m-opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/nanodet_m-opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/nanodet_m-opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_ncnn_2.jpg"; // 4. 
Test Specific Engine NCNN lite::ncnn::cv::detection::NanoDet *nanodet = @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/nanodet_m.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/nanodet_m.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/nanodet_m.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/nanodet_m.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_tnn_2.jpg"; // 4. Test Specific Engine TNN lite::tnn::cv::detection::NanoDet *nanodet = diff --git a/examples/lite/cv/test_lite_nanodet_depreciated.cpp b/examples/lite/cv/test_lite_nanodet_depreciated.cpp index 25b07aa6..0a81e737 100644 --- a/examples/lite/cv/test_lite_nanodet_depreciated.cpp +++ b/examples/lite/cv/test_lite_nanodet_depreciated.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/nanodet_m.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet_m.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::NanoDet *nanodet = new lite::cv::detection::NanoDet(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/nanodet_m.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet_m.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::NanoDet *nanodet = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/nanodet_m.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/nanodet_m.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_mnn_2.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::NanoDet *nanodet = @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/nanodet_m-depreciated-opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/nanodet_m-depreciated-opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/nanodet_m-depreciated-opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/nanodet_m-depreciated-opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_depreciated_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_depreciated_ncnn_2.jpg"; // 4. 
Test Specific Engine NCNN lite::ncnn::cv::detection::NanoDetDepreciated *nanodet = diff --git a/examples/lite/cv/test_lite_nanodet_efficientnet_lite.cpp b/examples/lite/cv/test_lite_nanodet_efficientnet_lite.cpp index e895b494..1c000bfe 100644 --- a/examples/lite/cv/test_lite_nanodet_efficientnet_lite.cpp +++ b/examples/lite/cv/test_lite_nanodet_efficientnet_lite.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/nanodet-EfficientNet-Lite2_512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet-EfficientNet-Lite2_512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::NanoDetEfficientNetLite *nanodet = @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/nanodet-EfficientNet-Lite2_512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet-EfficientNet-Lite2_512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_2.jpg"; // 2. 
Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::NanoDetEfficientNetLite *nanodet = @@ -56,9 +56,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/nanodet-EfficientNet-Lite2_512.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/nanodet-EfficientNet-Lite2_512.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_mnn_2.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::NanoDetEfficientNetLite *nanodet = @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/nanodet-EfficientNet-Lite2_512-opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/nanodet-EfficientNet-Lite2_512-opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/nanodet-EfficientNet-Lite2_512-opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/nanodet-EfficientNet-Lite2_512-opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_ncnn_2.jpg"; // 4. 
Test Specific Engine NCNN lite::ncnn::cv::detection::NanoDetEfficientNetLite *nanodet = @@ -106,10 +106,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/nanodet-EfficientNet-Lite2_512.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/nanodet-EfficientNet-Lite2_512.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/nanodet-EfficientNet-Lite2_512.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/nanodet-EfficientNet-Lite2_512.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_tnn_2.jpg"; // 4. Test Specific Engine TNN lite::tnn::cv::detection::NanoDetEfficientNetLite *nanodet = diff --git a/examples/lite/cv/test_lite_nanodet_efficientnet_lite_depreciated.cpp b/examples/lite/cv/test_lite_nanodet_efficientnet_lite_depreciated.cpp index 06290ebe..ec4e1ef1 100644 --- a/examples/lite/cv/test_lite_nanodet_efficientnet_lite_depreciated.cpp +++ b/examples/lite/cv/test_lite_nanodet_efficientnet_lite_depreciated.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/nanodet-EfficientNet-Lite2_512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet-EfficientNet-Lite2_512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::NanoDetEfficientNetLite *nanodet = @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/nanodet-EfficientNet-Lite2_512.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet-EfficientNet-Lite2_512.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::NanoDetEfficientNetLite *nanodet = @@ -56,9 +56,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/nanodet-EfficientNet-Lite2_512.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/nanodet-EfficientNet-Lite2_512.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_mnn_2.jpg"; // 3. 
Test Specific Engine MNN lite::mnn::cv::detection::NanoDetEfficientNetLite *nanodet = @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/nanodet-EfficientNet-Lite2_512-depreciated-opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/nanodet-EfficientNet-Lite2_512-depreciated-opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/nanodet-EfficientNet-Lite2_512-depreciated-opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/nanodet-EfficientNet-Lite2_512-depreciated-opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_efficientnet_lite_depreciated_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_efficientnet_lite_depreciated_ncnn_2.jpg"; // 4. Test Specific Engine NCNN lite::ncnn::cv::detection::NanoDetEfficientNetLiteDepreciated *nanodet = diff --git a/examples/lite/cv/test_lite_nanodet_plus.cpp b/examples/lite/cv/test_lite_nanodet_plus.cpp index e48c488e..c7e05984 100644 --- a/examples/lite/cv/test_lite_nanodet_plus.cpp +++ b/examples/lite/cv/test_lite_nanodet_plus.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/nanodet-plus-m-1.5x_320.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet-plus-m-1.5x_320.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_plus_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_plus_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::NanoDetPlus *nanodet_plus = new lite::cv::detection::NanoDetPlus(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/nanodet-plus-m-1.5x_320.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/nanodet-plus-m-1.5x_320.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_plus_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_plus_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::NanoDetPlus *nanodet_plus = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/nanodet-plus-m-1.5x_320.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/nanodet-plus-m-1.5x_320.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_plus_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_plus_mnn_2.jpg"; // 3. 
Test Specific Engine MNN lite::mnn::cv::detection::NanoDetPlus *nanodet_plus = @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/nanodet-plus-m-1.5x_320.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/nanodet-plus-m-1.5x_320.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/nanodet-plus-m-1.5x_320.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/nanodet-plus-m-1.5x_320.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_plus_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_plus_ncnn_2.jpg"; // 4. Test Specific Engine NCNN lite::ncnn::cv::detection::NanoDetPlus *nanodet_plus = @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/nanodet-plus-m-1.5x_320.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/nanodet-plus-m-1.5x_320.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/nanodet-plus-m-1.5x_320.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/nanodet-plus-m-1.5x_320.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_nanodet_plus_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_nanodet_plus_tnn_2.jpg"; // 4. 
Test Specific Engine TNN lite::tnn::cv::detection::NanoDetPlus *nanodet_plus = diff --git a/examples/lite/cv/test_lite_pfld.cpp b/examples/lite/cv/test_lite_pfld.cpp index 62263b1b..936072af 100644 --- a/examples/lite/cv/test_lite_pfld.cpp +++ b/examples/lite/cv/test_lite_pfld.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/pfld-106-v3.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pfld-106-v3.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_pfld.png"; - std::string save_img_path = "../../../logs/test_lite_pfld.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pfld.jpg"; lite::cv::face::align::PFLD *pfld = new lite::cv::face::align::PFLD(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/pfld-106-v3.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pfld-106-v3.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_pfld.png"; - std::string save_img_path = "../../../logs/test_pfld_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld_onnx.jpg"; lite::onnxruntime::cv::face::align::PFLD *pfld = new lite::onnxruntime::cv::face::align::PFLD(onnx_path); @@ -54,9 +54,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/pfld-106-v3.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/pfld-106-v3.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_pfld.png"; - std::string save_img_path = "../../../logs/test_pfld_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld_mnn.jpg"; lite::mnn::cv::face::align::PFLD *pfld = new lite::mnn::cv::face::align::PFLD(mnn_path); @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string 
param_path = "../../../hub/ncnn/cv/pfld-106-v3.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/pfld-106-v3.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/pfld-106-v3.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/pfld-106-v3.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_pfld.png"; - std::string save_img_path = "../../../logs/test_pfld_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld_ncnn.jpg"; lite::ncnn::cv::face::align::PFLD *pfld = new lite::ncnn::cv::face::align::PFLD(param_path, bin_path); @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/pfld-106-v3.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/pfld-106-v3.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/pfld-106-v3.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/pfld-106-v3.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_pfld.png"; - std::string save_img_path = "../../../logs/test_pfld_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld_tnn.jpg"; lite::tnn::cv::face::align::PFLD *pfld = new lite::tnn::cv::face::align::PFLD(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_pfld68.cpp b/examples/lite/cv/test_lite_pfld68.cpp index 3de62c47..6c832e2d 100644 --- a/examples/lite/cv/test_lite_pfld68.cpp +++ b/examples/lite/cv/test_lite_pfld68.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/pytorch_face_landmarks_pfld.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pytorch_face_landmarks_pfld.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_lite_pfld68.jpg"; + std::string save_img_path = 
"../../../examples/logs/test_lite_pfld68.jpg"; lite::cv::face::align::PFLD68 *pfld68 = new lite::cv::face::align::PFLD68(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/pytorch_face_landmarks_pfld.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pytorch_face_landmarks_pfld.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_pfld68_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld68_onnx.jpg"; lite::onnxruntime::cv::face::align::PFLD68 *pfld68 = new lite::onnxruntime::cv::face::align::PFLD68(onnx_path); @@ -54,9 +54,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/pytorch_face_landmarks_pfld.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/pytorch_face_landmarks_pfld.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_pfld68_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld68_mnn.jpg"; lite::mnn::cv::face::align::PFLD68 *pfld68 = new lite::mnn::cv::face::align::PFLD68(mnn_path); @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/pytorch_face_landmarks_pfld.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/pytorch_face_landmarks_pfld.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/pytorch_face_landmarks_pfld.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/pytorch_face_landmarks_pfld.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_pfld68_ncnn.jpg"; + std::string save_img_path = 
"../../../examples/logs/test_pfld68_ncnn.jpg"; lite::ncnn::cv::face::align::PFLD68 *pfld68 = new lite::ncnn::cv::face::align::PFLD68(param_path, bin_path); @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/pytorch_face_landmarks_pfld.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/pytorch_face_landmarks_pfld.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/pytorch_face_landmarks_pfld.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/pytorch_face_landmarks_pfld.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_pfld68_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld68_tnn.jpg"; lite::tnn::cv::face::align::PFLD68 *pfld68 = new lite::tnn::cv::face::align::PFLD68(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_pfld98.cpp b/examples/lite/cv/test_lite_pfld98.cpp index cd4ab82d..1cd70c46 100644 --- a/examples/lite/cv/test_lite_pfld98.cpp +++ b/examples/lite/cv/test_lite_pfld98.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/PFLD-pytorch-pfld.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/PFLD-pytorch-pfld.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_lite_pfld98.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pfld98.jpg"; lite::cv::face::align::PFLD98 *pfld98 = new lite::cv::face::align::PFLD98(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/PFLD-pytorch-pfld.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/PFLD-pytorch-pfld.onnx"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_pfld98_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld98_onnx.jpg"; lite::onnxruntime::cv::face::align::PFLD98 *pfld98 = new lite::onnxruntime::cv::face::align::PFLD98(onnx_path); @@ -54,9 +54,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/PFLD-pytorch-pfld.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/PFLD-pytorch-pfld.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_pfld98_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld98_mnn.jpg"; lite::mnn::cv::face::align::PFLD98 *pfld98 = new lite::mnn::cv::face::align::PFLD98(mnn_path); @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/PFLD-pytorch-pfld.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/PFLD-pytorch-pfld.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/PFLD-pytorch-pfld.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/PFLD-pytorch-pfld.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_pfld98_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld98_ncnn.jpg"; lite::ncnn::cv::face::align::PFLD98 *pfld98 = new lite::ncnn::cv::face::align::PFLD98(param_path, bin_path); @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/PFLD-pytorch-pfld.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/PFLD-pytorch-pfld.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/PFLD-pytorch-pfld.opt.tnnproto"; + 
std::string model_path = "../../../examples/hub/tnn/cv/PFLD-pytorch-pfld.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_landmarks.png"; - std::string save_img_path = "../../../logs/test_pfld98_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_pfld98_tnn.jpg"; lite::tnn::cv::face::align::PFLD98 *pfld98 = new lite::tnn::cv::face::align::PFLD98(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_pipnet19.cpp b/examples/lite/cv/test_lite_pipnet19.cpp index 54de878f..6f4dba35 100644 --- a/examples/lite/cv/test_lite_pipnet19.cpp +++ b/examples/lite/cv/test_lite_pipnet19.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/pipnet_resnet18_10x19x32x256_aflw.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pipnet_resnet18_10x19x32x256_aflw.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet19.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet19.jpg"; lite::cv::face::align::PIPNet19 *pipnet19 = new lite::cv::face::align::PIPNet19(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/pipnet_resnet18_10x19x32x256_aflw.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pipnet_resnet18_10x19x32x256_aflw.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet19_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet19_onnx.jpg"; lite::onnxruntime::cv::face::align::PIPNet19 *pipnet19 = new lite::onnxruntime::cv::face::align::PIPNet19(onnx_path); @@ -54,9 +54,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = 
"../../../hub/mnn/cv/pipnet_resnet18_10x19x32x256_aflw.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/pipnet_resnet18_10x19x32x256_aflw.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet19_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet19_mnn.jpg"; lite::mnn::cv::face::align::PIPNet19 *pipnet19 = new lite::mnn::cv::face::align::PIPNet19(mnn_path); @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/pipnet_resnet18_10x19x32x256_aflw.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/pipnet_resnet18_10x19x32x256_aflw.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/pipnet_resnet18_10x19x32x256_aflw.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/pipnet_resnet18_10x19x32x256_aflw.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet19_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet19_ncnn.jpg"; lite::ncnn::cv::face::align::PIPNet19 *pipnet19 = new lite::ncnn::cv::face::align::PIPNet19(param_path, bin_path); @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/pipnet_resnet18_10x19x32x256_aflw.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/pipnet_resnet18_10x19x32x256_aflw.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/pipnet_resnet18_10x19x32x256_aflw.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/pipnet_resnet18_10x19x32x256_aflw.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = 
"../../../logs/test_lite_pipnet19_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet19_tnn.jpg"; lite::tnn::cv::face::align::PIPNet19 *pipnet19 = new lite::tnn::cv::face::align::PIPNet19(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_pipnet29.cpp b/examples/lite/cv/test_lite_pipnet29.cpp index 8b54c1a9..aae8acdf 100644 --- a/examples/lite/cv/test_lite_pipnet29.cpp +++ b/examples/lite/cv/test_lite_pipnet29.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/pipnet_resnet18_10x29x32x256_cofw.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pipnet_resnet18_10x29x32x256_cofw.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet29.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet29.jpg"; lite::cv::face::align::PIPNet29 *pipnet29 = new lite::cv::face::align::PIPNet29(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/pipnet_resnet18_10x29x32x256_cofw.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pipnet_resnet18_10x29x32x256_cofw.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet29_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet29_onnx.jpg"; lite::onnxruntime::cv::face::align::PIPNet29 *pipnet29 = new lite::onnxruntime::cv::face::align::PIPNet29(onnx_path); @@ -54,9 +54,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/pipnet_resnet18_10x29x32x256_cofw.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/pipnet_resnet18_10x29x32x256_cofw.mnn"; std::string test_img_path = 
"../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet29_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet29_mnn.jpg"; lite::mnn::cv::face::align::PIPNet29 *pipnet29 = new lite::mnn::cv::face::align::PIPNet29(mnn_path); @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/pipnet_resnet18_10x29x32x256_cofw.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/pipnet_resnet18_10x29x32x256_cofw.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/pipnet_resnet18_10x29x32x256_cofw.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/pipnet_resnet18_10x29x32x256_cofw.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet29_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet29_ncnn.jpg"; lite::ncnn::cv::face::align::PIPNet29 *pipnet29 = new lite::ncnn::cv::face::align::PIPNet29(param_path, bin_path); @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/pipnet_resnet18_10x29x32x256_cofw.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/pipnet_resnet18_10x29x32x256_cofw.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/pipnet_resnet18_10x29x32x256_cofw.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/pipnet_resnet18_10x29x32x256_cofw.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet29_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet29_tnn.jpg"; lite::tnn::cv::face::align::PIPNet29 *pipnet29 = new 
lite::tnn::cv::face::align::PIPNet29(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_pipnet68.cpp b/examples/lite/cv/test_lite_pipnet68.cpp index bec8d9ed..ef48fd0c 100644 --- a/examples/lite/cv/test_lite_pipnet68.cpp +++ b/examples/lite/cv/test_lite_pipnet68.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/pipnet_resnet18_10x68x32x256_300w.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pipnet_resnet18_10x68x32x256_300w.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet68.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet68.jpg"; lite::cv::face::align::PIPNet68 *pipnet68 = new lite::cv::face::align::PIPNet68(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/pipnet_resnet18_10x68x32x256_300w.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pipnet_resnet18_10x68x32x256_300w.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet68_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet68_onnx.jpg"; lite::onnxruntime::cv::face::align::PIPNet68 *pipnet68 = new lite::onnxruntime::cv::face::align::PIPNet68(onnx_path); @@ -54,9 +54,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/pipnet_resnet18_10x68x32x256_300w.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/pipnet_resnet18_10x68x32x256_300w.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet68_mnn.jpg"; + std::string save_img_path = 
"../../../examples/logs/test_lite_pipnet68_mnn.jpg"; lite::mnn::cv::face::align::PIPNet68 *pipnet68 = new lite::mnn::cv::face::align::PIPNet68(mnn_path); @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/pipnet_resnet18_10x68x32x256_300w.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/pipnet_resnet18_10x68x32x256_300w.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/pipnet_resnet18_10x68x32x256_300w.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/pipnet_resnet18_10x68x32x256_300w.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet68_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet68_ncnn.jpg"; lite::ncnn::cv::face::align::PIPNet68 *pipnet68 = new lite::ncnn::cv::face::align::PIPNet68(param_path, bin_path); @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/pipnet_resnet18_10x68x32x256_300w.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/pipnet_resnet18_10x68x32x256_300w.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/pipnet_resnet18_10x68x32x256_300w.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/pipnet_resnet18_10x68x32x256_300w.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet68_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet68_tnn.jpg"; lite::tnn::cv::face::align::PIPNet68 *pipnet68 = new lite::tnn::cv::face::align::PIPNet68(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_pipnet98.cpp b/examples/lite/cv/test_lite_pipnet98.cpp index d5c85e42..90b1ca68 100644 --- 
a/examples/lite/cv/test_lite_pipnet98.cpp +++ b/examples/lite/cv/test_lite_pipnet98.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/pipnet_resnet18_10x98x32x256_wflw.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pipnet_resnet18_10x98x32x256_wflw.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet98.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet98.jpg"; lite::cv::face::align::PIPNet98 *pipnet98 = new lite::cv::face::align::PIPNet98(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/pipnet_resnet18_10x98x32x256_wflw.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/pipnet_resnet18_10x98x32x256_wflw.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet98_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet98_onnx.jpg"; lite::onnxruntime::cv::face::align::PIPNet98 *pipnet98 = new lite::onnxruntime::cv::face::align::PIPNet98(onnx_path); @@ -54,9 +54,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/pipnet_resnet18_10x98x32x256_wflw.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/pipnet_resnet18_10x98x32x256_wflw.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet98_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet98_mnn.jpg"; lite::mnn::cv::face::align::PIPNet98 *pipnet98 = new lite::mnn::cv::face::align::PIPNet98(mnn_path); @@ -79,10 +79,10 @@ static void test_mnn() static void test_ncnn() { 
#ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/pipnet_resnet18_10x98x32x256_wflw.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/pipnet_resnet18_10x98x32x256_wflw.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/pipnet_resnet18_10x98x32x256_wflw.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/pipnet_resnet18_10x98x32x256_wflw.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet98_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet98_ncnn.jpg"; lite::ncnn::cv::face::align::PIPNet98 *pipnet98 = new lite::ncnn::cv::face::align::PIPNet98(param_path, bin_path); @@ -105,10 +105,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/pipnet_resnet18_10x98x32x256_wflw.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/pipnet_resnet18_10x98x32x256_wflw.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/pipnet_resnet18_10x98x32x256_wflw.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/pipnet_resnet18_10x98x32x256_wflw.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_align.png"; - std::string save_img_path = "../../../logs/test_lite_pipnet98_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_pipnet98_tnn.jpg"; lite::tnn::cv::face::align::PIPNet98 *pipnet98 = new lite::tnn::cv::face::align::PIPNet98(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_plantid.cpp b/examples/lite/cv/test_lite_plantid.cpp index 1fa01879..09fd4cff 100644 --- a/examples/lite/cv/test_lite_plantid.cpp +++ b/examples/lite/cv/test_lite_plantid.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/quarrying_plantid_model.onnx"; + std::string onnx_path = 
"../../../examples/hub/onnx/cv/quarrying_plantid_model.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_plantid.jpg"; lite::cv::classification::PlantID *plantid = new lite::cv::classification::PlantID(onnx_path); @@ -37,7 +37,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/quarrying_plantid_model.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/quarrying_plantid_model.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_plantid.jpg"; lite::onnxruntime::cv::classification::PlantID *plantid = @@ -69,7 +69,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/quarrying_plantid_model.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/quarrying_plantid_model.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_plantid.jpg"; lite::mnn::cv::classification::PlantID *plantid = @@ -101,8 +101,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/quarrying_plantid_model.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/quarrying_plantid_model.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/quarrying_plantid_model.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/quarrying_plantid_model.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_plantid.jpg"; lite::ncnn::cv::classification::PlantID *plantid = @@ -134,8 +134,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/quarrying_plantid_model.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/quarrying_plantid_model.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/quarrying_plantid_model.opt.tnnproto"; + std::string 
model_path = "../../../examples/hub/tnn/cv/quarrying_plantid_model.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_plantid.jpg"; lite::tnn::cv::classification::PlantID *plantid = diff --git a/examples/lite/cv/test_lite_portrait_seg_extremec3net.cpp b/examples/lite/cv/test_lite_portrait_seg_extremec3net.cpp index a88acfa6..18acc8b1 100644 --- a/examples/lite/cv/test_lite_portrait_seg_extremec3net.cpp +++ b/examples/lite/cv/test_lite_portrait_seg_extremec3net.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ext_portrait_seg_ExtremeC3_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ext_portrait_seg_ExtremeC3_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg.png"; - std::string save_img_path = "../../../logs/test_lite_portrait_seg_extremec3net.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_portrait_seg_extremec3net.jpg"; lite::cv::segmentation::PortraitSegExtremeC3Net *portrait_seg_extremec3net = new lite::cv::segmentation::PortraitSegExtremeC3Net(onnx_path, 4); // 4 threads @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ext_portrait_seg_ExtremeC3_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ext_portrait_seg_ExtremeC3_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = "../../../logs/test_lite_portrait_seg_extremec3net_1_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_portrait_seg_extremec3net_1_onnx.jpg"; lite::onnxruntime::cv::segmentation::PortraitSegExtremeC3Net *portrait_seg_extremec3net = new lite::onnxruntime::cv::segmentation::PortraitSegExtremeC3Net(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() 
{ #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/ext_portrait_seg_ExtremeC3_224x224.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/ext_portrait_seg_ExtremeC3_224x224.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = "../../../logs/test_lite_portrait_seg_extremec3net_1_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_portrait_seg_extremec3net_1_mnn.jpg"; lite::mnn::cv::segmentation::PortraitSegExtremeC3Net *portrait_seg_extremec3net = new lite::mnn::cv::segmentation::PortraitSegExtremeC3Net(mnn_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_portrait_seg_sinet.cpp b/examples/lite/cv/test_lite_portrait_seg_sinet.cpp index a42cc3d6..c65243c6 100644 --- a/examples/lite/cv/test_lite_portrait_seg_sinet.cpp +++ b/examples/lite/cv/test_lite_portrait_seg_sinet.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ext_portrait_seg_SINet_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ext_portrait_seg_SINet_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg.png"; - std::string save_img_path = "../../../logs/test_lite_portrait_seg_sinet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_portrait_seg_sinet.jpg"; lite::cv::segmentation::PortraitSegSINet *portrait_seg_sinet = new lite::cv::segmentation::PortraitSegSINet(onnx_path, 4); // 4 threads @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ext_portrait_seg_SINet_224x224.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ext_portrait_seg_SINet_224x224.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = "../../../logs/test_lite_portrait_seg_sinet_1_onnx.jpg"; + 
std::string save_img_path = "../../../examples/logs/test_lite_portrait_seg_sinet_1_onnx.jpg"; lite::onnxruntime::cv::segmentation::PortraitSegSINet *portrait_seg_sinet = new lite::onnxruntime::cv::segmentation::PortraitSegSINet(onnx_path, 4); // 4 threads @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/ext_portrait_seg_SINet_224x224.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/ext_portrait_seg_SINet_224x224.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_head_seg_1.png"; - std::string save_img_path = "../../../logs/test_lite_portrait_seg_sinet_1_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_portrait_seg_sinet_1_mnn.jpg"; lite::mnn::cv::segmentation::PortraitSegSINet *portrait_seg_sinet = new lite::mnn::cv::segmentation::PortraitSegSINet(mnn_path, 4); // 4 threads diff --git a/examples/lite/cv/test_lite_pose_robust_face.cpp b/examples/lite/cv/test_lite_pose_robust_face.cpp index 6c4ac0ac..6f6a455d 100644 --- a/examples/lite/cv/test_lite_pose_robust_face.cpp +++ b/examples/lite/cv/test_lite_pose_robust_face.cpp @@ -6,8 +6,8 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/dream_ijba_res18_end2end.onnx"; - std::string pose_onnx_path = "../../../hub/onnx/cv/fsanet-var.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/dream_ijba_res18_end2end.onnx"; + std::string pose_onnx_path = "../../../examples/hub/onnx/cv/fsanet-var.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -52,8 +52,8 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/dream_ijba_res18_end2end.onnx"; - std::string pose_onnx_path = "../../../hub/onnx/cv/fsanet-var.onnx"; + std::string 
onnx_path = "../../../examples/hub/onnx/cv/dream_ijba_res18_end2end.onnx"; + std::string pose_onnx_path = "../../../examples/hub/onnx/cv/fsanet-var.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_resnet.cpp b/examples/lite/cv/test_lite_resnet.cpp index b7016ddc..4d6097c4 100644 --- a/examples/lite/cv/test_lite_resnet.cpp +++ b/examples/lite/cv/test_lite_resnet.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/resnet18.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/resnet18.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnet.jpg"; lite::cv::classification::ResNet *resnet = new lite::cv::classification::ResNet(onnx_path); @@ -36,7 +36,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/resnet18.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/resnet18.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnet.jpg"; lite::onnxruntime::cv::classification::ResNet *resnet = @@ -68,7 +68,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/resnet18.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/resnet18.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnet.jpg"; lite::mnn::cv::classification::ResNet *resnet = @@ -100,8 +100,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/resnet18.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/resnet18.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/resnet18.opt.param"; + std::string bin_path = 
"../../../examples/hub/ncnn/cv/resnet18.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnet.jpg"; lite::ncnn::cv::classification::ResNet *resnet = @@ -133,8 +133,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/resnet18.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/resnet18.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/resnet18.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/resnet18.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnet.jpg"; lite::tnn::cv::classification::ResNet *resnet = diff --git a/examples/lite/cv/test_lite_resnext.cpp b/examples/lite/cv/test_lite_resnext.cpp index deb14009..47f82aee 100644 --- a/examples/lite/cv/test_lite_resnext.cpp +++ b/examples/lite/cv/test_lite_resnext.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/resnext50.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/resnext50.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnext.jpg"; lite::cv::classification::ResNeXt *resnext = new lite::cv::classification::ResNeXt(onnx_path); @@ -36,7 +36,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/resnext50.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/resnext50.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnext.jpg"; lite::onnxruntime::cv::classification::ResNeXt *resnext = @@ -68,7 +68,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/resnext50.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/resnext50.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnext.jpg"; 
lite::mnn::cv::classification::ResNeXt *resnext = @@ -100,8 +100,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/resnext50.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/resnext50.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/resnext50.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/resnext50.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnext.jpg"; lite::ncnn::cv::classification::ResNeXt *resnext = @@ -133,8 +133,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/resnext50.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/resnext50.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/resnext50.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/resnext50.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_resnext.jpg"; lite::tnn::cv::classification::ResNeXt *resnext = diff --git a/examples/lite/cv/test_lite_retinaface.cpp b/examples/lite/cv/test_lite_retinaface.cpp index 33574566..ca04d046 100644 --- a/examples/lite/cv/test_lite_retinaface.cpp +++ b/examples/lite/cv/test_lite_retinaface.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/Pytorch_RetinaFace_resnet50.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/Pytorch_RetinaFace_resnet50.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_retinaface.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_retinaface.jpg"; lite::cv::face::detect::RetinaFace *retinaface = new lite::cv::face::detect::RetinaFace(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - 
std::string onnx_path = "../../../hub/onnx/cv/Pytorch_RetinaFace_resnet50.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/Pytorch_RetinaFace_resnet50.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_retinaface_onnx_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_retinaface_onnx_2.jpg"; lite::onnxruntime::cv::face::detect::RetinaFace *retinaface = new lite::onnxruntime::cv::face::detect::RetinaFace(onnx_path); @@ -52,9 +52,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/Pytorch_RetinaFace_resnet50.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/Pytorch_RetinaFace_resnet50.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_retinaface_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_retinaface_mnn_2.jpg"; lite::mnn::cv::face::detect::RetinaFace *retinaface = new lite::mnn::cv::face::detect::RetinaFace(mnn_path); @@ -76,10 +76,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/Pytorch_RetinaFace_mobile0.25-640-640.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/Pytorch_RetinaFace_mobile0.25-640-640.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/Pytorch_RetinaFace_mobile0.25-640-640.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/Pytorch_RetinaFace_mobile0.25-640-640.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_retinaface_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_retinaface_ncnn_2.jpg"; lite::ncnn::cv::face::detect::RetinaFace *retinaface = new 
lite::ncnn::cv::face::detect::RetinaFace(param_path, bin_path, 1, 640, 640); @@ -101,10 +101,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/Pytorch_RetinaFace_mobile0.25-640-640.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/Pytorch_RetinaFace_mobile0.25-640-640.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/Pytorch_RetinaFace_mobile0.25-640-640.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/Pytorch_RetinaFace_mobile0.25-640-640.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_retinaface_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_retinaface_tnn_2.jpg"; lite::tnn::cv::face::detect::RetinaFace *retinaface = new lite::tnn::cv::face::detect::RetinaFace(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_rexnet_emotion7.cpp b/examples/lite/cv/test_lite_rexnet_emotion7.cpp index 1d805e8c..a00aa94f 100644 --- a/examples/lite/cv/test_lite_rexnet_emotion7.cpp +++ b/examples/lite/cv/test_lite_rexnet_emotion7.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion.jpg"; lite::cv::face::attr::ReXNetEmotion7 *rexnet_emotion7 = new lite::cv::face::attr::ReXNetEmotion7(onnx_path); @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = 
"../../../hub/onnx/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion.jpg"; lite::onnxruntime::cv::face::attr::ReXNetEmotion7 *rexnet_emotion7 = new lite::onnxruntime::cv::face::attr::ReXNetEmotion7(onnx_path); @@ -56,9 +56,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_emotion_mnn.jpg"; lite::mnn::cv::face::attr::ReXNetEmotion7 *rexnet_emotion7 = new lite::mnn::cv::face::attr::ReXNetEmotion7(mnn_path); @@ -87,10 +87,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/face-emotion-recognition-affectnet_7_vggface2_rexnet150.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_emotion.jpg"; - std::string save_img_path = "../../../logs/test_lite_emotion_tnn.jpg"; + 
std::string save_img_path = "../../../examples/logs/test_lite_emotion_tnn.jpg"; lite::tnn::cv::face::attr::ReXNetEmotion7 *rexnet_emotion7 = new lite::tnn::cv::face::attr::ReXNetEmotion7(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_rvm.cpp b/examples/lite/cv/test_lite_rvm.cpp index 6df46e09..76384c86 100644 --- a/examples/lite/cv/test_lite_rvm.cpp +++ b/examples/lite/cv/test_lite_rvm.cpp @@ -6,10 +6,10 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/rvm_mobilenetv3_fp32.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/rvm_mobilenetv3_fp32.onnx"; std::string video_path = "../../../examples/lite/resources/test_lite_rvm_0.mp4"; std::string background_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string output_path = "../../../logs/test_lite_rvm_0.mp4"; + std::string output_path = "../../../examples/logs/test_lite_rvm_0.mp4"; cv::Mat background = cv::imread(background_path); auto *rvm = new lite::cv::matting::RobustVideoMatting(onnx_path, 16); // 16 threads @@ -25,10 +25,10 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/rvm_mobilenetv3_fp32.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/rvm_mobilenetv3_fp32.onnx"; std::string video_path = "../../../examples/lite/resources/test_lite_rvm_0.mp4"; std::string background_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string output_path = "../../../logs/test_lite_rvm_0.mp4"; + std::string output_path = "../../../examples/logs/test_lite_rvm_0.mp4"; cv::Mat background = cv::imread(background_path); auto *rvm = new lite::onnxruntime::cv::matting::RobustVideoMatting(onnx_path, 16); // 16 threads @@ -45,10 +45,10 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/rvm_mobilenetv3_fp32-1080-1920.mnn"; + std::string mnn_path = 
"../../../examples/hub/mnn/cv/rvm_mobilenetv3_fp32-1080-1920.mnn"; std::string video_path = "../../../examples/lite/resources/test_lite_rvm_0.mp4"; std::string background_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string output_path = "../../../logs/test_lite_rvm_0_mnn.mp4"; + std::string output_path = "../../../examples/logs/test_lite_rvm_0_mnn.mp4"; cv::Mat background = cv::imread(background_path); auto *rvm = new lite::mnn::cv::matting::RobustVideoMatting(mnn_path, 16, 0); // 16 threads @@ -73,11 +73,11 @@ static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/rvm_mobilenetv3_fp32-480-480-sim.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/rvm_mobilenetv3_fp32-480-480-sim.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/rvm_mobilenetv3_fp32-480-480-sim.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/rvm_mobilenetv3_fp32-480-480-sim.opt.tnnmodel"; std::string video_path = "../../../examples/lite/resources/test_lite_rvm_0.mp4"; std::string background_path = "../../../examples/lite/resources/test_lite_matting_bgr.jpg"; - std::string output_path = "../../../logs/test_lite_rvm_0_tnn.mp4"; + std::string output_path = "../../../examples/logs/test_lite_rvm_0_tnn.mp4"; cv::Mat background = cv::imread(background_path); auto *rvm = new lite::tnn::cv::matting::RobustVideoMatting( diff --git a/examples/lite/cv/test_lite_scaled_yolov4.cpp b/examples/lite/cv/test_lite_scaled_yolov4.cpp index 1349ae89..d408cc2e 100644 --- a/examples/lite/cv/test_lite_scaled_yolov4.cpp +++ b/examples/lite/cv/test_lite_scaled_yolov4.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ScaledYoloV4_yolov4-p5-640-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ScaledYoloV4_yolov4-p5-640-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string 
save_img_path = "../../../logs/test_lite_scaled_yolov4_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_scaled_yolov4_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::ScaledYoloV4 *scaled_yolov4 = new lite::cv::detection::ScaledYoloV4(onnx_path, 8); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ScaledYoloV4_yolov4-p5-640-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ScaledYoloV4_yolov4-p5-640-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_scaled_yolov4_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_scaled_yolov4_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::ScaledYoloV4 *scaled_yolov4 = diff --git a/examples/lite/cv/test_lite_scrfd.cpp b/examples/lite/cv/test_lite_scrfd.cpp index 8432c874..103849cd 100644 --- a/examples/lite/cv/test_lite_scrfd.cpp +++ b/examples/lite/cv/test_lite_scrfd.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/scrfd_2.5g_bnkps_shape640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/scrfd_2.5g_bnkps_shape640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_scrfd.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_scrfd.jpg"; lite::cv::face::detect::SCRFD *scrfd = new lite::cv::face::detect::SCRFD(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/scrfd_2.5g_bnkps_shape640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/scrfd_2.5g_bnkps_shape640x640.onnx"; std::string 
test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_scrfd_onnx_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_scrfd_onnx_2.jpg"; lite::onnxruntime::cv::face::detect::SCRFD *scrfd = new lite::onnxruntime::cv::face::detect::SCRFD(onnx_path); @@ -52,9 +52,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/scrfd_2.5g_bnkps_shape640x640.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/scrfd_2.5g_bnkps_shape640x640.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_scrfd_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_scrfd_mnn_2.jpg"; lite::mnn::cv::face::detect::SCRFD *scrfd = new lite::mnn::cv::face::detect::SCRFD(mnn_path); @@ -76,10 +76,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/scrfd_2.5g_bnkps_shape640x640.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/scrfd_2.5g_bnkps_shape640x640.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/scrfd_2.5g_bnkps_shape640x640.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/scrfd_2.5g_bnkps_shape640x640.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_scrfd_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_scrfd_ncnn_2.jpg"; lite::ncnn::cv::face::detect::SCRFD *scrfd = new lite::ncnn::cv::face::detect::SCRFD(param_path, bin_path, 1, 640, 640); @@ -101,10 +101,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/scrfd_2.5g_bnkps_shape640x640.opt.tnnproto"; - std::string model_path 
= "../../../hub/tnn/cv/scrfd_2.5g_bnkps_shape640x640.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/scrfd_2.5g_bnkps_shape640x640.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/scrfd_2.5g_bnkps_shape640x640.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_scrfd_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_scrfd_tnn_2.jpg"; lite::tnn::cv::face::detect::SCRFD *scrfd = new lite::tnn::cv::face::detect::SCRFD(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_shufflenetv2.cpp b/examples/lite/cv/test_lite_shufflenetv2.cpp index 1c97f849..1e5b6f5e 100644 --- a/examples/lite/cv/test_lite_shufflenetv2.cpp +++ b/examples/lite/cv/test_lite_shufflenetv2.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/shufflenet-v2-10.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/shufflenet-v2-10.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_shufflenetv2.jpg"; lite::cv::classification::ShuffleNetV2 *shufflenetv2 = @@ -37,7 +37,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/shufflenet-v2-10.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/shufflenet-v2-10.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_shufflenetv2.jpg"; lite::onnxruntime::cv::classification::ShuffleNetV2 *shufflenetv2 = @@ -69,7 +69,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/shufflenet-v2-10.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/shufflenet-v2-10.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_shufflenetv2.jpg"; lite::mnn::cv::classification::ShuffleNetV2 
*shufflenetv2 = @@ -101,8 +101,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/shufflenet-v2-10.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/shufflenet-v2-10.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/shufflenet-v2-10.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/shufflenet-v2-10.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_shufflenetv2.jpg"; lite::ncnn::cv::classification::ShuffleNetV2 *shufflenetv2 = @@ -134,8 +134,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/shufflenet-v2-10.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/shufflenet-v2-10.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/shufflenet-v2-10.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/shufflenet-v2-10.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_shufflenetv2.jpg"; lite::tnn::cv::classification::ShuffleNetV2 *shufflenetv2 = diff --git a/examples/lite/cv/test_lite_sphere_face.cpp b/examples/lite/cv/test_lite_sphere_face.cpp index 3da9a219..fc12c3aa 100644 --- a/examples/lite/cv/test_lite_sphere_face.cpp +++ b/examples/lite/cv/test_lite_sphere_face.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/sphere20a_20171020.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/sphere20a_20171020.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/sphere20a_20171020.onnx"; + std::string onnx_path = 
"../../../examples/hub/onnx/cv/sphere20a_20171020.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/sphere20a_20171020.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/sphere20a_20171020.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/sphere20a_20171020.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/sphere20a_20171020.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/sphere20a_20171020.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/sphere20a_20171020.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/sphere20a_20171020.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/sphere20a_20171020.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/sphere20a_20171020.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/sphere20a_20171020.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_ssd.cpp b/examples/lite/cv/test_lite_ssd.cpp index 7f3a6540..4c0d62c5 100644 --- 
a/examples/lite/cv/test_lite_ssd.cpp +++ b/examples/lite/cv/test_lite_ssd.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ssd-10.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ssd-10.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssd.jpg"; - std::string save_img_path = "../../../logs/test_lite_ssd.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_ssd.jpg"; lite::cv::detection::SSD *ssd = new lite::cv::detection::SSD(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ssd-10.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ssd-10.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssd.jpg"; - std::string save_img_path = "../../../logs/test_onnx_ssd.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_ssd.jpg"; lite::onnxruntime::cv::detection::SSD *ssd = new lite::onnxruntime::cv::detection::SSD(onnx_path); diff --git a/examples/lite/cv/test_lite_ssd_mobilenetv1.cpp b/examples/lite/cv/test_lite_ssd_mobilenetv1.cpp index d3a8ff5f..4def9f5b 100644 --- a/examples/lite/cv/test_lite_ssd_mobilenetv1.cpp +++ b/examples/lite/cv/test_lite_ssd_mobilenetv1.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ssd_mobilenet_v1_10.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ssd_mobilenet_v1_10.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssd_mobilenetv1.png"; - std::string save_img_path = "../../../logs/test_lite_ssd_mobilenetv1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_ssd_mobilenetv1.jpg"; lite::cv::detection::SSDMobileNetV1 *ssd_mobilenetv1 = new lite::cv::detection::SSDMobileNetV1(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { 
#ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ssd_mobilenet_v1_10.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ssd_mobilenet_v1_10.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssd_mobilenetv1.png"; - std::string save_img_path = "../../../logs/test_onnx_ssd_mobilenetv1.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_ssd_mobilenetv1.jpg"; lite::onnxruntime::cv::detection::SSDMobileNetV1 *ssd_mobilenetv1 = new lite::onnxruntime::cv::detection::SSDMobileNetV1(onnx_path); diff --git a/examples/lite/cv/test_lite_ssrnet.cpp b/examples/lite/cv/test_lite_ssrnet.cpp index 9277f117..2c69eb43 100644 --- a/examples/lite/cv/test_lite_ssrnet.cpp +++ b/examples/lite/cv/test_lite_ssrnet.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ssrnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ssrnet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssrnet.jpg"; - std::string save_img_path = "../../../logs/test_lite_ssrnet.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_ssrnet.jpg"; lite::cv::face::attr::SSRNet *ssrnet = new lite::cv::face::attr::SSRNet(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ssrnet.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ssrnet.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssrnet.jpg"; - std::string save_img_path = "../../../logs/test_onnx_ssrnet.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_ssrnet.jpg"; lite::onnxruntime::cv::face::attr::SSRNet *ssrnet = new lite::onnxruntime::cv::face::attr::SSRNet(onnx_path); @@ -52,9 +52,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = 
"../../../hub/mnn/cv/ssrnet.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/ssrnet.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssrnet.jpg"; - std::string save_img_path = "../../../logs/test_ssrnet_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_ssrnet_mnn.jpg"; lite::mnn::cv::face::attr::SSRNet *ssrnet = new lite::mnn::cv::face::attr::SSRNet(mnn_path); @@ -82,10 +82,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/ssrnet.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/ssrnet.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/ssrnet.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/ssrnet.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ssrnet.jpg"; - std::string save_img_path = "../../../logs/test_ssrnet_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_ssrnet_tnn.jpg"; lite::tnn::cv::face::attr::SSRNet *ssrnet = new lite::tnn::cv::face::attr::SSRNet(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_subpixel_cnn.cpp b/examples/lite/cv/test_lite_subpixel_cnn.cpp index 67b30a06..1f3d658f 100644 --- a/examples/lite/cv/test_lite_subpixel_cnn.cpp +++ b/examples/lite/cv/test_lite_subpixel_cnn.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/subpixel-cnn.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/subpixel-cnn.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_subpixel_cnn.jpg"; - std::string save_img_path = "../../../logs/test_lite_subpixel_cnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_subpixel_cnn.jpg"; lite::cv::resolution::SubPixelCNN *subpixel_cnn = new lite::cv::resolution::SubPixelCNN(onnx_path); @@ -26,9 +26,9 @@ static void test_default() static void test_onnxruntime() { 
#ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/subpixel-cnn.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/subpixel-cnn.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_subpixel_cnn.jpg"; - std::string save_img_path = "../../../logs/test_onnx_subpixel_cnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_subpixel_cnn.jpg"; lite::onnxruntime::cv::resolution::SubPixelCNN *subpixel_cnn = new lite::onnxruntime::cv::resolution::SubPixelCNN(onnx_path); @@ -48,9 +48,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/subpixel-cnn.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/subpixel-cnn.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_subpixel_cnn.jpg"; - std::string save_img_path = "../../../logs/test_subpixel_cnn_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_subpixel_cnn_mnn.jpg"; lite::mnn::cv::resolution::SubPixelCNN *subpixel_cnn = new lite::mnn::cv::resolution::SubPixelCNN(mnn_path); @@ -70,10 +70,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/subpixel-cnn.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/subpixel-cnn.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/subpixel-cnn.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/subpixel-cnn.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_subpixel_cnn.jpg"; - std::string save_img_path = "../../../logs/test_subpixel_cnn_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_subpixel_cnn_ncnn.jpg"; lite::ncnn::cv::resolution::SubPixelCNN *subpixel_cnn = new lite::ncnn::cv::resolution::SubPixelCNN(param_path, bin_path); @@ -93,10 +93,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string 
proto_path = "../../../hub/tnn/cv/subpixel-cnn.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/subpixel-cnn.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/subpixel-cnn.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/subpixel-cnn.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_subpixel_cnn.jpg"; - std::string save_img_path = "../../../logs/test_subpixel_cnn_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_subpixel_cnn_tnn.jpg"; lite::tnn::cv::resolution::SubPixelCNN *subpixel_cnn = new lite::tnn::cv::resolution::SubPixelCNN(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_tencent_cifp_face.cpp b/examples/lite/cv/test_lite_tencent_cifp_face.cpp index 154742a6..0ad9eb3e 100644 --- a/examples/lite/cv/test_lite_tencent_cifp_face.cpp +++ b/examples/lite/cv/test_lite_tencent_cifp_face.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -31,7 +31,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -58,7 +58,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.mnn"; 
+ std::string mnn_path = "../../../examples/hub/mnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -85,8 +85,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -113,8 +113,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/Tencent_Cifp_BUPT_Balancedface_IR_34.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_tencent_curricular_face.cpp b/examples/lite/cv/test_lite_tencent_curricular_face.cpp index ef42a527..a9567e81 100644 --- a/examples/lite/cv/test_lite_tencent_curricular_face.cpp +++ b/examples/lite/cv/test_lite_tencent_curricular_face.cpp @@ -6,7 +6,7 @@ static void test_default() { - std::string onnx_path = 
"../../../hub/onnx/cv/Tencent_CurricularFace_Backbone.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/Tencent_CurricularFace_Backbone.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_1.png"; @@ -32,7 +32,7 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/Tencent_CurricularFace_Backbone.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/Tencent_CurricularFace_Backbone.onnx"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -59,7 +59,7 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/Tencent_CurricularFace_Backbone.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/Tencent_CurricularFace_Backbone.mnn"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -86,8 +86,8 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/Tencent_CurricularFace_Backbone.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/Tencent_CurricularFace_Backbone.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/Tencent_CurricularFace_Backbone.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/Tencent_CurricularFace_Backbone.opt.bin"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; @@ -114,8 +114,8 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = 
"../../../hub/tnn/cv/Tencent_CurricularFace_Backbone.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/Tencent_CurricularFace_Backbone.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/Tencent_CurricularFace_Backbone.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/Tencent_CurricularFace_Backbone.opt.tnnmodel"; std::string test_img_path0 = "../../../examples/lite/resources/test_lite_faceid_0.png"; std::string test_img_path1 = "../../../examples/lite/resources/test_lite_faceid_2.png"; diff --git a/examples/lite/cv/test_lite_tiny_yolov3.cpp b/examples/lite/cv/test_lite_tiny_yolov3.cpp index 8d8023bb..4747a135 100644 --- a/examples/lite/cv/test_lite_tiny_yolov3.cpp +++ b/examples/lite/cv/test_lite_tiny_yolov3.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/tiny-yolov3-11.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/tiny-yolov3-11.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_tiny_yolov3.jpg"; - std::string save_img_path = "../../../logs/test_lite_tiny_yolov3.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_tiny_yolov3.jpg"; lite::cv::detection::TinyYoloV3 *tiny_yolov3 = new lite::cv::detection::TinyYoloV3(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/tiny-yolov3-11.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/tiny-yolov3-11.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_tiny_yolov3.jpg"; - std::string save_img_path = "../../../logs/test_onnx_tiny_yolov3.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_tiny_yolov3.jpg"; lite::onnxruntime::cv::detection::TinyYoloV3 *tiny_yolov3 = new lite::onnxruntime::cv::detection::TinyYoloV3(onnx_path); diff --git a/examples/lite/cv/test_lite_tiny_yolov4_coco.cpp 
b/examples/lite/cv/test_lite_tiny_yolov4_coco.cpp index 973671a0..bbb4304a 100644 --- a/examples/lite/cv/test_lite_tiny_yolov4_coco.cpp +++ b/examples/lite/cv/test_lite_tiny_yolov4_coco.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov4_tiny_weights_coco.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov4_tiny_weights_coco.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_tiny_yolov4_coco_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_tiny_yolov4_coco_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::TinyYoloV4COCO *tiny_yolov4_coco = new lite::cv::detection::TinyYoloV4COCO(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov4_tiny_weights_coco.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov4_tiny_weights_coco.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_tiny_yolov4_coco_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_tiny_yolov4_coco_2.jpg"; // 2. 
Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::TinyYoloV4COCO *tiny_yolov4_coco = diff --git a/examples/lite/cv/test_lite_tiny_yolov4_voc.cpp b/examples/lite/cv/test_lite_tiny_yolov4_voc.cpp index 0382e750..bc679dd8 100644 --- a/examples/lite/cv/test_lite_tiny_yolov4_voc.cpp +++ b/examples/lite/cv/test_lite_tiny_yolov4_voc.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov4_tiny_weights_voc.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov4_tiny_weights_voc.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_tiny_yolov4_voc_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_tiny_yolov4_voc_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::TinyYoloV4VOC *tiny_yolov4_voc = @@ -31,9 +31,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov4_tiny_weights_voc.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov4_tiny_weights_voc.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_tiny_yolov4_voc_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_tiny_yolov4_voc_2.jpg"; // 2. 
Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::TinyYoloV4VOC *tiny_yolov4_voc = diff --git a/examples/lite/cv/test_lite_ultraface.cpp b/examples/lite/cv/test_lite_ultraface.cpp index fc93bfb3..3846959b 100644 --- a/examples/lite/cv/test_lite_ultraface.cpp +++ b/examples/lite/cv/test_lite_ultraface.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/ultraface-rfb-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ultraface-rfb-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ultraface.jpg"; - std::string save_img_path = "../../../logs/test_lite_ultraface.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_ultraface.jpg"; lite::cv::face::detect::UltraFace *ultraface = new lite::cv::face::detect::UltraFace(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/ultraface-rfb-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/ultraface-rfb-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ultraface.jpg"; - std::string save_img_path = "../../../logs/test_ultraface_onnx.jpg"; + std::string save_img_path = "../../../examples/logs/test_ultraface_onnx.jpg"; lite::onnxruntime::cv::face::detect::UltraFace *ultraface = new lite::onnxruntime::cv::face::detect::UltraFace(onnx_path); @@ -52,9 +52,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/ultraface-rfb-640.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/ultraface-rfb-640.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ultraface.jpg"; - std::string save_img_path = "../../../logs/test_ultraface_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_ultraface_mnn.jpg"; lite::mnn::cv::face::detect::UltraFace 
*ultraface = new lite::mnn::cv::face::detect::UltraFace(mnn_path); @@ -76,10 +76,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/ultraface-rfb-320.param"; - std::string bin_path = "../../../hub/ncnn/cv/ultraface-rfb-320.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/ultraface-rfb-320.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/ultraface-rfb-320.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ultraface.jpg"; - std::string save_img_path = "../../../logs/test_ultraface_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_ultraface_ncnn.jpg"; lite::ncnn::cv::face::detect::UltraFace *ultraface = new lite::ncnn::cv::face::detect::UltraFace(param_path, bin_path, 1, 320, 240); @@ -101,10 +101,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/ultraface-rfb-640.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/ultraface-rfb-640.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/ultraface-rfb-640.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/ultraface-rfb-640.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_ultraface.jpg"; - std::string save_img_path = "../../../logs/test_ultraface_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_ultraface_tnn.jpg"; lite::tnn::cv::face::detect::UltraFace *ultraface = new lite::tnn::cv::face::detect::UltraFace(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_vgg16_age.cpp b/examples/lite/cv/test_lite_vgg16_age.cpp index d0554d9b..1cabe87c 100644 --- a/examples/lite/cv/test_lite_vgg16_age.cpp +++ b/examples/lite/cv/test_lite_vgg16_age.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/vgg_ilsvrc_16_age_imdb_wiki.onnx"; + std::string onnx_path 
= "../../../examples/hub/onnx/cv/vgg_ilsvrc_16_age_imdb_wiki.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_vgg16_age.jpg"; - std::string save_img_path = "../../../logs/test_lite_vgg16_age.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_vgg16_age.jpg"; lite::cv::face::attr::VGG16Age *vgg16_age = new lite::cv::face::attr::VGG16Age(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/vgg_ilsvrc_16_age_imdb_wiki.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/vgg_ilsvrc_16_age_imdb_wiki.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_vgg16_age.jpg"; - std::string save_img_path = "../../../logs/test_onnx_vgg16_age.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_vgg16_age.jpg"; lite::onnxruntime::cv::face::attr::VGG16Age *vgg16_age = new lite::onnxruntime::cv::face::attr::VGG16Age(onnx_path); diff --git a/examples/lite/cv/test_lite_vgg16_gender.cpp b/examples/lite/cv/test_lite_vgg16_gender.cpp index 51cf2596..b5912b74 100644 --- a/examples/lite/cv/test_lite_vgg16_gender.cpp +++ b/examples/lite/cv/test_lite_vgg16_gender.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/vgg_ilsvrc_16_gender_imdb_wiki.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/vgg_ilsvrc_16_gender_imdb_wiki.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_vgg16_gender.jpg"; - std::string save_img_path = "../../../logs/test_lite_vgg16_gender.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_vgg16_gender.jpg"; lite::cv::face::attr::VGG16Gender *vgg16_gender = new lite::cv::face::attr::VGG16Gender(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = 
"../../../hub/onnx/cv/vgg_ilsvrc_16_gender_imdb_wiki.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/vgg_ilsvrc_16_gender_imdb_wiki.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_vgg16_gender.jpg"; - std::string save_img_path = "../../../logs/test_onnx_vgg16_gender.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_vgg16_gender.jpg"; lite::onnxruntime::cv::face::attr::VGG16Gender *vgg16_gender = new lite::onnxruntime::cv::face::attr::VGG16Gender(onnx_path); diff --git a/examples/lite/cv/test_lite_yolo5face.cpp b/examples/lite/cv/test_lite_yolo5face.cpp index 13a8b20d..282b0f40 100644 --- a/examples/lite/cv/test_lite_yolo5face.cpp +++ b/examples/lite/cv/test_lite_yolo5face.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov5face-s-640x640.onnx"; // yolov5s-face + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5face-s-640x640.onnx"; // yolov5s-face std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5face.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5face.jpg"; lite::cv::face::detect::YOLO5Face *yolov5face = new lite::cv::face::detect::YOLO5Face(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov5face-s-640x640.onnx"; // yolov5s-face + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5face-s-640x640.onnx"; // yolov5s-face std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5face_onnx_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5face_onnx_2.jpg"; lite::onnxruntime::cv::face::detect::YOLO5Face *yolov5face = new 
lite::onnxruntime::cv::face::detect::YOLO5Face(onnx_path); @@ -52,9 +52,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolov5face-s-640x640.mnn"; // yolov5s-face + std::string mnn_path = "../../../examples/hub/mnn/cv/yolov5face-s-640x640.mnn"; // yolov5s-face std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5face_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5face_mnn_2.jpg"; lite::mnn::cv::face::detect::YOLO5Face *yolov5face = new lite::mnn::cv::face::detect::YOLO5Face(mnn_path); @@ -76,10 +76,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/yolov5face-s-640x640.opt.param"; // yolov5s-face - std::string bin_path = "../../../hub/ncnn/cv/yolov5face-s-640x640.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/yolov5face-s-640x640.opt.param"; // yolov5s-face + std::string bin_path = "../../../examples/hub/ncnn/cv/yolov5face-s-640x640.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5face_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5face_ncnn_2.jpg"; lite::ncnn::cv::face::detect::YOLO5Face *yolov5face = new lite::ncnn::cv::face::detect::YOLO5Face(param_path, bin_path, 1, 640, 640); @@ -101,10 +101,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/yolov5face-s-640x640.opt.tnnproto"; // yolov5s-face - std::string model_path = "../../../hub/tnn/cv/yolov5face-s-640x640.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/yolov5face-s-640x640.opt.tnnproto"; // yolov5s-face + std::string model_path = 
"../../../examples/hub/tnn/cv/yolov5face-s-640x640.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5face_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5face_tnn_2.jpg"; lite::tnn::cv::face::detect::YOLO5Face *yolov5face = new lite::tnn::cv::face::detect::YOLO5Face(proto_path, model_path); diff --git a/examples/lite/cv/test_lite_yolop.cpp b/examples/lite/cv/test_lite_yolop.cpp index b94406b4..d13a62fa 100644 --- a/examples/lite/cv/test_lite_yolop.cpp +++ b/examples/lite/cv/test_lite_yolop.cpp @@ -6,12 +6,12 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolop-640-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolop-640-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolop.jpg"; - std::string save_det_path = "../../../logs/test_lite_yolop_det.jpg"; - std::string save_da_path = "../../../logs/test_lite_yolop_da.jpg"; - std::string save_ll_path = "../../../logs/test_lite_yolop_ll.jpg"; - std::string save_merge_path = "../../../logs/test_lite_yolop_merge.jpg"; + std::string save_det_path = "../../../examples/logs/test_lite_yolop_det.jpg"; + std::string save_da_path = "../../../examples/logs/test_lite_yolop_da.jpg"; + std::string save_ll_path = "../../../examples/logs/test_lite_yolop_ll.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_yolop_merge.jpg"; lite::cv::detection::YOLOP *yolop = new lite::cv::detection::YOLOP(onnx_path, 16); // 16 threads @@ -67,12 +67,12 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolop-640-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolop-640-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolop.jpg"; - std::string save_det_path = 
"../../../logs/test_lite_yolop_det_onnx.jpg"; - std::string save_da_path = "../../../logs/test_lite_yolop_da_onnx.jpg"; - std::string save_ll_path = "../../../logs/test_lite_yolop_ll_onnx.jpg"; - std::string save_merge_path = "../../../logs/test_lite_yolop_merge_onnx.jpg"; + std::string save_det_path = "../../../examples/logs/test_lite_yolop_det_onnx.jpg"; + std::string save_da_path = "../../../examples/logs/test_lite_yolop_da_onnx.jpg"; + std::string save_ll_path = "../../../examples/logs/test_lite_yolop_ll_onnx.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_yolop_merge_onnx.jpg"; lite::onnxruntime::cv::detection::YOLOP *yolop = new lite::onnxruntime::cv::detection::YOLOP(onnx_path, 16); // 16 threads @@ -130,12 +130,12 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolop-640-640.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolop-640-640.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolop.jpg"; - std::string save_det_path = "../../../logs/test_lite_yolop_det_mnn.jpg"; - std::string save_da_path = "../../../logs/test_lite_yolop_da_mnn.jpg"; - std::string save_ll_path = "../../../logs/test_lite_yolop_ll_mnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_yolop_merge_mnn.jpg"; + std::string save_det_path = "../../../examples/logs/test_lite_yolop_det_mnn.jpg"; + std::string save_da_path = "../../../examples/logs/test_lite_yolop_da_mnn.jpg"; + std::string save_ll_path = "../../../examples/logs/test_lite_yolop_ll_mnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_yolop_merge_mnn.jpg"; lite::mnn::cv::detection::YOLOP *yolop = new lite::mnn::cv::detection::YOLOP(mnn_path, 16); // 16 threads @@ -193,13 +193,13 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/yolop-640-640.opt.param"; - std::string bin_path = 
"../../../hub/ncnn/cv/yolop-640-640.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/yolop-640-640.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/yolop-640-640.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolop.jpg"; - std::string save_det_path = "../../../logs/test_lite_yolop_det_ncnn.jpg"; - std::string save_da_path = "../../../logs/test_lite_yolop_da_ncnn.jpg"; - std::string save_ll_path = "../../../logs/test_lite_yolop_ll_ncnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_yolop_merge_ncnn.jpg"; + std::string save_det_path = "../../../examples/logs/test_lite_yolop_det_ncnn.jpg"; + std::string save_da_path = "../../../examples/logs/test_lite_yolop_da_ncnn.jpg"; + std::string save_ll_path = "../../../examples/logs/test_lite_yolop_ll_ncnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_yolop_merge_ncnn.jpg"; lite::ncnn::cv::detection::YOLOP *yolop = new lite::ncnn::cv::detection::YOLOP(param_path, bin_path, 16); // 16 threads @@ -257,13 +257,13 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/yolop-640-640.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/yolop-640-640.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/yolop-640-640.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/yolop-640-640.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolop.jpg"; - std::string save_det_path = "../../../logs/test_lite_yolop_det_tnn.jpg"; - std::string save_da_path = "../../../logs/test_lite_yolop_da_tnn.jpg"; - std::string save_ll_path = "../../../logs/test_lite_yolop_ll_tnn.jpg"; - std::string save_merge_path = "../../../logs/test_lite_yolop_merge_tnn.jpg"; + std::string save_det_path = "../../../examples/logs/test_lite_yolop_det_tnn.jpg"; + std::string save_da_path = 
"../../../examples/logs/test_lite_yolop_da_tnn.jpg"; + std::string save_ll_path = "../../../examples/logs/test_lite_yolop_ll_tnn.jpg"; + std::string save_merge_path = "../../../examples/logs/test_lite_yolop_merge_tnn.jpg"; lite::tnn::cv::detection::YOLOP *yolop = new lite::tnn::cv::detection::YOLOP(proto_path, model_path, 16); // 16 threads diff --git a/examples/lite/cv/test_lite_yolor.cpp b/examples/lite/cv/test_lite_yolor.cpp index 49963abb..b1710df1 100644 --- a/examples/lite/cv/test_lite_yolor.cpp +++ b/examples/lite/cv/test_lite_yolor.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolor-p6-640-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolor-p6-640-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolor_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolor_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::YoloR *yolor = new lite::cv::detection::YoloR(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolor-p6-640-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolor-p6-640-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolor_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolor_2.jpg"; // 2. 
Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::YoloR *yolor = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolor-p6-640-640.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolor-p6-640-640.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolor_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolor_mnn_2.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::YoloR *yolor = @@ -80,12 +80,12 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/yolor-p6-640-640.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/yolor-p6-640-640.opt.bin"; - // std::string param_path = "../../../hub/ncnn/cv/yolor-ssss-s2d-640-640.opt.param"; - // std::string bin_path = "../../../hub/ncnn/cv/yolor-ssss-s2d-640-640.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/yolor-p6-640-640.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/yolor-p6-640-640.opt.bin"; + // std::string param_path = "../../../examples/hub/ncnn/cv/yolor-ssss-s2d-640-640.opt.param"; + // std::string bin_path = "../../../examples/hub/ncnn/cv/yolor-ssss-s2d-640-640.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolor_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolor_ncnn_2.jpg"; // 4. 
Test Specific Engine NCNN if (param_path.find("ssss") != std::string::npos) // yolor-ssss-xxx @@ -128,10 +128,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/yolor-p6-640-640.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/yolor-p6-640-640.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/yolor-p6-640-640.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/yolor-p6-640-640.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_detection_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolor_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolor_tnn_2.jpg"; // 5. Test Specific Engine TNN lite::tnn::cv::detection::YoloR *yolor = diff --git a/examples/lite/cv/test_lite_yolov3.cpp b/examples/lite/cv/test_lite_yolov3.cpp index ceca8b87..9733f17b 100644 --- a/examples/lite/cv/test_lite_yolov3.cpp +++ b/examples/lite/cv/test_lite_yolov3.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov3-10.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov3-10.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov3.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov3.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov3.jpg"; lite::cv::detection::YoloV3 *yolov3 = new lite::cv::detection::YoloV3(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov3-10.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov3-10.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov3.jpg"; - std::string save_img_path = "../../../logs/test_onnx_yolov3.jpg"; + std::string save_img_path = 
"../../../examples/logs/test_onnx_yolov3.jpg"; lite::onnxruntime::cv::detection::YoloV3 *yolov3 = new lite::onnxruntime::cv::detection::YoloV3(onnx_path); diff --git a/examples/lite/cv/test_lite_yolov4.cpp b/examples/lite/cv/test_lite_yolov4.cpp index 42a04e90..300c9367 100644 --- a/examples/lite/cv/test_lite_yolov4.cpp +++ b/examples/lite/cv/test_lite_yolov4.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/voc-mobilenetv2-yolov4-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/voc-mobilenetv2-yolov4-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov4.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov4.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov4.jpg"; lite::cv::detection::YoloV4 *yolov4 = new lite::cv::detection::YoloV4(onnx_path); @@ -28,9 +28,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/voc-mobilenetv2-yolov4-640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/voc-mobilenetv2-yolov4-640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov4.jpg"; - std::string save_img_path = "../../../logs/test_onnx_yolov4.jpg"; + std::string save_img_path = "../../../examples/logs/test_onnx_yolov4.jpg"; lite::onnxruntime::cv::detection::YoloV4 *yolov4 = new lite::onnxruntime::cv::detection::YoloV4(onnx_path); diff --git a/examples/lite/cv/test_lite_yolov5.cpp b/examples/lite/cv/test_lite_yolov5.cpp index 2b7346e6..f9c90450 100644 --- a/examples/lite/cv/test_lite_yolov5.cpp +++ b/examples/lite/cv/test_lite_yolov5.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5s.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_1.jpg"; - 
std::string save_img_path = "../../../logs/test_lite_yolov5_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::YoloV5 *yolov5 = new lite::cv::detection::YoloV5(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5s.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::YoloV5 *yolov5 = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolov5s.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolov5s.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_2_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_2_mnn.jpg"; // 3. 
Test Specific Engine MNN lite::mnn::cv::detection::YoloV5 *yolov5 = @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/yolov5s.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/yolov5s.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/yolov5s.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/yolov5s.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_2_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_2_ncnn.jpg"; // 4. Test Specific Engine NCNN lite::ncnn::cv::detection::YoloV5 *yolov5 = @@ -106,10 +106,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/yolov5s.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/yolov5s.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/yolov5s.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/yolov5s.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_2_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_2_tnn.jpg"; // 5. 
Test Specific Engine TNN lite::tnn::cv::detection::YoloV5 *yolov5 = diff --git a/examples/lite/cv/test_lite_yolov5_blazeface.cpp b/examples/lite/cv/test_lite_yolov5_blazeface.cpp index c3d33097..f5624b7f 100644 --- a/examples/lite/cv/test_lite_yolov5_blazeface.cpp +++ b/examples/lite/cv/test_lite_yolov5_blazeface.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov5face-blazeface-640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5face-blazeface-640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_blazeface.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_blazeface.jpg"; lite::cv::face::detect::YOLOv5BlazeFace *yolov5_blazeface = new lite::cv::face::detect::YOLOv5BlazeFace(onnx_path); @@ -29,9 +29,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov5face-blazeface-640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5face-blazeface-640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_blazeface_onnx_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_blazeface_onnx_2.jpg"; lite::onnxruntime::cv::face::detect::YOLOv5BlazeFace *yolov5_blazeface = new lite::onnxruntime::cv::face::detect::YOLOv5BlazeFace(onnx_path); @@ -53,9 +53,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolov5face-blazeface-640x640.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolov5face-blazeface-640x640.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_face_detector_2.jpg"; - std::string save_img_path = 
"../../../logs/test_lite_yolov5_blazeface_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_blazeface_mnn_2.jpg"; lite::mnn::cv::face::detect::YOLOv5BlazeFace *yolov5_blazeface = new lite::mnn::cv::face::detect::YOLOv5BlazeFace(mnn_path); diff --git a/examples/lite/cv/test_lite_yolov5_v6.0.cpp b/examples/lite/cv/test_lite_yolov5_v6.0.cpp index 71b9a3f1..1789881c 100644 --- a/examples/lite/cv/test_lite_yolov5_v6.0.cpp +++ b/examples/lite/cv/test_lite_yolov5_v6.0.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.640-640.v.6.0.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5s.640-640.v.6.0.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_v6.0_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_v6.0_1.jpg"; // 1. Test Default Engine ONNXRuntime lite::cv::detection::YoloV5_V_6_0 *yolov5 = new lite::cv::detection::YoloV5_V_6_0(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.640-640.v.6.0.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5s.640-640.v.6.0.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_v6.0_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_v6.0_2.jpg"; // 2. 
Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::YoloV5_V_6_0 *yolov5 = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolov5s.640-640.v.6.0.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolov5s.640-640.v.6.0.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_v6.0_2_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_v6.0_2_mnn.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::YoloV5_V_6_0 *yolov5 = @@ -80,12 +80,12 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/yolov5s.640-640.v.6.0.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/yolov5s.640-640.v.6.0.opt.bin"; - // std::string param_path = "../../../hub/ncnn/cv/yolov5s6.640-640.v.6.0.opt.param"; - // std::string bin_path = "../../../hub/ncnn/cv/yolov5s6.640-640.v.6.0.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/yolov5s.640-640.v.6.0.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/yolov5s.640-640.v.6.0.opt.bin"; + // std::string param_path = "../../../examples/hub/ncnn/cv/yolov5s6.640-640.v.6.0.opt.param"; + // std::string bin_path = "../../../examples/hub/ncnn/cv/yolov5s6.640-640.v.6.0.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_v6.0_2_ncnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_v6.0_2_ncnn.jpg"; // 4. 
Test Specific Engine NCNN if (param_path.find("s6") == std::string::npos) @@ -129,10 +129,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/yolov5s.640-640.v.6.0.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/yolov5s.640-640.v.6.0.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/yolov5s.640-640.v.6.0.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/yolov5s.640-640.v.6.0.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_v6.0_2_tnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_v6.0_2_tnn.jpg"; // 5. Test Specific Engine TNN lite::tnn::cv::detection::YoloV5_V_6_0 *yolov5 = diff --git a/examples/lite/cv/test_lite_yolov5_v6.1.cpp b/examples/lite/cv/test_lite_yolov5_v6.1.cpp index 2682fc85..cebbc6fb 100644 --- a/examples/lite/cv/test_lite_yolov5_v6.1.cpp +++ b/examples/lite/cv/test_lite_yolov5_v6.1.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.v6.1.640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5s.v6.1.640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_v6.1_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_v6.1_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::YoloV5_V_6_1 *yolov5 = new lite::cv::detection::YoloV5_V_6_1(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov5s.v6.1.640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov5s.v6.1.640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_v6.1_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_v6.1_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::YoloV5_V_6_1 *yolov5 = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolov5s.v6.1.640x640.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolov5s.v6.1.640x640.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolov5_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov5_v6.1_2_mnn.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov5_v6.1_2_mnn.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::YoloV5_V_6_1 *yolov5 = diff --git a/examples/lite/cv/test_lite_yolov6.cpp b/examples/lite/cv/test_lite_yolov6.cpp index c57475ab..3fa44d98 100644 --- a/examples/lite/cv/test_lite_yolov6.cpp +++ b/examples/lite/cv/test_lite_yolov6.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolov6s-640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov6s-640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov6_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov6_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::YOLOv6 *yolov6 = new lite::cv::detection::YOLOv6(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolov6s-640x640.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolov6s-640x640.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov6_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov6_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::YOLOv6 *yolov6 = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolov6s-640x640.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolov6s-640x640.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov6_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov6_mnn_2.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::YOLOv6 *yolov6 = @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/yolov6s-640x640-for-ncnn.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/yolov6s-640x640-for-ncnn.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/yolov6s-640x640-for-ncnn.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/yolov6s-640x640-for-ncnn.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov6_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov6_ncnn_2.jpg"; // 4. 
Test Specific Engine NCNN lite::ncnn::cv::detection::YOLOv6 *yolov6 = @@ -106,10 +106,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/yolov6s-640x640.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/yolov6s-640x640.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/yolov6s-640x640.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/yolov6s-640x640.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolov6_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolov6_tnn_2.jpg"; // 5. Test Specific Engine TNN lite::tnn::cv::detection::YOLOv6 *yolov6 = diff --git a/examples/lite/cv/test_lite_yolox.cpp b/examples/lite/cv/test_lite_yolox.cpp index 77162314..3c7629a9 100644 --- a/examples/lite/cv/test_lite_yolox.cpp +++ b/examples/lite/cv/test_lite_yolox.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolox_s.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolox_s.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::YoloX *yolox = new lite::cv::detection::YoloX(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolox_s.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolox_s.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::YoloX *yolox = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolox_s.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolox_s.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_mnn_2.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::YoloX *yolox = @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/yolox_s.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/yolox_s.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/yolox_s.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/yolox_s.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_ncnn_2.jpg"; // 4. 
Test Specific Engine NCNN lite::ncnn::cv::detection::YoloX *yolox = @@ -106,10 +106,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/yolox_s.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/yolox_s.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/yolox_s.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/yolox_s.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_tnn_2.jpg"; // 5. Test Specific Engine TNN lite::tnn::cv::detection::YoloX *yolox = diff --git a/examples/lite/cv/test_lite_yolox_v0.1.1.cpp b/examples/lite/cv/test_lite_yolox_v0.1.1.cpp index f25ebf6f..7a5ebb3e 100644 --- a/examples/lite/cv/test_lite_yolox_v0.1.1.cpp +++ b/examples/lite/cv/test_lite_yolox_v0.1.1.cpp @@ -6,9 +6,9 @@ static void test_default() { - std::string onnx_path = "../../../hub/onnx/cv/yolox_s_v0.1.1.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolox_s_v0.1.1.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_1.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_v0.1.1_1.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_v0.1.1_1.jpg"; // 1. 
Test Default Engine ONNXRuntime lite::cv::detection::YoloX_V_0_1_1 *yolox = new lite::cv::detection::YoloX_V_0_1_1(onnx_path); // default @@ -30,9 +30,9 @@ static void test_default() static void test_onnxruntime() { #ifdef ENABLE_ONNXRUNTIME - std::string onnx_path = "../../../hub/onnx/cv/yolox_s_v0.1.1.onnx"; + std::string onnx_path = "../../../examples/hub/onnx/cv/yolox_s_v0.1.1.onnx"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_v0.1.1_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_v0.1.1_2.jpg"; // 2. Test Specific Engine ONNXRuntime lite::onnxruntime::cv::detection::YoloX_V_0_1_1 *yolox = @@ -55,9 +55,9 @@ static void test_onnxruntime() static void test_mnn() { #ifdef ENABLE_MNN - std::string mnn_path = "../../../hub/mnn/cv/yolox_s_v0.1.1.mnn"; + std::string mnn_path = "../../../examples/hub/mnn/cv/yolox_s_v0.1.1.mnn"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_v0.1.1_mnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_v0.1.1_mnn_2.jpg"; // 3. Test Specific Engine MNN lite::mnn::cv::detection::YoloX_V_0_1_1 *yolox = @@ -80,10 +80,10 @@ static void test_mnn() static void test_ncnn() { #ifdef ENABLE_NCNN - std::string param_path = "../../../hub/ncnn/cv/yolox_s_v0.1.1.opt.param"; - std::string bin_path = "../../../hub/ncnn/cv/yolox_s_v0.1.1.opt.bin"; + std::string param_path = "../../../examples/hub/ncnn/cv/yolox_s_v0.1.1.opt.param"; + std::string bin_path = "../../../examples/hub/ncnn/cv/yolox_s_v0.1.1.opt.bin"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_v0.1.1_ncnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_v0.1.1_ncnn_2.jpg"; // 4. 
Test Specific Engine NCNN lite::ncnn::cv::detection::YoloX_V_0_1_1 *yolox = @@ -106,10 +106,10 @@ static void test_ncnn() static void test_tnn() { #ifdef ENABLE_TNN - std::string proto_path = "../../../hub/tnn/cv/yolox_s_v0.1.1.opt.tnnproto"; - std::string model_path = "../../../hub/tnn/cv/yolox_s_v0.1.1.opt.tnnmodel"; + std::string proto_path = "../../../examples/hub/tnn/cv/yolox_s_v0.1.1.opt.tnnproto"; + std::string model_path = "../../../examples/hub/tnn/cv/yolox_s_v0.1.1.opt.tnnmodel"; std::string test_img_path = "../../../examples/lite/resources/test_lite_yolox_2.jpg"; - std::string save_img_path = "../../../logs/test_lite_yolox_v0.1.1_tnn_2.jpg"; + std::string save_img_path = "../../../examples/logs/test_lite_yolox_v0.1.1_tnn_2.jpg"; // 5. Test Specific Engine TNN lite::tnn::cv::detection::YoloX_V_0_1_1 *yolox = diff --git a/logs/.gitignore b/examples/logs/.gitignore similarity index 100% rename from logs/.gitignore rename to examples/logs/.gitignore diff --git a/logs/test_lite_age_googlenet.jpg b/examples/logs/test_lite_age_googlenet.jpg similarity index 100% rename from logs/test_lite_age_googlenet.jpg rename to examples/logs/test_lite_age_googlenet.jpg diff --git a/logs/test_lite_deeplabv3_resnet101.jpg b/examples/logs/test_lite_deeplabv3_resnet101.jpg similarity index 100% rename from logs/test_lite_deeplabv3_resnet101.jpg rename to examples/logs/test_lite_deeplabv3_resnet101.jpg diff --git a/logs/test_lite_densenet.png b/examples/logs/test_lite_densenet.png similarity index 100% rename from logs/test_lite_densenet.png rename to examples/logs/test_lite_densenet.png diff --git a/logs/test_lite_eccv16_colorizer_1.jpg b/examples/logs/test_lite_eccv16_colorizer_1.jpg similarity index 100% rename from logs/test_lite_eccv16_colorizer_1.jpg rename to examples/logs/test_lite_eccv16_colorizer_1.jpg diff --git a/logs/test_lite_eccv16_colorizer_2.jpg b/examples/logs/test_lite_eccv16_colorizer_2.jpg similarity index 100% rename from 
logs/test_lite_eccv16_colorizer_2.jpg rename to examples/logs/test_lite_eccv16_colorizer_2.jpg diff --git a/logs/test_lite_eccv16_colorizer_3.jpg b/examples/logs/test_lite_eccv16_colorizer_3.jpg similarity index 100% rename from logs/test_lite_eccv16_colorizer_3.jpg rename to examples/logs/test_lite_eccv16_colorizer_3.jpg diff --git a/logs/test_lite_emotion_ferplus.jpg b/examples/logs/test_lite_emotion_ferplus.jpg similarity index 100% rename from logs/test_lite_emotion_ferplus.jpg rename to examples/logs/test_lite_emotion_ferplus.jpg diff --git a/logs/test_lite_face_landmarks_1000.jpg b/examples/logs/test_lite_face_landmarks_1000.jpg similarity index 100% rename from logs/test_lite_face_landmarks_1000.jpg rename to examples/logs/test_lite_face_landmarks_1000.jpg diff --git a/logs/test_lite_face_landmarks_1000_0.jpg b/examples/logs/test_lite_face_landmarks_1000_0.jpg similarity index 100% rename from logs/test_lite_face_landmarks_1000_0.jpg rename to examples/logs/test_lite_face_landmarks_1000_0.jpg diff --git a/logs/test_lite_face_landmarks_1000_2.jpg b/examples/logs/test_lite_face_landmarks_1000_2.jpg similarity index 100% rename from logs/test_lite_face_landmarks_1000_2.jpg rename to examples/logs/test_lite_face_landmarks_1000_2.jpg diff --git a/logs/test_lite_fast_style_transfer_candy.jpg b/examples/logs/test_lite_fast_style_transfer_candy.jpg similarity index 100% rename from logs/test_lite_fast_style_transfer_candy.jpg rename to examples/logs/test_lite_fast_style_transfer_candy.jpg diff --git a/logs/test_lite_fast_style_transfer_mosaic.jpg b/examples/logs/test_lite_fast_style_transfer_mosaic.jpg similarity index 100% rename from logs/test_lite_fast_style_transfer_mosaic.jpg rename to examples/logs/test_lite_fast_style_transfer_mosaic.jpg diff --git a/logs/test_lite_fast_style_transfer_pointilism.jpg b/examples/logs/test_lite_fast_style_transfer_pointilism.jpg similarity index 100% rename from logs/test_lite_fast_style_transfer_pointilism.jpg rename to 
examples/logs/test_lite_fast_style_transfer_pointilism.jpg diff --git a/logs/test_lite_fast_style_transfer_rain_princes.jpg b/examples/logs/test_lite_fast_style_transfer_rain_princes.jpg similarity index 100% rename from logs/test_lite_fast_style_transfer_rain_princes.jpg rename to examples/logs/test_lite_fast_style_transfer_rain_princes.jpg diff --git a/logs/test_lite_fast_style_transfer_udnie.jpg b/examples/logs/test_lite_fast_style_transfer_udnie.jpg similarity index 100% rename from logs/test_lite_fast_style_transfer_udnie.jpg rename to examples/logs/test_lite_fast_style_transfer_udnie.jpg diff --git a/logs/test_lite_fcn_resnet101.jpg b/examples/logs/test_lite_fcn_resnet101.jpg similarity index 100% rename from logs/test_lite_fcn_resnet101.jpg rename to examples/logs/test_lite_fcn_resnet101.jpg diff --git a/logs/test_lite_fsanet.jpg b/examples/logs/test_lite_fsanet.jpg similarity index 100% rename from logs/test_lite_fsanet.jpg rename to examples/logs/test_lite_fsanet.jpg diff --git a/logs/test_lite_fsanet_2.jpg b/examples/logs/test_lite_fsanet_2.jpg similarity index 100% rename from logs/test_lite_fsanet_2.jpg rename to examples/logs/test_lite_fsanet_2.jpg diff --git a/logs/test_lite_fsanet_3.jpg b/examples/logs/test_lite_fsanet_3.jpg similarity index 100% rename from logs/test_lite_fsanet_3.jpg rename to examples/logs/test_lite_fsanet_3.jpg diff --git a/logs/test_lite_gender_googlenet.jpg b/examples/logs/test_lite_gender_googlenet.jpg similarity index 100% rename from logs/test_lite_gender_googlenet.jpg rename to examples/logs/test_lite_gender_googlenet.jpg diff --git a/logs/test_lite_mg_matting_fgr.jpg b/examples/logs/test_lite_mg_matting_fgr.jpg similarity index 100% rename from logs/test_lite_mg_matting_fgr.jpg rename to examples/logs/test_lite_mg_matting_fgr.jpg diff --git a/logs/test_lite_mg_matting_merge.jpg b/examples/logs/test_lite_mg_matting_merge.jpg similarity index 100% rename from logs/test_lite_mg_matting_merge.jpg rename to 
examples/logs/test_lite_mg_matting_merge.jpg diff --git a/logs/test_lite_mg_matting_pha.jpg b/examples/logs/test_lite_mg_matting_pha.jpg similarity index 100% rename from logs/test_lite_mg_matting_pha.jpg rename to examples/logs/test_lite_mg_matting_pha.jpg diff --git a/logs/test_lite_pfld.jpg b/examples/logs/test_lite_pfld.jpg similarity index 100% rename from logs/test_lite_pfld.jpg rename to examples/logs/test_lite_pfld.jpg diff --git a/logs/test_lite_pfld_2.jpg b/examples/logs/test_lite_pfld_2.jpg similarity index 100% rename from logs/test_lite_pfld_2.jpg rename to examples/logs/test_lite_pfld_2.jpg diff --git a/logs/test_lite_pfld_3.jpg b/examples/logs/test_lite_pfld_3.jpg similarity index 100% rename from logs/test_lite_pfld_3.jpg rename to examples/logs/test_lite_pfld_3.jpg diff --git a/logs/test_lite_scrfd.jpg b/examples/logs/test_lite_scrfd.jpg similarity index 100% rename from logs/test_lite_scrfd.jpg rename to examples/logs/test_lite_scrfd.jpg diff --git a/logs/test_lite_siggraph17_colorizer_1.jpg b/examples/logs/test_lite_siggraph17_colorizer_1.jpg similarity index 100% rename from logs/test_lite_siggraph17_colorizer_1.jpg rename to examples/logs/test_lite_siggraph17_colorizer_1.jpg diff --git a/logs/test_lite_siggraph17_colorizer_2.jpg b/examples/logs/test_lite_siggraph17_colorizer_2.jpg similarity index 100% rename from logs/test_lite_siggraph17_colorizer_2.jpg rename to examples/logs/test_lite_siggraph17_colorizer_2.jpg diff --git a/logs/test_lite_siggraph17_colorizer_3.jpg b/examples/logs/test_lite_siggraph17_colorizer_3.jpg similarity index 100% rename from logs/test_lite_siggraph17_colorizer_3.jpg rename to examples/logs/test_lite_siggraph17_colorizer_3.jpg diff --git a/logs/test_lite_ssd.jpg b/examples/logs/test_lite_ssd.jpg similarity index 100% rename from logs/test_lite_ssd.jpg rename to examples/logs/test_lite_ssd.jpg diff --git a/logs/test_lite_ssd_mobilenetv1.jpg b/examples/logs/test_lite_ssd_mobilenetv1.jpg similarity index 100% rename 
from logs/test_lite_ssd_mobilenetv1.jpg rename to examples/logs/test_lite_ssd_mobilenetv1.jpg diff --git a/logs/test_lite_ssrnet.jpg b/examples/logs/test_lite_ssrnet.jpg similarity index 100% rename from logs/test_lite_ssrnet.jpg rename to examples/logs/test_lite_ssrnet.jpg diff --git a/logs/test_lite_subpixel_cnn.jpg b/examples/logs/test_lite_subpixel_cnn.jpg similarity index 100% rename from logs/test_lite_subpixel_cnn.jpg rename to examples/logs/test_lite_subpixel_cnn.jpg diff --git a/logs/test_lite_tiny_yolov3.jpg b/examples/logs/test_lite_tiny_yolov3.jpg similarity index 100% rename from logs/test_lite_tiny_yolov3.jpg rename to examples/logs/test_lite_tiny_yolov3.jpg diff --git a/logs/test_lite_ultraface.jpg b/examples/logs/test_lite_ultraface.jpg similarity index 100% rename from logs/test_lite_ultraface.jpg rename to examples/logs/test_lite_ultraface.jpg diff --git a/logs/test_lite_ultraface_2.jpg b/examples/logs/test_lite_ultraface_2.jpg similarity index 100% rename from logs/test_lite_ultraface_2.jpg rename to examples/logs/test_lite_ultraface_2.jpg diff --git a/logs/test_lite_ultraface_3.jpg b/examples/logs/test_lite_ultraface_3.jpg similarity index 100% rename from logs/test_lite_ultraface_3.jpg rename to examples/logs/test_lite_ultraface_3.jpg diff --git a/logs/test_lite_vgg16_age.jpg b/examples/logs/test_lite_vgg16_age.jpg similarity index 100% rename from logs/test_lite_vgg16_age.jpg rename to examples/logs/test_lite_vgg16_age.jpg diff --git a/logs/test_lite_vgg16_gender.jpg b/examples/logs/test_lite_vgg16_gender.jpg similarity index 100% rename from logs/test_lite_vgg16_gender.jpg rename to examples/logs/test_lite_vgg16_gender.jpg diff --git a/logs/test_lite_yolop_merge.jpg b/examples/logs/test_lite_yolop_merge.jpg similarity index 100% rename from logs/test_lite_yolop_merge.jpg rename to examples/logs/test_lite_yolop_merge.jpg diff --git a/logs/test_lite_yolov3.jpg b/examples/logs/test_lite_yolov3.jpg similarity index 100% rename from 
logs/test_lite_yolov3.jpg rename to examples/logs/test_lite_yolov3.jpg diff --git a/logs/test_lite_yolov4.jpg b/examples/logs/test_lite_yolov4.jpg similarity index 100% rename from logs/test_lite_yolov4.jpg rename to examples/logs/test_lite_yolov4.jpg diff --git a/logs/test_lite_yolov5_1.jpg b/examples/logs/test_lite_yolov5_1.jpg similarity index 100% rename from logs/test_lite_yolov5_1.jpg rename to examples/logs/test_lite_yolov5_1.jpg diff --git a/logs/test_lite_yolov5_2.jpg b/examples/logs/test_lite_yolov5_2.jpg similarity index 100% rename from logs/test_lite_yolov5_2.jpg rename to examples/logs/test_lite_yolov5_2.jpg diff --git a/logs/test_lite_yolox_1.jpg b/examples/logs/test_lite_yolox_1.jpg similarity index 100% rename from logs/test_lite_yolox_1.jpg rename to examples/logs/test_lite_yolox_1.jpg diff --git a/logs/test_lite_yolox_2.jpg b/examples/logs/test_lite_yolox_2.jpg similarity index 100% rename from logs/test_lite_yolox_2.jpg rename to examples/logs/test_lite_yolox_2.jpg diff --git a/hub/mnn/cv/nanodet_g.mnn b/hub/mnn/cv/nanodet_g.mnn deleted file mode 100644 index a941a24a..00000000 Binary files a/hub/mnn/cv/nanodet_g.mnn and /dev/null differ diff --git a/hub/mnn/cv/nanodet_m.mnn b/hub/mnn/cv/nanodet_m.mnn deleted file mode 100644 index a1d9cf56..00000000 Binary files a/hub/mnn/cv/nanodet_m.mnn and /dev/null differ diff --git a/hub/mnn/cv/nanodet_m_0.5x.mnn b/hub/mnn/cv/nanodet_m_0.5x.mnn deleted file mode 100644 index f76260ce..00000000 Binary files a/hub/mnn/cv/nanodet_m_0.5x.mnn and /dev/null differ diff --git a/hub/mnn/cv/nanodet_m_1.5x.mnn b/hub/mnn/cv/nanodet_m_1.5x.mnn deleted file mode 100644 index 287395f3..00000000 Binary files a/hub/mnn/cv/nanodet_m_1.5x.mnn and /dev/null differ diff --git a/hub/mnn/cv/rvm_mobilenetv3_fp32-480-640.mnn b/hub/mnn/cv/rvm_mobilenetv3_fp32-480-640.mnn deleted file mode 100644 index 71babfdb..00000000 Binary files a/hub/mnn/cv/rvm_mobilenetv3_fp32-480-640.mnn and /dev/null differ diff --git 
a/hub/mnn/cv/rvm_mobilenetv3_fp32-640-480.mnn b/hub/mnn/cv/rvm_mobilenetv3_fp32-640-480.mnn deleted file mode 100644 index 8992613c..00000000 Binary files a/hub/mnn/cv/rvm_mobilenetv3_fp32-640-480.mnn and /dev/null differ diff --git a/hub/ncnn/cv/nanodet_m-opt.bin b/hub/ncnn/cv/nanodet_m-opt.bin deleted file mode 100644 index 5184f118..00000000 Binary files a/hub/ncnn/cv/nanodet_m-opt.bin and /dev/null differ diff --git a/hub/ncnn/cv/nanodet_m-opt.param b/hub/ncnn/cv/nanodet_m-opt.param deleted file mode 100644 index 711102ba..00000000 --- a/hub/ncnn/cv/nanodet_m-opt.param +++ /dev/null @@ -1,196 +0,0 @@ -7767517 -194 219 -Input input.1 0 1 input.1 -Convolution Conv_0 1 1 input.1 424 0=24 1=3 3=2 4=1 5=1 6=648 9=2 -23310=1,1.000000e-01 -Pooling MaxPool_2 1 1 424 425 1=3 2=2 3=1 5=1 -Split splitncnn_0 1 2 425 425_splitncnn_0 425_splitncnn_1 -ConvolutionDepthWise Conv_3 1 1 425_splitncnn_1 973 0=24 1=3 3=2 4=1 5=1 6=216 7=24 -Convolution Conv_4 1 1 973 430 0=58 1=1 5=1 6=1392 9=2 -23310=1,1.000000e-01 -Convolution Conv_6 1 1 425_splitncnn_0 433 0=58 1=1 5=1 6=1392 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_8 1 1 433 982 0=58 1=3 3=2 4=1 5=1 6=522 7=58 -Convolution Conv_9 1 1 982 438 0=58 1=1 5=1 6=3364 9=2 -23310=1,1.000000e-01 -Concat Concat_11 2 1 430 438 439 -ShuffleChannel Reshape_16 1 1 439 444 0=2 -Split splitncnn_1 1 2 444 444_splitncnn_0 444_splitncnn_1 -Crop Slice_27 1 1 444_splitncnn_1 455 -23309=1,0 -23310=1,58 -23311=1,0 -Crop Slice_30 1 1 444_splitncnn_0 458 -23309=1,58 -23310=1,116 -23311=1,0 -Convolution Conv_31 1 1 458 461 0=58 1=1 5=1 6=3364 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_33 1 1 461 991 0=58 1=3 4=1 5=1 6=522 7=58 -Convolution Conv_34 1 1 991 466 0=58 1=1 5=1 6=3364 9=2 -23310=1,1.000000e-01 -Concat Concat_36 2 1 455 466 467 -ShuffleChannel Reshape_41 1 1 467 472 0=2 -Split splitncnn_2 1 2 472 472_splitncnn_0 472_splitncnn_1 -Crop Slice_52 1 1 472_splitncnn_1 483 -23309=1,0 -23310=1,58 -23311=1,0 -Crop Slice_55 1 1 
472_splitncnn_0 486 -23309=1,58 -23310=1,116 -23311=1,0 -Convolution Conv_56 1 1 486 489 0=58 1=1 5=1 6=3364 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_58 1 1 489 1000 0=58 1=3 4=1 5=1 6=522 7=58 -Convolution Conv_59 1 1 1000 494 0=58 1=1 5=1 6=3364 9=2 -23310=1,1.000000e-01 -Concat Concat_61 2 1 483 494 495 -ShuffleChannel Reshape_66 1 1 495 500 0=2 -Split splitncnn_3 1 2 500 500_splitncnn_0 500_splitncnn_1 -Crop Slice_77 1 1 500_splitncnn_1 511 -23309=1,0 -23310=1,58 -23311=1,0 -Crop Slice_80 1 1 500_splitncnn_0 514 -23309=1,58 -23310=1,116 -23311=1,0 -Convolution Conv_81 1 1 514 517 0=58 1=1 5=1 6=3364 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_83 1 1 517 1009 0=58 1=3 4=1 5=1 6=522 7=58 -Convolution Conv_84 1 1 1009 522 0=58 1=1 5=1 6=3364 9=2 -23310=1,1.000000e-01 -Concat Concat_86 2 1 511 522 523 -ShuffleChannel Reshape_91 1 1 523 528 0=2 -Split splitncnn_4 1 3 528 528_splitncnn_0 528_splitncnn_1 528_splitncnn_2 -ConvolutionDepthWise Conv_92 1 1 528_splitncnn_2 1015 0=116 1=3 3=2 4=1 5=1 6=1044 7=116 -Convolution Conv_93 1 1 1015 533 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -Convolution Conv_95 1 1 528_splitncnn_1 536 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_97 1 1 536 1024 0=116 1=3 3=2 4=1 5=1 6=1044 7=116 -Convolution Conv_98 1 1 1024 541 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -Concat Concat_100 2 1 533 541 542 -ShuffleChannel Reshape_105 1 1 542 547 0=2 -Split splitncnn_5 1 2 547 547_splitncnn_0 547_splitncnn_1 -Crop Slice_116 1 1 547_splitncnn_1 558 -23309=1,0 -23310=1,116 -23311=1,0 -Crop Slice_119 1 1 547_splitncnn_0 561 -23309=1,116 -23310=1,232 -23311=1,0 -Convolution Conv_120 1 1 561 564 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_122 1 1 564 1033 0=116 1=3 4=1 5=1 6=1044 7=116 -Convolution Conv_123 1 1 1033 569 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -Concat Concat_125 2 1 558 569 570 -ShuffleChannel Reshape_130 1 1 570 575 0=2 -Split 
splitncnn_6 1 2 575 575_splitncnn_0 575_splitncnn_1 -Crop Slice_141 1 1 575_splitncnn_1 586 -23309=1,0 -23310=1,116 -23311=1,0 -Crop Slice_144 1 1 575_splitncnn_0 589 -23309=1,116 -23310=1,232 -23311=1,0 -Convolution Conv_145 1 1 589 592 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_147 1 1 592 1042 0=116 1=3 4=1 5=1 6=1044 7=116 -Convolution Conv_148 1 1 1042 597 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -Concat Concat_150 2 1 586 597 598 -ShuffleChannel Reshape_155 1 1 598 603 0=2 -Split splitncnn_7 1 2 603 603_splitncnn_0 603_splitncnn_1 -Crop Slice_166 1 1 603_splitncnn_1 614 -23309=1,0 -23310=1,116 -23311=1,0 -Crop Slice_169 1 1 603_splitncnn_0 617 -23309=1,116 -23310=1,232 -23311=1,0 -Convolution Conv_170 1 1 617 620 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_172 1 1 620 1051 0=116 1=3 4=1 5=1 6=1044 7=116 -Convolution Conv_173 1 1 1051 625 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -Concat Concat_175 2 1 614 625 626 -ShuffleChannel Reshape_180 1 1 626 631 0=2 -Split splitncnn_8 1 2 631 631_splitncnn_0 631_splitncnn_1 -Crop Slice_191 1 1 631_splitncnn_1 642 -23309=1,0 -23310=1,116 -23311=1,0 -Crop Slice_194 1 1 631_splitncnn_0 645 -23309=1,116 -23310=1,232 -23311=1,0 -Convolution Conv_195 1 1 645 648 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_197 1 1 648 1060 0=116 1=3 4=1 5=1 6=1044 7=116 -Convolution Conv_198 1 1 1060 653 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -Concat Concat_200 2 1 642 653 654 -ShuffleChannel Reshape_205 1 1 654 659 0=2 -Split splitncnn_9 1 2 659 659_splitncnn_0 659_splitncnn_1 -Crop Slice_216 1 1 659_splitncnn_1 670 -23309=1,0 -23310=1,116 -23311=1,0 -Crop Slice_219 1 1 659_splitncnn_0 673 -23309=1,116 -23310=1,232 -23311=1,0 -Convolution Conv_220 1 1 673 676 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_222 1 1 676 1069 0=116 1=3 4=1 5=1 6=1044 7=116 -Convolution Conv_223 1 1 1069 681 0=116 1=1 5=1 
6=13456 9=2 -23310=1,1.000000e-01 -Concat Concat_225 2 1 670 681 682 -ShuffleChannel Reshape_230 1 1 682 687 0=2 -Split splitncnn_10 1 2 687 687_splitncnn_0 687_splitncnn_1 -Crop Slice_241 1 1 687_splitncnn_1 698 -23309=1,0 -23310=1,116 -23311=1,0 -Crop Slice_244 1 1 687_splitncnn_0 701 -23309=1,116 -23310=1,232 -23311=1,0 -Convolution Conv_245 1 1 701 704 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_247 1 1 704 1078 0=116 1=3 4=1 5=1 6=1044 7=116 -Convolution Conv_248 1 1 1078 709 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -Concat Concat_250 2 1 698 709 710 -ShuffleChannel Reshape_255 1 1 710 715 0=2 -Split splitncnn_11 1 2 715 715_splitncnn_0 715_splitncnn_1 -Crop Slice_266 1 1 715_splitncnn_1 726 -23309=1,0 -23310=1,116 -23311=1,0 -Crop Slice_269 1 1 715_splitncnn_0 729 -23309=1,116 -23310=1,232 -23311=1,0 -Convolution Conv_270 1 1 729 732 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_272 1 1 732 1087 0=116 1=3 4=1 5=1 6=1044 7=116 -Convolution Conv_273 1 1 1087 737 0=116 1=1 5=1 6=13456 9=2 -23310=1,1.000000e-01 -Concat Concat_275 2 1 726 737 738 -ShuffleChannel Reshape_280 1 1 738 743 0=2 -Split splitncnn_12 1 3 743 743_splitncnn_0 743_splitncnn_1 743_splitncnn_2 -ConvolutionDepthWise Conv_281 1 1 743_splitncnn_2 1093 0=232 1=3 3=2 4=1 5=1 6=2088 7=232 -Convolution Conv_282 1 1 1093 748 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -Convolution Conv_284 1 1 743_splitncnn_1 751 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_286 1 1 751 1102 0=232 1=3 3=2 4=1 5=1 6=2088 7=232 -Convolution Conv_287 1 1 1102 756 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -Concat Concat_289 2 1 748 756 757 -ShuffleChannel Reshape_294 1 1 757 762 0=2 -Split splitncnn_13 1 2 762 762_splitncnn_0 762_splitncnn_1 -Crop Slice_305 1 1 762_splitncnn_1 773 -23309=1,0 -23310=1,232 -23311=1,0 -Crop Slice_308 1 1 762_splitncnn_0 776 -23309=1,232 -23310=1,464 -23311=1,0 -Convolution Conv_309 1 1 
776 779 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_311 1 1 779 1111 0=232 1=3 4=1 5=1 6=2088 7=232 -Convolution Conv_312 1 1 1111 784 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -Concat Concat_314 2 1 773 784 785 -ShuffleChannel Reshape_319 1 1 785 790 0=2 -Split splitncnn_14 1 2 790 790_splitncnn_0 790_splitncnn_1 -Crop Slice_330 1 1 790_splitncnn_1 801 -23309=1,0 -23310=1,232 -23311=1,0 -Crop Slice_333 1 1 790_splitncnn_0 804 -23309=1,232 -23310=1,464 -23311=1,0 -Convolution Conv_334 1 1 804 807 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_336 1 1 807 1120 0=232 1=3 4=1 5=1 6=2088 7=232 -Convolution Conv_337 1 1 1120 812 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -Concat Concat_339 2 1 801 812 813 -ShuffleChannel Reshape_344 1 1 813 818 0=2 -Split splitncnn_15 1 2 818 818_splitncnn_0 818_splitncnn_1 -Crop Slice_355 1 1 818_splitncnn_1 829 -23309=1,0 -23310=1,232 -23311=1,0 -Crop Slice_358 1 1 818_splitncnn_0 832 -23309=1,232 -23310=1,464 -23311=1,0 -Convolution Conv_359 1 1 832 835 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_361 1 1 835 1129 0=232 1=3 4=1 5=1 6=2088 7=232 -Convolution Conv_362 1 1 1129 840 0=232 1=1 5=1 6=53824 9=2 -23310=1,1.000000e-01 -Concat Concat_364 2 1 829 840 841 -ShuffleChannel Reshape_369 1 1 841 846 0=2 -Convolution Conv_370 1 1 528_splitncnn_0 847 0=96 1=1 5=1 6=11136 -Convolution Conv_371 1 1 743_splitncnn_0 848 0=96 1=1 5=1 6=22272 -Convolution Conv_372 1 1 846 849 0=96 1=1 5=1 6=44544 -Split splitncnn_16 1 2 849 849_splitncnn_0 849_splitncnn_1 -Interp Resize_374 1 1 849_splitncnn_1 854 0=2 1=2.000000e+00 2=2.000000e+00 -BinaryOp Add_375 2 1 848 854 855 -Split splitncnn_17 1 2 855 855_splitncnn_0 855_splitncnn_1 -Interp Resize_377 1 1 855_splitncnn_1 860 0=2 1=2.000000e+00 2=2.000000e+00 -BinaryOp Add_378 2 1 847 860 861 -Split splitncnn_18 1 2 861 861_splitncnn_0 861_splitncnn_1 -Interp Resize_380 1 1 861_splitncnn_1 866 0=2 
1=5.000000e-01 2=5.000000e-01 -BinaryOp Add_381 2 1 855_splitncnn_0 866 867 -Split splitncnn_19 1 2 867 867_splitncnn_0 867_splitncnn_1 -Interp Resize_383 1 1 867_splitncnn_1 872 0=2 1=5.000000e-01 2=5.000000e-01 -BinaryOp Add_384 2 1 849_splitncnn_0 872 873 -ConvolutionDepthWise Conv_385 1 1 861_splitncnn_0 876 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_387 1 1 876 879 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_389 1 1 879 882 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_391 1 1 882 885 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Convolution Conv_393 1 1 885 886 0=112 1=1 5=1 6=10752 -Slice Split_394 1 2 886 887 888 -23300=2,80,-233 -Sigmoid Sigmoid_395 1 1 887 889 -Reshape Reshape_397 1 1 889 891 0=-1 1=80 -Permute Transpose_398 1 1 891 cls_pred_stride_8 0=1 -Reshape Reshape_400 1 1 888 894 0=-1 1=32 -Permute Transpose_401 1 1 894 895 0=1 -Reshape Reshape_403 1 1 895 897 0=8 1=-1 -Softmax Softmax_404 1 1 897 898 0=1 1=1 -InnerProduct MatMul_405 1 1 898 901 0=1 2=8 -Reshape Reshape_407 1 1 901 903 0=4 1=-1 -Reshape Reshape_409 1 1 903 dis_pred_stride_8 0=4 1=-1 -ConvolutionDepthWise Conv_410 1 1 867_splitncnn_0 908 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_412 1 1 908 911 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_414 1 1 911 914 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_416 1 1 914 917 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Convolution Conv_418 1 1 917 918 0=112 1=1 5=1 6=10752 -Slice Split_419 1 2 918 919 920 -23300=2,80,-233 -Sigmoid Sigmoid_420 1 1 919 921 -Reshape Reshape_422 1 1 921 923 0=-1 1=80 -Permute Transpose_423 1 1 923 cls_pred_stride_16 0=1 -Reshape Reshape_425 1 1 920 926 0=-1 1=32 -Permute Transpose_426 1 1 926 927 0=1 -Reshape Reshape_428 1 1 927 929 0=8 1=-1 -Softmax Softmax_429 1 1 929 930 0=1 1=1 -InnerProduct MatMul_430 1 1 930 933 0=1 2=8 -Reshape 
Reshape_432 1 1 933 935 0=4 1=-1 -Reshape Reshape_434 1 1 935 dis_pred_stride_16 0=4 1=-1 -ConvolutionDepthWise Conv_435 1 1 873 940 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_437 1 1 940 943 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_439 1 1 943 946 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_441 1 1 946 949 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Convolution Conv_443 1 1 949 950 0=112 1=1 5=1 6=10752 -Slice Split_444 1 2 950 951 952 -23300=2,80,-233 -Sigmoid Sigmoid_445 1 1 951 953 -Reshape Reshape_447 1 1 953 955 0=-1 1=80 -Permute Transpose_448 1 1 955 cls_pred_stride_32 0=1 -Reshape Reshape_450 1 1 952 958 0=-1 1=32 -Permute Transpose_451 1 1 958 959 0=1 -Reshape Reshape_453 1 1 959 961 0=8 1=-1 -Softmax Softmax_454 1 1 961 962 0=1 1=1 -InnerProduct MatMul_455 1 1 962 965 0=1 2=8 -Reshape Reshape_457 1 1 965 967 0=4 1=-1 -Reshape Reshape_459 1 1 967 dis_pred_stride_32 0=4 1=-1 diff --git a/hub/ncnn/cv/nanodet_m_0.5x-opt.bin b/hub/ncnn/cv/nanodet_m_0.5x-opt.bin deleted file mode 100644 index fcccb5a4..00000000 Binary files a/hub/ncnn/cv/nanodet_m_0.5x-opt.bin and /dev/null differ diff --git a/hub/ncnn/cv/nanodet_m_0.5x-opt.param b/hub/ncnn/cv/nanodet_m_0.5x-opt.param deleted file mode 100644 index 552528c5..00000000 --- a/hub/ncnn/cv/nanodet_m_0.5x-opt.param +++ /dev/null @@ -1,196 +0,0 @@ -7767517 -194 219 -Input input.1 0 1 input.1 -Convolution Conv_0 1 1 input.1 424 0=24 1=3 3=2 4=1 5=1 6=648 9=2 -23310=1,1.000000e-01 -Pooling MaxPool_2 1 1 424 425 1=3 2=2 3=1 5=1 -Split splitncnn_0 1 2 425 425_splitncnn_0 425_splitncnn_1 -ConvolutionDepthWise Conv_3 1 1 425_splitncnn_1 973 0=24 1=3 3=2 4=1 5=1 6=216 7=24 -Convolution Conv_4 1 1 973 430 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -Convolution Conv_6 1 1 425_splitncnn_0 433 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_8 1 1 433 982 0=24 1=3 3=2 4=1 5=1 6=216 7=24 -Convolution Conv_9 1 1 
982 438 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -Concat Concat_11 2 1 430 438 439 -ShuffleChannel Reshape_16 1 1 439 444 0=2 -Split splitncnn_1 1 2 444 444_splitncnn_0 444_splitncnn_1 -Crop Slice_27 1 1 444_splitncnn_1 455 -23309=1,0 -23310=1,24 -23311=1,0 -Crop Slice_30 1 1 444_splitncnn_0 458 -23309=1,24 -23310=1,48 -23311=1,0 -Convolution Conv_31 1 1 458 461 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_33 1 1 461 991 0=24 1=3 4=1 5=1 6=216 7=24 -Convolution Conv_34 1 1 991 466 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -Concat Concat_36 2 1 455 466 467 -ShuffleChannel Reshape_41 1 1 467 472 0=2 -Split splitncnn_2 1 2 472 472_splitncnn_0 472_splitncnn_1 -Crop Slice_52 1 1 472_splitncnn_1 483 -23309=1,0 -23310=1,24 -23311=1,0 -Crop Slice_55 1 1 472_splitncnn_0 486 -23309=1,24 -23310=1,48 -23311=1,0 -Convolution Conv_56 1 1 486 489 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_58 1 1 489 1000 0=24 1=3 4=1 5=1 6=216 7=24 -Convolution Conv_59 1 1 1000 494 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -Concat Concat_61 2 1 483 494 495 -ShuffleChannel Reshape_66 1 1 495 500 0=2 -Split splitncnn_3 1 2 500 500_splitncnn_0 500_splitncnn_1 -Crop Slice_77 1 1 500_splitncnn_1 511 -23309=1,0 -23310=1,24 -23311=1,0 -Crop Slice_80 1 1 500_splitncnn_0 514 -23309=1,24 -23310=1,48 -23311=1,0 -Convolution Conv_81 1 1 514 517 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_83 1 1 517 1009 0=24 1=3 4=1 5=1 6=216 7=24 -Convolution Conv_84 1 1 1009 522 0=24 1=1 5=1 6=576 9=2 -23310=1,1.000000e-01 -Concat Concat_86 2 1 511 522 523 -ShuffleChannel Reshape_91 1 1 523 528 0=2 -Split splitncnn_4 1 3 528 528_splitncnn_0 528_splitncnn_1 528_splitncnn_2 -ConvolutionDepthWise Conv_92 1 1 528_splitncnn_2 1015 0=48 1=3 3=2 4=1 5=1 6=432 7=48 -Convolution Conv_93 1 1 1015 533 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Convolution Conv_95 1 1 528_splitncnn_1 536 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 
-ConvolutionDepthWise Conv_97 1 1 536 1024 0=48 1=3 3=2 4=1 5=1 6=432 7=48 -Convolution Conv_98 1 1 1024 541 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Concat Concat_100 2 1 533 541 542 -ShuffleChannel Reshape_105 1 1 542 547 0=2 -Split splitncnn_5 1 2 547 547_splitncnn_0 547_splitncnn_1 -Crop Slice_116 1 1 547_splitncnn_1 558 -23309=1,0 -23310=1,48 -23311=1,0 -Crop Slice_119 1 1 547_splitncnn_0 561 -23309=1,48 -23310=1,96 -23311=1,0 -Convolution Conv_120 1 1 561 564 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_122 1 1 564 1033 0=48 1=3 4=1 5=1 6=432 7=48 -Convolution Conv_123 1 1 1033 569 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Concat Concat_125 2 1 558 569 570 -ShuffleChannel Reshape_130 1 1 570 575 0=2 -Split splitncnn_6 1 2 575 575_splitncnn_0 575_splitncnn_1 -Crop Slice_141 1 1 575_splitncnn_1 586 -23309=1,0 -23310=1,48 -23311=1,0 -Crop Slice_144 1 1 575_splitncnn_0 589 -23309=1,48 -23310=1,96 -23311=1,0 -Convolution Conv_145 1 1 589 592 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_147 1 1 592 1042 0=48 1=3 4=1 5=1 6=432 7=48 -Convolution Conv_148 1 1 1042 597 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Concat Concat_150 2 1 586 597 598 -ShuffleChannel Reshape_155 1 1 598 603 0=2 -Split splitncnn_7 1 2 603 603_splitncnn_0 603_splitncnn_1 -Crop Slice_166 1 1 603_splitncnn_1 614 -23309=1,0 -23310=1,48 -23311=1,0 -Crop Slice_169 1 1 603_splitncnn_0 617 -23309=1,48 -23310=1,96 -23311=1,0 -Convolution Conv_170 1 1 617 620 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_172 1 1 620 1051 0=48 1=3 4=1 5=1 6=432 7=48 -Convolution Conv_173 1 1 1051 625 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Concat Concat_175 2 1 614 625 626 -ShuffleChannel Reshape_180 1 1 626 631 0=2 -Split splitncnn_8 1 2 631 631_splitncnn_0 631_splitncnn_1 -Crop Slice_191 1 1 631_splitncnn_1 642 -23309=1,0 -23310=1,48 -23311=1,0 -Crop Slice_194 1 1 631_splitncnn_0 645 -23309=1,48 -23310=1,96 
-23311=1,0 -Convolution Conv_195 1 1 645 648 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_197 1 1 648 1060 0=48 1=3 4=1 5=1 6=432 7=48 -Convolution Conv_198 1 1 1060 653 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Concat Concat_200 2 1 642 653 654 -ShuffleChannel Reshape_205 1 1 654 659 0=2 -Split splitncnn_9 1 2 659 659_splitncnn_0 659_splitncnn_1 -Crop Slice_216 1 1 659_splitncnn_1 670 -23309=1,0 -23310=1,48 -23311=1,0 -Crop Slice_219 1 1 659_splitncnn_0 673 -23309=1,48 -23310=1,96 -23311=1,0 -Convolution Conv_220 1 1 673 676 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_222 1 1 676 1069 0=48 1=3 4=1 5=1 6=432 7=48 -Convolution Conv_223 1 1 1069 681 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Concat Concat_225 2 1 670 681 682 -ShuffleChannel Reshape_230 1 1 682 687 0=2 -Split splitncnn_10 1 2 687 687_splitncnn_0 687_splitncnn_1 -Crop Slice_241 1 1 687_splitncnn_1 698 -23309=1,0 -23310=1,48 -23311=1,0 -Crop Slice_244 1 1 687_splitncnn_0 701 -23309=1,48 -23310=1,96 -23311=1,0 -Convolution Conv_245 1 1 701 704 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_247 1 1 704 1078 0=48 1=3 4=1 5=1 6=432 7=48 -Convolution Conv_248 1 1 1078 709 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Concat Concat_250 2 1 698 709 710 -ShuffleChannel Reshape_255 1 1 710 715 0=2 -Split splitncnn_11 1 2 715 715_splitncnn_0 715_splitncnn_1 -Crop Slice_266 1 1 715_splitncnn_1 726 -23309=1,0 -23310=1,48 -23311=1,0 -Crop Slice_269 1 1 715_splitncnn_0 729 -23309=1,48 -23310=1,96 -23311=1,0 -Convolution Conv_270 1 1 729 732 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_272 1 1 732 1087 0=48 1=3 4=1 5=1 6=432 7=48 -Convolution Conv_273 1 1 1087 737 0=48 1=1 5=1 6=2304 9=2 -23310=1,1.000000e-01 -Concat Concat_275 2 1 726 737 738 -ShuffleChannel Reshape_280 1 1 738 743 0=2 -Split splitncnn_12 1 3 743 743_splitncnn_0 743_splitncnn_1 743_splitncnn_2 -ConvolutionDepthWise Conv_281 1 1 
743_splitncnn_2 1093 0=96 1=3 3=2 4=1 5=1 6=864 7=96 -Convolution Conv_282 1 1 1093 748 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Convolution Conv_284 1 1 743_splitncnn_1 751 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_286 1 1 751 1102 0=96 1=3 3=2 4=1 5=1 6=864 7=96 -Convolution Conv_287 1 1 1102 756 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Concat Concat_289 2 1 748 756 757 -ShuffleChannel Reshape_294 1 1 757 762 0=2 -Split splitncnn_13 1 2 762 762_splitncnn_0 762_splitncnn_1 -Crop Slice_305 1 1 762_splitncnn_1 773 -23309=1,0 -23310=1,96 -23311=1,0 -Crop Slice_308 1 1 762_splitncnn_0 776 -23309=1,96 -23310=1,192 -23311=1,0 -Convolution Conv_309 1 1 776 779 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_311 1 1 779 1111 0=96 1=3 4=1 5=1 6=864 7=96 -Convolution Conv_312 1 1 1111 784 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Concat Concat_314 2 1 773 784 785 -ShuffleChannel Reshape_319 1 1 785 790 0=2 -Split splitncnn_14 1 2 790 790_splitncnn_0 790_splitncnn_1 -Crop Slice_330 1 1 790_splitncnn_1 801 -23309=1,0 -23310=1,96 -23311=1,0 -Crop Slice_333 1 1 790_splitncnn_0 804 -23309=1,96 -23310=1,192 -23311=1,0 -Convolution Conv_334 1 1 804 807 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_336 1 1 807 1120 0=96 1=3 4=1 5=1 6=864 7=96 -Convolution Conv_337 1 1 1120 812 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Concat Concat_339 2 1 801 812 813 -ShuffleChannel Reshape_344 1 1 813 818 0=2 -Split splitncnn_15 1 2 818 818_splitncnn_0 818_splitncnn_1 -Crop Slice_355 1 1 818_splitncnn_1 829 -23309=1,0 -23310=1,96 -23311=1,0 -Crop Slice_358 1 1 818_splitncnn_0 832 -23309=1,96 -23310=1,192 -23311=1,0 -Convolution Conv_359 1 1 832 835 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_361 1 1 835 1129 0=96 1=3 4=1 5=1 6=864 7=96 -Convolution Conv_362 1 1 1129 840 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Concat Concat_364 2 1 829 840 841 -ShuffleChannel 
Reshape_369 1 1 841 846 0=2 -Convolution Conv_370 1 1 528_splitncnn_0 847 0=96 1=1 5=1 6=4608 -Convolution Conv_371 1 1 743_splitncnn_0 848 0=96 1=1 5=1 6=9216 -Convolution Conv_372 1 1 846 849 0=96 1=1 5=1 6=18432 -Split splitncnn_16 1 2 849 849_splitncnn_0 849_splitncnn_1 -Interp Resize_374 1 1 849_splitncnn_1 854 0=2 1=2.000000e+00 2=2.000000e+00 -BinaryOp Add_375 2 1 848 854 855 -Split splitncnn_17 1 2 855 855_splitncnn_0 855_splitncnn_1 -Interp Resize_377 1 1 855_splitncnn_1 860 0=2 1=2.000000e+00 2=2.000000e+00 -BinaryOp Add_378 2 1 847 860 861 -Split splitncnn_18 1 2 861 861_splitncnn_0 861_splitncnn_1 -Interp Resize_380 1 1 861_splitncnn_1 866 0=2 1=5.000000e-01 2=5.000000e-01 -BinaryOp Add_381 2 1 855_splitncnn_0 866 867 -Split splitncnn_19 1 2 867 867_splitncnn_0 867_splitncnn_1 -Interp Resize_383 1 1 867_splitncnn_1 872 0=2 1=5.000000e-01 2=5.000000e-01 -BinaryOp Add_384 2 1 849_splitncnn_0 872 873 -ConvolutionDepthWise Conv_385 1 1 861_splitncnn_0 876 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_387 1 1 876 879 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_389 1 1 879 882 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_391 1 1 882 885 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Convolution Conv_393 1 1 885 886 0=112 1=1 5=1 6=10752 -Slice Split_394 1 2 886 887 888 -23300=2,80,-233 -Sigmoid Sigmoid_395 1 1 887 889 -Reshape Reshape_397 1 1 889 891 0=-1 1=80 -Permute Transpose_398 1 1 891 cls_pred_stride_8 0=1 -Reshape Reshape_400 1 1 888 894 0=-1 1=32 -Permute Transpose_401 1 1 894 895 0=1 -Reshape Reshape_403 1 1 895 897 0=8 1=-1 -Softmax Softmax_404 1 1 897 898 0=1 1=1 -InnerProduct MatMul_405 1 1 898 901 0=1 2=8 -Reshape Reshape_407 1 1 901 903 0=4 1=-1 -Reshape Reshape_409 1 1 903 dis_pred_stride_8 0=4 1=-1 -ConvolutionDepthWise Conv_410 1 1 867_splitncnn_0 908 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_412 1 1 908 911 0=96 1=1 5=1 6=9216 9=2 
-23310=1,1.000000e-01 -ConvolutionDepthWise Conv_414 1 1 911 914 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_416 1 1 914 917 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Convolution Conv_418 1 1 917 918 0=112 1=1 5=1 6=10752 -Slice Split_419 1 2 918 919 920 -23300=2,80,-233 -Sigmoid Sigmoid_420 1 1 919 921 -Reshape Reshape_422 1 1 921 923 0=-1 1=80 -Permute Transpose_423 1 1 923 cls_pred_stride_16 0=1 -Reshape Reshape_425 1 1 920 926 0=-1 1=32 -Permute Transpose_426 1 1 926 927 0=1 -Reshape Reshape_428 1 1 927 929 0=8 1=-1 -Softmax Softmax_429 1 1 929 930 0=1 1=1 -InnerProduct MatMul_430 1 1 930 933 0=1 2=8 -Reshape Reshape_432 1 1 933 935 0=4 1=-1 -Reshape Reshape_434 1 1 935 dis_pred_stride_16 0=4 1=-1 -ConvolutionDepthWise Conv_435 1 1 873 940 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_437 1 1 940 943 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -ConvolutionDepthWise Conv_439 1 1 943 946 0=96 1=3 4=1 5=1 6=864 7=96 9=2 -23310=1,1.000000e-01 -Convolution Conv_441 1 1 946 949 0=96 1=1 5=1 6=9216 9=2 -23310=1,1.000000e-01 -Convolution Conv_443 1 1 949 950 0=112 1=1 5=1 6=10752 -Slice Split_444 1 2 950 951 952 -23300=2,80,-233 -Sigmoid Sigmoid_445 1 1 951 953 -Reshape Reshape_447 1 1 953 955 0=-1 1=80 -Permute Transpose_448 1 1 955 cls_pred_stride_32 0=1 -Reshape Reshape_450 1 1 952 958 0=-1 1=32 -Permute Transpose_451 1 1 958 959 0=1 -Reshape Reshape_453 1 1 959 961 0=8 1=-1 -Softmax Softmax_454 1 1 961 962 0=1 1=1 -InnerProduct MatMul_455 1 1 962 965 0=1 2=8 -Reshape Reshape_457 1 1 965 967 0=4 1=-1 -Reshape Reshape_459 1 1 967 dis_pred_stride_32 0=4 1=-1 diff --git a/hub/onnx/cv/age_googlenet.onnx b/hub/onnx/cv/age_googlenet.onnx deleted file mode 100644 index c48ca848..00000000 Binary files a/hub/onnx/cv/age_googlenet.onnx and /dev/null differ diff --git a/hub/onnx/cv/chinese_ocr_angle_net.onnx b/hub/onnx/cv/chinese_ocr_angle_net.onnx deleted file mode 100755 index 3d7057e2..00000000 Binary files 
a/hub/onnx/cv/chinese_ocr_angle_net.onnx and /dev/null differ diff --git a/hub/onnx/cv/chinese_ocr_crnn_lite_lstm.onnx b/hub/onnx/cv/chinese_ocr_crnn_lite_lstm.onnx deleted file mode 100644 index 51a15c3c..00000000 Binary files a/hub/onnx/cv/chinese_ocr_crnn_lite_lstm.onnx and /dev/null differ diff --git a/hub/onnx/cv/chinese_ocr_dbnet.onnx b/hub/onnx/cv/chinese_ocr_dbnet.onnx deleted file mode 100644 index 0b1b1627..00000000 Binary files a/hub/onnx/cv/chinese_ocr_dbnet.onnx and /dev/null differ diff --git a/hub/onnx/cv/emotion-ferplus-7.onnx b/hub/onnx/cv/emotion-ferplus-7.onnx deleted file mode 100644 index 1c08b2f6..00000000 Binary files a/hub/onnx/cv/emotion-ferplus-7.onnx and /dev/null differ diff --git a/hub/onnx/cv/emotion-ferplus-8.onnx b/hub/onnx/cv/emotion-ferplus-8.onnx deleted file mode 100644 index 27b9e8f8..00000000 Binary files a/hub/onnx/cv/emotion-ferplus-8.onnx and /dev/null differ diff --git a/hub/onnx/cv/fsanet-1x1.onnx b/hub/onnx/cv/fsanet-1x1.onnx deleted file mode 100644 index 20100e40..00000000 Binary files a/hub/onnx/cv/fsanet-1x1.onnx and /dev/null differ diff --git a/hub/onnx/cv/fsanet-var.onnx b/hub/onnx/cv/fsanet-var.onnx deleted file mode 100644 index 1ac10223..00000000 Binary files a/hub/onnx/cv/fsanet-var.onnx and /dev/null differ diff --git a/hub/onnx/cv/gender_googlenet.onnx b/hub/onnx/cv/gender_googlenet.onnx deleted file mode 100644 index ea16a81a..00000000 Binary files a/hub/onnx/cv/gender_googlenet.onnx and /dev/null differ diff --git a/hub/onnx/cv/pfld-106-lite.onnx b/hub/onnx/cv/pfld-106-lite.onnx deleted file mode 100644 index 4b82f344..00000000 Binary files a/hub/onnx/cv/pfld-106-lite.onnx and /dev/null differ diff --git a/hub/onnx/cv/pfld-106-v2.onnx b/hub/onnx/cv/pfld-106-v2.onnx deleted file mode 100644 index 8e4230a9..00000000 Binary files a/hub/onnx/cv/pfld-106-v2.onnx and /dev/null differ diff --git a/hub/onnx/cv/pfld-106-v3.onnx b/hub/onnx/cv/pfld-106-v3.onnx deleted file mode 100644 index 18eb8a4f..00000000 Binary 
files a/hub/onnx/cv/pfld-106-v3.onnx and /dev/null differ diff --git a/hub/onnx/cv/ssrnet.onnx b/hub/onnx/cv/ssrnet.onnx deleted file mode 100644 index 49933771..00000000 Binary files a/hub/onnx/cv/ssrnet.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-candy-8.onnx b/hub/onnx/cv/style-candy-8.onnx deleted file mode 100644 index 529fd5ad..00000000 Binary files a/hub/onnx/cv/style-candy-8.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-candy-9.onnx b/hub/onnx/cv/style-candy-9.onnx deleted file mode 100644 index d5ac0db8..00000000 Binary files a/hub/onnx/cv/style-candy-9.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-mosaic-8.onnx b/hub/onnx/cv/style-mosaic-8.onnx deleted file mode 100644 index c0df679d..00000000 Binary files a/hub/onnx/cv/style-mosaic-8.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-mosaic-9.onnx b/hub/onnx/cv/style-mosaic-9.onnx deleted file mode 100644 index ad036d74..00000000 Binary files a/hub/onnx/cv/style-mosaic-9.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-pointilism-8.onnx b/hub/onnx/cv/style-pointilism-8.onnx deleted file mode 100644 index 7e6f7684..00000000 Binary files a/hub/onnx/cv/style-pointilism-8.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-pointilism-9.onnx b/hub/onnx/cv/style-pointilism-9.onnx deleted file mode 100644 index 39228532..00000000 Binary files a/hub/onnx/cv/style-pointilism-9.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-rain-princess-8.onnx b/hub/onnx/cv/style-rain-princess-8.onnx deleted file mode 100644 index 16a0c18e..00000000 Binary files a/hub/onnx/cv/style-rain-princess-8.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-rain-princess-9.onnx b/hub/onnx/cv/style-rain-princess-9.onnx deleted file mode 100644 index b07e5da0..00000000 Binary files a/hub/onnx/cv/style-rain-princess-9.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-udnie-8.onnx b/hub/onnx/cv/style-udnie-8.onnx deleted file mode 100644 index 17462dca..00000000 
Binary files a/hub/onnx/cv/style-udnie-8.onnx and /dev/null differ diff --git a/hub/onnx/cv/style-udnie-9.onnx b/hub/onnx/cv/style-udnie-9.onnx deleted file mode 100644 index cb64d7bd..00000000 Binary files a/hub/onnx/cv/style-udnie-9.onnx and /dev/null differ diff --git a/hub/onnx/cv/subpixel-cnn.onnx b/hub/onnx/cv/subpixel-cnn.onnx deleted file mode 100644 index c321425f..00000000 Binary files a/hub/onnx/cv/subpixel-cnn.onnx and /dev/null differ diff --git a/hub/onnx/cv/ultraface-rfb-320.onnx b/hub/onnx/cv/ultraface-rfb-320.onnx deleted file mode 100755 index a5e23096..00000000 Binary files a/hub/onnx/cv/ultraface-rfb-320.onnx and /dev/null differ diff --git a/hub/onnx/cv/ultraface-rfb-640.onnx b/hub/onnx/cv/ultraface-rfb-640.onnx deleted file mode 100755 index ae7f5703..00000000 Binary files a/hub/onnx/cv/ultraface-rfb-640.onnx and /dev/null differ diff --git a/hub/onnx/cv/ultraface-slim-320.onnx b/hub/onnx/cv/ultraface-slim-320.onnx deleted file mode 100755 index 38ffe424..00000000 Binary files a/hub/onnx/cv/ultraface-slim-320.onnx and /dev/null differ diff --git a/hub/onnx/cv/yolov5s.onnx b/hub/onnx/cv/yolov5s.onnx deleted file mode 100644 index 98ec5d21..00000000 Binary files a/hub/onnx/cv/yolov5s.onnx and /dev/null differ diff --git a/hub/onnx/cv/yolox_nano.onnx b/hub/onnx/cv/yolox_nano.onnx deleted file mode 100644 index df0bbcac..00000000 Binary files a/hub/onnx/cv/yolox_nano.onnx and /dev/null differ diff --git a/hub/onnx/cv/yolox_tiny.onnx b/hub/onnx/cv/yolox_tiny.onnx deleted file mode 100644 index d7a2edcd..00000000 Binary files a/hub/onnx/cv/yolox_tiny.onnx and /dev/null differ diff --git a/lib/.gitignore b/lib/.gitignore deleted file mode 100644 index ae93b1bc..00000000 --- a/lib/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.DS_Store -tmp \ No newline at end of file diff --git a/lib/android/.gitignore b/lib/android/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/linux/.gitignore b/lib/linux/.gitignore deleted file mode 
100644 index a89b890e..00000000 --- a/lib/linux/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.so -*.so.* \ No newline at end of file diff --git a/lib/macos/.gitignore b/lib/macos/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/macos/libMNN.dylib b/lib/macos/libMNN.dylib deleted file mode 100644 index 673c8514..00000000 Binary files a/lib/macos/libMNN.dylib and /dev/null differ diff --git a/lib/macos/libTNN.0.dylib b/lib/macos/libTNN.0.dylib deleted file mode 100644 index 6f5fcaf1..00000000 Binary files a/lib/macos/libTNN.0.dylib and /dev/null differ diff --git a/lib/macos/libTNN.dylib b/lib/macos/libTNN.dylib deleted file mode 100755 index 6f5fcaf1..00000000 Binary files a/lib/macos/libTNN.dylib and /dev/null differ diff --git a/lib/macos/libavcodec.58.dylib b/lib/macos/libavcodec.58.dylib deleted file mode 100755 index 16c228c7..00000000 Binary files a/lib/macos/libavcodec.58.dylib and /dev/null differ diff --git a/lib/macos/libavcodec.dylib b/lib/macos/libavcodec.dylib deleted file mode 100755 index 16c228c7..00000000 Binary files a/lib/macos/libavcodec.dylib and /dev/null differ diff --git a/lib/macos/libavdevice.58.dylib b/lib/macos/libavdevice.58.dylib deleted file mode 100755 index 06b8aeea..00000000 Binary files a/lib/macos/libavdevice.58.dylib and /dev/null differ diff --git a/lib/macos/libavdevice.dylib b/lib/macos/libavdevice.dylib deleted file mode 100755 index 06b8aeea..00000000 Binary files a/lib/macos/libavdevice.dylib and /dev/null differ diff --git a/lib/macos/libavfilter.7.dylib b/lib/macos/libavfilter.7.dylib deleted file mode 100755 index cf0e9ca2..00000000 Binary files a/lib/macos/libavfilter.7.dylib and /dev/null differ diff --git a/lib/macos/libavfilter.dylib b/lib/macos/libavfilter.dylib deleted file mode 100755 index cf0e9ca2..00000000 Binary files a/lib/macos/libavfilter.dylib and /dev/null differ diff --git a/lib/macos/libavformat.58.dylib b/lib/macos/libavformat.58.dylib deleted file mode 100755 index 
7d468162..00000000 Binary files a/lib/macos/libavformat.58.dylib and /dev/null differ diff --git a/lib/macos/libavformat.dylib b/lib/macos/libavformat.dylib deleted file mode 100755 index 7d468162..00000000 Binary files a/lib/macos/libavformat.dylib and /dev/null differ diff --git a/lib/macos/libavutil.56.dylib b/lib/macos/libavutil.56.dylib deleted file mode 100755 index dd8cfba3..00000000 Binary files a/lib/macos/libavutil.56.dylib and /dev/null differ diff --git a/lib/macos/libavutil.dylib b/lib/macos/libavutil.dylib deleted file mode 100755 index dd8cfba3..00000000 Binary files a/lib/macos/libavutil.dylib and /dev/null differ diff --git a/lib/macos/libncnn.1.dylib b/lib/macos/libncnn.1.dylib deleted file mode 100644 index 8617570d..00000000 Binary files a/lib/macos/libncnn.1.dylib and /dev/null differ diff --git a/lib/macos/libncnn.dylib b/lib/macos/libncnn.dylib deleted file mode 100755 index 8617570d..00000000 Binary files a/lib/macos/libncnn.dylib and /dev/null differ diff --git a/lib/macos/libonnxruntime.1.10.0.dylib b/lib/macos/libonnxruntime.1.10.0.dylib deleted file mode 100755 index ef6d7148..00000000 Binary files a/lib/macos/libonnxruntime.1.10.0.dylib and /dev/null differ diff --git a/lib/macos/libonnxruntime.dylib b/lib/macos/libonnxruntime.dylib deleted file mode 100755 index ef6d7148..00000000 Binary files a/lib/macos/libonnxruntime.dylib and /dev/null differ diff --git a/lib/macos/libopencv_calib3d.4.5.dylib b/lib/macos/libopencv_calib3d.4.5.dylib deleted file mode 100755 index 139d64e3..00000000 Binary files a/lib/macos/libopencv_calib3d.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_calib3d.dylib b/lib/macos/libopencv_calib3d.dylib deleted file mode 100755 index 139d64e3..00000000 Binary files a/lib/macos/libopencv_calib3d.dylib and /dev/null differ diff --git a/lib/macos/libopencv_core.4.5.dylib b/lib/macos/libopencv_core.4.5.dylib deleted file mode 100755 index 4e7d57b2..00000000 Binary files 
a/lib/macos/libopencv_core.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_core.dylib b/lib/macos/libopencv_core.dylib deleted file mode 100755 index 4e7d57b2..00000000 Binary files a/lib/macos/libopencv_core.dylib and /dev/null differ diff --git a/lib/macos/libopencv_features2d.4.5.dylib b/lib/macos/libopencv_features2d.4.5.dylib deleted file mode 100755 index 0d58dd7a..00000000 Binary files a/lib/macos/libopencv_features2d.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_features2d.dylib b/lib/macos/libopencv_features2d.dylib deleted file mode 100755 index 0d58dd7a..00000000 Binary files a/lib/macos/libopencv_features2d.dylib and /dev/null differ diff --git a/lib/macos/libopencv_flann.4.5.dylib b/lib/macos/libopencv_flann.4.5.dylib deleted file mode 100755 index e30201bd..00000000 Binary files a/lib/macos/libopencv_flann.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_flann.dylib b/lib/macos/libopencv_flann.dylib deleted file mode 100755 index e30201bd..00000000 Binary files a/lib/macos/libopencv_flann.dylib and /dev/null differ diff --git a/lib/macos/libopencv_gapi.4.5.dylib b/lib/macos/libopencv_gapi.4.5.dylib deleted file mode 100755 index d604bc27..00000000 Binary files a/lib/macos/libopencv_gapi.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_gapi.dylib b/lib/macos/libopencv_gapi.dylib deleted file mode 100755 index d604bc27..00000000 Binary files a/lib/macos/libopencv_gapi.dylib and /dev/null differ diff --git a/lib/macos/libopencv_highgui.4.5.dylib b/lib/macos/libopencv_highgui.4.5.dylib deleted file mode 100755 index 54244e0a..00000000 Binary files a/lib/macos/libopencv_highgui.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_highgui.dylib b/lib/macos/libopencv_highgui.dylib deleted file mode 100755 index 54244e0a..00000000 Binary files a/lib/macos/libopencv_highgui.dylib and /dev/null differ diff --git a/lib/macos/libopencv_imgcodecs.4.5.dylib 
b/lib/macos/libopencv_imgcodecs.4.5.dylib deleted file mode 100755 index 932bd6ab..00000000 Binary files a/lib/macos/libopencv_imgcodecs.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_imgcodecs.dylib b/lib/macos/libopencv_imgcodecs.dylib deleted file mode 100755 index 932bd6ab..00000000 Binary files a/lib/macos/libopencv_imgcodecs.dylib and /dev/null differ diff --git a/lib/macos/libopencv_imgproc.4.5.dylib b/lib/macos/libopencv_imgproc.4.5.dylib deleted file mode 100755 index fa7ffef5..00000000 Binary files a/lib/macos/libopencv_imgproc.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_imgproc.dylib b/lib/macos/libopencv_imgproc.dylib deleted file mode 100755 index fa7ffef5..00000000 Binary files a/lib/macos/libopencv_imgproc.dylib and /dev/null differ diff --git a/lib/macos/libopencv_ml.4.5.dylib b/lib/macos/libopencv_ml.4.5.dylib deleted file mode 100755 index 927cd782..00000000 Binary files a/lib/macos/libopencv_ml.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_ml.dylib b/lib/macos/libopencv_ml.dylib deleted file mode 100755 index 927cd782..00000000 Binary files a/lib/macos/libopencv_ml.dylib and /dev/null differ diff --git a/lib/macos/libopencv_objdetect.4.5.dylib b/lib/macos/libopencv_objdetect.4.5.dylib deleted file mode 100755 index 0d273a36..00000000 Binary files a/lib/macos/libopencv_objdetect.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_objdetect.dylib b/lib/macos/libopencv_objdetect.dylib deleted file mode 100755 index 0d273a36..00000000 Binary files a/lib/macos/libopencv_objdetect.dylib and /dev/null differ diff --git a/lib/macos/libopencv_photo.4.5.dylib b/lib/macos/libopencv_photo.4.5.dylib deleted file mode 100755 index 103cb101..00000000 Binary files a/lib/macos/libopencv_photo.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_photo.dylib b/lib/macos/libopencv_photo.dylib deleted file mode 100755 index 103cb101..00000000 Binary files a/lib/macos/libopencv_photo.dylib 
and /dev/null differ diff --git a/lib/macos/libopencv_stitching.4.5.dylib b/lib/macos/libopencv_stitching.4.5.dylib deleted file mode 100755 index dc27791b..00000000 Binary files a/lib/macos/libopencv_stitching.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_stitching.dylib b/lib/macos/libopencv_stitching.dylib deleted file mode 100755 index dc27791b..00000000 Binary files a/lib/macos/libopencv_stitching.dylib and /dev/null differ diff --git a/lib/macos/libopencv_video.4.5.dylib b/lib/macos/libopencv_video.4.5.dylib deleted file mode 100755 index b19cfda5..00000000 Binary files a/lib/macos/libopencv_video.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_video.dylib b/lib/macos/libopencv_video.dylib deleted file mode 100755 index b19cfda5..00000000 Binary files a/lib/macos/libopencv_video.dylib and /dev/null differ diff --git a/lib/macos/libopencv_videoio.4.5.dylib b/lib/macos/libopencv_videoio.4.5.dylib deleted file mode 100755 index cb0f9193..00000000 Binary files a/lib/macos/libopencv_videoio.4.5.dylib and /dev/null differ diff --git a/lib/macos/libopencv_videoio.dylib b/lib/macos/libopencv_videoio.dylib deleted file mode 100755 index cb0f9193..00000000 Binary files a/lib/macos/libopencv_videoio.dylib and /dev/null differ diff --git a/lib/macos/libswresample.3.dylib b/lib/macos/libswresample.3.dylib deleted file mode 100755 index e154ade5..00000000 Binary files a/lib/macos/libswresample.3.dylib and /dev/null differ diff --git a/lib/macos/libswresample.dylib b/lib/macos/libswresample.dylib deleted file mode 100755 index e154ade5..00000000 Binary files a/lib/macos/libswresample.dylib and /dev/null differ diff --git a/lib/macos/libswscale.5.dylib b/lib/macos/libswscale.5.dylib deleted file mode 100755 index b9084299..00000000 Binary files a/lib/macos/libswscale.5.dylib and /dev/null differ diff --git a/lib/macos/libswscale.dylib b/lib/macos/libswscale.dylib deleted file mode 100755 index b9084299..00000000 Binary files 
a/lib/macos/libswscale.dylib and /dev/null differ diff --git a/lib/windows/.gitignore b/lib/windows/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/lite/backend.h b/lite/backend.h deleted file mode 100644 index 9990c0e6..00000000 --- a/lite/backend.h +++ /dev/null @@ -1,78 +0,0 @@ -// -// Created by DefTruth on 2021/5/30. -// - -#ifndef LITE_AI_BACKEND_H -#define LITE_AI_BACKEND_H - -#include "config.h" - -// BACKEND ONNXRuntime -#ifdef BACKEND_ONNXRUNTIME - -# ifdef BACKEND_NCNN -# undef BACKEND_NCNN -# endif - -# ifdef BACKEND_MNN -# undef BACKEND_MNN -# endif - -# ifdef BACKEND_TNN -# undef BACKEND_TNN -# endif - -#endif - -// BACKEND NCNN -#ifdef BACKEND_NCNN - -# ifdef BACKEND_ONNXRUNTIME -# undef BACKEND_ONNXRUNTIME -# endif - -# ifdef BACKEND_MNN -# undef BACKEND_MNN -# endif - -# ifdef BACKEND_TNN -# undef BACKEND_TNN -# endif - -#endif - -// BACKEND MNN -#ifdef BACKEND_MNN - -# ifdef BACKEND_NCNN -# undef BACKEND_NCNN -# endif - -# ifdef BACKEND_ONNXRUNTIME -# undef BACKEND_ONNXRUNTIME -# endif - -# ifdef BACKEND_TNN -# undef BACKEND_TNN -# endif - -#endif - -// BACKEND TNN -#ifdef BACKEND_TNN - -# ifdef BACKEND_NCNN -# undef BACKEND_NCNN -# endif - -# ifdef BACKEND_MNN -# undef BACKEND_MNN -# endif - -# ifdef BACKEND_ONNXRUNTIME -# undef BACKEND_ONNXRUNTIME -# endif - -#endif - -#endif //LITE_AI_BACKEND_H diff --git a/lite/config.h b/lite/config.h index e3822ad8..af70089c 100644 --- a/lite/config.h +++ b/lite/config.h @@ -2,20 +2,12 @@ #define LITE_AI_CONFIG_H #define ENABLE_ONNXRUNTIME -#define ENABLE_MNN -#define ENABLE_NCNN -#define ENABLE_TNN +/* #undef ENABLE_MNN */ +/* #undef ENABLE_NCNN */ +/* #undef ENABLE_TNN */ /* #undef ENABLE_ONNXRUNTIME_CUDA */ #define ENABLE_OPENCV_VIDEOIO #define ENABLE_DEBUG_STRING -/* #undef ENABLE_LITE_OPENMP */ -/* #undef ENABLE_LITE_OPENGL */ -/* #undef ENABLE_LITE_VULKAN */ -/* #undef ENABLE_LITE_CUDA */ -#define BACKEND_ONNXRUNTIME -/* #undef BACKEND_MNN */ -/* #undef BACKEND_NCNN */ -/* 
#undef BACKEND_TNN */ #endif //LITE_AI_CONFIG_H diff --git a/lite/config.h.in b/lite/config.h.in index 4d1ebeec..5d73dd97 100644 --- a/lite/config.h.in +++ b/lite/config.h.in @@ -8,14 +8,6 @@ #cmakedefine ENABLE_ONNXRUNTIME_CUDA #cmakedefine ENABLE_OPENCV_VIDEOIO #cmakedefine ENABLE_DEBUG_STRING -#cmakedefine ENABLE_LITE_OPENMP -#cmakedefine ENABLE_LITE_OPENGL -#cmakedefine ENABLE_LITE_VULKAN -#cmakedefine ENABLE_LITE_CUDA -#cmakedefine BACKEND_ONNXRUNTIME -#cmakedefine BACKEND_MNN -#cmakedefine BACKEND_NCNN -#cmakedefine BACKEND_TNN #endif //LITE_AI_CONFIG_H diff --git a/lite/lite.h b/lite/lite.h index 2d593881..b623c74b 100644 --- a/lite/lite.h +++ b/lite/lite.h @@ -8,6 +8,5 @@ #include "types.h" #include "utils.h" #include "models.h" -#include "pipeline.h" #endif //LITE_AI_LITE_H diff --git a/lite/models.h b/lite/models.h index 4d0100b6..dedfacb9 100644 --- a/lite/models.h +++ b/lite/models.h @@ -5,7 +5,7 @@ #ifndef LITE_AI_MODELS_H #define LITE_AI_MODELS_H -#include "backend.h" +#include "config.h" // ENABLE_ONNXRUNTIME #ifdef ENABLE_ONNXRUNTIME @@ -364,407 +364,12 @@ #endif -// Default Engine ONNXRuntime -namespace lite -{ - // mediapipe - namespace mediapipe - { -#ifdef BACKEND_ONNXRUNTIME -#endif - } - - namespace cv - { -#ifdef BACKEND_ONNXRUNTIME - typedef ortcv::FSANet _FSANet; - typedef ortcv::PFLD _PFLD; - typedef ortcv::UltraFace _UltraFace; - typedef ortcv::AgeGoogleNet _AgeGoogleNet; - typedef ortcv::GenderGoogleNet _GenderGoogleNet; - typedef ortcv::EmotionFerPlus _EmotionFerPlus; - typedef ortcv::VGG16Age _VGG16Age; - typedef ortcv::VGG16Gender _VGG16Gender; - typedef ortcv::SSRNet _SSRNet; - typedef ortcv::FastStyleTransfer _FastStyleTransfer; - typedef ortcv::GlintArcFace _GlintArcFace; - typedef ortcv::Colorizer _Colorizer; - typedef ortcv::SubPixelCNN _SubPixelCNN; - typedef ortcv::YoloV4 _YoloV4; - typedef ortcv::YoloV3 _YoloV3; - typedef ortcv::YoloV5 _YoloV5; - typedef ortcv::EfficientNetLite4 _EfficientNetLite4; - typedef 
ortcv::ShuffleNetV2 _ShuffleNetV2; - typedef ortcv::TinyYoloV3 _TinyYoloV3; - typedef ortcv::SSD _SSD; - typedef ortcv::SSDMobileNetV1 _SSDMobileNetV1; - typedef ortcv::DeepLabV3ResNet101 _DeepLabV3ResNet101; - typedef ortcv::DenseNet _DenseNet; - typedef ortcv::FCNResNet101 _FCNResNet101; - typedef ortcv::GhostNet _GhostNet; - typedef ortcv::HdrDNet _HdrDNet; - typedef ortcv::IBNNet _IBNNet; - typedef ortcv::MobileNetV2 _MobileNetV2; - typedef ortcv::ResNet _ResNet; - typedef ortcv::ResNeXt _ResNeXt; - typedef ortcv::GlintCosFace _GlintCosFace; - typedef ortcv::GlintPartialFC _GlintPartialFC; - typedef ortcv::FaceNet _FaceNet; - typedef ortcv::FocalArcFace _FocalArcFace; - typedef ortcv::FocalAsiaArcFace _FocalAsiaArcFace; - typedef ortcv::TencentCifpFace _TencentCifpFace; - typedef ortcv::TencentCurricularFace _TencentCurricularFace; - typedef ortcv::CenterLossFace _CenterLossFace; - typedef ortcv::SphereFace _SphereFace; - typedef ortcv::PoseRobustFace _PoseRobustFace; - typedef ortcv::NaivePoseRobustFace _NaivePoseRobustFace; - typedef ortcv::MobileFaceNet _MobileFaceNet; - typedef ortcv::CavaGhostArcFace _CavaGhostArcFace; - typedef ortcv::CavaCombinedFace _CavaCombinedFace; - typedef ortcv::YoloX _YoloX; - typedef ortcv::MobileSEFocalFace _MobileSEFocalFace; - typedef ortcv::EfficientEmotion7 _EfficientEmotion7; - typedef ortcv::EfficientEmotion8 _EfficientEmotion8; - typedef ortcv::MobileEmotion7 _MobileEmotion7; - typedef ortcv::ReXNetEmotion7 _ReXNetEmotion7; - typedef ortcv::PFLD98 _PFLD98; - typedef ortcv::PFLD68 _PFLD68; - typedef ortcv::MobileNetV268 _MobileNetV268; - typedef ortcv::MobileNetV2SE68 _MobileNetV2SE68; - typedef ortcv::FaceLandmark1000 _FaceLandmark1000; - typedef ortcv::RetinaFace _RetinaFace; - typedef ortcv::FaceBoxes _FaceBoxes; - typedef ortcv::TinyYoloV4VOC _TinyYoloV4VOC; - typedef ortcv::TinyYoloV4COCO _TinyYoloV4COCO; - typedef ortcv::YoloR _YoloR; - typedef ortcv::ScaledYoloV4 _ScaledYoloV4; - typedef ortcv::EfficientDet 
_EfficientDet; - typedef ortcv::EfficientDetD7 _EfficientDetD7; - typedef ortcv::EfficientDetD8 _EfficientDetD8; - typedef ortcv::YOLOP _YOLOP; - typedef ortcv::RobustVideoMatting _RobustVideoMatting; - typedef ortcv::NanoDet _NanoDet; - typedef ortcv::NanoDetEfficientNetLite _NanoDetEfficientNetLite; - typedef ortcv::YoloX_V_0_1_1 _YoloX_V_0_1_1; - typedef ortcv::YoloV5_V_6_0 _YoloV5_V_6_0; - typedef ortcv::MGMatting _MGMatting; - typedef ortcv::NanoDetPlus _NanoDetPlus; - typedef ortcv::SCRFD _SCRFD; - typedef ortcv::YOLO5Face _YOLO5Face; - typedef ortcv::FaceBoxesV2 _FaceBoxesV2; - typedef ortcv::PIPNet98 _PIPNet98; - typedef ortcv::PIPNet68 _PIPNet68; - typedef ortcv::PIPNet29 _PIPNet29; - typedef ortcv::PIPNet19 _PIPNet19; - typedef ortcv::InsectDet _InsectDet; - typedef ortcv::InsectID _InsectID; - typedef ortcv::PlantID _PlantID; - typedef ortcv::MODNet _MODNet; - typedef ortcv::MODNetDyn _MODNetDyn; - typedef ortcv::BackgroundMattingV2 _BackgroundMattingV2; - typedef ortcv::BackgroundMattingV2Dyn _BackgroundMattingV2Dyn; - typedef ortcv::YOLOv5BlazeFace _YOLOv5BlazeFace; - typedef ortcv::YoloV5_V_6_1 _YoloV5_V_6_1; - typedef ortcv::HeadSeg _HeadSeg; - typedef ortcv::FemalePhoto2Cartoon _FemalePhoto2Cartoon; - typedef ortcv::FastPortraitSeg _FastPortraitSeg; - typedef ortcv::PortraitSegSINet _PortraitSegSINet; - typedef ortcv::PortraitSegExtremeC3Net _PortraitSegExtremeC3Net; - typedef ortcv::HairSeg _HairSeg; - typedef ortcv::FaceHairSeg _FaceHairSeg; - typedef ortcv::MobileHumanMatting _MobileHumanMatting; - typedef ortcv::MobileHairSeg _MobileHairSeg; - typedef ortcv::YOLOv6 _YOLOv6; - typedef ortcv::FaceParsingBiSeNet _FaceParsingBiSeNet; - typedef ortcv::FaceParsingBiSeNetDyn _FaceParsingBiSeNetDyn; - -#endif - - // 1. 
classification - namespace classification - { -#ifdef BACKEND_ONNXRUNTIME - typedef _EfficientNetLite4 EfficientNetLite4; - typedef _ShuffleNetV2 ShuffleNetV2; - typedef _DenseNet DenseNet; - typedef _GhostNet GhostNet; - typedef _HdrDNet HdrDNet; - typedef _IBNNet IBNNet; - typedef _MobileNetV2 MobileNetV2; - typedef _ResNet ResNet; - typedef _ResNeXt ResNeXt; - typedef _InsectID InsectID; - typedef _PlantID PlantID; -#endif - } - - // 2. general object detection - namespace detection - { -#ifdef BACKEND_ONNXRUNTIME - typedef _YoloV3 YoloV3; - typedef _YoloV4 YoloV4; - typedef _YoloV5 YoloV5; - typedef _TinyYoloV3 TinyYoloV3; - typedef _SSD SSD; - typedef _SSDMobileNetV1 SSDMobileNetV1; - typedef _YoloX YoloX; - typedef _TinyYoloV4VOC TinyYoloV4VOC; - typedef _TinyYoloV4COCO TinyYoloV4COCO; - typedef _YoloR YoloR; - typedef _ScaledYoloV4 ScaledYoloV4; - typedef _EfficientDet EfficientDet; - typedef _EfficientDetD7 EfficientDetD7; - typedef _EfficientDetD8 EfficientDetD8; - typedef _YOLOP YOLOP; - typedef _NanoDet NanoDet; - typedef _NanoDetEfficientNetLite NanoDetEfficientNetLite; - typedef _YoloX_V_0_1_1 YoloX_V_0_1_1; - typedef _YoloV5_V_6_0 YoloV5_V_6_0; - typedef _NanoDetPlus NanoDetPlus; - typedef _InsectDet InsectDet; - typedef _YoloV5_V_6_1 YoloV5_V_6_1; - typedef _YOLOv6 YOLOv6; -#endif - } - // 3. face detection & facial attributes detection - namespace face - { - namespace detect - { -#ifdef BACKEND_ONNXRUNTIME - typedef _UltraFace UltraFace; // face detection. - typedef _RetinaFace RetinaFace; - typedef _FaceBoxes FaceBoxes; - typedef _SCRFD SCRFD; - typedef _YOLO5Face YOLO5Face; - typedef _FaceBoxesV2 FaceBoxesV2; - typedef _YOLOv5BlazeFace YOLOv5BlazeFace; -#endif - } - - namespace align - { -#ifdef BACKEND_ONNXRUNTIME - typedef _PFLD PFLD; // facial landmarks detection. 
106 points - typedef _PFLD98 PFLD98; // 98 points - typedef _PFLD68 PFLD68; // 68 points - typedef _MobileNetV268 MobileNetV268; // 68 points - typedef _MobileNetV2SE68 MobileNetV2SE68; // 68 points - typedef _FaceLandmark1000 FaceLandmark1000; // 1000 points - typedef _PIPNet98 PIPNet98; // 98 points - typedef _PIPNet68 PIPNet68; // 68 points - typedef _PIPNet29 PIPNet29; // 29 points - typedef _PIPNet19 PIPNet19; // 19 points -#endif - } - - namespace align3d - { -#ifdef BACKEND_ONNXRUNTIME - -#endif - } - - namespace pose - { -#ifdef BACKEND_ONNXRUNTIME - typedef _FSANet FSANet; // head pose estimation. -#endif - } - - namespace attr - { -#ifdef BACKEND_ONNXRUNTIME - typedef _AgeGoogleNet AgeGoogleNet; // age estimation - typedef _GenderGoogleNet GenderGoogleNet; // gender estimation - typedef _VGG16Age VGG16Age; // age estimation - typedef _VGG16Gender VGG16Gender; // gender estimation - typedef _EmotionFerPlus EmotionFerPlus; // emotion detection - typedef _SSRNet SSRNet; // age estimation - typedef _EfficientEmotion7 EfficientEmotion7; - typedef _EfficientEmotion8 EfficientEmotion8; - typedef _MobileEmotion7 MobileEmotion7; - typedef _ReXNetEmotion7 ReXNetEmotion7; -#endif - } - - } - // 4. face recognition - namespace faceid - { -#ifdef BACKEND_ONNXRUNTIME - typedef _GlintArcFace GlintArcFace; // - typedef _GlintCosFace GlintCosFace; // - typedef _GlintPartialFC GlintPartialFC; - typedef _FaceNet FaceNet; - typedef _FocalArcFace FocalArcFace; - typedef _FocalAsiaArcFace FocalAsiaArcFace; - typedef _TencentCurricularFace TencentCurricularFace; - typedef _TencentCifpFace TencentCifpFace; - typedef _CenterLossFace CenterLossFace; - typedef _SphereFace SphereFace; - typedef _PoseRobustFace PoseRobustFace; - typedef _NaivePoseRobustFace NaivePoseRobustFace; - typedef _MobileFaceNet MobileFaceNet; - typedef _CavaGhostArcFace CavaGhostArcFace; - typedef _CavaCombinedFace CavaCombinedFace; - typedef _MobileSEFocalFace MobileSEFocalFace; -#endif - - } - // 5. 
segmentation - namespace segmentation - { -#ifdef BACKEND_ONNXRUNTIME - typedef _DeepLabV3ResNet101 DeepLabV3ResNet101; - typedef _FCNResNet101 FCNResNet101; - typedef _HeadSeg HeadSeg; - typedef _FastPortraitSeg FastPortraitSeg; - typedef _PortraitSegSINet PortraitSegSINet; - typedef _PortraitSegExtremeC3Net PortraitSegExtremeC3Net; - typedef _HairSeg HairSeg; - typedef _FaceHairSeg FaceHairSeg; - typedef _MobileHairSeg MobileHairSeg; - typedef _FaceParsingBiSeNet FaceParsingBiSeNet; - typedef _FaceParsingBiSeNetDyn FaceParsingBiSeNetDyn; -#endif - - } - // 6. reid - namespace reid - { -#ifdef BACKEND_ONNXRUNTIME -#endif - } - - // 7. ocr - namespace ocr - { -#ifdef BACKEND_ONNXRUNTIME -#endif - } - // 8. neural rendering - namespace render - { -#ifdef BACKEND_ONNXRUNTIME -#endif - } - // 9. style transfer - namespace style - { -#ifdef BACKEND_ONNXRUNTIME - typedef _FastStyleTransfer FastStyleTransfer; - typedef _FemalePhoto2Cartoon FemalePhoto2Cartoon; -#endif - } - - // 10. colorization - namespace colorization - { -#ifdef BACKEND_ONNXRUNTIME - typedef _Colorizer Colorizer; -#endif - } - // 11. super resolution - namespace resolution - { -#ifdef BACKEND_ONNXRUNTIME - typedef _SubPixelCNN SubPixelCNN; -#endif - } - // 12. image & face & human matting - namespace matting - { -#ifdef BACKEND_ONNXRUNTIME - typedef _RobustVideoMatting RobustVideoMatting; - typedef _MGMatting MGMatting; - typedef _MODNet MODNet; - typedef _MODNetDyn MODNetDyn; - typedef _BackgroundMattingV2 BackgroundMattingV2; - typedef _BackgroundMattingV2Dyn BackgroundMattingV2Dyn; - typedef _MobileHumanMatting MobileHumanMatting; -#endif - } - } - - namespace asr - { -#ifdef BACKEND_ONNXRUNTIME -#endif - } - - namespace nlp - { -#ifdef BACKEND_ONNXRUNTIME -#endif - } -} - -// models for mobile device -namespace lite -{ - namespace mobile - { - // classification - namespace classification - { - } - // object detection - namespace detection - { - } - // face etc. 
- namespace face - { - namespace detect - { - } - namespace align - { - } - namespace pose - { - } - namespace attr - { - } - } - // face recognition - namespace faceid - { - } - // segmentation - namespace segmentation - { - } - // reid - namespace reid - { - } - // ocr - namespace ocr - { - } - // matting - namespace matting - { - } - - } -} - // ONNXRuntime version namespace lite { #ifdef ENABLE_ONNXRUNTIME namespace onnxruntime { - // mediapipe - namespace mediapipe - { - } - namespace cv { typedef ortcv::FSANet _ONNXFSANet; @@ -1055,11 +660,6 @@ namespace lite #ifdef ENABLE_MNN namespace mnn { - // mediapipe - namespace mediapipe - { - } - namespace cv { // classification @@ -1522,4 +1122,19 @@ namespace lite #endif } +// Default Engine ONNXRuntime +namespace lite +{ +#if defined(ENABLE_ONNXRUNTIME) + namespace cv = lite::onnxruntime::cv; +#elif defined(ENABLE_MNN) + namespace cv = lite::mnn::cv; +#elif defined(ENABLE_NCNN) + namespace cv = lite::ncnn::cv; +#elif defined(ENABLE_TNN) + namespace cv = lite::tnn::cv; +#endif + +} + #endif //LITE_AI_MODELS_H diff --git a/lite/ort/core/ort_config.h b/lite/ort/core/ort_config.h index ad15c96d..b9f5401f 100644 --- a/lite/ort/core/ort_config.h +++ b/lite/ort/core/ort_config.h @@ -9,18 +9,27 @@ #include "lite/lite.ai.headers.h" #ifdef ENABLE_ONNXRUNTIME -#include "onnxruntime/core/session/onnxruntime_cxx_api.h" -/* Need to define USE_CUDA macro manually by users who want to - * enable onnxruntime and lite.ai.toolkit with CUDA support. 
It - * seems that the latest onnxruntime will no longer pre-defined the - * USE_CUDA macro and just let the decision make by users - * who really know the environments of running device.*/ -// #define USE_CUDA -# ifdef USE_CUDA -#include "onnxruntime/core/providers/cuda/cuda_provider_factory.h" -# endif +#include "onnxruntime_cxx_api.h" #endif +inline static std::string OrtCompatiableGetInputName(size_t index, OrtAllocator* allocator, + Ort::Session *ort_session) { +#if ORT_API_VERSION >= 14 + return std::string(ort_session->GetInputNameAllocated(index, allocator).get()); +#else + return std::string(ort_session->GetInputName(i, allocator)); +#endif +} + +inline static std::string OrtCompatiableGetOutputName(size_t index, OrtAllocator* allocator, + Ort::Session *ort_session) { +#if ORT_API_VERSION >= 14 + return std::string(ort_session->GetOutputNameAllocated(index, allocator).get()); +#else + return std::string(ort_session->GetOutputName(i, allocator)); +#endif +} + namespace core {} #endif //LITE_AI_ORT_CORE_ORT_CONFIG_H diff --git a/lite/ort/core/ort_handler.cpp b/lite/ort/core/ort_handler.cpp index 328c1a75..277514b6 100644 --- a/lite/ort/core/ort_handler.cpp +++ b/lite/ort/core/ort_handler.cpp @@ -43,11 +43,14 @@ void BasicOrtHandler::initialize_handler() // 1. session ort_session = new Ort::Session(ort_env, onnx_path, session_options); - Ort::AllocatorWithDefaultOptions allocator; + // Ort::AllocatorWithDefaultOptions allocator; + Ort::Allocator allocator(*ort_session, memory_info_handler); // 2. input name & input dims - input_name = ort_session->GetInputName(0, allocator); input_node_names.resize(1); - input_node_names[0] = input_name; + input_node_names_.resize(1); + input_node_names_[0] = OrtCompatiableGetInputName(0, allocator, ort_session); + input_node_names[0] = input_node_names_[0].data(); + // 3. type info. 
Ort::TypeInfo type_info = ort_session->GetInputTypeInfo(0); auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); @@ -59,9 +62,11 @@ void BasicOrtHandler::initialize_handler() // 4. output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); + output_node_names_.resize(num_outputs); for (unsigned int i = 0; i < num_outputs; ++i) { - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); Ort::TypeInfo output_type_info = ort_session->GetOutputTypeInfo(i); auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo(); auto output_dims = output_tensor_info.GetShape(); @@ -83,8 +88,9 @@ void BasicOrtHandler::print_debug_string() { std::cout << "LITEORT_DEBUG LogId: " << onnx_path << "\n"; std::cout << "=============== Input-Dims ==============\n"; + std::cout << "Name: " << input_node_names[0] << "\n"; for (unsigned int i = 0; i < input_node_dims.size(); ++i) - std::cout << "input_node_dims: " << input_node_dims.at(i) << "\n"; + std::cout << "Dims: " << input_node_dims.at(i) << "\n"; std::cout << "=============== Output-Dims ==============\n"; for (unsigned int i = 0; i < num_outputs; ++i) for (unsigned int j = 0; j < output_node_dims.at(i).size(); ++j) @@ -94,92 +100,3 @@ void BasicOrtHandler::print_debug_string() std::cout << "========================================\n"; } -//***********************************BasicMultiOrtHandler**********************************/ -BasicMultiOrtHandler::BasicMultiOrtHandler( - const std::string &_onnx_path, unsigned int _num_threads) : - log_id(_onnx_path.data()), num_threads(_num_threads) -{ -#ifdef LITE_WIN32 - std::wstring _w_onnx_path(lite::utils::to_wstring(_onnx_path)); - onnx_path = _w_onnx_path.data(); -#else - onnx_path = _onnx_path.data(); -#endif - initialize_handler(); -} - -void 
BasicMultiOrtHandler::initialize_handler() -{ - ort_env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, log_id); - // 0. session options - Ort::SessionOptions session_options; - session_options.SetIntraOpNumThreads(num_threads); - session_options.SetGraphOptimizationLevel( - GraphOptimizationLevel::ORT_ENABLE_ALL); - session_options.SetLogSeverityLevel(4); - - // 1. session - // GPU compatiable. - // OrtCUDAProviderOptions provider_options; - // session_options.AppendExecutionProvider_CUDA(provider_options); -#ifdef USE_CUDA - OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0); // C API stable. -#endif - ort_session = new Ort::Session(ort_env, onnx_path, session_options); - Ort::AllocatorWithDefaultOptions allocator; - // 2. input name & input dims - num_inputs = ort_session->GetInputCount(); - input_node_names.resize(num_inputs); - for (unsigned int i = 0; i < num_inputs; ++i) - { - input_node_names[i] = ort_session->GetInputName(i, allocator); - Ort::TypeInfo type_info = ort_session->GetInputTypeInfo(i); - auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); - auto input_dims = tensor_info.GetShape(); - input_node_dims.push_back(input_dims); - size_t tensor_size = 1; - for (unsigned int j = 0; j < input_dims.size(); ++j) - tensor_size *= input_dims.at(j); - input_tensor_sizes.push_back(tensor_size); - input_values_handlers.push_back(std::vector(tensor_size)); - } - // 4. 
output names & output dimms - num_outputs = ort_session->GetOutputCount(); - output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - { - output_node_names[i] = ort_session->GetOutputName(i, allocator); - Ort::TypeInfo output_type_info = ort_session->GetOutputTypeInfo(i); - auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo(); - auto output_dims = output_tensor_info.GetShape(); - output_node_dims.push_back(output_dims); - } -#if LITEORT_DEBUG - this->print_debug_string(); -#endif -} - -BasicMultiOrtHandler::~BasicMultiOrtHandler() -{ - if (ort_session) - delete ort_session; - ort_session = nullptr; -} - -void BasicMultiOrtHandler::print_debug_string() -{ - std::cout << "LITEORT_DEBUG LogId: " << onnx_path << "\n"; - std::cout << "=============== Input-Dims ==============\n"; - for (unsigned int i = 0; i < num_inputs; ++i) - for (unsigned int j = 0; j < input_node_dims.at(i).size(); ++j) - std::cout << "Input: " << i << " Name: " - << input_node_names.at(i) << " Dim: " << j << " :" - << input_node_dims.at(i).at(j) << std::endl; - std::cout << "=============== Output-Dims ==============\n"; - for (unsigned int i = 0; i < num_outputs; ++i) - for (unsigned int j = 0; j < output_node_dims.at(i).size(); ++j) - std::cout << "Output: " << i << " Name: " - << output_node_names.at(i) << " Dim: " << j << " :" - << output_node_dims.at(i).at(j) << std::endl; - std::cout << "========================================\n"; -} \ No newline at end of file diff --git a/lite/ort/core/ort_handler.h b/lite/ort/core/ort_handler.h index 31a42bbb..04c9abf3 100644 --- a/lite/ort/core/ort_handler.h +++ b/lite/ort/core/ort_handler.h @@ -18,12 +18,14 @@ namespace core Ort::Session *ort_session = nullptr; const char *input_name = nullptr; std::vector input_node_names; + std::vector input_node_names_; std::vector input_node_dims; // 1 input only. 
std::size_t input_tensor_size = 1; std::vector input_values_handler; Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; std::vector> output_node_dims; // >=1 outputs const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; @@ -101,6 +103,7 @@ namespace core void print_debug_string(); }; + } #endif //LITE_AI_ORT_CORE_ORT_HANDLER_H diff --git a/lite/ort/core/ort_utils.cpp b/lite/ort/core/ort_utils.cpp index cd3540a9..a3416174 100644 --- a/lite/ort/core/ort_utils.cpp +++ b/lite/ort/core/ort_utils.cpp @@ -10,7 +10,6 @@ Ort::Value ortcv::utils::transform::create_tensor(const cv::Mat &mat, const Ort::MemoryInfo &memory_info_handler, std::vector &tensor_value_handler, unsigned int data_format) -throw(std::runtime_error) { const unsigned int rows = mat.rows; const unsigned int cols = mat.cols; diff --git a/lite/ort/core/ort_utils.h b/lite/ort/core/ort_utils.h index 37ef76eb..dd114207 100644 --- a/lite/ort/core/ort_utils.h +++ b/lite/ort/core/ort_utils.h @@ -30,7 +30,7 @@ namespace ortcv LITE_EXPORTS Ort::Value create_tensor(const cv::Mat &mat, const std::vector &tensor_dims, const Ort::MemoryInfo &memory_info_handler, std::vector &tensor_value_handler, - unsigned int data_format = CHW) throw(std::runtime_error); + unsigned int data_format = CHW); LITE_EXPORTS cv::Mat normalize(const cv::Mat &mat, float mean, float scale); diff --git a/lite/ort/cv/backgroundmattingv2.cpp b/lite/ort/cv/backgroundmattingv2.cpp index aab208d6..4b3212de 100644 --- a/lite/ort/cv/backgroundmattingv2.cpp +++ b/lite/ort/cv/backgroundmattingv2.cpp @@ -34,8 +34,11 @@ BackgroundMattingV2::BackgroundMattingV2(const std::string &_onnx_path, unsigned // 3. type info. 
num_inputs = ort_session->GetInputCount(); // 2 input_node_names.resize(num_inputs); - for (unsigned int i = 0; i < num_inputs; ++i) - input_node_names[i] = ort_session->GetInputName(i, allocator); + input_node_names_.resize(num_inputs); + for (unsigned int i = 0; i < num_inputs; ++i) { + input_node_names_[i] = OrtCompatiableGetInputName(i, allocator, ort_session); + input_node_names[i] = input_node_names_[i].data(); + } Ort::TypeInfo input_mat_type_info = ort_session->GetInputTypeInfo(0); Ort::TypeInfo input_bgr_type_info = ort_session->GetInputTypeInfo(1); @@ -56,9 +59,11 @@ BackgroundMattingV2::BackgroundMattingV2(const std::string &_onnx_path, unsigned num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); + output_node_names_.resize(num_outputs); for (unsigned int i = 0; i < num_outputs; ++i) { - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); Ort::TypeInfo type_info = ort_session->GetOutputTypeInfo(i); auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); auto output_shape = tensor_info.GetShape(); diff --git a/lite/ort/cv/backgroundmattingv2.h b/lite/ort/cv/backgroundmattingv2.h index 70b41762..3d595d18 100644 --- a/lite/ort/cv/backgroundmattingv2.h +++ b/lite/ort/cv/backgroundmattingv2.h @@ -21,10 +21,12 @@ namespace ortcv // hardcode input node names unsigned int num_inputs = 2; std::vector input_node_names; + std::vector input_node_names_; std::vector> input_node_dims; // hardcode output node names unsigned int num_outputs = 6; std::vector output_node_names; + std::vector output_node_names_; std::vector> output_node_dims; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; diff --git a/lite/ort/cv/backgroundmattingv2_dyn.cpp b/lite/ort/cv/backgroundmattingv2_dyn.cpp index 6412caee..424e569d 100644 --- a/lite/ort/cv/backgroundmattingv2_dyn.cpp +++ 
b/lite/ort/cv/backgroundmattingv2_dyn.cpp @@ -32,9 +32,11 @@ BackgroundMattingV2Dyn::BackgroundMattingV2Dyn(const std::string &_onnx_path, un ort_session = new Ort::Session(ort_env, onnx_path, session_options); // 2. input name & input dims input_node_names.resize(num_inputs); // num_inputs=1 - input_node_names.resize(num_inputs); - for (unsigned int i = 0; i < num_inputs; ++i) - input_node_names[i] = ort_session->GetInputName(i, allocator); + input_node_names_.resize(num_inputs); + for (unsigned int i = 0; i < num_inputs; ++i) { + input_node_names_[i] = OrtCompatiableGetInputName(i, allocator, ort_session); + input_node_names[i] = input_node_names_[i].data(); + } // 3. initial input node dims. dynamic_input_node_dims.push_back({1, 3, dynamic_input_height, dynamic_input_width}); // src dynamic_input_node_dims.push_back({1, 3, dynamic_input_height, dynamic_input_width}); // bgr @@ -45,8 +47,11 @@ BackgroundMattingV2Dyn::BackgroundMattingV2Dyn(const std::string &_onnx_path, un // 4. output names & output dims num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/backgroundmattingv2_dyn.h b/lite/ort/cv/backgroundmattingv2_dyn.h index 6ba15038..fb52e1fc 100644 --- a/lite/ort/cv/backgroundmattingv2_dyn.h +++ b/lite/ort/cv/backgroundmattingv2_dyn.h @@ -21,6 +21,7 @@ namespace ortcv // hardcode input node names unsigned int num_inputs = 2; std::vector input_node_names; + std::vector input_node_names_; std::vector> dynamic_input_node_dims; unsigned int dynamic_input_height = 512; // init only, will change according to input 
mat. unsigned int dynamic_input_width = 512; // init only, will change according to input mat. @@ -29,6 +30,7 @@ namespace ortcv // hardcode output node names unsigned int num_outputs = 6; std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; // input values handlers diff --git a/lite/ort/cv/deeplabv3_resnet101.cpp b/lite/ort/cv/deeplabv3_resnet101.cpp index 010e888a..394a7751 100644 --- a/lite/ort/cv/deeplabv3_resnet101.cpp +++ b/lite/ort/cv/deeplabv3_resnet101.cpp @@ -36,7 +36,9 @@ DeepLabV3ResNet101::DeepLabV3ResNet101(const std::string &_onnx_path, unsigned i Ort::AllocatorWithDefaultOptions allocator; // 2. input name & input dims input_node_names.resize(num_inputs); // num_inputs=1 - input_node_names[0] = ort_session->GetInputName(0, allocator); + input_node_names_.resize(num_inputs); + input_node_names_[0] = OrtCompatiableGetInputName(0, allocator, ort_session); + input_node_names[0] = input_node_names_[0].data(); // 3. initial input node dims. dynamic_input_node_dims.push_back({1, 3, dynamic_input_height, dynamic_input_width}); dynamic_input_tensor_size = 1 * 3 * dynamic_input_height * dynamic_input_width; @@ -44,8 +46,11 @@ DeepLabV3ResNet101::DeepLabV3ResNet101(const std::string &_onnx_path, unsigned i // 4. 
output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/deeplabv3_resnet101.h b/lite/ort/cv/deeplabv3_resnet101.h index 466d35d4..b79bb826 100644 --- a/lite/ort/cv/deeplabv3_resnet101.h +++ b/lite/ort/cv/deeplabv3_resnet101.h @@ -15,6 +15,7 @@ namespace ortcv Ort::Env ort_env; Ort::Session *ort_session = nullptr; std::vector input_node_names; + std::vector input_node_names_; std::vector> dynamic_input_node_dims; // >=1 inputs. unsigned int dynamic_input_height = 512; // init only, will change according to input mat. unsigned int dynamic_input_width = 512; // init only, will change according to input mat. @@ -22,6 +23,7 @@ namespace ortcv Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; unsigned int num_outputs = 1; diff --git a/lite/ort/cv/face_parsing_bisenet_dyn.cpp b/lite/ort/cv/face_parsing_bisenet_dyn.cpp index e5f4d07d..3a3d0bd2 100644 --- a/lite/ort/cv/face_parsing_bisenet_dyn.cpp +++ b/lite/ort/cv/face_parsing_bisenet_dyn.cpp @@ -32,7 +32,9 @@ FaceParsingBiSeNetDyn::FaceParsingBiSeNetDyn(const std::string &_onnx_path, unsi ort_session = new Ort::Session(ort_env, onnx_path, session_options); // 2. 
input name & input dims input_node_names.resize(num_inputs); // num_inputs=1 - input_node_names[0] = ort_session->GetInputName(0, allocator); + input_node_names_.resize(num_inputs); // num_inputs=1 + input_node_names_[0] = OrtCompatiableGetInputName(0, allocator, ort_session); + input_node_names[0] = input_node_names_[0].data(); // 3. initial input node dims. dynamic_input_node_dims.push_back({1, 3, dynamic_input_height, dynamic_input_width}); dynamic_input_tensor_size = 1 * 3 * dynamic_input_height * dynamic_input_width; @@ -40,8 +42,11 @@ FaceParsingBiSeNetDyn::FaceParsingBiSeNetDyn(const std::string &_onnx_path, unsi // 4. output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/face_parsing_bisenet_dyn.h b/lite/ort/cv/face_parsing_bisenet_dyn.h index df796018..6c2841f1 100644 --- a/lite/ort/cv/face_parsing_bisenet_dyn.h +++ b/lite/ort/cv/face_parsing_bisenet_dyn.h @@ -16,6 +16,7 @@ namespace ortcv Ort::Session *ort_session = nullptr; Ort::AllocatorWithDefaultOptions allocator; std::vector input_node_names; + std::vector input_node_names_; std::vector> dynamic_input_node_dims; // >=1 inputs. unsigned int dynamic_input_height = 512; // init only, will change according to input mat. unsigned int dynamic_input_width = 512; // init only, will change according to input mat. 
@@ -23,6 +24,7 @@ namespace ortcv Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; unsigned int num_outputs = 3; diff --git a/lite/ort/cv/fcn_resnet101.cpp b/lite/ort/cv/fcn_resnet101.cpp index ecddd4de..fe5b4a80 100644 --- a/lite/ort/cv/fcn_resnet101.cpp +++ b/lite/ort/cv/fcn_resnet101.cpp @@ -33,7 +33,9 @@ FCNResNet101::FCNResNet101(const std::string &_onnx_path, unsigned int _num_thre Ort::AllocatorWithDefaultOptions allocator; // 2. input name & input dims input_node_names.resize(num_inputs); // num_inputs=1 - input_node_names[0] = ort_session->GetInputName(0, allocator); + input_node_names_.resize(num_inputs); // num_inputs=1 + input_node_names_[0] = OrtCompatiableGetInputName(0, allocator, ort_session); + input_node_names[0] = input_node_names_[0].data(); // 3. initial input node dims. dynamic_input_node_dims.push_back({1, 3, dynamic_input_height, dynamic_input_width}); dynamic_input_tensor_size = 1 * 3 * dynamic_input_height * dynamic_input_width; @@ -41,8 +43,11 @@ FCNResNet101::FCNResNet101(const std::string &_onnx_path, unsigned int _num_thre // 4. 
output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/fcn_resnet101.h b/lite/ort/cv/fcn_resnet101.h index 2eb3e56a..73c47121 100644 --- a/lite/ort/cv/fcn_resnet101.h +++ b/lite/ort/cv/fcn_resnet101.h @@ -15,6 +15,7 @@ namespace ortcv Ort::Env ort_env; Ort::Session *ort_session = nullptr; std::vector input_node_names; + std::vector input_node_names_; std::vector> dynamic_input_node_dims; // >=1 inputs. unsigned int dynamic_input_height = 512; // init only, will change according to input mat. unsigned int dynamic_input_width = 512; // init only, will change according to input mat. @@ -22,6 +23,7 @@ namespace ortcv Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; unsigned int num_outputs = 1; diff --git a/lite/ort/cv/head_seg.cpp b/lite/ort/cv/head_seg.cpp index 7b977edd..f80236c7 100644 --- a/lite/ort/cv/head_seg.cpp +++ b/lite/ort/cv/head_seg.cpp @@ -49,8 +49,10 @@ void HeadSeg::initialize_handler() Ort::AllocatorWithDefaultOptions allocator; // 2. input name & input dims - input_name = ort_session->GetInputName(0, allocator); input_node_names.resize(1); + input_node_names_.resize(1); + input_node_names_[0] = OrtCompatiableGetInputName(0, allocator, ort_session); + input_name = input_node_names_[0].data(); input_node_names[0] = input_name; // 3. type info. 
Ort::TypeInfo type_info = ort_session->GetInputTypeInfo(0); @@ -62,8 +64,11 @@ void HeadSeg::initialize_handler() // 4. output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/head_seg.h b/lite/ort/cv/head_seg.h index b6717380..478b1cfa 100644 --- a/lite/ort/cv/head_seg.h +++ b/lite/ort/cv/head_seg.h @@ -16,12 +16,14 @@ namespace ortcv Ort::Session *ort_session = nullptr; const char *input_name = nullptr; std::vector input_node_names; + std::vector input_node_names_; std::vector input_node_dims; // 1 input only. (?,384,384,3) std::size_t input_tensor_size = 1; std::vector input_values_handler; Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; int num_outputs = 1; diff --git a/lite/ort/cv/modnet_dyn.cpp b/lite/ort/cv/modnet_dyn.cpp index 27b3c496..049c4e78 100644 --- a/lite/ort/cv/modnet_dyn.cpp +++ b/lite/ort/cv/modnet_dyn.cpp @@ -32,7 +32,9 @@ MODNetDyn::MODNetDyn(const std::string &_onnx_path, unsigned int _num_threads) : ort_session = new Ort::Session(ort_env, onnx_path, session_options); // 2. 
input name & input dims input_node_names.resize(num_inputs); // num_inputs=1 - input_node_names[0] = ort_session->GetInputName(0, allocator); + input_node_names_.resize(num_inputs); // num_inputs=1 + input_node_names_[0] = OrtCompatiableGetInputName(0, allocator, ort_session); + input_node_names[0] = input_node_names_[0].data(); // 3. initial input node dims. dynamic_input_node_dims.push_back({1, 3, dynamic_input_height, dynamic_input_width}); dynamic_input_tensor_size = 1 * 3 * dynamic_input_height * dynamic_input_width; @@ -40,8 +42,11 @@ MODNetDyn::MODNetDyn(const std::string &_onnx_path, unsigned int _num_threads) : // 4. output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/modnet_dyn.h b/lite/ort/cv/modnet_dyn.h index d21d5bbe..33f6ce97 100644 --- a/lite/ort/cv/modnet_dyn.h +++ b/lite/ort/cv/modnet_dyn.h @@ -16,6 +16,7 @@ namespace ortcv Ort::Session *ort_session = nullptr; Ort::AllocatorWithDefaultOptions allocator; std::vector input_node_names; + std::vector input_node_names_; std::vector> dynamic_input_node_dims; // >=1 inputs. unsigned int dynamic_input_height = 512; // init only, will change according to input mat. unsigned int dynamic_input_width = 512; // init only, will change according to input mat. 
@@ -23,6 +24,7 @@ namespace ortcv Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; unsigned int num_outputs = 1; diff --git a/lite/ort/cv/pose_robust_face.cpp b/lite/ort/cv/pose_robust_face.cpp index 183529c3..d30a5f32 100644 --- a/lite/ort/cv/pose_robust_face.cpp +++ b/lite/ort/cv/pose_robust_face.cpp @@ -32,10 +32,12 @@ PoseRobustFace::PoseRobustFace(const std::string &_onnx_path, unsigned int _num_ // 2. input name & input dims num_inputs = ort_session->GetInputCount(); input_node_names.resize(num_inputs); + input_node_names_.resize(num_inputs); // 3. initial input node dims. "input" & "yaw" for (unsigned int i = 0; i < num_inputs; ++i) { - input_node_names[i] = ort_session->GetInputName(i, allocator); + input_node_names_[i] = OrtCompatiableGetInputName(i, allocator, ort_session); + input_node_names[i] = input_node_names_[i].data(); Ort::TypeInfo type_info = ort_session->GetInputTypeInfo(i); auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); unsigned int input_size = 1; @@ -50,9 +52,11 @@ PoseRobustFace::PoseRobustFace(const std::string &_onnx_path, unsigned int _num_ // 4. 
output names & output dims num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); + output_node_names_.resize(num_outputs); for (unsigned int i = 0; i < num_outputs; ++i) { - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); Ort::TypeInfo output_type_info = ort_session->GetOutputTypeInfo(i); auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo(); auto output_dims = output_tensor_info.GetShape(); diff --git a/lite/ort/cv/pose_robust_face.h b/lite/ort/cv/pose_robust_face.h index 2a31a9ed..96f297c8 100644 --- a/lite/ort/cv/pose_robust_face.h +++ b/lite/ort/cv/pose_robust_face.h @@ -16,11 +16,13 @@ namespace ortcv Ort::Env ort_env; Ort::Session *ort_session = nullptr; std::vector input_node_names; + std::vector input_node_names_; std::vector> input_node_dims; // >=1 inputs. std::vector input_tensor_sizes; Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; std::vector> output_node_dims; // >=1 outputs const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; diff --git a/lite/ort/cv/ssd_mobilenetv1.cpp b/lite/ort/cv/ssd_mobilenetv1.cpp index df76b3ed..096a2650 100644 --- a/lite/ort/cv/ssd_mobilenetv1.cpp +++ b/lite/ort/cv/ssd_mobilenetv1.cpp @@ -31,17 +31,23 @@ SSDMobileNetV1::SSDMobileNetV1(const std::string &_onnx_path, unsigned int _num_ // 2. input name & input dims num_inputs = ort_session->GetInputCount(); input_node_names.resize(num_inputs); + input_node_names_.resize(num_inputs); // 3. initial input node dims. 
input_node_dims.push_back({batch_size, input_height, input_width, 3}); // NHWC input_tensor_sizes.push_back(batch_size * input_height * input_width * 3); input_values_handler.resize(batch_size * input_height * input_width * 3); - for (unsigned int i = 0; i < num_inputs; ++i) - input_node_names[i] = ort_session->GetInputName(i, allocator); + for (unsigned int i = 0; i < num_inputs; ++i) { + input_node_names_[i] = OrtCompatiableGetInputName(i, allocator, ort_session); + input_node_names[i] = input_node_names_[i].data(); + } // 4. output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/ssd_mobilenetv1.h b/lite/ort/cv/ssd_mobilenetv1.h index e7fa37a9..18eadc6d 100644 --- a/lite/ort/cv/ssd_mobilenetv1.h +++ b/lite/ort/cv/ssd_mobilenetv1.h @@ -15,11 +15,13 @@ namespace ortcv Ort::Env ort_env; Ort::Session *ort_session = nullptr; std::vector input_node_names; + std::vector input_node_names_; std::vector> input_node_dims; // >=1 inputs. 
std::vector input_tensor_sizes; Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; unsigned int num_outputs = 1; diff --git a/lite/ort/cv/tiny_yolov3.cpp b/lite/ort/cv/tiny_yolov3.cpp index c81b2e1c..ddd2a5b1 100644 --- a/lite/ort/cv/tiny_yolov3.cpp +++ b/lite/ort/cv/tiny_yolov3.cpp @@ -37,6 +37,7 @@ TinyYoloV3::TinyYoloV3(const std::string &_onnx_path, unsigned int _num_threads) // 2. input name & input dims num_inputs = ort_session->GetInputCount(); input_node_names.resize(num_inputs); + input_node_names_.resize(num_inputs); // 3. initial input node dims. input_node_dims.push_back({batch_size, 3, input_height, input_width}); // input_1 dims input_node_dims.push_back({batch_size, 2}); // image_shape dims @@ -44,13 +45,18 @@ TinyYoloV3::TinyYoloV3(const std::string &_onnx_path, unsigned int _num_threads) input_tensor_sizes.push_back(batch_size * 2); input_1_values_handler.resize(batch_size * 3 * input_height * input_width); image_shape_values_handler.resize(batch_size * 2); - for (unsigned int i = 0; i < num_inputs; ++i) - input_node_names[i] = ort_session->GetInputName(i, allocator); + for (unsigned int i = 0; i < num_inputs; ++i) { + input_node_names_[i] = OrtCompatiableGetInputName(i, allocator, ort_session); + input_node_names[i] = input_node_names_[i].data(); + } // 4. 
output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/tiny_yolov3.h b/lite/ort/cv/tiny_yolov3.h index 18e24bd4..0c515c4f 100644 --- a/lite/ort/cv/tiny_yolov3.h +++ b/lite/ort/cv/tiny_yolov3.h @@ -15,11 +15,13 @@ namespace ortcv Ort::Env ort_env; Ort::Session *ort_session = nullptr; std::vector input_node_names; + std::vector input_node_names_; std::vector> input_node_dims; // >=1 inputs. std::vector input_tensor_sizes; Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; unsigned int num_outputs = 1; diff --git a/lite/ort/cv/yolov3.cpp b/lite/ort/cv/yolov3.cpp index 2e29a847..5725ffb9 100644 --- a/lite/ort/cv/yolov3.cpp +++ b/lite/ort/cv/yolov3.cpp @@ -37,6 +37,7 @@ YoloV3::YoloV3(const std::string &_onnx_path, unsigned int _num_threads) : // 2. input name & input dims num_inputs = ort_session->GetInputCount(); input_node_names.resize(num_inputs); + input_node_names_.resize(num_inputs); // 3. initial input node dims. 
input_node_dims.push_back({batch_size, 3, input_height, input_width}); // input_1 dims input_node_dims.push_back({batch_size, 2}); // image_shape dims @@ -44,13 +45,18 @@ YoloV3::YoloV3(const std::string &_onnx_path, unsigned int _num_threads) : input_tensor_sizes.push_back(batch_size * 2); input_1_values_handler.resize(batch_size * 3 * input_height * input_width); image_shape_values_handler.resize(batch_size * 2); - for (unsigned int i = 0; i < num_inputs; ++i) - input_node_names[i] = ort_session->GetInputName(i, allocator); + for (unsigned int i = 0; i < num_inputs; ++i) { + input_node_names_[i] = OrtCompatiableGetInputName(i, allocator, ort_session); + input_node_names[i] = input_node_names_[i].data(); + } // 4. output names & output dimms num_outputs = ort_session->GetOutputCount(); output_node_names.resize(num_outputs); - for (unsigned int i = 0; i < num_outputs; ++i) - output_node_names[i] = ort_session->GetOutputName(i, allocator); + output_node_names_.resize(num_outputs); + for (unsigned int i = 0; i < num_outputs; ++i) { + output_node_names_[i] = OrtCompatiableGetOutputName(i, allocator, ort_session); + output_node_names[i] = output_node_names_[i].data(); + } #if LITEORT_DEBUG this->print_debug_string(); #endif diff --git a/lite/ort/cv/yolov3.h b/lite/ort/cv/yolov3.h index 71c94ce3..ce0ec3b2 100644 --- a/lite/ort/cv/yolov3.h +++ b/lite/ort/cv/yolov3.h @@ -15,11 +15,13 @@ namespace ortcv Ort::Env ort_env; Ort::Session *ort_session = nullptr; std::vector input_node_names; + std::vector input_node_names_; std::vector> input_node_dims; // >=1 inputs. 
std::vector input_tensor_sizes; Ort::MemoryInfo memory_info_handler = Ort::MemoryInfo::CreateCpu( OrtArenaAllocator, OrtMemTypeDefault); std::vector output_node_names; + std::vector output_node_names_; const LITEORT_CHAR *onnx_path = nullptr; const char *log_id = nullptr; unsigned int num_outputs = 1; diff --git a/lite/pipeline.h b/lite/pipeline.h deleted file mode 100644 index 83927ce3..00000000 --- a/lite/pipeline.h +++ /dev/null @@ -1,65 +0,0 @@ -// -// Created by DefTruth on 2021/8/8. -// - -#ifndef LITE_AI_PIPELINE_H -#define LITE_AI_PIPELINE_H - -#include "models.h" - - -// Pipeline namespace -namespace lite -{ - namespace cv - { - namespace pipeline - { - // VideoPipeline: video demos for general models, such Face Detection - // + Face landmarks Detection | Face Attributes Analysis. - class LITE_EXPORTS VideoPipeline; - - // FaceTracker: Face tracking using face detection and IoU. - class LITE_EXPORTS FaceTracker; - - // ObjectTracker: Face tracking using object detection and IoU | DeepSort. - class LITE_EXPORTS ObjectTracker; - - // FaceRecognizer: Face ID recognition using face detection. - // face alignment and recognition. - class LITE_EXPORTS FaceRecognizer; - - // FaceAttributesAnalyser: Face Attributes Analysis using face detection, - // Face Attributes estimation. - class LITE_EXPORTS FaceAttributesAnalyser; - - // VideoStyleTransfer: Style transfer for video demo. - class LITE_EXPORTS VideoStyleTransfer; - - // VideoColorizer: colorization for video demo. 
- class LITE_EXPORTS VideoColorizer; - } - } -} - -namespace lite -{ - namespace cv - { - namespace pipeline - { - enum MODEL - { - FSANet = 0, - PFLD = 1, - UltraFace = 2, - AgeGoogleNet = 3, - GenderGoogleNet = 4 - }; - - } - } -} - - -#endif //LITE_AI_PIPELINE_H diff --git a/lite/pipeline/.gitignore b/lite/pipeline/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/lite/pipeline/face_attribute_pl.h b/lite/pipeline/face_attribute_pl.h deleted file mode 100644 index 8834efae..00000000 --- a/lite/pipeline/face_attribute_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_FACE_ATTRIBUTE_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_FACE_ATTRIBUTE_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_FACE_ATTRIBUTE_PL_H diff --git a/lite/pipeline/face_landmarks_pl.h b/lite/pipeline/face_landmarks_pl.h deleted file mode 100644 index 9cae1639..00000000 --- a/lite/pipeline/face_landmarks_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_FACE_LANDMARKS_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_FACE_LANDMARKS_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_FACE_LANDMARKS_PL_H diff --git a/lite/pipeline/face_mesh_pl.h b/lite/pipeline/face_mesh_pl.h deleted file mode 100644 index a1c9cca6..00000000 --- a/lite/pipeline/face_mesh_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_FACE_MESH_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_FACE_MESH_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_FACE_MESH_PL_H diff --git a/lite/pipeline/face_recognition_pl.h b/lite/pipeline/face_recognition_pl.h deleted file mode 100644 index 9b667d1b..00000000 --- a/lite/pipeline/face_recognition_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. 
-// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_FACE_RECOGNITION_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_FACE_RECOGNITION_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_FACE_RECOGNITION_PL_H diff --git a/lite/pipeline/face_tracking_pl.h b/lite/pipeline/face_tracking_pl.h deleted file mode 100644 index 2d73427c..00000000 --- a/lite/pipeline/face_tracking_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_FACE_TRACKING_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_FACE_TRACKING_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_FACE_TRACKING_PL_H diff --git a/lite/pipeline/hand_landmarks_pl.h b/lite/pipeline/hand_landmarks_pl.h deleted file mode 100644 index b7de2f55..00000000 --- a/lite/pipeline/hand_landmarks_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_HAND_LANDMARKS_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_HAND_LANDMARKS_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_HAND_LANDMARKS_PL_H diff --git a/lite/pipeline/headpose_tracking_pl.h b/lite/pipeline/headpose_tracking_pl.h deleted file mode 100644 index e7301296..00000000 --- a/lite/pipeline/headpose_tracking_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_HEADPOSE_TRACKING_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_HEADPOSE_TRACKING_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_HEADPOSE_TRACKING_PL_H diff --git a/lite/pipeline/human_matting_pl.h b/lite/pipeline/human_matting_pl.h deleted file mode 100644 index 6799d7ab..00000000 --- a/lite/pipeline/human_matting_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. 
-// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_HUMAN_MATTING_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_HUMAN_MATTING_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_HUMAN_MATTING_PL_H diff --git a/lite/pipeline/iris_landmarks_pl.h b/lite/pipeline/iris_landmarks_pl.h deleted file mode 100644 index 01d7931d..00000000 --- a/lite/pipeline/iris_landmarks_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_IRIS_LANDMARKS_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_IRIS_LANDMARKS_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_IRIS_LANDMARKS_PL_H diff --git a/lite/pipeline/object_tracking_pl.h b/lite/pipeline/object_tracking_pl.h deleted file mode 100644 index 19ec37a4..00000000 --- a/lite/pipeline/object_tracking_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_OBJECT_TRACKING_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_OBJECT_TRACKING_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_OBJECT_TRACKING_PL_H diff --git a/lite/pipeline/pl_rotation.h b/lite/pipeline/pl_rotation.h deleted file mode 100644 index dc8aefbc..00000000 --- a/lite/pipeline/pl_rotation.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_PL_ROTATION_H -#define LITE_AI_TOOLKIT_PIPELINE_PL_ROTATION_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_PL_ROTATION_H diff --git a/lite/pipeline/pl_utils.h b/lite/pipeline/pl_utils.h deleted file mode 100644 index a6a99345..00000000 --- a/lite/pipeline/pl_utils.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. -// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_PL_UTILS_H -#define LITE_AI_TOOLKIT_PIPELINE_PL_UTILS_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_PL_UTILS_H diff --git a/lite/pipeline/pose_tracking_pl.h b/lite/pipeline/pose_tracking_pl.h deleted file mode 100644 index b5b4989a..00000000 --- a/lite/pipeline/pose_tracking_pl.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// Created by DefTruth on 2022/5/22. 
-// - -#ifndef LITE_AI_TOOLKIT_PIPELINE_POSE_TRACKING_PL_H -#define LITE_AI_TOOLKIT_PIPELINE_POSE_TRACKING_PL_H - -#endif //LITE_AI_TOOLKIT_PIPELINE_POSE_TRACKING_PL_H diff --git a/logs/test_lite_yolox_mnn_2.jpg b/logs/test_lite_yolox_mnn_2.jpg deleted file mode 100644 index 44f7d86a..00000000 Binary files a/logs/test_lite_yolox_mnn_2.jpg and /dev/null differ diff --git a/ncnn/allocator.h b/ncnn/allocator.h deleted file mode 100644 index d8e82ea1..00000000 --- a/ncnn/allocator.h +++ /dev/null @@ -1,439 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_ALLOCATOR_H -#define NCNN_ALLOCATOR_H - -#ifdef _WIN32 -#define WIN32_LEAN_AND_MEAN -#include -#endif - -#include "platform.h" - -#include - -#if NCNN_VULKAN -#include -#endif // NCNN_VULKAN - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 26 -#include -#endif // __ANDROID_API__ >= 26 -#endif // NCNN_PLATFORM_API - -namespace ncnn { - -#if __AVX__ -// the alignment of all the allocated buffers -#define NCNN_MALLOC_ALIGN 32 -#else -// the alignment of all the allocated buffers -#define NCNN_MALLOC_ALIGN 16 -#endif - -// we have some optimized kernels that may overread buffer a bit in loop -// it is common to interleave next-loop data load with arithmetic instructions -// allocating more bytes keeps us safe from SEGV_ACCERR failure -#define NCNN_MALLOC_OVERREAD 64 - -// Aligns a pointer to the specified number of bytes -// ptr Aligned pointer -// n Alignment size that must be a power of two -template -static inline _Tp* alignPtr(_Tp* ptr, int n = (int)sizeof(_Tp)) -{ - return (_Tp*)(((size_t)ptr + n - 1) & -n); -} - -// Aligns a buffer size to the specified number of bytes -// The function returns the minimum number that is greater or equal to sz and is divisible by n -// sz Buffer size to align -// n Alignment size that must be a power of two -static inline size_t alignSize(size_t sz, int n) -{ - return (sz + n - 1) & -n; -} - -static inline void* fastMalloc(size_t size) -{ -#if _MSC_VER - return _aligned_malloc(size, NCNN_MALLOC_ALIGN); -#elif (defined(__unix__) || defined(__APPLE__)) && _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17) - void* ptr = 0; - if (posix_memalign(&ptr, NCNN_MALLOC_ALIGN, size + NCNN_MALLOC_OVERREAD)) - ptr = 0; - return ptr; -#elif __ANDROID__ && __ANDROID_API__ < 17 - return memalign(NCNN_MALLOC_ALIGN, size + NCNN_MALLOC_OVERREAD); -#else - unsigned char* udata = (unsigned char*)malloc(size + sizeof(void*) + NCNN_MALLOC_ALIGN + NCNN_MALLOC_OVERREAD); - if (!udata) - return 0; - unsigned char** adata = 
alignPtr((unsigned char**)udata + 1, NCNN_MALLOC_ALIGN); - adata[-1] = udata; - return adata; -#endif -} - -static inline void fastFree(void* ptr) -{ - if (ptr) - { -#if _MSC_VER - _aligned_free(ptr); -#elif (defined(__unix__) || defined(__APPLE__)) && _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17) - free(ptr); -#elif __ANDROID__ && __ANDROID_API__ < 17 - free(ptr); -#else - unsigned char* udata = ((unsigned char**)ptr)[-1]; - free(udata); -#endif - } -} - -#if NCNN_THREADS -// exchange-add operation for atomic operations on reference counters -#if defined __riscv && !defined __riscv_atomic -// riscv target without A extension -static inline int NCNN_XADD(int* addr, int delta) -{ - int tmp = *addr; - *addr += delta; - return tmp; -} -#elif defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32) -// atomic increment on the linux version of the Intel(tm) compiler -#define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast(reinterpret_cast(addr)), delta) -#elif defined __GNUC__ -#if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__) -#ifdef __ATOMIC_ACQ_REL -#define NCNN_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL) -#else -#define NCNN_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4) -#endif -#else -#if defined __ATOMIC_ACQ_REL && !defined __clang__ -// version for gcc >= 4.7 -#define NCNN_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL) -#else -#define NCNN_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta)) -#endif -#endif -#elif defined _MSC_VER && !defined RC_INVOKED -#define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta) -#else -// thread-unsafe branch -static inline int NCNN_XADD(int* addr, int delta) -{ - int tmp = *addr; - *addr += delta; - return tmp; -} -#endif -#else // 
NCNN_THREADS -static inline int NCNN_XADD(int* addr, int delta) -{ - int tmp = *addr; - *addr += delta; - return tmp; -} -#endif // NCNN_THREADS - -class NCNN_EXPORT Allocator -{ -public: - virtual ~Allocator(); - virtual void* fastMalloc(size_t size) = 0; - virtual void fastFree(void* ptr) = 0; -}; - -class PoolAllocatorPrivate; -class NCNN_EXPORT PoolAllocator : public Allocator -{ -public: - PoolAllocator(); - ~PoolAllocator(); - - // ratio range 0 ~ 1 - // default cr = 0.75 - void set_size_compare_ratio(float scr); - - // release all budgets immediately - void clear(); - - virtual void* fastMalloc(size_t size); - virtual void fastFree(void* ptr); - -private: - PoolAllocator(const PoolAllocator&); - PoolAllocator& operator=(const PoolAllocator&); - -private: - PoolAllocatorPrivate* const d; -}; - -class UnlockedPoolAllocatorPrivate; -class NCNN_EXPORT UnlockedPoolAllocator : public Allocator -{ -public: - UnlockedPoolAllocator(); - ~UnlockedPoolAllocator(); - - // ratio range 0 ~ 1 - // default cr = 0.75 - void set_size_compare_ratio(float scr); - - // release all budgets immediately - void clear(); - - virtual void* fastMalloc(size_t size); - virtual void fastFree(void* ptr); - -private: - UnlockedPoolAllocator(const UnlockedPoolAllocator&); - UnlockedPoolAllocator& operator=(const UnlockedPoolAllocator&); - -private: - UnlockedPoolAllocatorPrivate* const d; -}; - -#if NCNN_VULKAN - -class VulkanDevice; - -class NCNN_EXPORT VkBufferMemory -{ -public: - VkBuffer buffer; - - // the base offset assigned by allocator - size_t offset; - size_t capacity; - - VkDeviceMemory memory; - void* mapped_ptr; - - // buffer state, modified by command functions internally - mutable VkAccessFlags access_flags; - mutable VkPipelineStageFlags stage_flags; - - // initialize and modified by mat - int refcount; -}; - -class NCNN_EXPORT VkImageMemory -{ -public: - VkImage image; - VkImageView imageview; - - // underlying info assigned by allocator - int width; - int height; - int 
depth; - VkFormat format; - - VkDeviceMemory memory; - void* mapped_ptr; - - // the base offset assigned by allocator - size_t bind_offset; - size_t bind_capacity; - - // image state, modified by command functions internally - mutable VkAccessFlags access_flags; - mutable VkImageLayout image_layout; - mutable VkPipelineStageFlags stage_flags; - - // in-execution state, modified by command functions internally - mutable int command_refcount; - - // initialize and modified by mat - int refcount; -}; - -class NCNN_EXPORT VkAllocator -{ -public: - explicit VkAllocator(const VulkanDevice* _vkdev); - virtual ~VkAllocator(); - - virtual void clear(); - - virtual VkBufferMemory* fastMalloc(size_t size) = 0; - virtual void fastFree(VkBufferMemory* ptr) = 0; - virtual int flush(VkBufferMemory* ptr); - virtual int invalidate(VkBufferMemory* ptr); - - virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack) = 0; - virtual void fastFree(VkImageMemory* ptr) = 0; - -public: - const VulkanDevice* vkdev; - uint32_t buffer_memory_type_index; - uint32_t image_memory_type_index; - uint32_t reserved_type_index; - bool mappable; - bool coherent; - -protected: - VkBuffer create_buffer(size_t size, VkBufferUsageFlags usage); - VkDeviceMemory allocate_memory(size_t size, uint32_t memory_type_index); - VkDeviceMemory allocate_dedicated_memory(size_t size, uint32_t memory_type_index, VkImage image, VkBuffer buffer); - - VkImage create_image(int width, int height, int depth, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage); - VkImageView create_imageview(VkImage image, VkFormat format); -}; - -class VkBlobAllocatorPrivate; -class NCNN_EXPORT VkBlobAllocator : public VkAllocator -{ -public: - explicit VkBlobAllocator(const VulkanDevice* vkdev, size_t preferred_block_size = 16 * 1024 * 1024); // 16M - virtual ~VkBlobAllocator(); - -public: - // release all budgets immediately - virtual void clear(); - - virtual VkBufferMemory* fastMalloc(size_t size); 
- virtual void fastFree(VkBufferMemory* ptr); - virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); - virtual void fastFree(VkImageMemory* ptr); - -private: - VkBlobAllocator(const VkBlobAllocator&); - VkBlobAllocator& operator=(const VkBlobAllocator&); - -private: - VkBlobAllocatorPrivate* const d; -}; - -class VkWeightAllocatorPrivate; -class NCNN_EXPORT VkWeightAllocator : public VkAllocator -{ -public: - explicit VkWeightAllocator(const VulkanDevice* vkdev, size_t preferred_block_size = 8 * 1024 * 1024); // 8M - virtual ~VkWeightAllocator(); - -public: - // release all blocks immediately - virtual void clear(); - -public: - virtual VkBufferMemory* fastMalloc(size_t size); - virtual void fastFree(VkBufferMemory* ptr); - virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); - virtual void fastFree(VkImageMemory* ptr); - -private: - VkWeightAllocator(const VkWeightAllocator&); - VkWeightAllocator& operator=(const VkWeightAllocator&); - -private: - VkWeightAllocatorPrivate* const d; -}; - -class VkStagingAllocatorPrivate; -class NCNN_EXPORT VkStagingAllocator : public VkAllocator -{ -public: - explicit VkStagingAllocator(const VulkanDevice* vkdev); - virtual ~VkStagingAllocator(); - -public: - // ratio range 0 ~ 1 - // default cr = 0.75 - void set_size_compare_ratio(float scr); - - // release all budgets immediately - virtual void clear(); - - virtual VkBufferMemory* fastMalloc(size_t size); - virtual void fastFree(VkBufferMemory* ptr); - virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); - virtual void fastFree(VkImageMemory* ptr); - -private: - VkStagingAllocator(const VkStagingAllocator&); - VkStagingAllocator& operator=(const VkStagingAllocator&); - -private: - VkStagingAllocatorPrivate* const d; -}; - -class VkWeightStagingAllocatorPrivate; -class NCNN_EXPORT VkWeightStagingAllocator : public VkAllocator -{ -public: - explicit VkWeightStagingAllocator(const 
VulkanDevice* vkdev); - virtual ~VkWeightStagingAllocator(); - -public: - virtual VkBufferMemory* fastMalloc(size_t size); - virtual void fastFree(VkBufferMemory* ptr); - virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); - virtual void fastFree(VkImageMemory* ptr); - -private: - VkWeightStagingAllocator(const VkWeightStagingAllocator&); - VkWeightStagingAllocator& operator=(const VkWeightStagingAllocator&); - -private: - VkWeightStagingAllocatorPrivate* const d; -}; - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 26 -class NCNN_EXPORT VkAndroidHardwareBufferImageAllocator : public VkAllocator -{ -public: - VkAndroidHardwareBufferImageAllocator(const VulkanDevice* _vkdev, AHardwareBuffer* _hb); - virtual ~VkAndroidHardwareBufferImageAllocator(); - -public: - virtual VkBufferMemory* fastMalloc(size_t size); - virtual void fastFree(VkBufferMemory* ptr); - virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); - virtual void fastFree(VkImageMemory* ptr); - -private: - VkAndroidHardwareBufferImageAllocator(const VkAndroidHardwareBufferImageAllocator&); - VkAndroidHardwareBufferImageAllocator& operator=(const VkAndroidHardwareBufferImageAllocator&); - -public: - int init(); - - int width() const; - int height() const; - uint64_t external_format() const; - -public: - AHardwareBuffer* hb; - AHardwareBuffer_Desc bufferDesc; - VkAndroidHardwareBufferFormatPropertiesANDROID bufferFormatProperties; - VkAndroidHardwareBufferPropertiesANDROID bufferProperties; - VkSamplerYcbcrConversionKHR samplerYcbcrConversion; -}; -#endif // __ANDROID_API__ >= 26 -#endif // NCNN_PLATFORM_API - -#endif // NCNN_VULKAN - -} // namespace ncnn - -#endif // NCNN_ALLOCATOR_H diff --git a/ncnn/benchmark.h b/ncnn/benchmark.h deleted file mode 100644 index 3d5c0cda..00000000 --- a/ncnn/benchmark.h +++ /dev/null @@ -1,36 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. 
-// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_BENCHMARK_H -#define NCNN_BENCHMARK_H - -#include "layer.h" -#include "mat.h" -#include "platform.h" - -namespace ncnn { - -// get now timestamp in ms -NCNN_EXPORT double get_current_time(); - -#if NCNN_BENCHMARK - -NCNN_EXPORT void benchmark(const Layer* layer, double start, double end); -NCNN_EXPORT void benchmark(const Layer* layer, const Mat& bottom_blob, Mat& top_blob, double start, double end); - -#endif // NCNN_BENCHMARK - -} // namespace ncnn - -#endif // NCNN_BENCHMARK_H diff --git a/ncnn/blob.h b/ncnn/blob.h deleted file mode 100644 index c9f144fb..00000000 --- a/ncnn/blob.h +++ /dev/null @@ -1,44 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_BLOB_H -#define NCNN_BLOB_H - -#include "mat.h" -#include "platform.h" - -namespace ncnn { - -class NCNN_EXPORT Blob -{ -public: - // empty - Blob(); - -public: -#if NCNN_STRING - // blob name - std::string name; -#endif // NCNN_STRING - // layer index which produce this blob as output - int producer; - // layer index which need this blob as input - int consumer; - // shape hint - Mat shape; -}; - -} // namespace ncnn - -#endif // NCNN_BLOB_H diff --git a/ncnn/c_api.h b/ncnn/c_api.h deleted file mode 100644 index 19ad7157..00000000 --- a/ncnn/c_api.h +++ /dev/null @@ -1,308 +0,0 @@ -/* Tencent is pleased to support the open source community by making ncnn available. - * - * Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. - * - * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except - * in compliance with the License. You may obtain a copy of the License at - * - * https://opensource.org/licenses/BSD-3-Clause - * - * Unless required by applicable law or agreed to in writing, software distributed - * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ - -#ifndef NCNN_C_API_H -#define NCNN_C_API_H - -#include -#include "platform.h" - -#ifdef __cplusplus -extern "C" { -#endif - -NCNN_EXPORT const char* ncnn_version(); - -/* allocator api */ -typedef struct __ncnn_allocator_t* ncnn_allocator_t; -struct NCNN_EXPORT __ncnn_allocator_t -{ - void* pthis; - - void* (*fast_malloc)(ncnn_allocator_t allocator, size_t size); - void (*fast_free)(ncnn_allocator_t allocator, void* ptr); -}; - -NCNN_EXPORT ncnn_allocator_t ncnn_allocator_create_pool_allocator(); -NCNN_EXPORT ncnn_allocator_t ncnn_allocator_create_unlocked_pool_allocator(); -NCNN_EXPORT void ncnn_allocator_destroy(ncnn_allocator_t allocator); - -/* option api */ -typedef struct __ncnn_option_t* ncnn_option_t; - -NCNN_EXPORT ncnn_option_t ncnn_option_create(); -NCNN_EXPORT void ncnn_option_destroy(ncnn_option_t opt); - -NCNN_EXPORT int ncnn_option_get_num_threads(const ncnn_option_t opt); -NCNN_EXPORT void ncnn_option_set_num_threads(ncnn_option_t opt, int num_threads); - -NCNN_EXPORT int ncnn_option_get_use_vulkan_compute(const ncnn_option_t opt); -NCNN_EXPORT void ncnn_option_set_use_vulkan_compute(ncnn_option_t opt, int use_vulkan_compute); - -/* mat api */ -typedef struct __ncnn_mat_t* ncnn_mat_t; - -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_1d(int w, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_2d(int w, int h, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_3d(int w, int h, int c, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_1d(int w, void* data, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_2d(int w, int h, void* data, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_3d(int w, int h, int c, void* data, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_1d_elem(int w, size_t elemsize, int elempack, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_2d_elem(int w, int h, size_t 
elemsize, int elempack, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_3d_elem(int w, int h, int c, size_t elemsize, int elempack, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_1d_elem(int w, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_2d_elem(int w, int h, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_3d_elem(int w, int h, int c, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator); -NCNN_EXPORT void ncnn_mat_destroy(ncnn_mat_t mat); - -NCNN_EXPORT void ncnn_mat_fill_float(ncnn_mat_t mat, float v); - -NCNN_EXPORT ncnn_mat_t ncnn_mat_clone(const ncnn_mat_t mat, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_1d(const ncnn_mat_t mat, int w, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_2d(const ncnn_mat_t mat, int w, int h, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_3d(const ncnn_mat_t mat, int w, int h, int c, ncnn_allocator_t allocator); - -NCNN_EXPORT int ncnn_mat_get_dims(const ncnn_mat_t mat); -NCNN_EXPORT int ncnn_mat_get_w(const ncnn_mat_t mat); -NCNN_EXPORT int ncnn_mat_get_h(const ncnn_mat_t mat); -NCNN_EXPORT int ncnn_mat_get_c(const ncnn_mat_t mat); -NCNN_EXPORT size_t ncnn_mat_get_elemsize(const ncnn_mat_t mat); -NCNN_EXPORT int ncnn_mat_get_elempack(const ncnn_mat_t mat); -NCNN_EXPORT size_t ncnn_mat_get_cstep(const ncnn_mat_t mat); -NCNN_EXPORT void* ncnn_mat_get_data(const ncnn_mat_t mat); - -NCNN_EXPORT void* ncnn_mat_get_channel_data(const ncnn_mat_t mat, int c); - -#if NCNN_PIXEL - -/* mat pixel api */ -#define NCNN_MAT_PIXEL_RGB 1 -#define NCNN_MAT_PIXEL_BGR 2 -#define NCNN_MAT_PIXEL_GRAY 3 -#define NCNN_MAT_PIXEL_RGBA 4 -#define NCNN_MAT_PIXEL_BGRA 5 -#define NCNN_MAT_PIXEL_X2Y(X, Y) (X | (Y << 16)) -NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels(const 
unsigned char* pixels, int type, int w, int h, int stride, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_resize(const unsigned char* pixels, int type, int w, int h, int stride, int target_width, int target_height, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_roi(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, ncnn_allocator_t allocator); -NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_roi_resize(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, int target_width, int target_height, ncnn_allocator_t allocator); -NCNN_EXPORT void ncnn_mat_to_pixels(const ncnn_mat_t mat, unsigned char* pixels, int type, int stride); -NCNN_EXPORT void ncnn_mat_to_pixels_resize(const ncnn_mat_t mat, unsigned char* pixels, int type, int target_width, int target_height, int target_stride); - -#endif /* NCNN_PIXEL */ - -NCNN_EXPORT void ncnn_mat_substract_mean_normalize(ncnn_mat_t mat, const float* mean_vals, const float* norm_vals); - -NCNN_EXPORT void ncnn_convert_packing(const ncnn_mat_t src, ncnn_mat_t* dst, int elempack, const ncnn_option_t opt); -NCNN_EXPORT void ncnn_flatten(const ncnn_mat_t src, ncnn_mat_t* dst, const ncnn_option_t opt); - -/* blob api */ -typedef struct __ncnn_blob_t* ncnn_blob_t; - -#if NCNN_STRING -NCNN_EXPORT const char* ncnn_blob_get_name(const ncnn_blob_t blob); -#endif /* NCNN_STRING */ - -NCNN_EXPORT int ncnn_blob_get_producer(const ncnn_blob_t blob); -NCNN_EXPORT int ncnn_blob_get_consumer(const ncnn_blob_t blob); - -NCNN_EXPORT void ncnn_blob_get_shape(const ncnn_blob_t blob, int* dims, int* w, int* h, int* c); - -/* paramdict api */ -typedef struct __ncnn_paramdict_t* ncnn_paramdict_t; - -NCNN_EXPORT ncnn_paramdict_t ncnn_paramdict_create(); -NCNN_EXPORT void ncnn_paramdict_destroy(ncnn_paramdict_t pd); - -NCNN_EXPORT int ncnn_paramdict_get_type(const ncnn_paramdict_t pd, int id); - 
-NCNN_EXPORT int ncnn_paramdict_get_int(const ncnn_paramdict_t pd, int id, int def); -NCNN_EXPORT float ncnn_paramdict_get_float(const ncnn_paramdict_t pd, int id, float def); -NCNN_EXPORT ncnn_mat_t ncnn_paramdict_get_array(const ncnn_paramdict_t pd, int id, const ncnn_mat_t def); - -NCNN_EXPORT void ncnn_paramdict_set_int(ncnn_paramdict_t pd, int id, int i); -NCNN_EXPORT void ncnn_paramdict_set_float(ncnn_paramdict_t pd, int id, float f); -NCNN_EXPORT void ncnn_paramdict_set_array(ncnn_paramdict_t pd, int id, const ncnn_mat_t v); - -/* datareader api */ -typedef struct __ncnn_datareader_t* ncnn_datareader_t; -struct NCNN_EXPORT __ncnn_datareader_t -{ - void* pthis; - -#if NCNN_STRING - int (*scan)(ncnn_datareader_t dr, const char* format, void* p); -#endif /* NCNN_STRING */ - size_t (*read)(ncnn_datareader_t dr, void* buf, size_t size); -}; - -NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create(); -#if NCNN_STDIO -NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create_from_stdio(FILE* fp); -#endif /* NCNN_STDIO */ -NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create_from_memory(const unsigned char** mem); -NCNN_EXPORT void ncnn_datareader_destroy(ncnn_datareader_t dr); - -/* modelbin api */ -typedef struct __ncnn_modelbin_t* ncnn_modelbin_t; -struct NCNN_EXPORT __ncnn_modelbin_t -{ - void* pthis; - - ncnn_mat_t (*load_1d)(const ncnn_modelbin_t mb, int w, int type); - ncnn_mat_t (*load_2d)(const ncnn_modelbin_t mb, int w, int h, int type); - ncnn_mat_t (*load_3d)(const ncnn_modelbin_t mb, int w, int h, int c, int type); -}; - -NCNN_EXPORT ncnn_modelbin_t ncnn_modelbin_create_from_datareader(const ncnn_datareader_t dr); -NCNN_EXPORT ncnn_modelbin_t ncnn_modelbin_create_from_mat_array(const ncnn_mat_t* weights, int n); -NCNN_EXPORT void ncnn_modelbin_destroy(ncnn_modelbin_t mb); - -/* layer api */ -typedef struct __ncnn_layer_t* ncnn_layer_t; -struct NCNN_EXPORT __ncnn_layer_t -{ - void* pthis; - - int (*load_param)(ncnn_layer_t layer, const ncnn_paramdict_t pd); - 
int (*load_model)(ncnn_layer_t layer, const ncnn_modelbin_t mb); - - int (*create_pipeline)(ncnn_layer_t layer, const ncnn_option_t opt); - int (*destroy_pipeline)(ncnn_layer_t layer, const ncnn_option_t opt); - - int (*forward_1)(const ncnn_layer_t layer, const ncnn_mat_t bottom_blob, ncnn_mat_t* top_blob, const ncnn_option_t opt); - int (*forward_n)(const ncnn_layer_t layer, const ncnn_mat_t* bottom_blobs, int n, ncnn_mat_t** top_blobs, int n2, const ncnn_option_t opt); - - int (*forward_inplace_1)(const ncnn_layer_t layer, ncnn_mat_t bottom_top_blob, const ncnn_option_t opt); - int (*forward_inplace_n)(const ncnn_layer_t layer, ncnn_mat_t* bottom_top_blobs, int n, const ncnn_option_t opt); -}; - -NCNN_EXPORT ncnn_layer_t ncnn_layer_create(); -NCNN_EXPORT ncnn_layer_t ncnn_layer_create_by_typeindex(int typeindex); -#if NCNN_STRING -NCNN_EXPORT ncnn_layer_t ncnn_layer_create_by_type(const char* type); -#endif /* NCNN_STRING */ -NCNN_EXPORT void ncnn_layer_destroy(ncnn_layer_t layer); - -#if NCNN_STRING -NCNN_EXPORT const char* ncnn_layer_get_name(const ncnn_layer_t layer); -#endif /* NCNN_STRING */ - -NCNN_EXPORT int ncnn_layer_get_typeindex(const ncnn_layer_t layer); -#if NCNN_STRING -NCNN_EXPORT const char* ncnn_layer_get_type(const ncnn_layer_t layer); -#endif /* NCNN_STRING */ - -NCNN_EXPORT int ncnn_layer_get_one_blob_only(const ncnn_layer_t layer); -NCNN_EXPORT int ncnn_layer_get_support_inplace(const ncnn_layer_t layer); -NCNN_EXPORT int ncnn_layer_get_support_vulkan(const ncnn_layer_t layer); -NCNN_EXPORT int ncnn_layer_get_support_packing(const ncnn_layer_t layer); -NCNN_EXPORT int ncnn_layer_get_support_bf16_storage(const ncnn_layer_t layer); -NCNN_EXPORT int ncnn_layer_get_support_fp16_storage(const ncnn_layer_t layer); -NCNN_EXPORT int ncnn_layer_get_support_image_storage(const ncnn_layer_t layer); - -NCNN_EXPORT void ncnn_layer_set_one_blob_only(ncnn_layer_t layer, int enable); -NCNN_EXPORT void ncnn_layer_set_support_inplace(ncnn_layer_t layer, int 
enable); -NCNN_EXPORT void ncnn_layer_set_support_vulkan(ncnn_layer_t layer, int enable); -NCNN_EXPORT void ncnn_layer_set_support_packing(ncnn_layer_t layer, int enable); -NCNN_EXPORT void ncnn_layer_set_support_bf16_storage(ncnn_layer_t layer, int enable); -NCNN_EXPORT void ncnn_layer_set_support_fp16_storage(ncnn_layer_t layer, int enable); -NCNN_EXPORT void ncnn_layer_set_support_image_storage(ncnn_layer_t layer, int enable); - -NCNN_EXPORT int ncnn_layer_get_bottom_count(const ncnn_layer_t layer); -NCNN_EXPORT int ncnn_layer_get_bottom(const ncnn_layer_t layer, int i); -NCNN_EXPORT int ncnn_layer_get_top_count(const ncnn_layer_t layer); -NCNN_EXPORT int ncnn_layer_get_top(const ncnn_layer_t layer, int i); - -NCNN_EXPORT void ncnn_blob_get_bottom_shape(const ncnn_layer_t layer, int i, int* dims, int* w, int* h, int* c); -NCNN_EXPORT void ncnn_blob_get_top_shape(const ncnn_layer_t layer, int i, int* dims, int* w, int* h, int* c); - -/* layer factory function */ -typedef ncnn_layer_t (*ncnn_layer_creator_t)(void* userdata); -typedef void (*ncnn_layer_destroyer_t)(ncnn_layer_t layer, void* userdata); - -typedef struct __ncnn_net_custom_layer_factory_t* ncnn_net_custom_layer_factory_t; -struct __ncnn_net_custom_layer_factory_t -{ - ncnn_layer_creator_t creator; - ncnn_layer_destroyer_t destroyer; - void* userdata; - ncnn_net_custom_layer_factory_t next; -}; - -/* net api */ -typedef struct __ncnn_net_t* ncnn_net_t; -struct __ncnn_net_t -{ - void* pthis; - - ncnn_net_custom_layer_factory_t custom_layer_factory; -}; - -NCNN_EXPORT ncnn_net_t ncnn_net_create(); -NCNN_EXPORT void ncnn_net_destroy(ncnn_net_t net); - -NCNN_EXPORT void ncnn_net_set_option(ncnn_net_t net, ncnn_option_t opt); - -#if NCNN_STRING -NCNN_EXPORT void ncnn_net_register_custom_layer_by_type(ncnn_net_t net, const char* type, ncnn_layer_creator_t creator, ncnn_layer_destroyer_t destroyer, void* userdata); -#endif /* NCNN_STRING */ -NCNN_EXPORT void 
ncnn_net_register_custom_layer_by_typeindex(ncnn_net_t net, int typeindex, ncnn_layer_creator_t creator, ncnn_layer_destroyer_t destroyer, void* userdata); - -#if NCNN_STDIO -#if NCNN_STRING -NCNN_EXPORT int ncnn_net_load_param(ncnn_net_t net, const char* path); -#endif /* NCNN_STRING */ -NCNN_EXPORT int ncnn_net_load_param_bin(ncnn_net_t net, const char* path); -NCNN_EXPORT int ncnn_net_load_model(ncnn_net_t net, const char* path); -#endif /* NCNN_STDIO */ - -#if NCNN_STDIO -#if NCNN_STRING -NCNN_EXPORT int ncnn_net_load_param_memory(ncnn_net_t net, const char* mem); -#endif /* NCNN_STRING */ -#endif /* NCNN_STDIO */ -NCNN_EXPORT int ncnn_net_load_param_bin_memory(ncnn_net_t net, const unsigned char* mem); -NCNN_EXPORT int ncnn_net_load_model_memory(ncnn_net_t net, const unsigned char* mem); - -#if NCNN_STRING -NCNN_EXPORT int ncnn_net_load_param_datareader(ncnn_net_t net, const ncnn_datareader_t dr); -#endif /* NCNN_STRING */ -NCNN_EXPORT int ncnn_net_load_param_bin_datareader(ncnn_net_t net, const ncnn_datareader_t dr); -NCNN_EXPORT int ncnn_net_load_model_datareader(ncnn_net_t net, const ncnn_datareader_t dr); - -NCNN_EXPORT void ncnn_net_clear(ncnn_net_t net); - -/* extractor api */ -typedef struct __ncnn_extractor_t* ncnn_extractor_t; - -NCNN_EXPORT ncnn_extractor_t ncnn_extractor_create(ncnn_net_t net); -NCNN_EXPORT void ncnn_extractor_destroy(ncnn_extractor_t ex); - -NCNN_EXPORT void ncnn_extractor_set_option(ncnn_extractor_t ex, const ncnn_option_t opt); - -#if NCNN_STRING -NCNN_EXPORT int ncnn_extractor_input(ncnn_extractor_t ex, const char* name, const ncnn_mat_t mat); -NCNN_EXPORT int ncnn_extractor_extract(ncnn_extractor_t ex, const char* name, ncnn_mat_t* mat); -#endif /* NCNN_STRING */ -NCNN_EXPORT int ncnn_extractor_input_index(ncnn_extractor_t ex, int index, const ncnn_mat_t mat); -NCNN_EXPORT int ncnn_extractor_extract_index(ncnn_extractor_t ex, int index, ncnn_mat_t* mat); - -#ifdef __cplusplus -} /* extern "C" */ -#endif - -#endif /* 
NCNN_C_API_H */ diff --git a/ncnn/command.h b/ncnn/command.h deleted file mode 100644 index 337d0850..00000000 --- a/ncnn/command.h +++ /dev/null @@ -1,136 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_COMMAND_H -#define NCNN_COMMAND_H - -#include "platform.h" - -#if NCNN_VULKAN - -#include "mat.h" - -#include - -namespace ncnn { - -class Pipeline; -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 26 -class ImportAndroidHardwareBufferPipeline; -#endif // __ANDROID_API__ >= 26 -#endif // NCNN_PLATFORM_API -class VkComputePrivate; -class NCNN_EXPORT VkCompute -{ -public: - explicit VkCompute(const VulkanDevice* vkdev); - virtual ~VkCompute(); - -public: - void record_upload(const Mat& src, VkMat& dst, const Option& opt); - - void record_upload(const Mat& src, VkImageMat& dst, const Option& opt); - - void record_download(const VkMat& src, Mat& dst, const Option& opt); - - void record_download(const VkImageMat& src, Mat& dst, const Option& opt); - - void record_buffer_to_image(const VkMat& src, VkImageMat& dst, const Option& opt); - - void record_image_to_buffer(const VkImageMat& src, VkMat& dst, const Option& opt); - - void record_clone(const Mat& src, VkMat& dst, const Option& opt); - - void record_clone(const Mat& src, VkImageMat& dst, const Option& opt); - - void 
record_clone(const VkMat& src, Mat& dst, const Option& opt); - - void record_clone(const VkImageMat& src, Mat& dst, const Option& opt); - - void record_clone(const VkMat& src, VkMat& dst, const Option& opt); - - void record_clone(const VkImageMat& src, VkImageMat& dst, const Option& opt); - - void record_clone(const VkMat& src, VkImageMat& dst, const Option& opt); - - void record_clone(const VkImageMat& src, VkMat& dst, const Option& opt); - - void record_pipeline(const Pipeline* pipeline, const std::vector& bindings, const std::vector& constants, const VkMat& dispatcher); - - void record_pipeline(const Pipeline* pipeline, const std::vector& bindings, const std::vector& constants, const VkImageMat& dispatcher); - - void record_pipeline(const Pipeline* pipeline, const std::vector& buffer_bindings, const std::vector& image_bindings, const std::vector& constants, const VkMat& dispatcher); - void record_pipeline(const Pipeline* pipeline, const std::vector& buffer_bindings, const std::vector& image_bindings, const std::vector& constants, const VkImageMat& dispatcher); - void record_pipeline(const Pipeline* pipeline, const std::vector& buffer_bindings, const std::vector& image_bindings, const std::vector& constants, const Mat& dispatcher); - -#if NCNN_BENCHMARK - void record_write_timestamp(uint32_t query); -#endif // NCNN_BENCHMARK - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 26 - void record_import_android_hardware_buffer(const ImportAndroidHardwareBufferPipeline* pipeline, const VkImageMat& src, const VkMat& dst); - - void record_import_android_hardware_buffer(const ImportAndroidHardwareBufferPipeline* pipeline, const VkImageMat& src, const VkImageMat& dst); -#endif // __ANDROID_API__ >= 26 -#endif // NCNN_PLATFORM_API - - int submit_and_wait(); - - int reset(); - -#if NCNN_BENCHMARK - int create_query_pool(uint32_t query_count); - - int get_query_pool_results(uint32_t first_query, uint32_t query_count, std::vector& results); -#endif // NCNN_BENCHMARK - 
-protected: - const VulkanDevice* vkdev; - - void barrier_readwrite(const VkMat& binding); - void barrier_readwrite(const VkImageMat& binding); - void barrier_readonly(const VkImageMat& binding); - -private: - VkComputePrivate* const d; -}; - -class VkTransferPrivate; -class NCNN_EXPORT VkTransfer -{ -public: - explicit VkTransfer(const VulkanDevice* vkdev); - virtual ~VkTransfer(); - -public: - void record_upload(const Mat& src, VkMat& dst, const Option& opt, bool flatten = true); - - void record_upload(const Mat& src, VkImageMat& dst, const Option& opt); - - int submit_and_wait(); - -protected: - const VulkanDevice* vkdev; - -private: - VkTransferPrivate* const d; -}; - -} // namespace ncnn - -#endif // NCNN_VULKAN - -#endif // NCNN_COMMAND_H diff --git a/ncnn/cpu.h b/ncnn/cpu.h deleted file mode 100644 index 90f3a938..00000000 --- a/ncnn/cpu.h +++ /dev/null @@ -1,120 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_CPU_H -#define NCNN_CPU_H - -#include - -#if defined __ANDROID__ || defined __linux__ -#include // cpu_set_t -#endif - -#include "platform.h" - -namespace ncnn { - -class NCNN_EXPORT CpuSet -{ -public: - CpuSet(); - void enable(int cpu); - void disable(int cpu); - void disable_all(); - bool is_enabled(int cpu) const; - int num_enabled() const; - -public: -#if defined __ANDROID__ || defined __linux__ - cpu_set_t cpu_set; -#endif -#if __APPLE__ - unsigned int policy; -#endif -}; - -// test optional cpu features -// neon = armv7 neon or aarch64 asimd -NCNN_EXPORT int cpu_support_arm_neon(); -// vfpv4 = armv7 fp16 + fma -NCNN_EXPORT int cpu_support_arm_vfpv4(); -// asimdhp = aarch64 asimd half precision -NCNN_EXPORT int cpu_support_arm_asimdhp(); -// asimddp = aarch64 asimd dot product -NCNN_EXPORT int cpu_support_arm_asimddp(); - -// avx2 = x86_64 avx2 + fma + f16c -NCNN_EXPORT int cpu_support_x86_avx2(); - -// avx = x86_64 avx -NCNN_EXPORT int cpu_support_x86_avx(); - -// msa = mips mas -NCNN_EXPORT int cpu_support_mips_msa(); -// mmi = loongson mmi -NCNN_EXPORT int cpu_support_loongson_mmi(); - -// v = riscv vector -NCNN_EXPORT int cpu_support_riscv_v(); -// zfh = riscv half-precision float -NCNN_EXPORT int cpu_support_riscv_zfh(); -// vlenb = riscv vector length in bytes -NCNN_EXPORT int cpu_riscv_vlenb(); - -// cpu info -NCNN_EXPORT int get_cpu_count(); -NCNN_EXPORT int get_little_cpu_count(); -NCNN_EXPORT int get_big_cpu_count(); - -// bind all threads on little clusters if powersave enabled -// affects HMP arch cpu like ARM big.LITTLE -// only implemented on android at the moment -// switching powersave is expensive and not thread-safe -// 0 = all cores enabled(default) -// 1 = only little clusters enabled -// 2 = only big clusters enabled -// return 0 if success for setter function -NCNN_EXPORT int get_cpu_powersave(); -NCNN_EXPORT int set_cpu_powersave(int powersave); - -// convenient wrapper -NCNN_EXPORT const CpuSet& 
get_cpu_thread_affinity_mask(int powersave); - -// set explicit thread affinity -NCNN_EXPORT int set_cpu_thread_affinity(const CpuSet& thread_affinity_mask); - -// misc function wrapper for openmp routines -NCNN_EXPORT int get_omp_num_threads(); -NCNN_EXPORT void set_omp_num_threads(int num_threads); - -NCNN_EXPORT int get_omp_dynamic(); -NCNN_EXPORT void set_omp_dynamic(int dynamic); - -NCNN_EXPORT int get_omp_thread_num(); - -NCNN_EXPORT int get_kmp_blocktime(); -NCNN_EXPORT void set_kmp_blocktime(int time_ms); - -// need to flush denormals on Intel Chipset. -// Other architectures such as ARM can be added as needed. -// 0 = DAZ OFF, FTZ OFF -// 1 = DAZ ON , FTZ OFF -// 2 = DAZ OFF, FTZ ON -// 3 = DAZ ON, FTZ ON -NCNN_EXPORT int get_flush_denormals(); -NCNN_EXPORT int set_flush_denormals(int flush_denormals); - -} // namespace ncnn - -#endif // NCNN_CPU_H diff --git a/ncnn/datareader.h b/ncnn/datareader.h deleted file mode 100644 index ed2aba3c..00000000 --- a/ncnn/datareader.h +++ /dev/null @@ -1,122 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_DATAREADER_H -#define NCNN_DATAREADER_H - -#include "platform.h" -#if NCNN_STDIO -#include -#endif - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 9 -#include -#endif -#endif // NCNN_PLATFORM_API - -namespace ncnn { - -// data read wrapper -class NCNN_EXPORT DataReader -{ -public: - DataReader(); - virtual ~DataReader(); - -#if NCNN_STRING - // parse plain param text - // return 1 if scan success - virtual int scan(const char* format, void* p) const; -#endif // NCNN_STRING - - // read binary param and model data - // return bytes read - virtual size_t read(void* buf, size_t size) const; - - // get model data reference - // return bytes referenced - virtual size_t reference(size_t size, const void** buf) const; -}; - -#if NCNN_STDIO -class DataReaderFromStdioPrivate; -class NCNN_EXPORT DataReaderFromStdio : public DataReader -{ -public: - explicit DataReaderFromStdio(FILE* fp); - virtual ~DataReaderFromStdio(); - -#if NCNN_STRING - virtual int scan(const char* format, void* p) const; -#endif // NCNN_STRING - virtual size_t read(void* buf, size_t size) const; - -private: - DataReaderFromStdio(const DataReaderFromStdio&); - DataReaderFromStdio& operator=(const DataReaderFromStdio&); - -private: - DataReaderFromStdioPrivate* const d; -}; -#endif // NCNN_STDIO - -class DataReaderFromMemoryPrivate; -class NCNN_EXPORT DataReaderFromMemory : public DataReader -{ -public: - explicit DataReaderFromMemory(const unsigned char*& mem); - virtual ~DataReaderFromMemory(); - -#if NCNN_STRING - virtual int scan(const char* format, void* p) const; -#endif // NCNN_STRING - virtual size_t read(void* buf, size_t size) const; - virtual size_t reference(size_t size, const void** buf) const; - -private: - DataReaderFromMemory(const DataReaderFromMemory&); - DataReaderFromMemory& operator=(const DataReaderFromMemory&); - -private: - DataReaderFromMemoryPrivate* const d; -}; - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 9 -class DataReaderFromAndroidAssetPrivate; -class 
NCNN_EXPORT DataReaderFromAndroidAsset : public DataReader -{ -public: - explicit DataReaderFromAndroidAsset(AAsset* asset); - virtual ~DataReaderFromAndroidAsset(); - -#if NCNN_STRING - virtual int scan(const char* format, void* p) const; -#endif // NCNN_STRING - virtual size_t read(void* buf, size_t size) const; - -private: - DataReaderFromAndroidAsset(const DataReaderFromAndroidAsset&); - DataReaderFromAndroidAsset& operator=(const DataReaderFromAndroidAsset&); - -private: - DataReaderFromAndroidAssetPrivate* const d; -}; -#endif // __ANDROID_API__ >= 9 -#endif // NCNN_PLATFORM_API - -} // namespace ncnn - -#endif // NCNN_DATAREADER_H diff --git a/ncnn/gpu.h b/ncnn/gpu.h deleted file mode 100644 index 9b08677b..00000000 --- a/ncnn/gpu.h +++ /dev/null @@ -1,351 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_GPU_H -#define NCNN_GPU_H - -#include "platform.h" - -#if NCNN_VULKAN - -#include "mat.h" - -#include - -#include "vulkan_header_fix.h" - -namespace ncnn { - -// instance -NCNN_EXPORT int create_gpu_instance(); -NCNN_EXPORT void destroy_gpu_instance(); - -// instance extension capability -extern int support_VK_KHR_external_memory_capabilities; -extern int support_VK_KHR_get_physical_device_properties2; -extern int support_VK_KHR_get_surface_capabilities2; -extern int support_VK_KHR_surface; -extern int support_VK_EXT_debug_utils; -#if __ANDROID_API__ >= 26 -extern int support_VK_KHR_android_surface; -#endif // __ANDROID_API__ >= 26 - -// VK_KHR_external_memory_capabilities -extern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR; - -// VK_KHR_get_physical_device_properties2 -extern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR; -extern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR; -extern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR; -extern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR; -extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR; -extern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR; -extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR; - -// VK_KHR_get_surface_capabilities2 -extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR; -extern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR; - -// VK_KHR_surface -extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; -extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; -extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; 
-extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; -extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; - -#if __ANDROID_API__ >= 26 -// VK_KHR_android_surface -extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR; -#endif // __ANDROID_API__ >= 26 - -// get info -NCNN_EXPORT int get_gpu_count(); -NCNN_EXPORT int get_default_gpu_index(); - -class GpuInfoPrivate; -class NCNN_EXPORT GpuInfo -{ -public: - explicit GpuInfo(); - virtual ~GpuInfo(); - - // vulkan physical device - VkPhysicalDevice physical_device() const; - - // memory properties - const VkPhysicalDeviceMemoryProperties& physical_device_memory_properties() const; - - // info - uint32_t api_version() const; - uint32_t driver_version() const; - uint32_t vendor_id() const; - uint32_t device_id() const; - const char* device_name() const; - uint8_t* pipeline_cache_uuid() const; - - // 0 = discrete gpu - // 1 = integrated gpu - // 2 = virtual gpu - // 3 = cpu - int type() const; - - // hardware limit - uint32_t max_shared_memory_size() const; - uint32_t max_workgroup_count_x() const; - uint32_t max_workgroup_count_y() const; - uint32_t max_workgroup_count_z() const; - uint32_t max_workgroup_invocations() const; - uint32_t max_workgroup_size_x() const; - uint32_t max_workgroup_size_y() const; - uint32_t max_workgroup_size_z() const; - size_t memory_map_alignment() const; - size_t buffer_offset_alignment() const; - size_t non_coherent_atom_size() const; - size_t buffer_image_granularity() const; - uint32_t max_image_dimension_1d() const; - uint32_t max_image_dimension_2d() const; - uint32_t max_image_dimension_3d() const; - float timestamp_period() const; - - // runtime - uint32_t compute_queue_family_index() const; - uint32_t graphics_queue_family_index() const; - uint32_t transfer_queue_family_index() const; - - uint32_t compute_queue_count() const; - uint32_t graphics_queue_count() const; - uint32_t transfer_queue_count() 
const; - - // property - bool unified_compute_transfer_queue() const; - - // subgroup - uint32_t subgroup_size() const; - bool support_subgroup_basic() const; - bool support_subgroup_vote() const; - bool support_subgroup_ballot() const; - bool support_subgroup_shuffle() const; - - // bug is not feature - bool bug_storage_buffer_no_l1() const; - bool bug_corrupted_online_pipeline_cache() const; - bool bug_buffer_image_load_zero() const; - - // but sometimes bug is a feature - bool bug_implicit_fp16_arithmetic() const; - - // fp16 and int8 feature - bool support_fp16_packed() const; - bool support_fp16_storage() const; - bool support_fp16_arithmetic() const; - bool support_int8_packed() const; - bool support_int8_storage() const; - bool support_int8_arithmetic() const; - - // ycbcr conversion feature - bool support_ycbcr_conversion() const; - - // extension capability - int support_VK_KHR_8bit_storage() const; - int support_VK_KHR_16bit_storage() const; - int support_VK_KHR_bind_memory2() const; - int support_VK_KHR_create_renderpass2() const; - int support_VK_KHR_dedicated_allocation() const; - int support_VK_KHR_descriptor_update_template() const; - int support_VK_KHR_external_memory() const; - int support_VK_KHR_get_memory_requirements2() const; - int support_VK_KHR_maintenance1() const; - int support_VK_KHR_maintenance2() const; - int support_VK_KHR_maintenance3() const; - int support_VK_KHR_multiview() const; - int support_VK_KHR_push_descriptor() const; - int support_VK_KHR_sampler_ycbcr_conversion() const; - int support_VK_KHR_shader_float16_int8() const; - int support_VK_KHR_shader_float_controls() const; - int support_VK_KHR_storage_buffer_storage_class() const; - int support_VK_KHR_swapchain() const; - int support_VK_EXT_descriptor_indexing() const; - int support_VK_EXT_memory_budget() const; - int support_VK_EXT_queue_family_foreign() const; -#if __ANDROID_API__ >= 26 - int support_VK_ANDROID_external_memory_android_hardware_buffer() const; -#endif // 
__ANDROID_API__ >= 26 - -private: - GpuInfo(const GpuInfo&); - GpuInfo& operator=(const GpuInfo&); - -private: - friend int create_gpu_instance(); - GpuInfoPrivate* const d; -}; - -NCNN_EXPORT const GpuInfo& get_gpu_info(int device_index = get_default_gpu_index()); - -class VkAllocator; -class VkCompute; -class Option; -class PipelineCache; -class VulkanDevicePrivate; -class NCNN_EXPORT VulkanDevice -{ -public: - VulkanDevice(int device_index = get_default_gpu_index()); - ~VulkanDevice(); - - const GpuInfo& info; - - VkDevice vkdevice() const; - - VkShaderModule compile_shader_module(const uint32_t* spv_data, size_t spv_data_size) const; - - // with fixed workgroup size - VkShaderModule compile_shader_module(const uint32_t* spv_data, size_t spv_data_size, uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z) const; - - // helper for creating pipeline - int create_descriptorset_layout(int binding_count, const int* binding_types, VkDescriptorSetLayout* descriptorset_layout) const; - int create_pipeline_layout(int push_constant_count, VkDescriptorSetLayout descriptorset_layout, VkPipelineLayout* pipeline_layout) const; - int create_pipeline(VkShaderModule shader_module, VkPipelineLayout pipeline_layout, const std::vector& specializations, VkPipeline* pipeline) const; - int create_descriptor_update_template(int binding_count, const int* binding_types, VkDescriptorSetLayout descriptorset_layout, VkPipelineLayout pipeline_layout, VkDescriptorUpdateTemplateKHR* descriptor_update_template) const; - - uint32_t find_memory_index(uint32_t memory_type_bits, VkFlags required, VkFlags preferred, VkFlags preferred_not) const; - bool is_mappable(uint32_t memory_type_index) const; - bool is_coherent(uint32_t memory_type_index) const; - - VkQueue acquire_queue(uint32_t queue_family_index) const; - void reclaim_queue(uint32_t queue_family_index, VkQueue queue) const; - - // allocator on this device - VkAllocator* acquire_blob_allocator() const; - void 
reclaim_blob_allocator(VkAllocator* allocator) const; - - VkAllocator* acquire_staging_allocator() const; - void reclaim_staging_allocator(VkAllocator* allocator) const; - - // immutable sampler for texelfetch - const VkSampler* immutable_texelfetch_sampler() const; - - // dummy buffer image - VkMat get_dummy_buffer() const; - VkImageMat get_dummy_image() const; - VkImageMat get_dummy_image_readonly() const; - - // pipeline cache on this device - const PipelineCache* get_pipeline_cache() const; - - // test image allocation - bool shape_support_image_storage(const Mat& shape) const; - - // current gpu heap memory budget in MB - uint32_t get_heap_budget() const; - - // utility operator - void convert_packing(const VkMat& src, VkMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const; - void convert_packing(const VkImageMat& src, VkImageMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const; - void convert_packing(const VkMat& src, VkImageMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const; - void convert_packing(const VkImageMat& src, VkMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const; - - // VK_KHR_bind_memory2 - PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR; - PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR; - - // VK_KHR_create_renderpass2 - PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR; - PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR; - PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR; - PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR; - - // VK_KHR_descriptor_update_template - PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; - PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; - PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR; - - // VK_KHR_get_memory_requirements2 - PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR; - PFN_vkGetBufferMemoryRequirements2KHR 
vkGetBufferMemoryRequirements2KHR; - PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR; - - // VK_KHR_maintenance1 - PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR; - - // VK_KHR_maintenance3 - PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; - - // VK_KHR_push_descriptor - PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR; - PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR; - - // VK_KHR_sampler_ycbcr_conversion - PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR; - PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR; - - // VK_KHR_swapchain - PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; - PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; - PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; - PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; - PFN_vkQueuePresentKHR vkQueuePresentKHR; - -#if __ANDROID_API__ >= 26 - // VK_ANDROID_external_memory_android_hardware_buffer - PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID; - PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID; -#endif // __ANDROID_API__ >= 26 - -protected: - // device extension - int init_device_extension(); - -private: - VulkanDevice(const VulkanDevice&); - VulkanDevice& operator=(const VulkanDevice&); - -private: - VulkanDevicePrivate* const d; -}; - -NCNN_EXPORT VulkanDevice* get_gpu_device(int device_index = get_default_gpu_index()); - -// online spirv compilation -NCNN_EXPORT int compile_spirv_module(const char* comp_string, const Option& opt, std::vector& spirv); -NCNN_EXPORT int compile_spirv_module(const char* comp_data, int comp_data_size, const Option& opt, std::vector& spirv); -NCNN_EXPORT int compile_spirv_module(int shader_type_index, const Option& opt, std::vector& spirv); - -// info from spirv -class NCNN_EXPORT ShaderInfo -{ -public: - int specialization_count; - int 
binding_count; - int push_constant_count; - - // 0 = null - // 1 = storage buffer - // 2 = storage image - // 3 = combined image sampler - int binding_types[16]; // 16 is large enough I think ... - - int reserved_0; - int reserved_1; - int reserved_2; - int reserved_3; -}; - -NCNN_EXPORT int resolve_shader_info(const uint32_t* spv_data, size_t spv_data_size, ShaderInfo& shader_info); - -} // namespace ncnn - -#endif // NCNN_VULKAN - -#endif // NCNN_GPU_H diff --git a/ncnn/layer.h b/ncnn/layer.h deleted file mode 100644 index 7707ddc2..00000000 --- a/ncnn/layer.h +++ /dev/null @@ -1,216 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_LAYER_H -#define NCNN_LAYER_H - -#include "mat.h" -#include "modelbin.h" -#include "option.h" -#include "paramdict.h" -#include "platform.h" - -#include - -#if NCNN_VULKAN -#include "command.h" -#include "pipeline.h" - -#include -#endif // NCNN_VULKAN - -namespace ncnn { - -class NCNN_EXPORT Layer -{ -public: - // empty - Layer(); - // virtual destructor - virtual ~Layer(); - - // load layer specific parameter from parsed dict - // return 0 if success - virtual int load_param(const ParamDict& pd); - - // load layer specific weight data from model binary - // return 0 if success - virtual int load_model(const ModelBin& mb); - - // layer implementation specific setup - // return 0 if success - virtual int create_pipeline(const Option& opt); - - // layer implementation specific clean - // return 0 if success - virtual int destroy_pipeline(const Option& opt); - -public: - // one input and one output blob - bool one_blob_only; - - // support inplace inference - bool support_inplace; - - // support vulkan compute - bool support_vulkan; - - // accept input blob with packed storage - bool support_packing; - - // accept bf16 - bool support_bf16_storage; - - // accept fp16 - bool support_fp16_storage; - - // accept int8 - bool support_int8_storage; - - // shader image storage - bool support_image_storage; - - // shader tensor storage - bool support_tensor_storage; - - // TODO drop these fields - bool support_weight_fp16_storage; - - bool support_reserved_0; - bool support_reserved_1; - bool support_reserved_2; - bool support_reserved_3; - bool support_reserved_4; - bool support_reserved_5; - bool support_reserved_6; - bool support_reserved_7; - bool support_reserved_8; - bool support_reserved_9; - bool support_reserved_10; - bool support_reserved_11; - bool support_reserved_12; - bool support_reserved_13; - -public: - // implement inference - // return 0 if success - virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& 
opt) const; - virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const; - - // implement inplace inference - // return 0 if success - virtual int forward_inplace(std::vector& bottom_top_blobs, const Option& opt) const; - virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const; - -#if NCNN_VULKAN -public: - // upload weight blob from host to device - virtual int upload_model(VkTransfer& cmd, const Option& opt); - -public: - // implement inference - // return 0 if success - virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, VkCompute& cmd, const Option& opt) const; - virtual int forward(const VkMat& bottom_blob, VkMat& top_blob, VkCompute& cmd, const Option& opt) const; - - // implement inference - // return 0 if success - virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, VkCompute& cmd, const Option& opt) const; - virtual int forward(const VkImageMat& bottom_blob, VkImageMat& top_blob, VkCompute& cmd, const Option& opt) const; - - // implement inplace inference - // return 0 if success - virtual int forward_inplace(std::vector& bottom_top_blobs, VkCompute& cmd, const Option& opt) const; - virtual int forward_inplace(VkMat& bottom_top_blob, VkCompute& cmd, const Option& opt) const; - - // implement inplace inference - // return 0 if success - virtual int forward_inplace(std::vector& bottom_top_blobs, VkCompute& cmd, const Option& opt) const; - virtual int forward_inplace(VkImageMat& bottom_top_blob, VkCompute& cmd, const Option& opt) const; - -public: - // assigned immediately after creating this layer - const VulkanDevice* vkdev; -#endif // NCNN_VULKAN - -public: - // custom user data - void* userdata; - // layer type index - int typeindex; -#if NCNN_STRING - // layer type name - std::string type; - // layer name - std::string name; -#endif // NCNN_STRING - // blob index which this layer needs as input - std::vector bottoms; - // blob index which this layer produces as 
output - std::vector tops; - // shape hint - std::vector bottom_shapes; - std::vector top_shapes; -}; - -// layer factory function -typedef Layer* (*layer_creator_func)(void*); -typedef void (*layer_destroyer_func)(Layer*, void*); - -struct layer_registry_entry -{ -#if NCNN_STRING - // layer type name - const char* name; -#endif // NCNN_STRING - // layer factory entry - layer_creator_func creator; -}; - -struct custom_layer_registry_entry -{ -#if NCNN_STRING - // layer type name - const char* name; -#endif // NCNN_STRING - // layer factory entry - layer_creator_func creator; - layer_destroyer_func destroyer; - void* userdata; -}; - -#if NCNN_STRING -// get layer type from type name -NCNN_EXPORT int layer_to_index(const char* type); -// create layer from type name -NCNN_EXPORT Layer* create_layer(const char* type); -#endif // NCNN_STRING -// create layer from layer type -NCNN_EXPORT Layer* create_layer(int index); - -#define DEFINE_LAYER_CREATOR(name) \ - ::ncnn::Layer* name##_layer_creator(void* /*userdata*/) \ - { \ - return new name; \ - } - -#define DEFINE_LAYER_DESTROYER(name) \ - void name##_layer_destroyer(::ncnn::Layer* layer, void* /*userdata*/) \ - { \ - delete layer; \ - } - -} // namespace ncnn - -#endif // NCNN_LAYER_H diff --git a/ncnn/layer_shader_type.h b/ncnn/layer_shader_type.h deleted file mode 100644 index c143e7de..00000000 --- a/ncnn/layer_shader_type.h +++ /dev/null @@ -1,29 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. 
You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_LAYER_SHADER_TYPE_H -#define NCNN_LAYER_SHADER_TYPE_H - -namespace ncnn { - -namespace LayerShaderType { -enum LayerShaderType -{ -#include "layer_shader_type_enum.h" -}; -} // namespace LayerShaderType - -} // namespace ncnn - -#endif // NCNN_LAYER_SHADER_TYPE_H diff --git a/ncnn/layer_shader_type_enum.h b/ncnn/layer_shader_type_enum.h deleted file mode 100644 index aac8803b..00000000 --- a/ncnn/layer_shader_type_enum.h +++ /dev/null @@ -1,5 +0,0 @@ -// Layer Shader Enum header -// -// This file is auto-generated by cmake, don't edit it. - - diff --git a/ncnn/layer_type.h b/ncnn/layer_type.h deleted file mode 100644 index 511c714b..00000000 --- a/ncnn/layer_type.h +++ /dev/null @@ -1,30 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_LAYER_TYPE_H -#define NCNN_LAYER_TYPE_H - -namespace ncnn { - -namespace LayerType { -enum LayerType -{ -#include "layer_type_enum.h" - CustomBit = (1 << 8), -}; -} // namespace LayerType - -} // namespace ncnn - -#endif // NCNN_LAYER_TYPE_H diff --git a/ncnn/layer_type_enum.h b/ncnn/layer_type_enum.h deleted file mode 100644 index 70c3e0d2..00000000 --- a/ncnn/layer_type_enum.h +++ /dev/null @@ -1,88 +0,0 @@ -// Layer Type Enum header -// -// This file is auto-generated by cmake, don't edit it. - -AbsVal = 0, -ArgMax = 1, -BatchNorm = 2, -Bias = 3, -BNLL = 4, -Concat = 5, -Convolution = 6, -Crop = 7, -Deconvolution = 8, -Dropout = 9, -Eltwise = 10, -ELU = 11, -Embed = 12, -Exp = 13, -Flatten = 14, -InnerProduct = 15, -Input = 16, -Log = 17, -LRN = 18, -MemoryData = 19, -MVN = 20, -Pooling = 21, -Power = 22, -PReLU = 23, -Proposal = 24, -Reduction = 25, -ReLU = 26, -Reshape = 27, -ROIPooling = 28, -Scale = 29, -Sigmoid = 30, -Slice = 31, -Softmax = 32, -Split = 33, -SPP = 34, -TanH = 35, -Threshold = 36, -Tile = 37, -RNN = 38, -LSTM = 39, -BinaryOp = 40, -UnaryOp = 41, -ConvolutionDepthWise = 42, -Padding = 43, -Squeeze = 44, -ExpandDims = 45, -Normalize = 46, -Permute = 47, -PriorBox = 48, -DetectionOutput = 49, -Interp = 50, -DeconvolutionDepthWise = 51, -ShuffleChannel = 52, -InstanceNorm = 53, -Clip = 54, -Reorg = 55, -YoloDetectionOutput = 56, -Quantize = 57, -Dequantize = 58, -Yolov3DetectionOutput = 59, -PSROIPooling = 60, -ROIAlign = 61, -Packing = 62, -Requantize = 63, -Cast = 64, -HardSigmoid = 65, -SELU = 66, -HardSwish = 67, -Noop = 68, -PixelShuffle = 69, -DeepCopy = 70, -Mish = 71, -StatisticsPooling = 72, -Swish = 73, -Gemm = 74, -GroupNorm = 75, -LayerNorm = 76, -Softplus = 77, -GRU = 78, -MultiHeadAttention = 79, -GELU = 80, -Convolution1D = 81, -Pooling1D = 82, - diff --git a/ncnn/mat.h b/ncnn/mat.h deleted file mode 100644 index 93443db1..00000000 --- a/ncnn/mat.h +++ /dev/null @@ -1,1694 +0,0 @@ -// Tencent is pleased to support 
the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_MAT_H -#define NCNN_MAT_H - -#include -#include -#if __ARM_NEON -#include -#endif -#if __AVX__ -#include -#endif -#if __mips_msa -#include -#endif -#if __riscv_vector -#ifdef RVV_SPEC_0_7 -#include "layer/riscv/riscv_v_071_fix.h" -#else -#include -#endif -#include "cpu.h" // cpu_riscv_vlenb() -#endif - -#include "allocator.h" -#include "option.h" -#include "platform.h" - -#if NCNN_VULKAN -#include -#endif // NCNN_VULKAN - -#if NCNN_PIXEL -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 9 -#include -#include -#endif // __ANDROID_API__ >= 9 -#endif // NCNN_PLATFORM_API -#endif // NCNN_PIXEL - -namespace ncnn { - -#if NCNN_VULKAN -class VkMat; -class VkImageMat; -#endif // NCNN_VULKAN - -// the three dimension matrix -class NCNN_EXPORT Mat -{ -public: - // empty - Mat(); - // vec - Mat(int w, size_t elemsize = 4u, Allocator* allocator = 0); - // image - Mat(int w, int h, size_t elemsize = 4u, Allocator* allocator = 0); - // dim - Mat(int w, int h, int c, size_t elemsize = 4u, Allocator* allocator = 0); - // packed vec - Mat(int w, size_t elemsize, int elempack, Allocator* allocator = 0); - // packed image - Mat(int w, int h, size_t elemsize, int elempack, Allocator* allocator = 0); - // packed dim - Mat(int w, int h, int c, size_t elemsize, int elempack, Allocator* allocator = 
0); - // copy - Mat(const Mat& m); - // external vec - Mat(int w, void* data, size_t elemsize = 4u, Allocator* allocator = 0); - // external image - Mat(int w, int h, void* data, size_t elemsize = 4u, Allocator* allocator = 0); - // external dim - Mat(int w, int h, int c, void* data, size_t elemsize = 4u, Allocator* allocator = 0); - // external packed vec - Mat(int w, void* data, size_t elemsize, int elempack, Allocator* allocator = 0); - // external packed image - Mat(int w, int h, void* data, size_t elemsize, int elempack, Allocator* allocator = 0); - // external packed dim - Mat(int w, int h, int c, void* data, size_t elemsize, int elempack, Allocator* allocator = 0); - // release - ~Mat(); - // assign - Mat& operator=(const Mat& m); - // set all - void fill(float v); - void fill(int v); -#if __ARM_NEON - void fill(float32x4_t _v); - void fill(uint16x4_t _v); - void fill(int32x4_t _v); - void fill(int32x4_t _v0, int32x4_t _v1); -#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - void fill(float16x4_t _v); - void fill(float16x8_t _v); -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -#endif // __ARM_NEON -#if __AVX__ - void fill(__m256 _v); - void fill(__m128i _v); -#endif // __AVX__ -#if __mips_msa - void fill(v4f32 _v); -#endif // __mips_msa -#if __riscv_vector - void fill(vfloat32m1_t _v); - void fill(vuint16m1_t _v); - void fill(vint8m1_t _v); -#if __riscv_zfh - void fill(vfloat16m1_t _v); -#endif // __riscv_zfh -#endif // __riscv_vector - template - void fill(T v); - // deep copy - Mat clone(Allocator* allocator = 0) const; - // deep copy from other mat, inplace - void clone_from(const ncnn::Mat& mat, Allocator* allocator = 0); - // reshape vec - Mat reshape(int w, Allocator* allocator = 0) const; - // reshape image - Mat reshape(int w, int h, Allocator* allocator = 0) const; - // reshape dim - Mat reshape(int w, int h, int c, Allocator* allocator = 0) const; - // allocate vec - void create(int w, size_t elemsize = 4u, Allocator* allocator = 0); - // allocate image 
- void create(int w, int h, size_t elemsize = 4u, Allocator* allocator = 0); - // allocate dim - void create(int w, int h, int c, size_t elemsize = 4u, Allocator* allocator = 0); - // allocate packed vec - void create(int w, size_t elemsize, int elempack, Allocator* allocator = 0); - // allocate packed image - void create(int w, int h, size_t elemsize, int elempack, Allocator* allocator = 0); - // allocate packed dim - void create(int w, int h, int c, size_t elemsize, int elempack, Allocator* allocator = 0); - // allocate like - void create_like(const Mat& m, Allocator* allocator = 0); -#if NCNN_VULKAN - // allocate like - void create_like(const VkMat& m, Allocator* allocator = 0); - // allocate like - void create_like(const VkImageMat& im, Allocator* allocator = 0); -#endif // NCNN_VULKAN - // refcount++ - void addref(); - // refcount-- - void release(); - - bool empty() const; - size_t total() const; - - // bits per element - int elembits() const; - - // shape only - Mat shape() const; - - // data reference - Mat channel(int c); - const Mat channel(int c) const; - float* row(int y); - const float* row(int y) const; - template - T* row(int y); - template - const T* row(int y) const; - - // range reference - Mat channel_range(int c, int channels); - const Mat channel_range(int c, int channels) const; - Mat row_range(int y, int rows); - const Mat row_range(int y, int rows) const; - Mat range(int x, int n); - const Mat range(int x, int n) const; - - // access raw data - template - operator T*(); - template - operator const T*() const; - - // convenient access float vec element - float& operator[](size_t i); - const float& operator[](size_t i) const; - -#if NCNN_PIXEL - enum PixelType - { - PIXEL_CONVERT_SHIFT = 16, - PIXEL_FORMAT_MASK = 0x0000ffff, - PIXEL_CONVERT_MASK = 0xffff0000, - - PIXEL_RGB = 1, - PIXEL_BGR = 2, - PIXEL_GRAY = 3, - PIXEL_RGBA = 4, - PIXEL_BGRA = 5, - - PIXEL_RGB2BGR = PIXEL_RGB | (PIXEL_BGR << PIXEL_CONVERT_SHIFT), - PIXEL_RGB2GRAY = PIXEL_RGB 
| (PIXEL_GRAY << PIXEL_CONVERT_SHIFT), - PIXEL_RGB2RGBA = PIXEL_RGB | (PIXEL_RGBA << PIXEL_CONVERT_SHIFT), - PIXEL_RGB2BGRA = PIXEL_RGB | (PIXEL_BGRA << PIXEL_CONVERT_SHIFT), - - PIXEL_BGR2RGB = PIXEL_BGR | (PIXEL_RGB << PIXEL_CONVERT_SHIFT), - PIXEL_BGR2GRAY = PIXEL_BGR | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT), - PIXEL_BGR2RGBA = PIXEL_BGR | (PIXEL_RGBA << PIXEL_CONVERT_SHIFT), - PIXEL_BGR2BGRA = PIXEL_BGR | (PIXEL_BGRA << PIXEL_CONVERT_SHIFT), - - PIXEL_GRAY2RGB = PIXEL_GRAY | (PIXEL_RGB << PIXEL_CONVERT_SHIFT), - PIXEL_GRAY2BGR = PIXEL_GRAY | (PIXEL_BGR << PIXEL_CONVERT_SHIFT), - PIXEL_GRAY2RGBA = PIXEL_GRAY | (PIXEL_RGBA << PIXEL_CONVERT_SHIFT), - PIXEL_GRAY2BGRA = PIXEL_GRAY | (PIXEL_BGRA << PIXEL_CONVERT_SHIFT), - - PIXEL_RGBA2RGB = PIXEL_RGBA | (PIXEL_RGB << PIXEL_CONVERT_SHIFT), - PIXEL_RGBA2BGR = PIXEL_RGBA | (PIXEL_BGR << PIXEL_CONVERT_SHIFT), - PIXEL_RGBA2GRAY = PIXEL_RGBA | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT), - PIXEL_RGBA2BGRA = PIXEL_RGBA | (PIXEL_BGRA << PIXEL_CONVERT_SHIFT), - - PIXEL_BGRA2RGB = PIXEL_BGRA | (PIXEL_RGB << PIXEL_CONVERT_SHIFT), - PIXEL_BGRA2BGR = PIXEL_BGRA | (PIXEL_BGR << PIXEL_CONVERT_SHIFT), - PIXEL_BGRA2GRAY = PIXEL_BGRA | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT), - PIXEL_BGRA2RGBA = PIXEL_BGRA | (PIXEL_RGBA << PIXEL_CONVERT_SHIFT), - }; - // convenient construct from pixel data - static Mat from_pixels(const unsigned char* pixels, int type, int w, int h, Allocator* allocator = 0); - // convenient construct from pixel data with stride(bytes-per-row) parameter - static Mat from_pixels(const unsigned char* pixels, int type, int w, int h, int stride, Allocator* allocator = 0); - // convenient construct from pixel data and resize to specific size - static Mat from_pixels_resize(const unsigned char* pixels, int type, int w, int h, int target_width, int target_height, Allocator* allocator = 0); - // convenient construct from pixel data and resize to specific size with stride(bytes-per-row) parameter - static Mat from_pixels_resize(const 
unsigned char* pixels, int type, int w, int h, int stride, int target_width, int target_height, Allocator* allocator = 0); - // convenient construct from pixel data roi - static Mat from_pixels_roi(const unsigned char* pixels, int type, int w, int h, int roix, int roiy, int roiw, int roih, Allocator* allocator = 0); - // convenient construct from pixel data roi with stride(bytes-per-row) parameter - static Mat from_pixels_roi(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, Allocator* allocator = 0); - // convenient construct from pixel data roi and resize to specific size - static Mat from_pixels_roi_resize(const unsigned char* pixels, int type, int w, int h, int roix, int roiy, int roiw, int roih, int target_width, int target_height, Allocator* allocator = 0); - // convenient construct from pixel data roi and resize to specific size with stride(bytes-per-row) parameter - static Mat from_pixels_roi_resize(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, int target_width, int target_height, Allocator* allocator = 0); - - // convenient export to pixel data - void to_pixels(unsigned char* pixels, int type) const; - // convenient export to pixel data with stride(bytes-per-row) parameter - void to_pixels(unsigned char* pixels, int type, int stride) const; - // convenient export to pixel data and resize to specific size - void to_pixels_resize(unsigned char* pixels, int type, int target_width, int target_height) const; - // convenient export to pixel data and resize to specific size with stride(bytes-per-row) parameter - void to_pixels_resize(unsigned char* pixels, int type, int target_width, int target_height, int target_stride) const; - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 9 - // convenient construct from android Bitmap - static Mat from_android_bitmap(JNIEnv* env, jobject bitmap, int type_to, Allocator* allocator = 0); - // convenient construct 
from android Bitmap and resize to specific size - static Mat from_android_bitmap_resize(JNIEnv* env, jobject bitmap, int type_to, int target_width, int target_height, Allocator* allocator = 0); - // convenient construct from android Bitmap roi - static Mat from_android_bitmap_roi(JNIEnv* env, jobject bitmap, int type_to, int roix, int roiy, int roiw, int roih, Allocator* allocator = 0); - // convenient construct from android Bitmap roi and resize to specific size - static Mat from_android_bitmap_roi_resize(JNIEnv* env, jobject bitmap, int type_to, int roix, int roiy, int roiw, int roih, int target_width, int target_height, Allocator* allocator = 0); - // convenient export to android Bitmap and resize to the android Bitmap size - void to_android_bitmap(JNIEnv* env, jobject bitmap, int type_from) const; -#endif // __ANDROID_API__ >= 9 -#endif // NCNN_PLATFORM_API -#endif // NCNN_PIXEL - - // substract channel-wise mean values, then multiply by normalize values, pass 0 to skip - void substract_mean_normalize(const float* mean_vals, const float* norm_vals); - - // convenient construct from half precision floating point data - static Mat from_float16(const unsigned short* data, int size); - - // pointer to the data - void* data; - - // pointer to the reference counter - // when points to user-allocated data, the pointer is NULL - int* refcount; - - // element size in bytes - // 4 = float32/int32 - // 2 = float16 - // 1 = int8/uint8 - // 0 = empty - size_t elemsize; - - // packed count inside element - // c/1-h-w-1 h/1-w-1 w/1-1 scalar - // c/4-h-w-4 h/4-w-4 w/4-4 sse/neon - // c/8-h-w-8 h/8-w-8 w/8-8 avx/fp16 - int elempack; - - // the allocator - Allocator* allocator; - - // the dimension rank - int dims; - - int w; - int h; - int c; - - size_t cstep; -}; - -#if NCNN_VULKAN - -// the three dimension matrix, vulkan version -class NCNN_EXPORT VkMat -{ -public: - // empty - VkMat(); - // vec - VkMat(int w, size_t elemsize, VkAllocator* allocator); - // image - VkMat(int 
w, int h, size_t elemsize, VkAllocator* allocator); - // dim - VkMat(int w, int h, int c, size_t elemsize, VkAllocator* allocator); - // packed vec - VkMat(int w, size_t elemsize, int elempack, VkAllocator* allocator); - // packed image - VkMat(int w, int h, size_t elemsize, int elempack, VkAllocator* allocator); - // packed dim - VkMat(int w, int h, int c, size_t elemsize, int elempack, VkAllocator* allocator); - // copy - VkMat(const VkMat& m); - // external vec - VkMat(int w, VkBufferMemory* data, size_t elemsize, VkAllocator* allocator); - // external image - VkMat(int w, int h, VkBufferMemory* data, size_t elemsize, VkAllocator* allocator); - // external dim - VkMat(int w, int h, int c, VkBufferMemory* data, size_t elemsize, VkAllocator* allocator); - // external packed vec - VkMat(int w, VkBufferMemory* data, size_t elemsize, int elempack, VkAllocator* allocator); - // external packed image - VkMat(int w, int h, VkBufferMemory* data, size_t elemsize, int elempack, VkAllocator* allocator); - // external packed dim - VkMat(int w, int h, int c, VkBufferMemory* data, size_t elemsize, int elempack, VkAllocator* allocator); - // release - ~VkMat(); - // assign - VkMat& operator=(const VkMat& m); - // allocate vec - void create(int w, size_t elemsize, VkAllocator* allocator); - // allocate image - void create(int w, int h, size_t elemsize, VkAllocator* allocator); - // allocate dim - void create(int w, int h, int c, size_t elemsize, VkAllocator* allocator); - // allocate packed vec - void create(int w, size_t elemsize, int elempack, VkAllocator* allocator); - // allocate packed image - void create(int w, int h, size_t elemsize, int elempack, VkAllocator* allocator); - // allocate packed dim - void create(int w, int h, int c, size_t elemsize, int elempack, VkAllocator* allocator); - // allocate like - void create_like(const Mat& m, VkAllocator* allocator); - // allocate like - void create_like(const VkMat& m, VkAllocator* allocator); - // allocate like - void 
create_like(const VkImageMat& im, VkAllocator* allocator); - - // mapped - Mat mapped() const; - void* mapped_ptr() const; - - // refcount++ - void addref(); - // refcount-- - void release(); - - bool empty() const; - size_t total() const; - - // bits per element - int elembits() const; - - // shape only - Mat shape() const; - - // low-level reference - VkBuffer buffer() const; - size_t buffer_offset() const; - size_t buffer_capacity() const; - - // device buffer - VkBufferMemory* data; - - // pointer to the reference counter - // when points to user-allocated data, the pointer is NULL - int* refcount; - - // element size in bytes - // 4 = float32/int32 - // 2 = float16 - // 1 = int8/uint8 - // 0 = empty - size_t elemsize; - - // packed count inside element - // c/1-h-w-1 h/1-w-1 w/1-1 scalar - // c/4-h-w-4 h/4-w-4 w/4-4 sse/neon - // c/8-h-w-8 h/8-w-8 w/8-8 avx/fp16 - int elempack; - - // the allocator - VkAllocator* allocator; - - // the dimension rank - int dims; - - int w; - int h; - int c; - - size_t cstep; -}; - -class NCNN_EXPORT VkImageMat -{ -public: - // empty - VkImageMat(); - // vec - VkImageMat(int w, size_t elemsize, VkAllocator* allocator); - // image - VkImageMat(int w, int h, size_t elemsize, VkAllocator* allocator); - // dim - VkImageMat(int w, int h, int c, size_t elemsize, VkAllocator* allocator); - // packed vec - VkImageMat(int w, size_t elemsize, int elempack, VkAllocator* allocator); - // packed image - VkImageMat(int w, int h, size_t elemsize, int elempack, VkAllocator* allocator); - // packed dim - VkImageMat(int w, int h, int c, size_t elemsize, int elempack, VkAllocator* allocator); - // copy - VkImageMat(const VkImageMat& m); - // external vec - VkImageMat(int w, VkImageMemory* data, size_t elemsize, VkAllocator* allocator); - // external image - VkImageMat(int w, int h, VkImageMemory* data, size_t elemsize, VkAllocator* allocator); - // external dim - VkImageMat(int w, int h, int c, VkImageMemory* data, size_t elemsize, VkAllocator* 
allocator); - // external packed vec - VkImageMat(int w, VkImageMemory* data, size_t elemsize, int elempack, VkAllocator* allocator); - // external packed image - VkImageMat(int w, int h, VkImageMemory* data, size_t elemsize, int elempack, VkAllocator* allocator); - // external packed dim - VkImageMat(int w, int h, int c, VkImageMemory* data, size_t elemsize, int elempack, VkAllocator* allocator); - // release - ~VkImageMat(); - // assign - VkImageMat& operator=(const VkImageMat& m); - // allocate vec - void create(int w, size_t elemsize, VkAllocator* allocator); - // allocate image - void create(int w, int h, size_t elemsize, VkAllocator* allocator); - // allocate dim - void create(int w, int h, int c, size_t elemsize, VkAllocator* allocator); - // allocate packed vec - void create(int w, size_t elemsize, int elempack, VkAllocator* allocator); - // allocate packed image - void create(int w, int h, size_t elemsize, int elempack, VkAllocator* allocator); - // allocate packed dim - void create(int w, int h, int c, size_t elemsize, int elempack, VkAllocator* allocator); - // allocate like - void create_like(const Mat& m, VkAllocator* allocator); - // allocate like - void create_like(const VkMat& m, VkAllocator* allocator); - // allocate like - void create_like(const VkImageMat& im, VkAllocator* allocator); - - // mapped - Mat mapped() const; - void* mapped_ptr() const; - - // refcount++ - void addref(); - // refcount-- - void release(); - - bool empty() const; - size_t total() const; - - // bits per element - int elembits() const; - - // shape only - Mat shape() const; - - // low-level reference - VkImage image() const; - VkImageView imageview() const; - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 26 - // convenient construct from android hardware buffer - static VkImageMat from_android_hardware_buffer(VkAndroidHardwareBufferImageAllocator* allocator); -#endif // __ANDROID_API__ >= 26 -#endif // NCNN_PLATFORM_API - - // device image - VkImageMemory* data; - - // 
pointer to the reference counter - // when points to user-allocated data, the pointer is NULL - int* refcount; - - // element size in bytes - // 4 = float32/int32 - // 2 = float16 - // 1 = int8/uint8 - // 0 = empty - size_t elemsize; - - // packed count inside element - // c/1-h-w-1 h/1-w-1 w/1-1 scalar - // c/4-h-w-4 h/4-w-4 w/4-4 sse/neon - // c/8-h-w-8 h/8-w-8 w/8-8 avx/fp16 - int elempack; - - // the allocator - VkAllocator* allocator; - - // the dimension rank - int dims; - - int w; - int h; - int c; -}; - -// type for vulkan specialization constant and push constant -union vk_specialization_type -{ - int i; - float f; - uint32_t u32; -}; -union vk_constant_type -{ - int i; - float f; -}; -#endif // NCNN_VULKAN - -// misc function -#if NCNN_PIXEL -// convert yuv420sp(nv21) to rgb, the fast approximate version -NCNN_EXPORT void yuv420sp2rgb(const unsigned char* yuv420sp, int w, int h, unsigned char* rgb); -// convert yuv420sp(nv12) to rgb, the fast approximate version -NCNN_EXPORT void yuv420sp2rgb_nv12(const unsigned char* yuv420sp, int w, int h, unsigned char* rgb); -// convert yuv420sp(nv21) to rgb with half resize, the faster approximate version -NCNN_EXPORT void yuv420sp2rgb_half(const unsigned char* yuv420sp, int w, int h, unsigned char* rgb); -// image pixel bilinear resize -NCNN_EXPORT void resize_bilinear_c1(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h); -NCNN_EXPORT void resize_bilinear_c2(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h); -NCNN_EXPORT void resize_bilinear_c3(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h); -NCNN_EXPORT void resize_bilinear_c4(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h); -// image pixel bilinear resize with stride(bytes-per-row) parameter -NCNN_EXPORT void resize_bilinear_c1(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride); 
-NCNN_EXPORT void resize_bilinear_c2(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride); -NCNN_EXPORT void resize_bilinear_c3(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride); -NCNN_EXPORT void resize_bilinear_c4(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride); -// image pixel bilinear resize, convenient wrapper for yuv420sp(nv21/nv12) -NCNN_EXPORT void resize_bilinear_yuv420sp(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h); -#endif // NCNN_PIXEL -#if NCNN_PIXEL_ROTATE -// type is the from type, 6 means rotating from 6 to 1 -// -// 1 2 3 4 5 6 7 8 -// -// 888888 888888 88 88 8888888888 88 88 8888888888 -// 88 88 88 88 88 88 88 88 88 88 88 88 -// 8888 8888 8888 8888 88 8888888888 8888888888 88 -// 88 88 88 88 -// 88 88 888888 888888 -// -// ref http://sylvana.net/jpegcrop/exif_orientation.html -// image pixel kanna rotate -NCNN_EXPORT void kanna_rotate_c1(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, int type); -NCNN_EXPORT void kanna_rotate_c2(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, int type); -NCNN_EXPORT void kanna_rotate_c3(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, int type); -NCNN_EXPORT void kanna_rotate_c4(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, int type); -// image pixel kanna rotate with stride(bytes-per-row) parameter -NCNN_EXPORT void kanna_rotate_c1(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride, int type); -NCNN_EXPORT void kanna_rotate_c2(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride, int type); -NCNN_EXPORT void kanna_rotate_c3(const unsigned char* src, int srcw, int srch, int 
srcstride, unsigned char* dst, int w, int h, int stride, int type); -NCNN_EXPORT void kanna_rotate_c4(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride, int type); -// image pixel kanna rotate, convenient wrapper for yuv420sp(nv21/nv12) -NCNN_EXPORT void kanna_rotate_yuv420sp(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, int type); -#endif // NCNN_PIXEL_ROTATE -#if NCNN_PIXEL_AFFINE -// resolve affine transform matrix from rotation angle, scale factor and x y offset -NCNN_EXPORT void get_rotation_matrix(float angle, float scale, float dx, float dy, float* tm); -// resolve affine transform matrix from two set of points, num_point must be >= 2 -NCNN_EXPORT void get_affine_transform(const float* points_from, const float* points_to, int num_point, float* tm); -// resolve the inversion affine transform matrix -NCNN_EXPORT void invert_affine_transform(const float* tm, float* tm_inv); -// image pixel bilinear warpaffine inverse transform, set -233 for transparent border color, the color RGBA is little-endian encoded -NCNN_EXPORT void warpaffine_bilinear_c1(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, const float* tm, int type = 0, unsigned int v = 0); -NCNN_EXPORT void warpaffine_bilinear_c2(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, const float* tm, int type = 0, unsigned int v = 0); -NCNN_EXPORT void warpaffine_bilinear_c3(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, const float* tm, int type = 0, unsigned int v = 0); -NCNN_EXPORT void warpaffine_bilinear_c4(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, const float* tm, int type = 0, unsigned int v = 0); -// image pixel bilinear warpaffine inverse transform with stride(bytes-per-row) parameter, set -233 for transparent border color, the color RGBA is little-endian encoded -NCNN_EXPORT void 
warpaffine_bilinear_c1(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride, const float* tm, int type = 0, unsigned int v = 0); -NCNN_EXPORT void warpaffine_bilinear_c2(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride, const float* tm, int type = 0, unsigned int v = 0); -NCNN_EXPORT void warpaffine_bilinear_c3(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride, const float* tm, int type = 0, unsigned int v = 0); -NCNN_EXPORT void warpaffine_bilinear_c4(const unsigned char* src, int srcw, int srch, int srcstride, unsigned char* dst, int w, int h, int stride, const float* tm, int type = 0, unsigned int v = 0); -// image pixel bilinear warpaffine, convenient wrapper for yuv420sp(nv21/nv12), set -233 for transparent border color, the color YUV_ is little-endian encoded -NCNN_EXPORT void warpaffine_bilinear_yuv420sp(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h, const float* tm, int type = 0, unsigned int v = 0); -#endif // NCNN_PIXEL_AFFINE -#if NCNN_PIXEL_DRAWING -// draw rectangle, set thickness -1 for filled rectangle, the color RGBA is little-endian encoded -NCNN_EXPORT void draw_rectangle_c1(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -NCNN_EXPORT void draw_rectangle_c2(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -NCNN_EXPORT void draw_rectangle_c3(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -NCNN_EXPORT void draw_rectangle_c4(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -// draw rectangle with stride(bytes-per-row) parameter, set thickness -1 for filled rectangle, the color RGBA is little-endian encoded -NCNN_EXPORT void 
draw_rectangle_c1(unsigned char* pixels, int w, int h, int stride, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -NCNN_EXPORT void draw_rectangle_c2(unsigned char* pixels, int w, int h, int stride, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -NCNN_EXPORT void draw_rectangle_c3(unsigned char* pixels, int w, int h, int stride, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -NCNN_EXPORT void draw_rectangle_c4(unsigned char* pixels, int w, int h, int stride, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -// draw rectangle, convenient wrapper for yuv420sp(nv21/nv12), set thickness -1 for filled rectangle, the color YUV_ is little-endian encoded -NCNN_EXPORT void draw_rectangle_yuv420sp(unsigned char* yuv420sp, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); -// draw circle, set thickness -1 for filled circle, the color RGBA is little-endian encoded -NCNN_EXPORT void draw_circle_c1(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); -NCNN_EXPORT void draw_circle_c2(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); -NCNN_EXPORT void draw_circle_c3(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); -NCNN_EXPORT void draw_circle_c4(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); -// draw circle with stride(bytes-per-row) parameter, set thickness -1 for filled circle, the color RGBA is little-endian encoded -NCNN_EXPORT void draw_circle_c1(unsigned char* pixels, int w, int h, int stride, int cx, int cy, int radius, unsigned int color, int thickness); -NCNN_EXPORT void draw_circle_c2(unsigned char* pixels, int w, int h, int stride, int cx, int cy, int radius, unsigned int color, int thickness); -NCNN_EXPORT void draw_circle_c3(unsigned char* pixels, int w, 
int h, int stride, int cx, int cy, int radius, unsigned int color, int thickness); -NCNN_EXPORT void draw_circle_c4(unsigned char* pixels, int w, int h, int stride, int cx, int cy, int radius, unsigned int color, int thickness); -// draw circle, convenient wrapper for yuv420sp(nv21/nv12), set thickness -1 for filled circle, the color YUV_ is little-endian encoded -NCNN_EXPORT void draw_circle_yuv420sp(unsigned char* yuv420sp, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); -// draw line, the color RGBA is little-endian encoded -NCNN_EXPORT void draw_line_c1(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness); -NCNN_EXPORT void draw_line_c2(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness); -NCNN_EXPORT void draw_line_c3(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness); -NCNN_EXPORT void draw_line_c4(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness); -// draw line with stride(bytes-per-row) parameter, the color RGBA is little-endian encoded -NCNN_EXPORT void draw_line_c1(unsigned char* pixels, int w, int h, int stride, int x0, int y0, int x1, int y1, unsigned int color, int thickness); -NCNN_EXPORT void draw_line_c2(unsigned char* pixels, int w, int h, int stride, int x0, int y0, int x1, int y1, unsigned int color, int thickness); -NCNN_EXPORT void draw_line_c3(unsigned char* pixels, int w, int h, int stride, int x0, int y0, int x1, int y1, unsigned int color, int thickness); -NCNN_EXPORT void draw_line_c4(unsigned char* pixels, int w, int h, int stride, int x0, int y0, int x1, int y1, unsigned int color, int thickness); -// draw line, convenient wrapper for yuv420sp(nv21/nv12), the color YUV_ is little-endian encoded -NCNN_EXPORT void draw_line_yuv420sp(unsigned char* yuv420sp, int w, int h, int x0, int y0, int x1, int y1, 
unsigned int color, int thickness); -// resolve text bounding box size -NCNN_EXPORT void get_text_drawing_size(const char* text, int fontpixelsize, int* w, int* h); -// draw ascii printables and newline, the color RGBA is little-endian encoded -NCNN_EXPORT void draw_text_c1(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); -NCNN_EXPORT void draw_text_c2(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); -NCNN_EXPORT void draw_text_c3(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); -NCNN_EXPORT void draw_text_c4(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); -// draw ascii printables and newline with stride(bytes-per-row) parameter, the color RGBA is little-endian encoded -NCNN_EXPORT void draw_text_c1(unsigned char* pixels, int w, int h, int stride, const char* text, int x, int y, int fontpixelsize, unsigned int color); -NCNN_EXPORT void draw_text_c2(unsigned char* pixels, int w, int h, int stride, const char* text, int x, int y, int fontpixelsize, unsigned int color); -NCNN_EXPORT void draw_text_c3(unsigned char* pixels, int w, int h, int stride, const char* text, int x, int y, int fontpixelsize, unsigned int color); -NCNN_EXPORT void draw_text_c4(unsigned char* pixels, int w, int h, int stride, const char* text, int x, int y, int fontpixelsize, unsigned int color); -// draw ascii printables and newline, convenient wrapper for yuv420sp(nv21/nv12), the color YUV_ is little-endian encoded -NCNN_EXPORT void draw_text_yuv420sp(unsigned char* yuv420sp, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); -#endif // NCNN_PIXEL_DRAWING - -// type conversion -// convert float to half precision floating point -NCNN_EXPORT unsigned short float32_to_float16(float value); -// convert half precision floating 
point to float -NCNN_EXPORT float float16_to_float32(unsigned short value); -// convert float to brain half -NCNN_EXPORT inline unsigned short float32_to_bfloat16(float value) -{ - // 16 : 16 - union - { - unsigned int u; - float f; - } tmp; - tmp.f = value; - return tmp.u >> 16; -} -// convert brain half to float -NCNN_EXPORT inline float bfloat16_to_float32(unsigned short value) -{ - // 16 : 16 - union - { - unsigned int u; - float f; - } tmp; - tmp.u = value << 16; - return tmp.f; -} -#if __ARM_NEON -NCNN_EXPORT inline uint16x4_t vcvt_bf16_f32(float32x4_t _v) -{ - return vshrn_n_u32(vreinterpretq_u32_f32(_v), 16); -} -NCNN_EXPORT inline float32x4_t vcvt_f32_bf16(uint16x4_t _v) -{ - return vreinterpretq_f32_u32(vshll_n_u16(_v, 16)); -} -#endif // __ARM_NEON - -// mat process -enum BorderType -{ - BORDER_CONSTANT = 0, - BORDER_REPLICATE = 1, - BORDER_TRANSPARENT = -233, -}; -NCNN_EXPORT void copy_make_border(const Mat& src, Mat& dst, int top, int bottom, int left, int right, int type, float v, const Option& opt = Option()); -NCNN_EXPORT void copy_cut_border(const Mat& src, Mat& dst, int top, int bottom, int left, int right, const Option& opt = Option()); -NCNN_EXPORT void resize_nearest(const Mat& src, Mat& dst, int w, int h, const Option& opt = Option()); -NCNN_EXPORT void resize_bilinear(const Mat& src, Mat& dst, int w, int h, const Option& opt = Option()); -NCNN_EXPORT void resize_bicubic(const Mat& src, Mat& dst, int w, int h, const Option& opt = Option()); -NCNN_EXPORT void convert_packing(const Mat& src, Mat& dst, int elempack, const Option& opt = Option()); -NCNN_EXPORT void flatten(const Mat& src, Mat& dst, const Option& opt = Option()); -NCNN_EXPORT void cast_float32_to_float16(const Mat& src, Mat& dst, const Option& opt = Option()); -NCNN_EXPORT void cast_float16_to_float32(const Mat& src, Mat& dst, const Option& opt = Option()); -NCNN_EXPORT void cast_int8_to_float32(const Mat& src, Mat& dst, const Option& opt = Option()); -NCNN_EXPORT void 
cast_float32_to_bfloat16(const Mat& src, Mat& dst, const Option& opt = Option()); -NCNN_EXPORT void cast_bfloat16_to_float32(const Mat& src, Mat& dst, const Option& opt = Option()); -NCNN_EXPORT void quantize_to_int8(const Mat& src, Mat& dst, const Mat& scale_data, const Option& opt = Option()); -NCNN_EXPORT void dequantize_from_int32(const Mat& src, Mat& dst, const Mat& scale_data, const Mat& bias_data, const Option& opt = Option()); -NCNN_EXPORT void requantize_from_int32_to_int8(const Mat& src, Mat& dst, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, int activation_type, const Mat& activation_params, const Option& opt = Option()); - -inline Mat::Mat() - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ -} - -inline Mat::Mat(int _w, size_t _elemsize, Allocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _elemsize, _allocator); -} - -inline Mat::Mat(int _w, int _h, size_t _elemsize, Allocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _h, _elemsize, _allocator); -} - -inline Mat::Mat(int _w, int _h, int _c, size_t _elemsize, Allocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _h, _c, _elemsize, _allocator); -} - -inline Mat::Mat(int _w, size_t _elemsize, int _elempack, Allocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _elemsize, _elempack, _allocator); -} - -inline Mat::Mat(int _w, int _h, size_t _elemsize, int _elempack, Allocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _h, _elemsize, _elempack, _allocator); -} - -inline Mat::Mat(int _w, int _h, 
int _c, size_t _elemsize, int _elempack, Allocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _h, _c, _elemsize, _elempack, _allocator); -} - -inline Mat::Mat(const Mat& m) - : data(m.data), refcount(m.refcount), elemsize(m.elemsize), elempack(m.elempack), allocator(m.allocator), dims(m.dims), w(m.w), h(m.h), c(m.c), cstep(m.cstep) -{ - addref(); -} - -inline Mat::Mat(int _w, void* _data, size_t _elemsize, Allocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(1), w(_w), h(1), c(1) -{ - cstep = w; -} - -inline Mat::Mat(int _w, int _h, void* _data, size_t _elemsize, Allocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(2), w(_w), h(_h), c(1) -{ - cstep = (size_t)w * h; -} - -inline Mat::Mat(int _w, int _h, int _c, void* _data, size_t _elemsize, Allocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(3), w(_w), h(_h), c(_c) -{ - cstep = alignSize((size_t)w * h * elemsize, 16) / elemsize; -} - -inline Mat::Mat(int _w, void* _data, size_t _elemsize, int _elempack, Allocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(1), w(_w), h(1), c(1) -{ - cstep = w; -} - -inline Mat::Mat(int _w, int _h, void* _data, size_t _elemsize, int _elempack, Allocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(2), w(_w), h(_h), c(1) -{ - cstep = (size_t)w * h; -} - -inline Mat::Mat(int _w, int _h, int _c, void* _data, size_t _elemsize, int _elempack, Allocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(3), w(_w), h(_h), c(_c) -{ - cstep = alignSize((size_t)w * h * elemsize, 16) / elemsize; -} - -inline Mat::~Mat() -{ - 
release(); -} - -inline void Mat::fill(float _v) -{ - int size = (int)total(); - float* ptr = (float*)data; - -#if __ARM_NEON - int nn = size >> 2; - int remain = size - (nn << 2); -#else - int remain = size; -#endif // __ARM_NEON - -#if __ARM_NEON - float32x4_t _c = vdupq_n_f32(_v); -#if __aarch64__ - if (nn > 0) - { - asm volatile( - "0: \n" - "subs %w0, %w0, #1 \n" - "st1 {%4.4s}, [%1], #16 \n" - "bne 0b \n" - : "=r"(nn), // %0 - "=r"(ptr) // %1 - : "0"(nn), - "1"(ptr), - "w"(_c) // %4 - : "cc", "memory"); - } -#else - if (nn > 0) - { - asm volatile( - "0: \n" - "subs %0, #1 \n" - "vst1.f32 {%e4-%f4}, [%1 :128]!\n" - "bne 0b \n" - : "=r"(nn), // %0 - "=r"(ptr) // %1 - : "0"(nn), - "1"(ptr), - "w"(_c) // %4 - : "cc", "memory"); - } -#endif // __aarch64__ -#endif // __ARM_NEON - for (; remain > 0; remain--) - { - *ptr++ = _v; - } -} - -inline void Mat::fill(int _v) -{ - int size = (int)total(); - int* ptr = (int*)data; - -#if __ARM_NEON - int nn = size >> 2; - int remain = size - (nn << 2); -#else - int remain = size; -#endif // __ARM_NEON - -#if __ARM_NEON - int32x4_t _c = vdupq_n_s32(_v); -#if __aarch64__ - if (nn > 0) - { - asm volatile( - "0: \n" - "subs %w0, %w0, #1 \n" - "st1 {%4.4s}, [%1], #16 \n" - "bne 0b \n" - : "=r"(nn), // %0 - "=r"(ptr) // %1 - : "0"(nn), - "1"(ptr), - "w"(_c) // %4 - : "cc", "memory"); - } -#else - if (nn > 0) - { - asm volatile( - "0: \n" - "subs %0, #1 \n" - "vst1.s32 {%e4-%f4}, [%1 :128]!\n" - "bne 0b \n" - : "=r"(nn), // %0 - "=r"(ptr) // %1 - : "0"(nn), - "1"(ptr), - "w"(_c) // %4 - : "cc", "memory"); - } -#endif // __aarch64__ -#endif // __ARM_NEON - for (; remain > 0; remain--) - { - *ptr++ = _v; - } -} - -#if __ARM_NEON -inline void Mat::fill(float32x4_t _v) -{ - int size = (int)total(); - float* ptr = (float*)data; - for (int i = 0; i < size; i++) - { - vst1q_f32(ptr, _v); - ptr += 4; - } -} - -inline void Mat::fill(uint16x4_t _v) -{ - int size = (int)total(); - unsigned short* ptr = (unsigned short*)data; - for (int i = 0; 
i < size; i++) - { - vst1_u16(ptr, _v); - ptr += 4; - } -} - -inline void Mat::fill(int32x4_t _v) -{ - int size = (int)total(); - int* ptr = (int*)data; - for (int i = 0; i < size; i++) - { - vst1q_s32(ptr, _v); - ptr += 4; - } -} - -inline void Mat::fill(int32x4_t _v0, int32x4_t _v1) -{ - int size = (int)total(); - int* ptr = (int*)data; - for (int i = 0; i < size; i++) - { - vst1q_s32(ptr, _v0); - vst1q_s32(ptr + 4, _v1); - ptr += 8; - } -} -#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -inline void Mat::fill(float16x4_t _v) -{ - int size = (int)total(); - __fp16* ptr = (__fp16*)data; - for (int i = 0; i < size; i++) - { - vst1_f16(ptr, _v); - ptr += 4; - } -} - -inline void Mat::fill(float16x8_t _v) -{ - int size = (int)total(); - __fp16* ptr = (__fp16*)data; - for (int i = 0; i < size; i++) - { - vst1q_f16(ptr, _v); - ptr += 8; - } -} -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -#endif // __ARM_NEON -#if __AVX__ -inline void Mat::fill(__m256 _v) -{ - int size = (int)total(); - float* ptr = (float*)data; - for (int i = 0; i < size; i++) - { - _mm256_storeu_ps(ptr, _v); - ptr += 8; - } -} -inline void Mat::fill(__m128i _v) -{ - int size = (int)total(); - unsigned short* ptr = (unsigned short*)data; - for (int i = 0; i < size; i++) - { - _mm_store_si128((__m128i*)ptr, _v); - ptr += 8; - } -} -#endif // __AVX__ - -#if __mips_msa -inline void Mat::fill(v4f32 _v) -{ - int size = (int)total(); - float* ptr = (float*)data; - for (int i = 0; i < size; i++) - { - __msa_st_w((v4i32)_v, ptr, 0); - ptr += 4; - } -} -#endif // __mips_msa - -#if __riscv_vector -inline void Mat::fill(vfloat32m1_t _v) -{ - const int packn = cpu_riscv_vlenb() / 4; - const word_type vl = vsetvl_e32m1(packn); - - int size = (int)total(); - float* ptr = (float*)data; - for (int i = 0; i < size; i++) - { - vse32_v_f32m1(ptr, _v, vl); - ptr += packn; - } -} - -inline void Mat::fill(vuint16m1_t _v) -{ - const int packn = cpu_riscv_vlenb() / 2; - const word_type vl = vsetvl_e16m1(packn); - - int size = 
(int)total(); - unsigned short* ptr = (unsigned short*)data; - for (int i = 0; i < size; i++) - { - vse16_v_u16m1(ptr, _v, vl); - ptr += packn; - } -} - -inline void Mat::fill(vint8m1_t _v) -{ - const int packn = cpu_riscv_vlenb() / 1; - const word_type vl = vsetvl_e8m1(packn); - - int size = (int)total(); - signed char* ptr = (signed char*)data; - for (int i = 0; i < size; i++) - { - vse8_v_i8m1(ptr, _v, vl); - ptr += packn; - } -} -#if __riscv_zfh -inline void Mat::fill(vfloat16m1_t _v) -{ - const int packn = cpu_riscv_vlenb() / 2; - const word_type vl = vsetvl_e16m1(packn); - - int size = (int)total(); - __fp16* ptr = (__fp16*)data; - for (int i = 0; i < size; i++) - { - vse16_v_f16m1(ptr, _v, vl); - ptr += packn; - } -} -#endif // __riscv_zfh -#endif // __riscv_vector - -template -inline void Mat::fill(T _v) -{ - int size = (int)total(); - T* ptr = (T*)data; - for (int i = 0; i < size; i++) - { - ptr[i] = _v; - } -} - -inline Mat& Mat::operator=(const Mat& m) -{ - if (this == &m) - return *this; - - if (m.refcount) - NCNN_XADD(m.refcount, 1); - - release(); - - data = m.data; - refcount = m.refcount; - elemsize = m.elemsize; - elempack = m.elempack; - allocator = m.allocator; - - dims = m.dims; - w = m.w; - h = m.h; - c = m.c; - - cstep = m.cstep; - - return *this; -} - -inline void Mat::addref() -{ - if (refcount) - NCNN_XADD(refcount, 1); -} - -inline void Mat::release() -{ - if (refcount && NCNN_XADD(refcount, -1) == 1) - { - if (allocator) - allocator->fastFree(data); - else - fastFree(data); - } - - data = 0; - - elemsize = 0; - elempack = 0; - - dims = 0; - w = 0; - h = 0; - c = 0; - - cstep = 0; - - refcount = 0; -} - -inline bool Mat::empty() const -{ - return data == 0 || total() == 0; -} - -inline size_t Mat::total() const -{ - return cstep * c; -} - -inline int Mat::elembits() const -{ - return elempack ? 
static_cast(elemsize * 8) / elempack : 0; -} - -inline Mat Mat::shape() const -{ - if (dims == 1) - return Mat(w * elempack, (void*)0); - if (dims == 2) - return Mat(w, h * elempack, (void*)0); - if (dims == 3) - return Mat(w, h, c * elempack, (void*)0); - - return Mat(); -} - -inline Mat Mat::channel(int _c) -{ - return Mat(w, h, (unsigned char*)data + cstep * _c * elemsize, elemsize, elempack, allocator); -} - -inline const Mat Mat::channel(int _c) const -{ - return Mat(w, h, (unsigned char*)data + cstep * _c * elemsize, elemsize, elempack, allocator); -} - -inline float* Mat::row(int y) -{ - return (float*)((unsigned char*)data + (size_t)w * y * elemsize); -} - -inline const float* Mat::row(int y) const -{ - return (const float*)((unsigned char*)data + (size_t)w * y * elemsize); -} - -template -inline T* Mat::row(int y) -{ - return (T*)((unsigned char*)data + (size_t)w * y * elemsize); -} - -template -inline const T* Mat::row(int y) const -{ - return (const T*)((unsigned char*)data + (size_t)w * y * elemsize); -} - -inline Mat Mat::channel_range(int _c, int channels) -{ - return Mat(w, h, channels, (unsigned char*)data + cstep * _c * elemsize, elemsize, elempack, allocator); -} - -inline const Mat Mat::channel_range(int _c, int channels) const -{ - return Mat(w, h, channels, (unsigned char*)data + cstep * _c * elemsize, elemsize, elempack, allocator); -} - -inline Mat Mat::row_range(int y, int rows) -{ - return Mat(w, rows, (unsigned char*)data + (size_t)w * y * elemsize, elemsize, elempack, allocator); -} - -inline const Mat Mat::row_range(int y, int rows) const -{ - return Mat(w, rows, (unsigned char*)data + (size_t)w * y * elemsize, elemsize, elempack, allocator); -} - -inline Mat Mat::range(int x, int n) -{ - return Mat(n, (unsigned char*)data + x * elemsize, elemsize, elempack, allocator); -} - -inline const Mat Mat::range(int x, int n) const -{ - return Mat(n, (unsigned char*)data + x * elemsize, elemsize, elempack, allocator); -} - -template -inline 
Mat::operator T*() -{ - return (T*)data; -} - -template -inline Mat::operator const T*() const -{ - return (const T*)data; -} - -inline float& Mat::operator[](size_t i) -{ - return ((float*)data)[i]; -} - -inline const float& Mat::operator[](size_t i) const -{ - return ((const float*)data)[i]; -} - -#if NCNN_VULKAN - -inline VkMat::VkMat() - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ -} - -inline VkMat::VkMat(int _w, size_t _elemsize, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _elemsize, _allocator); -} - -inline VkMat::VkMat(int _w, int _h, size_t _elemsize, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _h, _elemsize, _allocator); -} - -inline VkMat::VkMat(int _w, int _h, int _c, size_t _elemsize, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _h, _c, _elemsize, _allocator); -} - -inline VkMat::VkMat(int _w, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _elemsize, _elempack, _allocator); -} - -inline VkMat::VkMat(int _w, int _h, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _h, _elemsize, _elempack, _allocator); -} - -inline VkMat::VkMat(int _w, int _h, int _c, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0) -{ - create(_w, _h, _c, _elemsize, _elempack, _allocator); -} - -inline VkMat::VkMat(const VkMat& m) - : data(m.data), refcount(m.refcount), 
elemsize(m.elemsize), elempack(m.elempack), allocator(m.allocator), dims(m.dims), w(m.w), h(m.h), c(m.c) -{ - addref(); - - cstep = m.cstep; -} - -inline VkMat::VkMat(int _w, VkBufferMemory* _data, size_t _elemsize, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(1), w(_w), h(1), c(1) -{ - cstep = w; -} - -inline VkMat::VkMat(int _w, int _h, VkBufferMemory* _data, size_t _elemsize, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(2), w(_w), h(_h), c(1) -{ - cstep = w * h; -} - -inline VkMat::VkMat(int _w, int _h, int _c, VkBufferMemory* _data, size_t _elemsize, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(3), w(_w), h(_h), c(_c) -{ - cstep = alignSize(w * h * elemsize, 16) / elemsize; -} - -inline VkMat::VkMat(int _w, VkBufferMemory* _data, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(1), w(_w), h(1), c(1) -{ - cstep = w; -} - -inline VkMat::VkMat(int _w, int _h, VkBufferMemory* _data, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(2), w(_w), h(_h), c(1) -{ - cstep = w * h; -} - -inline VkMat::VkMat(int _w, int _h, int _c, VkBufferMemory* _data, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(3), w(_w), h(_h), c(_c) -{ - cstep = alignSize(w * h * elemsize, 16) / elemsize; -} - -inline VkMat::~VkMat() -{ - release(); -} - -inline VkMat& VkMat::operator=(const VkMat& m) -{ - if (this == &m) - return *this; - - if (m.refcount) - NCNN_XADD(m.refcount, 1); - - release(); - - data = m.data; - refcount = m.refcount; - elemsize = 
m.elemsize; - elempack = m.elempack; - allocator = m.allocator; - - dims = m.dims; - w = m.w; - h = m.h; - c = m.c; - - cstep = m.cstep; - - return *this; -} - -inline Mat VkMat::mapped() const -{ - if (!allocator->mappable) - return Mat(); - - if (dims == 1) - return Mat(w, mapped_ptr(), elemsize, elempack, 0); - - if (dims == 2) - return Mat(w, h, mapped_ptr(), elemsize, elempack, 0); - - if (dims == 3) - return Mat(w, h, c, mapped_ptr(), elemsize, elempack, 0); - - return Mat(); -} - -inline void* VkMat::mapped_ptr() const -{ - if (!allocator->mappable) - return 0; - - return (unsigned char*)data->mapped_ptr + data->offset; -} - -inline void VkMat::addref() -{ - if (refcount) - NCNN_XADD(refcount, 1); -} - -inline void VkMat::release() -{ - if (refcount && NCNN_XADD(refcount, -1) == 1) - { - if (allocator && data) - { - allocator->fastFree(data); - } - } - - data = 0; - - elemsize = 0; - elempack = 0; - - dims = 0; - w = 0; - h = 0; - c = 0; - - cstep = 0; - - refcount = 0; -} - -inline bool VkMat::empty() const -{ - return data == 0 || total() == 0; -} - -inline size_t VkMat::total() const -{ - return cstep * c; -} - -inline int VkMat::elembits() const -{ - return elempack ? 
static_cast(elemsize) * 8 / elempack : 0; -} - -inline Mat VkMat::shape() const -{ - if (dims == 1) - return Mat(w * elempack, (void*)0); - if (dims == 2) - return Mat(w, h * elempack, (void*)0); - if (dims == 3) - return Mat(w, h, c * elempack, (void*)0); - - return Mat(); -} - -inline VkBuffer VkMat::buffer() const -{ - return data->buffer; -} - -inline size_t VkMat::buffer_offset() const -{ - return data->offset; -} - -inline size_t VkMat::buffer_capacity() const -{ - return data->capacity; -} - -inline VkImageMat::VkImageMat() - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0) -{ -} - -inline VkImageMat::VkImageMat(int _w, size_t _elemsize, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0) -{ - create(_w, _elemsize, _allocator); -} - -inline VkImageMat::VkImageMat(int _w, int _h, size_t _elemsize, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0) -{ - create(_w, _h, _elemsize, _allocator); -} - -inline VkImageMat::VkImageMat(int _w, int _h, int _c, size_t _elemsize, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0) -{ - create(_w, _h, _c, _elemsize, _allocator); -} - -inline VkImageMat::VkImageMat(int _w, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0) -{ - create(_w, _elemsize, _elempack, _allocator); -} - -inline VkImageMat::VkImageMat(int _w, int _h, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), allocator(0), dims(0), w(0), h(0), c(0) -{ - create(_w, _h, _elemsize, _elempack, _allocator); -} - -inline VkImageMat::VkImageMat(int _w, int _h, int _c, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(0), refcount(0), elemsize(0), elempack(0), 
allocator(0), dims(0), w(0), h(0), c(0) -{ - create(_w, _h, _c, _elemsize, _elempack, _allocator); -} - -inline VkImageMat::VkImageMat(const VkImageMat& m) - : data(m.data), refcount(m.refcount), elemsize(m.elemsize), elempack(m.elempack), allocator(m.allocator), dims(m.dims), w(m.w), h(m.h), c(m.c) -{ - addref(); -} - -inline VkImageMat::VkImageMat(int _w, VkImageMemory* _data, size_t _elemsize, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(1), w(_w), h(1), c(1) -{ -} - -inline VkImageMat::VkImageMat(int _w, int _h, VkImageMemory* _data, size_t _elemsize, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(2), w(_w), h(_h), c(1) -{ -} - -inline VkImageMat::VkImageMat(int _w, int _h, int _c, VkImageMemory* _data, size_t _elemsize, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(1), allocator(_allocator), dims(3), w(_w), h(_h), c(_c) -{ -} - -inline VkImageMat::VkImageMat(int _w, VkImageMemory* _data, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(1), w(_w), h(1), c(1) -{ -} - -inline VkImageMat::VkImageMat(int _w, int _h, VkImageMemory* _data, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(2), w(_w), h(_h), c(1) -{ -} - -inline VkImageMat::VkImageMat(int _w, int _h, int _c, VkImageMemory* _data, size_t _elemsize, int _elempack, VkAllocator* _allocator) - : data(_data), refcount(0), elemsize(_elemsize), elempack(_elempack), allocator(_allocator), dims(3), w(_w), h(_h), c(_c) -{ -} - -inline VkImageMat::~VkImageMat() -{ - release(); -} - -inline VkImageMat& VkImageMat::operator=(const VkImageMat& m) -{ - if (this == &m) - return *this; - - if (m.refcount) - NCNN_XADD(m.refcount, 
1); - - release(); - - data = m.data; - refcount = m.refcount; - elemsize = m.elemsize; - elempack = m.elempack; - allocator = m.allocator; - - dims = m.dims; - w = m.w; - h = m.h; - c = m.c; - - return *this; -} - -inline Mat VkImageMat::mapped() const -{ - if (!allocator->mappable || !data->mapped_ptr) - return Mat(); - - if (dims == 1) - return Mat(w, mapped_ptr(), elemsize, elempack, 0); - - if (dims == 2) - return Mat(w, h, mapped_ptr(), elemsize, elempack, 0); - - if (dims == 3) - return Mat(w, h, c, mapped_ptr(), elemsize, elempack, 0); - - return Mat(); -} - -inline void* VkImageMat::mapped_ptr() const -{ - if (!allocator->mappable || !data->mapped_ptr) - return 0; - - return (unsigned char*)data->mapped_ptr + data->bind_offset; -} - -inline void VkImageMat::addref() -{ - if (refcount) - NCNN_XADD(refcount, 1); -} - -inline void VkImageMat::release() -{ - if (refcount && NCNN_XADD(refcount, -1) == 1) - { - if (allocator && data) - { - allocator->fastFree(data); - } - } - - data = 0; - - elemsize = 0; - elempack = 0; - - dims = 0; - w = 0; - h = 0; - c = 0; - - refcount = 0; -} - -inline bool VkImageMat::empty() const -{ - return data == 0 || total() == 0; -} - -inline size_t VkImageMat::total() const -{ - return w * h * c; -} - -inline int VkImageMat::elembits() const -{ - return elempack ? 
static_cast(elemsize) * 8 / elempack : 0; -} - -inline Mat VkImageMat::shape() const -{ - if (dims == 1) - return Mat(w * elempack, (void*)0); - if (dims == 2) - return Mat(w, h * elempack, (void*)0); - if (dims == 3) - return Mat(w, h, c * elempack, (void*)0); - - return Mat(); -} - -inline VkImage VkImageMat::image() const -{ - return data->image; -} - -inline VkImageView VkImageMat::imageview() const -{ - return data->imageview; -} - -#endif // NCNN_VULKAN - -} // namespace ncnn - -#endif // NCNN_MAT_H diff --git a/ncnn/modelbin.h b/ncnn/modelbin.h deleted file mode 100644 index 216ae533..00000000 --- a/ncnn/modelbin.h +++ /dev/null @@ -1,78 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_MODELBIN_H -#define NCNN_MODELBIN_H - -#include "mat.h" - -namespace ncnn { - -class DataReader; -class NCNN_EXPORT ModelBin -{ -public: - ModelBin(); - virtual ~ModelBin(); - // element type - // 0 = auto - // 1 = float32 - // 2 = float16 - // 3 = int8 - // load vec - virtual Mat load(int w, int type) const = 0; - // load image - virtual Mat load(int w, int h, int type) const; - // load dim - virtual Mat load(int w, int h, int c, int type) const; -}; - -class ModelBinFromDataReaderPrivate; -class NCNN_EXPORT ModelBinFromDataReader : public ModelBin -{ -public: - explicit ModelBinFromDataReader(const DataReader& dr); - virtual ~ModelBinFromDataReader(); - - virtual Mat load(int w, int type) const; - -private: - ModelBinFromDataReader(const ModelBinFromDataReader&); - ModelBinFromDataReader& operator=(const ModelBinFromDataReader&); - -private: - ModelBinFromDataReaderPrivate* const d; -}; - -class ModelBinFromMatArrayPrivate; -class NCNN_EXPORT ModelBinFromMatArray : public ModelBin -{ -public: - // construct from weight blob array - explicit ModelBinFromMatArray(const Mat* weights); - virtual ~ModelBinFromMatArray(); - - virtual Mat load(int w, int type) const; - -private: - ModelBinFromMatArray(const ModelBinFromMatArray&); - ModelBinFromMatArray& operator=(const ModelBinFromMatArray&); - -private: - ModelBinFromMatArrayPrivate* const d; -}; - -} // namespace ncnn - -#endif // NCNN_MODELBIN_H diff --git a/ncnn/ncnn_export.h b/ncnn/ncnn_export.h deleted file mode 100644 index e2f5fdee..00000000 --- a/ncnn/ncnn_export.h +++ /dev/null @@ -1,42 +0,0 @@ - -#ifndef NCNN_EXPORT_H -#define NCNN_EXPORT_H - -#ifdef NCNN_STATIC_DEFINE -# define NCNN_EXPORT -# define NCNN_NO_EXPORT -#else -# ifndef NCNN_EXPORT -# ifdef ncnn_EXPORTS - /* We are building this library */ -# define NCNN_EXPORT __attribute__((visibility("default"))) -# else - /* We are using this library */ -# define NCNN_EXPORT __attribute__((visibility("default"))) -# endif -# endif - -# ifndef 
NCNN_NO_EXPORT -# define NCNN_NO_EXPORT __attribute__((visibility("hidden"))) -# endif -#endif - -#ifndef NCNN_DEPRECATED -# define NCNN_DEPRECATED __attribute__ ((__deprecated__)) -#endif - -#ifndef NCNN_DEPRECATED_EXPORT -# define NCNN_DEPRECATED_EXPORT NCNN_EXPORT NCNN_DEPRECATED -#endif - -#ifndef NCNN_DEPRECATED_NO_EXPORT -# define NCNN_DEPRECATED_NO_EXPORT NCNN_NO_EXPORT NCNN_DEPRECATED -#endif - -#if 0 /* DEFINE_NO_DEPRECATED */ -# ifndef NCNN_NO_DEPRECATED -# define NCNN_NO_DEPRECATED -# endif -#endif - -#endif /* NCNN_EXPORT_H */ diff --git a/ncnn/net.h b/ncnn/net.h deleted file mode 100644 index 10cf307a..00000000 --- a/ncnn/net.h +++ /dev/null @@ -1,272 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_NET_H -#define NCNN_NET_H - -#include "blob.h" -#include "layer.h" -#include "mat.h" -#include "option.h" -#include "platform.h" - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 9 -#include -#endif // __ANDROID_API__ >= 9 -#endif // NCNN_PLATFORM_API - -namespace ncnn { - -#if NCNN_VULKAN -class VkCompute; -#endif // NCNN_VULKAN -class DataReader; -class Extractor; -class NetPrivate; -class NCNN_EXPORT Net -{ -public: - // empty init - Net(); - // clear and destroy - virtual ~Net(); - -public: - // option can be changed before loading - Option opt; - -#if NCNN_VULKAN - // set gpu device by index - void set_vulkan_device(int device_index); - - // set gpu device by device handle, no owner transfer - void set_vulkan_device(const VulkanDevice* vkdev); - - const VulkanDevice* vulkan_device() const; -#endif // NCNN_VULKAN - -#if NCNN_STRING - // register custom layer by layer type name - // return 0 if success - int register_custom_layer(const char* type, layer_creator_func creator, layer_destroyer_func destroyer = 0, void* userdata = 0); -#endif // NCNN_STRING - // register custom layer by layer type - // return 0 if success - int register_custom_layer(int index, layer_creator_func creator, layer_destroyer_func destroyer = 0, void* userdata = 0); - -#if NCNN_STRING - int load_param(const DataReader& dr); -#endif // NCNN_STRING - - int load_param_bin(const DataReader& dr); - - int load_model(const DataReader& dr); - -#if NCNN_STDIO -#if NCNN_STRING - // load network structure from plain param file - // return 0 if success - int load_param(FILE* fp); - int load_param(const char* protopath); - int load_param_mem(const char* mem); -#endif // NCNN_STRING - // load network structure from binary param file - // return 0 if success - int load_param_bin(FILE* fp); - int load_param_bin(const char* protopath); - - // load network weight data from model file - // return 0 if success - int load_model(FILE* fp); - int load_model(const char* modelpath); -#endif // 
NCNN_STDIO - - // load network structure from external memory - // memory pointer must be 32-bit aligned - // return bytes consumed - int load_param(const unsigned char* mem); - - // reference network weight data from external memory - // weight data is not copied but referenced - // so external memory should be retained when used - // memory pointer must be 32-bit aligned - // return bytes consumed - int load_model(const unsigned char* mem); - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 9 -#if NCNN_STRING - // convenient load network structure from android asset plain param file - int load_param(AAsset* asset); - int load_param(AAssetManager* mgr, const char* assetpath); -#endif // NCNN_STRING - // convenient load network structure from android asset binary param file - int load_param_bin(AAsset* asset); - int load_param_bin(AAssetManager* mgr, const char* assetpath); - - // convenient load network weight data from android asset model file - int load_model(AAsset* asset); - int load_model(AAssetManager* mgr, const char* assetpath); -#endif // __ANDROID_API__ >= 9 -#endif // NCNN_PLATFORM_API - - // unload network structure and weight data - void clear(); - - // construct an Extractor from network - Extractor create_extractor() const; - - // get input/output indexes/names - const std::vector& input_indexes() const; - const std::vector& output_indexes() const; -#if NCNN_STRING - const std::vector& input_names() const; - const std::vector& output_names() const; -#endif - - const std::vector& blobs() const; - const std::vector& layers() const; - - std::vector& mutable_blobs(); - std::vector& mutable_layers(); - -protected: - friend class Extractor; -#if NCNN_STRING - int find_blob_index_by_name(const char* name) const; - int find_layer_index_by_name(const char* name) const; - virtual int custom_layer_to_index(const char* type); - virtual Layer* create_custom_layer(const char* type); -#endif // NCNN_STRING - virtual Layer* create_custom_layer(int index); - -private: 
- Net(const Net&); - Net& operator=(const Net&); - -private: - NetPrivate* const d; -}; - -class ExtractorPrivate; -class NCNN_EXPORT Extractor -{ -public: - virtual ~Extractor(); - - // copy - Extractor(const Extractor&); - - // assign - Extractor& operator=(const Extractor&); - - // clear blob mats and alloctors - void clear(); - - // enable light mode - // intermediate blob will be recycled when enabled - // enabled by default - void set_light_mode(bool enable); - - // set thread count for this extractor - // this will overwrite the global setting - // default count is system depended - void set_num_threads(int num_threads); - - // set blob memory allocator - void set_blob_allocator(Allocator* allocator); - - // set workspace memory allocator - void set_workspace_allocator(Allocator* allocator); - -#if NCNN_VULKAN - void set_vulkan_compute(bool enable); - - void set_blob_vkallocator(VkAllocator* allocator); - - void set_workspace_vkallocator(VkAllocator* allocator); - - void set_staging_vkallocator(VkAllocator* allocator); -#endif // NCNN_VULKAN - -#if NCNN_STRING - // set input by blob name - // return 0 if success - int input(const char* blob_name, const Mat& in); - - // get result by blob name - // return 0 if success - // type = 0, default - // type = 1, do not convert fp16/bf16 or / and packing - int extract(const char* blob_name, Mat& feat, int type = 0); -#endif // NCNN_STRING - - // set input by blob index - // return 0 if success - int input(int blob_index, const Mat& in); - - // get result by blob index - // return 0 if success - // type = 0, default - // type = 1, do not convert fp16/bf16 or / and packing - int extract(int blob_index, Mat& feat, int type = 0); - -#if NCNN_VULKAN -#if NCNN_STRING - // set input by blob name - // return 0 if success - int input(const char* blob_name, const VkMat& in); - - // get result by blob name - // return 0 if success - int extract(const char* blob_name, VkMat& feat, VkCompute& cmd); - - // set input by blob name - 
// return 0 if success - int input(const char* blob_name, const VkImageMat& in); - - // get result by blob name - // return 0 if success - int extract(const char* blob_name, VkImageMat& feat, VkCompute& cmd); -#endif // NCNN_STRING - - // set input by blob index - // return 0 if success - int input(int blob_index, const VkMat& in); - - // get result by blob index - // return 0 if success - int extract(int blob_index, VkMat& feat, VkCompute& cmd); - - // set input by blob index - // return 0 if success - int input(int blob_index, const VkImageMat& in); - - // get result by blob index - // return 0 if success - int extract(int blob_index, VkImageMat& feat, VkCompute& cmd); -#endif // NCNN_VULKAN - -protected: - friend Extractor Net::create_extractor() const; - Extractor(const Net* net, size_t blob_count); - -private: - ExtractorPrivate* const d; -}; - -} // namespace ncnn - -#endif // NCNN_NET_H diff --git a/ncnn/option.h b/ncnn/option.h deleted file mode 100644 index a84c9a70..00000000 --- a/ncnn/option.h +++ /dev/null @@ -1,149 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_OPTION_H -#define NCNN_OPTION_H - -#include "platform.h" - -namespace ncnn { - -#if NCNN_VULKAN -class VkAllocator; -class PipelineCache; -#endif // NCNN_VULKAN - -class Allocator; -class NCNN_EXPORT Option -{ -public: - // default option - Option(); - -public: - // light mode - // intermediate blob will be recycled when enabled - // enabled by default - bool lightmode; - - // thread count - // default value is the one returned by get_cpu_count() - int num_threads; - - // blob memory allocator - Allocator* blob_allocator; - - // workspace memory allocator - Allocator* workspace_allocator; - -#if NCNN_VULKAN - // blob memory allocator - VkAllocator* blob_vkallocator; - - // workspace memory allocator - VkAllocator* workspace_vkallocator; - - // staging memory allocator - VkAllocator* staging_vkallocator; - - // pipeline cache - PipelineCache* pipeline_cache; -#endif // NCNN_VULKAN - - // the time openmp threads busy-wait for more work before going to sleep - // default value is 20ms to keep the cores enabled - // without too much extra power consumption afterwards - int openmp_blocktime; - - // enable winograd convolution optimization - // improve convolution 3x3 stride1 performance, may consume more memory - // changes should be applied before loading network structure and weight - // enabled by default - bool use_winograd_convolution; - - // enable sgemm convolution optimization - // improve convolution 1x1 stride1 performance, may consume more memory - // changes should be applied before loading network structure and weight - // enabled by default - bool use_sgemm_convolution; - - // enable quantized int8 inference - // use low-precision int8 path for quantized model - // changes should be applied before loading network structure and weight - // enabled by default - bool use_int8_inference; - - // enable vulkan compute - bool use_vulkan_compute; - - // enable bf16 data type for storage - // improve most operator performance on all arm devices, may 
consume more memory - bool use_bf16_storage; - - // enable options for gpu inference - bool use_fp16_packed; - bool use_fp16_storage; - bool use_fp16_arithmetic; - bool use_int8_packed; - bool use_int8_storage; - bool use_int8_arithmetic; - - // enable simd-friendly packed memory layout - // improve all operator performance on all arm devices, will consume more memory - // changes should be applied before loading network structure and weight - // enabled by default - bool use_packing_layout; - - bool use_shader_pack8; - - // subgroup option - bool use_subgroup_basic; - bool use_subgroup_vote; - bool use_subgroup_ballot; - bool use_subgroup_shuffle; - - // turn on for adreno - bool use_image_storage; - bool use_tensor_storage; - - // used for fp16 weight storage in AVX - // TODO drop this option - bool use_weight_fp16_storage; - - // enable DAZ(Denormals-Are-Zero) and FTZ(Flush-To-Zero) - // default value is 3 - // 0 = DAZ OFF, FTZ OFF - // 1 = DAZ ON , FTZ OFF - // 2 = DAZ OFF, FTZ ON - // 3 = DAZ ON, FTZ ON - int flush_denormals; - - bool use_local_pool_allocator; - - bool use_reserved_1; - bool use_reserved_2; - bool use_reserved_3; - bool use_reserved_4; - bool use_reserved_5; - bool use_reserved_6; - bool use_reserved_7; - bool use_reserved_8; - bool use_reserved_9; - bool use_reserved_10; - bool use_reserved_11; -}; - -} // namespace ncnn - -#endif // NCNN_OPTION_H diff --git a/ncnn/paramdict.h b/ncnn/paramdict.h deleted file mode 100644 index c2ef1606..00000000 --- a/ncnn/paramdict.h +++ /dev/null @@ -1,73 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. 
You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_PARAMDICT_H -#define NCNN_PARAMDICT_H - -#include "mat.h" - -// at most 32 parameters -#define NCNN_MAX_PARAM_COUNT 32 - -namespace ncnn { - -class DataReader; -class Net; -class ParamDictPrivate; -class NCNN_EXPORT ParamDict -{ -public: - // empty - ParamDict(); - - virtual ~ParamDict(); - - // copy - ParamDict(const ParamDict&); - - // assign - ParamDict& operator=(const ParamDict&); - - // get type - int type(int id) const; - - // get int - int get(int id, int def) const; - // get float - float get(int id, float def) const; - // get array - Mat get(int id, const Mat& def) const; - - // set int - void set(int id, int i); - // set float - void set(int id, float f); - // set array - void set(int id, const Mat& v); - -protected: - friend class Net; - - void clear(); - - int load_param(const DataReader& dr); - int load_param_bin(const DataReader& dr); - -private: - ParamDictPrivate* const d; -}; - -} // namespace ncnn - -#endif // NCNN_PARAMDICT_H diff --git a/ncnn/pipeline.h b/ncnn/pipeline.h deleted file mode 100644 index c284a148..00000000 --- a/ncnn/pipeline.h +++ /dev/null @@ -1,113 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. 
You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_PIPELINE_H -#define NCNN_PIPELINE_H - -#include "mat.h" -#include "platform.h" -#if NCNN_VULKAN -#include "gpu.h" - -#include -#endif // NCNN_VULKAN - -namespace ncnn { - -#if NCNN_VULKAN -class Option; -class PipelinePrivate; -class NCNN_EXPORT Pipeline -{ -public: - explicit Pipeline(const VulkanDevice* vkdev); - virtual ~Pipeline(); - -public: - void set_optimal_local_size_xyz(int w = 4, int h = 4, int c = 4); - void set_optimal_local_size_xyz(const Mat& local_size_xyz); - void set_local_size_xyz(int w, int h, int c); - - int create(const uint32_t* spv_data, size_t spv_data_size, const std::vector& specializations); - - int create(int shader_type_index, const Option& opt, const std::vector& specializations); - -public: - VkShaderModule shader_module() const; - VkDescriptorSetLayout descriptorset_layout() const; - VkPipelineLayout pipeline_layout() const; - VkPipeline pipeline() const; - VkDescriptorUpdateTemplateKHR descriptor_update_template() const; - - const ShaderInfo& shader_info() const; - - uint32_t local_size_x() const; - uint32_t local_size_y() const; - uint32_t local_size_z() const; - -protected: - void set_shader_module(VkShaderModule shader_module); - void set_descriptorset_layout(VkDescriptorSetLayout descriptorset_layout); - void set_pipeline_layout(VkPipelineLayout pipeline_layout); - void set_pipeline(VkPipeline pipeline); - void set_descriptor_update_template(VkDescriptorUpdateTemplateKHR descriptor_update_template); - - void set_shader_info(const ShaderInfo& shader_info); - -public: - const VulkanDevice* vkdev; - 
-private: - Pipeline(const Pipeline&); - Pipeline& operator=(const Pipeline&); - -private: - PipelinePrivate* const d; -}; - -#if NCNN_PLATFORM_API -#if __ANDROID_API__ >= 26 -class VkCompute; -class NCNN_EXPORT ImportAndroidHardwareBufferPipeline : private Pipeline -{ -public: - explicit ImportAndroidHardwareBufferPipeline(const VulkanDevice* vkdev); - virtual ~ImportAndroidHardwareBufferPipeline(); - - int create(VkAndroidHardwareBufferImageAllocator* ahb_im_allocator, int type_to, int rotate_from, const Option& opt); - int create(VkAndroidHardwareBufferImageAllocator* ahb_im_allocator, int type_to, int rotate_from, int target_width, int target_height, const Option& opt); - void destroy(); - - friend class VkCompute; - -protected: - int create_shader_module(const Option& opt); - int create_sampler(VkAndroidHardwareBufferImageAllocator* ahb_im_allocator); - int create_descriptorset_layout(); - -public: - int type_to; - int rotate_from; - bool need_resize; - - VkSampler sampler; -}; -#endif // __ANDROID_API__ >= 26 -#endif // NCNN_PLATFORM_API - -#endif // NCNN_VULKAN - -} // namespace ncnn - -#endif // NCNN_PIPELINE_H diff --git a/ncnn/pipelinecache.h b/ncnn/pipelinecache.h deleted file mode 100644 index bb6b8fb2..00000000 --- a/ncnn/pipelinecache.h +++ /dev/null @@ -1,85 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_PIPELINECACHE_H -#define NCNN_PIPELINECACHE_H - -#include "platform.h" - -#if NCNN_VULKAN -#include -#endif // NCNN_VULKAN - -#include "mat.h" -#include "gpu.h" - -namespace ncnn { - -#if NCNN_VULKAN - -class VulkanDevice; -class PipelineCachePrivate; -class NCNN_EXPORT PipelineCache -{ -public: - explicit PipelineCache(const VulkanDevice* _vkdev); - - virtual ~PipelineCache(); - - void clear(); - - int get_pipeline(const uint32_t* spv_data, size_t spv_data_size, const std::vector& specializations, - uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z, - VkShaderModule* shader_module, - VkDescriptorSetLayout* descriptorset_layout, - VkPipelineLayout* pipeline_layout, - VkPipeline* pipeline, - VkDescriptorUpdateTemplateKHR* descriptor_update_template, - ShaderInfo& shader_info) const; - - int get_pipeline(int shader_type_index, const Option& opt, const std::vector& specializations, - uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z, - VkShaderModule* shader_module, - VkDescriptorSetLayout* descriptorset_layout, - VkPipelineLayout* pipeline_layout, - VkPipeline* pipeline, - VkDescriptorUpdateTemplateKHR* descriptor_update_template, - ShaderInfo& shader_info) const; - -protected: - int create_shader_module(int shader_type_index, const Option& opt, uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z, - VkShaderModule* _shader_module, ShaderInfo& si) const; - - int new_pipeline(VkShaderModule shader_module, const ShaderInfo& shader_info, const std::vector& specializations, - VkDescriptorSetLayout* descriptorset_layout, - VkPipelineLayout* pipeline_layout, - VkPipeline* pipeline, - VkDescriptorUpdateTemplateKHR* descriptor_update_template) const; - -protected: - const VulkanDevice* vkdev; - -private: - PipelineCache(const PipelineCache&); - PipelineCache& operator=(const PipelineCache&); - -private: - 
PipelineCachePrivate* const d; -}; - -#endif // NCNN_VULKAN - -} // namespace ncnn - -#endif // NCNN_PIPELINECACHE_H diff --git a/ncnn/platform.h b/ncnn/platform.h deleted file mode 100644 index 72ffcd64..00000000 --- a/ncnn/platform.h +++ /dev/null @@ -1,243 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_PLATFORM_H -#define NCNN_PLATFORM_H - -#define NCNN_STDIO 1 -#define NCNN_STRING 1 -#define NCNN_SIMPLEOCV 0 -#define NCNN_SIMPLEOMP 0 -#define NCNN_SIMPLESTL 0 -#define NCNN_THREADS 1 -#define NCNN_BENCHMARK 0 -#define NCNN_PLATFORM_API 1 -#define NCNN_PIXEL 1 -#define NCNN_PIXEL_ROTATE 1 -#define NCNN_PIXEL_AFFINE 1 -#define NCNN_PIXEL_DRAWING 1 -#define NCNN_VULKAN 0 -#define NCNN_RUNTIME_CPU 1 -#define NCNN_AVX2 1 -#define NCNN_AVX 1 -#define NCNN_ARM82 0 -#define NCNN_ARM82DOT 0 -#define NCNN_MSA 0 -#define NCNN_MMI 0 -#define NCNN_RVV 0 -#define NCNN_INT8 1 -#define NCNN_BF16 1 - -#define NCNN_VERSION_STRING "1.0.21.10.07" - -#include "ncnn_export.h" - -#ifdef __cplusplus - -#if NCNN_THREADS -#if (defined _WIN32 && !(defined __MINGW32__)) -#define WIN32_LEAN_AND_MEAN -#include -#include -#else -#include -#endif -#endif // NCNN_THREADS - -#if __ANDROID_API__ >= 26 -#define VK_USE_PLATFORM_ANDROID_KHR -#endif // __ANDROID_API__ >= 26 - -namespace ncnn { - -#if NCNN_THREADS 
-#if (defined _WIN32 && !(defined __MINGW32__)) -class NCNN_EXPORT Mutex -{ -public: - Mutex() { InitializeSRWLock(&srwlock); } - ~Mutex() {} - void lock() { AcquireSRWLockExclusive(&srwlock); } - void unlock() { ReleaseSRWLockExclusive(&srwlock); } -private: - friend class ConditionVariable; - // NOTE SRWLock is available from windows vista - SRWLOCK srwlock; -}; - -class NCNN_EXPORT ConditionVariable -{ -public: - ConditionVariable() { InitializeConditionVariable(&condvar); } - ~ConditionVariable() {} - void wait(Mutex& mutex) { SleepConditionVariableSRW(&condvar, &mutex.srwlock, INFINITE, 0); } - void broadcast() { WakeAllConditionVariable(&condvar); } - void signal() { WakeConditionVariable(&condvar); } -private: - CONDITION_VARIABLE condvar; -}; - -static unsigned __stdcall start_wrapper(void* args); -class NCNN_EXPORT Thread -{ -public: - Thread(void* (*start)(void*), void* args = 0) { _start = start; _args = args; handle = (HANDLE)_beginthreadex(0, 0, start_wrapper, this, 0, 0); } - ~Thread() {} - void join() { WaitForSingleObject(handle, INFINITE); CloseHandle(handle); } -private: - friend unsigned __stdcall start_wrapper(void* args) - { - Thread* t = (Thread*)args; - t->_start(t->_args); - return 0; - } - HANDLE handle; - void* (*_start)(void*); - void* _args; -}; - -class NCNN_EXPORT ThreadLocalStorage -{ -public: - ThreadLocalStorage() { key = TlsAlloc(); } - ~ThreadLocalStorage() { TlsFree(key); } - void set(void* value) { TlsSetValue(key, (LPVOID)value); } - void* get() { return (void*)TlsGetValue(key); } -private: - DWORD key; -}; -#else // (defined _WIN32 && !(defined __MINGW32__)) -class NCNN_EXPORT Mutex -{ -public: - Mutex() { pthread_mutex_init(&mutex, 0); } - ~Mutex() { pthread_mutex_destroy(&mutex); } - void lock() { pthread_mutex_lock(&mutex); } - void unlock() { pthread_mutex_unlock(&mutex); } -private: - friend class ConditionVariable; - pthread_mutex_t mutex; -}; - -class NCNN_EXPORT ConditionVariable -{ -public: - ConditionVariable() { 
pthread_cond_init(&cond, 0); } - ~ConditionVariable() { pthread_cond_destroy(&cond); } - void wait(Mutex& mutex) { pthread_cond_wait(&cond, &mutex.mutex); } - void broadcast() { pthread_cond_broadcast(&cond); } - void signal() { pthread_cond_signal(&cond); } -private: - pthread_cond_t cond; -}; - -class NCNN_EXPORT Thread -{ -public: - Thread(void* (*start)(void*), void* args = 0) { pthread_create(&t, 0, start, args); } - ~Thread() {} - void join() { pthread_join(t, 0); } -private: - pthread_t t; -}; - -class NCNN_EXPORT ThreadLocalStorage -{ -public: - ThreadLocalStorage() { pthread_key_create(&key, 0); } - ~ThreadLocalStorage() { pthread_key_delete(key); } - void set(void* value) { pthread_setspecific(key, value); } - void* get() { return pthread_getspecific(key); } -private: - pthread_key_t key; -}; -#endif // (defined _WIN32 && !(defined __MINGW32__)) -#else // NCNN_THREADS -class NCNN_EXPORT Mutex -{ -public: - Mutex() {} - ~Mutex() {} - void lock() {} - void unlock() {} -}; - -class NCNN_EXPORT ConditionVariable -{ -public: - ConditionVariable() {} - ~ConditionVariable() {} - void wait(Mutex& /*mutex*/) {} - void broadcast() {} - void signal() {} -}; - -class NCNN_EXPORT Thread -{ -public: - Thread(void* (*/*start*/)(void*), void* /*args*/ = 0) {} - ~Thread() {} - void join() {} -}; - -class NCNN_EXPORT ThreadLocalStorage -{ -public: - ThreadLocalStorage() { data = 0; } - ~ThreadLocalStorage() {} - void set(void* value) { data = value; } - void* get() { return data; } -private: - void* data; -}; -#endif // NCNN_THREADS - -class NCNN_EXPORT MutexLockGuard -{ -public: - MutexLockGuard(Mutex& _mutex) : mutex(_mutex) { mutex.lock(); } - ~MutexLockGuard() { mutex.unlock(); } -private: - Mutex& mutex; -}; - -} // namespace ncnn - -#if NCNN_SIMPLESTL -#include "simplestl.h" -#else -#include -#include -#include -#include -#endif - -#endif // __cplusplus - -#if NCNN_STDIO -#if NCNN_PLATFORM_API && __ANDROID_API__ >= 8 -#include -#define NCNN_LOGE(...) 
do { \ - fprintf(stderr, ##__VA_ARGS__); fprintf(stderr, "\n"); \ - __android_log_print(ANDROID_LOG_WARN, "ncnn", ##__VA_ARGS__); } while(0) -#else // NCNN_PLATFORM_API && __ANDROID_API__ >= 8 -#include -#define NCNN_LOGE(...) do { \ - fprintf(stderr, ##__VA_ARGS__); fprintf(stderr, "\n"); } while(0) -#endif // NCNN_PLATFORM_API && __ANDROID_API__ >= 8 -#else -#define NCNN_LOGE(...) -#endif - -#endif // NCNN_PLATFORM_H diff --git a/ncnn/simpleocv.h b/ncnn/simpleocv.h deleted file mode 100644 index 55ede15b..00000000 --- a/ncnn/simpleocv.h +++ /dev/null @@ -1,501 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. 
- -#ifndef NCNN_SIMPLEOCV_H -#define NCNN_SIMPLEOCV_H - -#include "platform.h" - -#if NCNN_SIMPLEOCV - -#include -#include -#include "allocator.h" -#include "mat.h" - -#if defined(_MSC_VER) || defined(__GNUC__) -#pragma push_macro("min") -#pragma push_macro("max") -#undef min -#undef max -#endif - -#ifndef NCNN_XADD -using ncnn::NCNN_XADD; -#endif - -typedef unsigned char uchar; -typedef unsigned short ushort; -typedef unsigned int uint; - -enum -{ - CV_LOAD_IMAGE_UNCHANGED = -1, - CV_LOAD_IMAGE_GRAYSCALE = 0, - CV_LOAD_IMAGE_COLOR = 1, -}; - -enum -{ - CV_IMWRITE_JPEG_QUALITY = 1 -}; - -// minimal opencv style data structure implementation -namespace cv { - -template -static inline _Tp saturate_cast(int v) -{ - return _Tp(v); -} -template<> -inline uchar saturate_cast(int v) -{ - return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); -} - -template -struct Scalar_ -{ - Scalar_() - { - v[0] = 0; - v[1] = 0; - v[2] = 0; - v[3] = 0; - } - Scalar_(_Tp _v0) - { - v[0] = _v0; - v[1] = 0; - v[2] = 0; - v[3] = 0; - } - Scalar_(_Tp _v0, _Tp _v1, _Tp _v2) - { - v[0] = _v0; - v[1] = _v1; - v[2] = _v2; - v[3] = 0; - } - Scalar_(_Tp _v0, _Tp _v1, _Tp _v2, _Tp _v3) - { - v[0] = _v0; - v[1] = _v1; - v[2] = _v2; - v[3] = _v3; - } - - const _Tp operator[](const int i) const - { - return v[i]; - } - - _Tp operator[](const int i) - { - return v[i]; - } - - _Tp v[4]; -}; - -typedef Scalar_ Scalar; - -template -struct Point_ -{ - Point_() - : x(0), y(0) - { - } - Point_(_Tp _x, _Tp _y) - : x(_x), y(_y) - { - } - - template - operator Point_<_Tp2>() const - { - return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); - } - - _Tp x; - _Tp y; -}; - -typedef Point_ Point; -typedef Point_ Point2f; - -template -struct Size_ -{ - Size_() - : width(0), height(0) - { - } - Size_(_Tp _w, _Tp _h) - : width(_w), height(_h) - { - } - - template - operator Size_<_Tp2>() const - { - return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); - } - - _Tp 
width; - _Tp height; -}; - -typedef Size_ Size; -typedef Size_ Size2f; - -template -struct Rect_ -{ - Rect_() - : x(0), y(0), width(0), height(0) - { - } - Rect_(_Tp _x, _Tp _y, _Tp _w, _Tp _h) - : x(_x), y(_y), width(_w), height(_h) - { - } - Rect_(Point_<_Tp> _p, Size_<_Tp> _size) - : x(_p.x), y(_p.y), width(_size.width), height(_size.height) - { - } - - template - operator Rect_<_Tp2>() const - { - return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); - } - - _Tp x; - _Tp y; - _Tp width; - _Tp height; - - // area - _Tp area() const - { - return width * height; - } -}; - -template -static inline Rect_<_Tp>& operator&=(Rect_<_Tp>& a, const Rect_<_Tp>& b) -{ - _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y); - a.width = std::min(a.x + a.width, b.x + b.width) - x1; - a.height = std::min(a.y + a.height, b.y + b.height) - y1; - a.x = x1; - a.y = y1; - if (a.width <= 0 || a.height <= 0) - a = Rect_<_Tp>(); - return a; -} - -template -static inline Rect_<_Tp>& operator|=(Rect_<_Tp>& a, const Rect_<_Tp>& b) -{ - _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y); - a.width = std::max(a.x + a.width, b.x + b.width) - x1; - a.height = std::max(a.y + a.height, b.y + b.height) - y1; - a.x = x1; - a.y = y1; - return a; -} - -template -static inline Rect_<_Tp> operator&(const Rect_<_Tp>& a, const Rect_<_Tp>& b) -{ - Rect_<_Tp> c = a; - return c &= b; -} - -template -static inline Rect_<_Tp> operator|(const Rect_<_Tp>& a, const Rect_<_Tp>& b) -{ - Rect_<_Tp> c = a; - return c |= b; -} - -typedef Rect_ Rect; -typedef Rect_ Rect2f; - -#define CV_8UC1 1 -#define CV_8UC3 3 -#define CV_8UC4 4 -#define CV_32FC1 4 - -struct NCNN_EXPORT Mat -{ - Mat() - : data(0), refcount(0), rows(0), cols(0), c(0) - { - } - - Mat(int _rows, int _cols, int flags) - : data(0), refcount(0) - { - create(_rows, _cols, flags); - } - - // copy - Mat(const Mat& m) - : data(m.data), refcount(m.refcount) - { - if (refcount) - 
NCNN_XADD(refcount, 1); - - rows = m.rows; - cols = m.cols; - c = m.c; - } - - Mat(int _rows, int _cols, int flags, void* _data) - : data((unsigned char*)_data), refcount(0) - { - rows = _rows; - cols = _cols; - c = flags; - } - - ~Mat() - { - release(); - } - - // assign - Mat& operator=(const Mat& m) - { - if (this == &m) - return *this; - - if (m.refcount) - NCNN_XADD(m.refcount, 1); - - release(); - - data = m.data; - refcount = m.refcount; - - rows = m.rows; - cols = m.cols; - c = m.c; - - return *this; - } - - Mat& operator=(const Scalar& s) - { - if (total() > 0) - { - uchar* p = data; - for (int i = 0; i < cols * rows; i++) - { - for (int j = 0; j < c; j++) - { - *p++ = s[j]; - } - } - } - - return *this; - } - - void create(int _rows, int _cols, int flags) - { - release(); - - rows = _rows; - cols = _cols; - c = flags; - - if (total() > 0) - { - // refcount address must be aligned, so we expand totalsize here - size_t totalsize = (total() + 3) >> 2 << 2; - data = (uchar*)ncnn::fastMalloc(totalsize + (int)sizeof(*refcount)); - refcount = (int*)(((uchar*)data) + totalsize); - *refcount = 1; - } - } - - void release() - { - if (refcount && NCNN_XADD(refcount, -1) == 1) - ncnn::fastFree(data); - - data = 0; - - rows = 0; - cols = 0; - c = 0; - - refcount = 0; - } - - Mat clone() const - { - if (empty()) - return Mat(); - - Mat m(rows, cols, c); - - if (total() > 0) - { - memcpy(m.data, data, total()); - } - - return m; - } - - bool empty() const - { - return data == 0 || total() == 0; - } - - int channels() const - { - return c; - } - - int type() const - { - return c; - } - - size_t total() const - { - return cols * rows * c; - } - - const uchar* ptr(int y) const - { - return data + y * cols * c; - } - - uchar* ptr(int y) - { - return data + y * cols * c; - } - - template - const _Tp* ptr(int y) const - { - return (const _Tp*)data + y * cols * c; - } - - template - _Tp* ptr(int y) - { - return (_Tp*)data + y * cols * c; - } - - // roi - Mat operator()(const 
Rect& roi) const - { - if (empty()) - return Mat(); - - Mat m(roi.height, roi.width, c); - - int sy = roi.y; - for (int y = 0; y < roi.height; y++) - { - const uchar* sptr = ptr(sy) + roi.x * c; - uchar* dptr = m.ptr(y); - memcpy(dptr, sptr, roi.width * c); - sy++; - } - - return m; - } - - uchar* data; - - // pointer to the reference counter; - // when points to user-allocated data, the pointer is NULL - int* refcount; - - int rows; - int cols; - - int c; -}; - -enum ImreadModes -{ - IMREAD_UNCHANGED = -1, - IMREAD_GRAYSCALE = 0, - IMREAD_COLOR = 1 -}; - -NCNN_EXPORT Mat imread(const std::string& path, int flags = IMREAD_COLOR); - -enum ImwriteFlags -{ - IMWRITE_JPEG_QUALITY = 1 -}; - -NCNN_EXPORT bool imwrite(const std::string& path, const Mat& m, const std::vector& params = std::vector()); - -NCNN_EXPORT void imshow(const std::string& name, const Mat& m); - -NCNN_EXPORT int waitKey(int delay = 0); - -#if NCNN_PIXEL -NCNN_EXPORT void resize(const Mat& src, Mat& dst, const Size& size, float sw = 0.f, float sh = 0.f, int flags = 0); -#endif // NCNN_PIXEL - -#if NCNN_PIXEL_DRAWING - -enum -{ - FILLED = -1 -}; - -NCNN_EXPORT void rectangle(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness = 1); - -NCNN_EXPORT void rectangle(Mat& img, Rect rec, const Scalar& color, int thickness = 1); - -NCNN_EXPORT void circle(Mat& img, Point center, int radius, const Scalar& color, int thickness = 1); - -NCNN_EXPORT void line(Mat& img, Point p0, Point p1, const Scalar& color, int thickness = 1); - -enum -{ - FONT_HERSHEY_SIMPLEX = 0 -}; - -NCNN_EXPORT void putText(Mat& img, const std::string& text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1); - -NCNN_EXPORT Size getTextSize(const std::string& text, int fontFace, double fontScale, int thickness, int* baseLine); - -#endif // NCNN_PIXEL_DRAWING - -} // namespace cv - -#if defined(_MSC_VER) || defined(__GNUC__) -#pragma pop_macro("min") -#pragma pop_macro("max") -#endif - -#endif // 
NCNN_SIMPLEOCV - -#endif // NCNN_SIMPLEOCV_H diff --git a/ncnn/simpleomp.h b/ncnn/simpleomp.h deleted file mode 100644 index 13e24529..00000000 --- a/ncnn/simpleomp.h +++ /dev/null @@ -1,53 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_SIMPLEOMP_H -#define NCNN_SIMPLEOMP_H - -#include "platform.h" - -#if NCNN_SIMPLEOMP - -#include - -// This minimal openmp runtime implementation only supports the llvm openmp abi -// and only supports #pragma omp parallel for num_threads(X) - -#ifdef __cplusplus -extern "C" { -#endif - -NCNN_EXPORT int omp_get_max_threads(); - -NCNN_EXPORT void omp_set_num_threads(int num_threads); - -NCNN_EXPORT int omp_get_dynamic(); - -NCNN_EXPORT void omp_set_dynamic(int dynamic); - -NCNN_EXPORT int omp_get_num_threads(); - -NCNN_EXPORT int omp_get_thread_num(); - -NCNN_EXPORT int kmp_get_blocktime(); - -NCNN_EXPORT void kmp_set_blocktime(int blocktime); - -#ifdef __cplusplus -} -#endif - -#endif // NCNN_SIMPLEOMP - -#endif // NCNN_SIMPLEOMP_H diff --git a/ncnn/simplestl.h b/ncnn/simplestl.h deleted file mode 100644 index b8454c40..00000000 --- a/ncnn/simplestl.h +++ /dev/null @@ -1,565 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2020 THL A29 Limited, a Tencent company. 
All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_SIMPLESTL_H -#define NCNN_SIMPLESTL_H - -#include -#include -#include - -#if !NCNN_SIMPLESTL - -#include - -#else - -// allocation functions -NCNN_EXPORT void* operator new(size_t size); -NCNN_EXPORT void* operator new[](size_t size); -// placement allocation functions -NCNN_EXPORT void* operator new(size_t size, void* ptr); -NCNN_EXPORT void* operator new[](size_t size, void* ptr); -// deallocation functions -NCNN_EXPORT void operator delete(void* ptr); -NCNN_EXPORT void operator delete[](void* ptr); -// deallocation functions since c++14 -#if __cplusplus >= 201402L -NCNN_EXPORT void operator delete(void* ptr, size_t sz); -NCNN_EXPORT void operator delete[](void* ptr, size_t sz); -#endif -// placement deallocation functions -NCNN_EXPORT void operator delete(void* ptr, void* voidptr2); -NCNN_EXPORT void operator delete[](void* ptr, void* voidptr2); - -#endif - -// minimal stl data structure implementation -namespace std { - -template -const T& max(const T& a, const T& b) -{ - return (a < b) ? b : a; -} - -template -const T& min(const T& a, const T& b) -{ - return (a > b) ? 
b : a; -} - -template -void swap(T& a, T& b) -{ - T temp(a); - a = b; - b = temp; -} - -template -struct pair -{ - pair() - : first(), second() - { - } - pair(const T1& t1, const T2& t2) - : first(t1), second(t2) - { - } - - T1 first; - T2 second; -}; - -template -bool operator==(const pair& x, const pair& y) -{ - return (x.first == y.first && x.second == y.second); -} -template -bool operator<(const pair& x, const pair& y) -{ - return x.first < y.first || (!(y.first < x.first) && x.second < y.second); -} -template -bool operator!=(const pair& x, const pair& y) -{ - return !(x == y); -} -template -bool operator>(const pair& x, const pair& y) -{ - return y < x; -} -template -bool operator<=(const pair& x, const pair& y) -{ - return !(y < x); -} -template -bool operator>=(const pair& x, const pair& y) -{ - return !(x < y); -} - -template -pair make_pair(const T1& t1, const T2& t2) -{ - return pair(t1, t2); -} - -template -struct node -{ - node* prev_; - node* next_; - T data_; - - node() - : prev_(0), next_(0), data_() - { - } - node(const T& t) - : prev_(0), next_(0), data_(t) - { - } -}; - -template -struct iter_list -{ - iter_list() - : curr_(0) - { - } - iter_list(node* n) - : curr_(n) - { - } - iter_list(const iter_list& i) - : curr_(i.curr_) - { - } - ~iter_list() - { - } - - iter_list& operator=(const iter_list& i) - { - curr_ = i.curr_; - return *this; - } - - T& operator*() - { - return curr_->data_; - } - T* operator->() - { - return &(curr_->data_); - } - - bool operator==(const iter_list& i) - { - return curr_ == i.curr_; - } - bool operator!=(const iter_list& i) - { - return curr_ != i.curr_; - } - - iter_list& operator++() - { - curr_ = curr_->next_; - return *this; - } - iter_list& operator--() - { - curr_ = curr_->prev_; - return *this; - } - - node* curr_; -}; - -template -struct list -{ - typedef iter_list iterator; - - list() - { - head_ = new node(); - tail_ = head_; - count_ = 0; - } - ~list() - { - clear(); - delete head_; - } - list(const list& 
l) - { - head_ = new node(); - tail_ = head_; - count_ = 0; - - for (iter_list i = l.begin(); i != l.end(); ++i) - { - push_back(*i); - } - } - - list& operator=(const list& l) - { - if (this == &l) - { - return *this; - } - clear(); - - for (iter_list i = l.begin(); i != l.end(); ++i) - { - push_back(*i); - } - return *this; - } - - void clear() - { - while (count_ > 0) - { - pop_front(); - } - } - - void pop_front() - { - if (count_ > 0) - { - head_ = head_->next_; - delete head_->prev_; - head_->prev_ = 0; - --count_; - } - } - - size_t size() const - { - return count_; - } - iter_list begin() const - { - return iter_list(head_); - } - iter_list end() const - { - return iter_list(tail_); - } - bool empty() const - { - return count_ == 0; - } - - void push_back(const T& t) - { - if (count_ == 0) - { - head_ = new node(t); - head_->prev_ = 0; - head_->next_ = tail_; - tail_->prev_ = head_; - count_ = 1; - } - else - { - node* temp = new node(t); - temp->prev_ = tail_->prev_; - temp->next_ = tail_; - tail_->prev_->next_ = temp; - tail_->prev_ = temp; - ++count_; - } - } - - iter_list erase(iter_list pos) - { - if (pos != end()) - { - node* temp = pos.curr_; - if (temp == head_) - { - ++pos; - temp->next_->prev_ = 0; - head_ = temp->next_; - } - else - { - --pos; - temp->next_->prev_ = temp->prev_; - temp->prev_->next_ = temp->next_; - ++pos; - } - delete temp; - --count_; - } - return pos; - } - -protected: - node* head_; - node* tail_; - size_t count_; -}; - -template -struct greater -{ - bool operator()(const T& x, const T& y) const - { - return (x > y); - } -}; - -template -struct less -{ - bool operator()(const T& x, const T& y) const - { - return (x < y); - } -}; - -template -void partial_sort(RandomAccessIter first, RandomAccessIter middle, RandomAccessIter last, Compare comp) -{ - // [TODO] heap sort should be used here, but we simply use bubble sort now - for (RandomAccessIter i = first; i < middle; ++i) - { - // bubble sort - for (RandomAccessIter j = last 
- 1; j > first; --j) - { - if (comp(*j, *(j - 1))) - { - swap(*j, *(j - 1)); - } - } - } -} - -template -struct vector -{ - vector() - : data_(0), size_(0), capacity_(0) - { - } - vector(const size_t new_size, const T& value = T()) - : data_(0), size_(0), capacity_(0) - { - resize(new_size, value); - } - ~vector() - { - clear(); - } - vector(const vector& v) - : data_(0), size_(0), capacity_(0) - { - resize(v.size()); - for (size_t i = 0; i < size_; i++) - { - data_[i] = v.data_[i]; - } - } - - vector& operator=(const vector& v) - { - if (this == &v) - { - return *this; - } - resize(0); - resize(v.size()); - for (size_t i = 0; i < size_; i++) - { - data_[i] = v.data_[i]; - } - return *this; - } - - void resize(const size_t new_size, const T& value = T()) - { - try_alloc(new_size); - if (new_size > size_) - { - for (size_t i = size_; i < new_size; i++) - { - new (&data_[i]) T(value); - } - } - else if (new_size < size_) - { - for (size_t i = new_size; i < size_; i++) - { - data_[i].~T(); - } - } - size_ = new_size; - } - - void clear() - { - for (size_t i = 0; i < size_; i++) - { - data_[i].~T(); - } - delete[](char*) data_; - data_ = 0; - size_ = 0; - capacity_ = 0; - } - - T* data() const - { - return data_; - } - size_t size() const - { - return size_; - } - T& operator[](size_t i) const - { - return data_[i]; - } - T* begin() const - { - return &data_[0]; - } - T* end() const - { - return &data_[size_]; - } - bool empty() const - { - return size_ == 0; - } - - void push_back(const T& t) - { - try_alloc(size_ + 1); - new (&data_[size_]) T(t); - size_++; - } - - void insert(T* pos, T* b, T* e) - { - vector* v = 0; - if (b >= begin() && b < end()) - { - //the same vector - v = new vector(*this); - b = v->begin() + (b - begin()); - e = v->begin() + (e - begin()); - } - size_t diff = pos - begin(); - try_alloc(size_ + (e - b)); - pos = begin() + diff; - memmove(pos + (e - b), pos, (end() - pos) * sizeof(T)); - size_t len = e - b; - size_ += len; - for (size_t i = 0; 
i < len; i++) - { - *pos = *b; - pos++; - b++; - } - delete v; - } - - T* erase(T* pos) - { - pos->~T(); - memmove(pos, pos + 1, (end() - pos - 1) * sizeof(T)); - size_--; - return pos; - } - -protected: - T* data_; - size_t size_; - size_t capacity_; - void try_alloc(size_t new_size) - { - if (new_size * 3 / 2 > capacity_ / 2) - { - capacity_ = new_size * 2; - T* new_data = (T*)new char[capacity_ * sizeof(T)]; - memset(new_data, 0, capacity_ * sizeof(T)); - if (data_) - { - memmove(new_data, data_, sizeof(T) * size_); - delete[](char*) data_; - } - data_ = new_data; - } - } -}; - -struct NCNN_EXPORT string : public vector -{ - string() - { - } - string(const char* str) - { - size_t len = strlen(str); - resize(len); - memcpy(data_, str, len); - } - const char* c_str() const - { - return (const char*)data_; - } - bool operator==(const string& str2) const - { - return strcmp(data_, str2.data_) == 0; - } - bool operator==(const char* str2) const - { - return strcmp(data_, str2) == 0; - } - bool operator!=(const char* str2) const - { - return strcmp(data_, str2) != 0; - } - string& operator+=(const string& str1) - { - insert(end(), str1.begin(), str1.end()); - return *this; - } -}; - -inline string operator+(const string& str1, const string& str2) -{ - string str(str1); - str.insert(str.end(), str2.begin(), str2.end()); - return str; -} - -} // namespace std - -#endif // NCNN_SIMPLESTL_H diff --git a/ncnn/vulkan_header_fix.h b/ncnn/vulkan_header_fix.h deleted file mode 100644 index 60f935fa..00000000 --- a/ncnn/vulkan_header_fix.h +++ /dev/null @@ -1,189 +0,0 @@ -// Tencent is pleased to support the open source community by making ncnn available. -// -// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except -// in compliance with the License. 
You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the -// specific language governing permissions and limitations under the License. - -#ifndef NCNN_VULKAN_HEADER_FIX_H -#define NCNN_VULKAN_HEADER_FIX_H - -#include - -// This header contains new structure and function declearation to fix build with old vulkan sdk - -#if VK_HEADER_VERSION < 70 -#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES (VkStructureType)1000094000 -typedef enum VkSubgroupFeatureFlagBits -{ - VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001, - VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002, - VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004, - VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008, - VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010, - VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020, - VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040, - VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080, - VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100, - VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VkSubgroupFeatureFlagBits; -typedef VkFlags VkSubgroupFeatureFlags; -typedef struct VkPhysicalDeviceSubgroupProperties -{ - VkStructureType sType; - void* pNext; - uint32_t subgroupSize; - VkShaderStageFlags supportedStages; - VkSubgroupFeatureFlags supportedOperations; - VkBool32 quadOperationsInAllStages; -} VkPhysicalDeviceSubgroupProperties; -#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES (VkStructureType)1000168000 -#define VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT (VkStructureType)1000168001 -typedef struct VkPhysicalDeviceMaintenance3Properties -{ - VkStructureType sType; - void* pNext; - uint32_t maxPerSetDescriptors; - VkDeviceSize maxMemoryAllocationSize; -} 
VkPhysicalDeviceMaintenance3Properties; -typedef struct VkDescriptorSetLayoutSupport -{ - VkStructureType sType; - void* pNext; - VkBool32 supported; -} VkDescriptorSetLayoutSupport; -typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; -typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR; -typedef void(VKAPI_PTR* PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); -#endif // VK_HEADER_VERSION < 70 - -#if VK_HEADER_VERSION < 80 -#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR (VkStructureType)1000177000 -typedef struct VkPhysicalDevice8BitStorageFeaturesKHR -{ - VkStructureType sType; - void* pNext; - VkBool32 storageBuffer8BitAccess; - VkBool32 uniformAndStorageBuffer8BitAccess; - VkBool32 storagePushConstant8; -} VkPhysicalDevice8BitStorageFeaturesKHR; -#define VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR (VkStructureType)1000109000 -#define VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR (VkStructureType)1000109001 -#define VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR (VkStructureType)1000109002 -#define VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR (VkStructureType)1000109003 -#define VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR (VkStructureType)1000109004 -#define VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR (VkStructureType)1000109005 -#define VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR (VkStructureType)1000109006 -typedef struct VkAttachmentDescription2KHR -{ - VkStructureType sType; - const void* pNext; - VkAttachmentDescriptionFlags flags; - VkFormat format; - VkSampleCountFlagBits samples; - VkAttachmentLoadOp loadOp; - VkAttachmentStoreOp storeOp; - VkAttachmentLoadOp stencilLoadOp; - VkAttachmentStoreOp stencilStoreOp; - VkImageLayout initialLayout; - VkImageLayout finalLayout; -} VkAttachmentDescription2KHR; -typedef struct VkAttachmentReference2KHR -{ - VkStructureType sType; - const void* 
pNext; - uint32_t attachment; - VkImageLayout layout; - VkImageAspectFlags aspectMask; -} VkAttachmentReference2KHR; -typedef struct VkSubpassDescription2KHR -{ - VkStructureType sType; - const void* pNext; - VkSubpassDescriptionFlags flags; - VkPipelineBindPoint pipelineBindPoint; - uint32_t viewMask; - uint32_t inputAttachmentCount; - const VkAttachmentReference2KHR* pInputAttachments; - uint32_t colorAttachmentCount; - const VkAttachmentReference2KHR* pColorAttachments; - const VkAttachmentReference2KHR* pResolveAttachments; - const VkAttachmentReference2KHR* pDepthStencilAttachment; - uint32_t preserveAttachmentCount; - const uint32_t* pPreserveAttachments; -} VkSubpassDescription2KHR; -typedef struct VkSubpassDependency2KHR -{ - VkStructureType sType; - const void* pNext; - uint32_t srcSubpass; - uint32_t dstSubpass; - VkPipelineStageFlags srcStageMask; - VkPipelineStageFlags dstStageMask; - VkAccessFlags srcAccessMask; - VkAccessFlags dstAccessMask; - VkDependencyFlags dependencyFlags; - int32_t viewOffset; -} VkSubpassDependency2KHR; -typedef struct VkRenderPassCreateInfo2KHR -{ - VkStructureType sType; - const void* pNext; - VkRenderPassCreateFlags flags; - uint32_t attachmentCount; - const VkAttachmentDescription2KHR* pAttachments; - uint32_t subpassCount; - const VkSubpassDescription2KHR* pSubpasses; - uint32_t dependencyCount; - const VkSubpassDependency2KHR* pDependencies; - uint32_t correlatedViewMaskCount; - const uint32_t* pCorrelatedViewMasks; -} VkRenderPassCreateInfo2KHR; -typedef struct VkSubpassBeginInfoKHR -{ - VkStructureType sType; - const void* pNext; - VkSubpassContents contents; -} VkSubpassBeginInfoKHR; - -typedef struct VkSubpassEndInfoKHR -{ - VkStructureType sType; - const void* pNext; -} VkSubpassEndInfoKHR; -typedef VkResult(VKAPI_PTR* PFN_vkCreateRenderPass2KHR)(VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); -typedef void(VKAPI_PTR* 
PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfoKHR* pSubpassBeginInfo); -typedef void(VKAPI_PTR* PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo, const VkSubpassEndInfoKHR* pSubpassEndInfo); -typedef void(VKAPI_PTR* PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo); -#endif // VK_HEADER_VERSION < 80 - -#if VK_HEADER_VERSION < 95 -#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR (VkStructureType)1000082000 -typedef struct VkPhysicalDeviceFloat16Int8FeaturesKHR -{ - VkStructureType sType; - void* pNext; - VkBool32 shaderFloat16; - VkBool32 shaderInt8; -} VkPhysicalDeviceFloat16Int8FeaturesKHR; -#endif // VK_HEADER_VERSION < 95 - -#if VK_HEADER_VERSION < 97 -#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT (VkStructureType)1000237000 -typedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT -{ - VkStructureType sType; - void* pNext; - VkDeviceSize heapBudget[VK_MAX_MEMORY_HEAPS]; - VkDeviceSize heapUsage[VK_MAX_MEMORY_HEAPS]; -} VkPhysicalDeviceMemoryBudgetPropertiesEXT; -#endif // VK_HEADER_VERSION < 97 - -#endif // NCNN_VULKAN_HEADER_FIX_H diff --git a/onnxruntime/core/common/code_location.h b/onnxruntime/core/common/code_location.h deleted file mode 100644 index 2fdb2d3a..00000000 --- a/onnxruntime/core/common/code_location.h +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include -#include -#include - -namespace onnxruntime { -/** - CodeLocation captures information on where in the source code a message came from. 
-*/ -struct CodeLocation { - /** - @param file_path Usually the value of __FILE__ - @param line Usually the value of __LINE__ - @param func Usually the value of __PRETTY_FUNCTION__ or __FUNCTION__ - */ - CodeLocation(const char* file_path, const int line, const char* func) - : file_and_path{file_path}, line_num{line}, function{func} { - } - - /** - @param file_path Usually the value of __FILE__ - @param line Usually the value of __LINE__ - @param func Usually the value of __PRETTY_FUNCTION__ or __FUNCTION__ - @param stacktrace Stacktrace from source of message. - */ - CodeLocation(const char* file_path, const int line, const char* func, const std::vector& stacktrace) - : file_and_path{file_path}, line_num{line}, function{func}, stacktrace(stacktrace) { - } - - std::string FileNoPath() const { - // assuming we always have work to do, so not trying to avoid creating a new string if - // no path was removed. - return file_and_path.substr(file_and_path.find_last_of("/\\") + 1); - } - - enum Format { - kFilename, - kFilenameAndPath - }; - - std::string ToString(Format format = Format::kFilename) const { - std::ostringstream out; - out << (format == Format::kFilename ? FileNoPath() : file_and_path) << ":" << line_num << " " << function; - return out.str(); - } - - const std::string file_and_path; - const int line_num; - const std::string function; - const std::vector stacktrace; -}; - -} // namespace onnxruntime diff --git a/onnxruntime/core/common/common.h b/onnxruntime/core/common/common.h deleted file mode 100644 index 6394c8f3..00000000 --- a/onnxruntime/core/common/common.h +++ /dev/null @@ -1,286 +0,0 @@ -/** - * Copyright (c) 2016-present, Facebook, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Portions Copyright (c) Microsoft Corporation - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "core/common/code_location.h" -#include "core/common/exceptions.h" -#include "core/common/make_string.h" -#include "core/common/make_unique.h" -#include "core/common/status.h" - -#ifdef USE_MIMALLOC_ARENA_ALLOCATOR -#include -#endif - -#ifdef ORT_NO_EXCEPTIONS -#include -#endif - -namespace onnxruntime { - -using TimePoint = std::chrono::high_resolution_clock::time_point; - -// Using statements for common classes that we refer to in ONNXRuntime very often. 
-// TODO(Task:137) Remove 'using' statements from header files -using common::Status; - -#ifdef _WIN32 -#define ORT_UNUSED_PARAMETER(x) (x) -#else -#define ORT_UNUSED_PARAMETER(x) (void)(x) -#endif - -#ifndef ORT_HAVE_ATTRIBUTE -#ifdef __has_attribute -#define ORT_HAVE_ATTRIBUTE(x) __has_attribute(x) -#else -#define ORT_HAVE_ATTRIBUTE(x) 0 -#endif -#endif - -// ORT_ATTRIBUTE_UNUSED -// -// Prevents the compiler from complaining about or optimizing away variables -// that appear unused on Linux -#if ORT_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) -#undef ORT_ATTRIBUTE_UNUSED -#define ORT_ATTRIBUTE_UNUSED __attribute__((__unused__)) -#else -#define ORT_ATTRIBUTE_UNUSED -#endif - -// macro to explicitly ignore the return value from a function call so Code Analysis doesn't complain -#define ORT_IGNORE_RETURN_VALUE(fn) \ - static_cast(fn) - -std::vector GetStackTrace(); -// these is a helper function that gets defined by platform/Telemetry -void LogRuntimeError(uint32_t session_id, const common::Status& status, const char* file, - const char* function, uint32_t line); - -// __PRETTY_FUNCTION__ isn't a macro on gcc, so use a check for _MSC_VER -// so we only define it as one for MSVC -#if (_MSC_VER && !defined(__PRETTY_FUNCTION__)) -#define __PRETTY_FUNCTION__ __FUNCTION__ -#endif - -// Capture where a message is coming from. Use __FUNCTION__ rather than the much longer __PRETTY_FUNCTION__ -#define ORT_WHERE \ - ::onnxruntime::CodeLocation(__FILE__, __LINE__, __FUNCTION__) - -#define ORT_WHERE_WITH_STACK \ - ::onnxruntime::CodeLocation(__FILE__, __LINE__, __PRETTY_FUNCTION__, ::onnxruntime::GetStackTrace()) - -#ifdef ORT_NO_EXCEPTIONS - -#define ORT_TRY if (true) -#define ORT_CATCH(x) else if (false) -#define ORT_RETHROW - -// In order to ignore the catch statement when a specific exception (not ... ) is caught and referred -// in the body of the catch statements, it is necessary to wrap the body of the catch statement into -// a lambda function. 
otherwise the exception referred will be undefined and cause build break -#define ORT_HANDLE_EXCEPTION(func) - -// TODO, consider changing the output of the error message from std::cerr to logging when the -// exceptions are disabled, since using std::cerr might increase binary size, and std::cerr output -// might not be easily accesible on some systems such as mobile - -// Throw an exception with optional message. -// NOTE: The arguments get streamed into a string via ostringstream::operator<< -// DO NOT use a printf format string, as that will not work as you expect. -#define ORT_THROW(...) \ - do { \ - std::cerr << ::onnxruntime::OnnxRuntimeException(ORT_WHERE_WITH_STACK, \ - ::onnxruntime::MakeString(__VA_ARGS__)) \ - .what() \ - << std::endl; \ - abort(); \ - } while (false) - -// Just in order to mark things as not implemented. Do not use in final code. -#define ORT_NOT_IMPLEMENTED(...) \ - do { \ - std::cerr << ::onnxruntime::NotImplementedException(::onnxruntime::MakeString(__VA_ARGS__)) \ - .what() \ - << std::endl; \ - abort(); \ - } while (false) - -// Check condition. -// NOTE: The arguments get streamed into a string via ostringstream::operator<< -// DO NOT use a printf format string, as that will not work as you expect. -#define ORT_ENFORCE(condition, ...) \ - do { \ - if (!(condition)) { \ - std::cerr << ::onnxruntime::OnnxRuntimeException(ORT_WHERE_WITH_STACK, #condition, \ - ::onnxruntime::MakeString(__VA_ARGS__)) \ - .what() \ - << std::endl; \ - abort(); \ - } \ - } while (false) - -#define ORT_THROW_EX(ex, ...) \ - do { \ - std::cerr << #ex << "(" << ::onnxruntime::MakeString(__VA_ARGS__) << ")" << std::endl; \ - abort(); \ - } while (false) - -#else - -#define ORT_TRY try -#define ORT_CATCH(x) catch (x) -#define ORT_RETHROW throw; - -#define ORT_HANDLE_EXCEPTION(func) func() - -// Throw an exception with optional message. 
-// NOTE: The arguments get streamed into a string via ostringstream::operator<< -// DO NOT use a printf format string, as that will not work as you expect. -#define ORT_THROW(...) \ - throw ::onnxruntime::OnnxRuntimeException(ORT_WHERE_WITH_STACK, ::onnxruntime::MakeString(__VA_ARGS__)) - -// Just in order to mark things as not implemented. Do not use in final code. -#define ORT_NOT_IMPLEMENTED(...) \ - throw ::onnxruntime::NotImplementedException(::onnxruntime::MakeString(__VA_ARGS__)) - -// Check condition. -// NOTE: The arguments get streamed into a string via ostringstream::operator<< -// DO NOT use a printf format string, as that will not work as you expect. -#define ORT_ENFORCE(condition, ...) \ - if (!(condition)) \ - throw ::onnxruntime::OnnxRuntimeException(ORT_WHERE_WITH_STACK, #condition, \ - ::onnxruntime::MakeString(__VA_ARGS__)) - -#define ORT_THROW_EX(ex, ...) \ - throw ex(__VA_ARGS__) - -#endif - -#define ORT_MAKE_STATUS(category, code, ...) \ - ::onnxruntime::common::Status(::onnxruntime::common::category, \ - ::onnxruntime::common::code, \ - ::onnxruntime::MakeString(__VA_ARGS__)) - -// Check condition. if met, return status. -#define ORT_RETURN_IF(condition, ...) \ - if (condition) { \ - return ::onnxruntime::common::Status(::onnxruntime::common::ONNXRUNTIME, \ - ::onnxruntime::common::FAIL, \ - ::onnxruntime::MakeString(ORT_WHERE.ToString(), " ", __VA_ARGS__)); \ - } - -// Check condition. if not met, return status. -#define ORT_RETURN_IF_NOT(condition, ...) \ - ORT_RETURN_IF(!(condition), __VA_ARGS__) - -// Macros to disable the copy and/or move ctor and assignment methods -// These are usually placed in the private: declarations for a class. 
- -#define ORT_DISALLOW_COPY(TypeName) TypeName(const TypeName&) = delete - -#define ORT_DISALLOW_ASSIGNMENT(TypeName) TypeName& operator=(const TypeName&) = delete - -#define ORT_DISALLOW_COPY_AND_ASSIGNMENT(TypeName) \ - ORT_DISALLOW_COPY(TypeName); \ - ORT_DISALLOW_ASSIGNMENT(TypeName) - -#define ORT_DISALLOW_MOVE(TypeName) \ - TypeName(TypeName&&) = delete; \ - TypeName& operator=(TypeName&&) = delete - -#define ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TypeName) \ - ORT_DISALLOW_COPY_AND_ASSIGNMENT(TypeName); \ - ORT_DISALLOW_MOVE(TypeName) - -#define ORT_RETURN_IF_ERROR_SESSIONID(expr, session_id) \ - do { \ - auto _status = (expr); \ - if ((!_status.IsOK())) { \ - ::onnxruntime::LogRuntimeError(session_id, _status, __FILE__, __FUNCTION__, __LINE__); \ - return _status; \ - } \ - } while (0) - -#define ORT_RETURN_IF_ERROR_SESSIONID_(expr) ORT_RETURN_IF_ERROR_SESSIONID(expr, session_id_) -#define ORT_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR_SESSIONID(expr, 0) - -#define ORT_THROW_IF_ERROR(expr) \ - do { \ - auto _status = (expr); \ - if ((!_status.IsOK())) { \ - ::onnxruntime::LogRuntimeError(0, _status, __FILE__, __FUNCTION__, __LINE__); \ - ORT_THROW(_status); \ - } \ - } while (0) - -// use this macro when cannot early return -#define ORT_CHECK_AND_SET_RETVAL(expr) \ - do { \ - if (retval.IsOK()) { \ - retval = (expr); \ - } \ - } while (0) - -// C++ Core Guideline check suppression. 
-#if defined(_MSC_VER) && !defined(__NVCC__) -#define GSL_SUPPRESS(tag) [[gsl::suppress(tag)]] -#else -#define GSL_SUPPRESS(tag) -#endif - -inline long long TimeDiffMicroSeconds(TimePoint start_time) { - auto end_time = std::chrono::high_resolution_clock::now(); - return std::chrono::duration_cast(end_time - start_time).count(); -} - -inline long long TimeDiffMicroSeconds(TimePoint start_time, TimePoint end_time) { - return std::chrono::duration_cast(end_time - start_time).count(); -} - -struct null_type {}; -inline std::string ToMBString(const std::string& s) { return s; } -#ifdef _WIN32 -/** - * Convert a wide character string into a narrow one, with local ANSI code page(like CP936) - * DO NOT assume the result string is encoded in UTF-8 - */ -std::string ToMBString(const std::wstring& s); - -std::wstring ToWideString(const std::string& s); -inline std::wstring ToWideString(const std::wstring& s) { return s; } -#else -inline std::string ToWideString(const std::string& s) { return s; } -#endif - -} // namespace onnxruntime diff --git a/onnxruntime/core/common/const_pointer_container.h b/onnxruntime/core/common/const_pointer_container.h deleted file mode 100644 index 1d821ba6..00000000 --- a/onnxruntime/core/common/const_pointer_container.h +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include - -namespace onnxruntime { -/** - Container has T* entries. e.g. std::vector, and this class provides const access to those - via iterators and direct access, as the standard behavior only makes the pointer constant, - and not what is pointed too. i.e. you get a const pointer to T not a pointer to const T without this wrapper. 
- See https://stackoverflow.com/questions/8017036/understanding-const-iterator-with-pointers -*/ -template -class ConstPointerContainer { - public: - using T = typename std::remove_pointer::type; - - class ConstIterator { - public: - using const_iterator = typename Container::const_iterator; - using iterator_category = std::input_iterator_tag; - using value_type = T*; - using difference_type = std::ptrdiff_t; - using pointer = T**; - using reference = T*&; - - /** Construct iterator for container that will return const T* entries.*/ - explicit ConstIterator(const_iterator position) noexcept : current_{position}, item_{nullptr} {} - ConstIterator(const ConstIterator& other) = default; - ConstIterator& operator=(const ConstIterator& other) = default; - - bool operator==(const ConstIterator& other) const noexcept { return current_ == other.current_; } - bool operator!=(const ConstIterator& other) const noexcept { return current_ != other.current_; } - - ConstIterator& operator++() { - ++current_; - return *this; - } - - ConstIterator operator++(int) { - ConstIterator tmp{*this}; - ++(*this); - return tmp; - } - - const T*& operator*() const { - item_ = *current_; - return item_; - } - - const T** operator->() const { return &(operator*()); }; - - private: - const_iterator current_; - mutable const T* item_; - }; - - /** - Construct wrapper class that will provide const access to the pointers in a container of non-const pointers. - @param data Container with non-const pointers. e.g. 
std::vector - */ - explicit ConstPointerContainer(const Container& data) noexcept : data_(data) {} - - size_t size() const noexcept { return data_.size(); } - bool empty() const noexcept { return data_.empty(); } - - ConstIterator cbegin() const noexcept { return ConstIterator(data_.cbegin()); } - ConstIterator cend() const noexcept { return ConstIterator(data_.cend()); } - - ConstIterator begin() const noexcept { return ConstIterator(data_.cbegin()); } - ConstIterator end() const noexcept { return ConstIterator(data_.cend()); } - - const T* operator[](size_t index) const { return data_[index]; } - - const T* at(size_t index) const { - ORT_ENFORCE(index < data_.size()); - return data_[index]; - } - - private: - const Container& data_; -}; -} // namespace onnxruntime diff --git a/onnxruntime/core/common/denormal.h b/onnxruntime/core/common/denormal.h deleted file mode 100644 index baac1093..00000000 --- a/onnxruntime/core/common/denormal.h +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -namespace onnxruntime { - -// Set or unset flush-to-zero and denormal=as-zero if SSE3 instructions are supported. -// Return true if SSE3 instruction is supported, otherwise return false. -bool SetDenormalAsZero(bool on); - -#ifdef _OPENMP -// Set flush-to-zero and denormal-as-zero on OpenMP threads when on is true. -void InitializeWithDenormalAsZero(bool on); -#endif - -} // namespace onnxruntime diff --git a/onnxruntime/core/common/eigen_common_wrapper.h b/onnxruntime/core/common/eigen_common_wrapper.h deleted file mode 100644 index fa153997..00000000 --- a/onnxruntime/core/common/eigen_common_wrapper.h +++ /dev/null @@ -1,41 +0,0 @@ -//----------------------------------------------------------------------------- -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
-// -//----------------------------------------------------------------------------- -#pragma once -#include "onnxruntime_config.h" -// build/external/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h:162:71: -// error: ignoring attributes on template argument "Eigen::PacketType::type {aka __vector(4) float}" [-Werror=ignored-attributes] -#if defined(__GNUC__) -#pragma GCC diagnostic push -#if __GNUC__ >= 6 -#pragma GCC diagnostic ignored "-Wignored-attributes" -#endif -#pragma GCC diagnostic ignored "-Wunused-parameter" -#ifdef HAS_DEPRECATED_COPY -#pragma GCC diagnostic ignored "-Wdeprecated-copy" -#endif -#elif defined(_MSC_VER) -// build\windows\debug\external\eigen3\unsupported\eigen\cxx11\src/Tensor/Tensor.h(76): -// warning C4554: '&': check operator precedence for possible error; use parentheses to clarify precedence - -// unsupported\eigen\cxx11\src\Tensor\TensorUInt128.h(150,0): Warning C4245: 'initializing': conversion from '__int64' -// to 'uint64_t', signed/unsigned mismatch -#pragma warning(push) -#pragma warning(disable : 4554) -#pragma warning(disable : 4245) -#pragma warning(disable : 4127) -#pragma warning(disable : 4805) -#pragma warning(disable : 6313) -#pragma warning(disable : 6294) -#endif - -#include "unsupported/Eigen/CXX11/Tensor" - -#if defined(__GNUC__) -#pragma GCC diagnostic pop -#elif defined(_MSC_VER) -#pragma warning(pop) -#endif diff --git a/onnxruntime/core/common/exceptions.h b/onnxruntime/core/common/exceptions.h deleted file mode 100644 index cbebc88c..00000000 --- a/onnxruntime/core/common/exceptions.h +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "core/common/common.h" -#include "core/common/code_location.h" - -namespace onnxruntime { - -class NotImplementedException : public std::logic_error { - public: - explicit NotImplementedException(const char* _Message = "Function not yet implemented") noexcept : std::logic_error(_Message){}; - explicit NotImplementedException(const std::string& _Message = "Function not yet implemented") noexcept : std::logic_error(_Message){}; -}; - -class TypeMismatchException : public std::logic_error { - public: - TypeMismatchException() noexcept : logic_error("Type mismatch"){}; -}; - -class OnnxRuntimeException : public std::exception { - public: - OnnxRuntimeException(const CodeLocation& location, const std::string& msg) noexcept - : OnnxRuntimeException(location, nullptr, msg) { - } - - /** - Create a new exception that captures the location it was thrown from. - @param location Location in the source code the exception is being thrown from - @param failed_condition Optional string containing the condition that failed. - e.g. "tensor.Size() == input.Size()". May be nullptr. - @param msg Message containing additional information about the exception cause. 
- */ - OnnxRuntimeException(const CodeLocation& location, const char* failed_condition, const std::string& msg) - : location_{location} { - std::ostringstream ss; - - ss << location.ToString(CodeLocation::kFilenameAndPath); // output full path in case just the filename is ambiguous - if (failed_condition != nullptr) { - ss << " " << failed_condition << " was false."; - } - - ss << " " << msg << "\n"; - if (!location.stacktrace.empty()) { - ss << "Stacktrace:\n"; - // skip the first entry in the stacktrace as we have that information from location.ToString() - std::copy(++location.stacktrace.begin(), location.stacktrace.end(), std::ostream_iterator(ss, "\n")); - } - - what_ = ss.str(); - } - - const char* what() const noexcept override { - return what_.c_str(); - } - - private: - const CodeLocation location_; - const std::vector stacktrace_; - std::string what_; -}; - -} // namespace onnxruntime \ No newline at end of file diff --git a/onnxruntime/core/common/logging/capture.h b/onnxruntime/core/common/logging/capture.h deleted file mode 100644 index 4f71bb33..00000000 --- a/onnxruntime/core/common/logging/capture.h +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include -#include -#include "core/common/common.h" -#include "core/common/code_location.h" -#include "core/common/logging/severity.h" - -namespace onnxruntime { -namespace logging { - -class Logger; -enum class DataType; - -/** - Class to capture the details of a log message. -*/ -class Capture { - public: - /** - Initializes a new instance of the Capture class. - @param logger The logger. - @param severity The severity. - @param category The category. - @param dataType Type of the data. - @param location The file location the log message is coming from. 
- */ - Capture(const Logger& logger, logging::Severity severity, const char* category, - logging::DataType dataType, const CodeLocation& location) - : logger_{&logger}, severity_{severity}, category_{category}, data_type_{dataType}, location_{location} { - } - - /** - The stream that can capture the message via operator<<. - @returns Output stream. - */ - std::ostream& Stream() noexcept { - return stream_; - } - -#ifdef _MSC_VER -// add SAL annotation for printf format string. requires Code Analysis to run to validate usage. -#define msvc_printf_check _Printf_format_string_ -#define __attribute__(x) // Disable for MSVC. Supported by GCC and CLang. -#else -#define msvc_printf_check -#endif - - /** - Captures a printf style log message. - @param name="format">The printf format. - @param name="">Arguments to the printf format if needed. - @remarks - A maximum of 2K of output will be captured currently. - Non-static method, so 'this' is implicit first arg, and we use format(printf(2,3) - */ - void CapturePrintf(msvc_printf_check const char* format, ...) __attribute__((format(printf, 2, 3))); - - /** - Process a printf style log message. - @param format The printf format. - @param ... Arguments to the printf format if needed. - @remarks - A maximum of 2K of output will be captured currently. - Note: As va_list is 'char *', we have to disambiguate this from CapturePrintf - so that something like "One string: %s", "the string" does not consider "the string" - to be the va_list. 
- */ - void ProcessPrintf(msvc_printf_check const char* format, va_list args); - - logging::Severity Severity() const noexcept { - return severity_; - } - - char SeverityPrefix() const noexcept { - // Carefully setup so severity_ is a valid index - GSL_SUPPRESS(bounds .2) { - return logging::SEVERITY_PREFIX[static_cast(severity_)]; - } - } - - const char* Category() const noexcept { - return category_; - } - - logging::DataType DataType() const noexcept { - return data_type_; - } - - const CodeLocation& Location() const noexcept { - return location_; - } - - std::string Message() const noexcept { - return stream_.str(); - } - - ~Capture(); - - private: - ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(Capture); - - const Logger* logger_; - const logging::Severity severity_; - const char* category_; - const logging::DataType data_type_; - const CodeLocation location_; - - std::ostringstream stream_; -}; -} // namespace logging -} // namespace onnxruntime diff --git a/onnxruntime/core/common/logging/isink.h b/onnxruntime/core/common/logging/isink.h deleted file mode 100644 index a67777d4..00000000 --- a/onnxruntime/core/common/logging/isink.h +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include - -#include "core/common/logging/logging.h" - -namespace onnxruntime { -namespace logging { -class ISink { - public: - ISink() = default; - - /** - Sends the message to the sink. - @param timestamp The timestamp. - @param logger_id The logger identifier. - @param message The captured message. - */ - void Send(const Timestamp& timestamp, const std::string& logger_id, const Capture& message) { - SendImpl(timestamp, logger_id, message); - } - - /** - Sends a Profiling Event Record to the sink. 
- @param Profiling Event Record - */ - virtual void SendProfileEvent(profiling::EventRecord&) const {}; - - virtual ~ISink() = default; - - private: - // Make Code Analysis happy by disabling all for now. Enable as needed. - ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ISink); - - virtual void SendImpl(const Timestamp& timestamp, const std::string& logger_id, const Capture& message) = 0; -}; -} // namespace logging -} // namespace onnxruntime diff --git a/onnxruntime/core/common/logging/logging.h b/onnxruntime/core/common/logging/logging.h deleted file mode 100644 index 96c07f09..00000000 --- a/onnxruntime/core/common/logging/logging.h +++ /dev/null @@ -1,347 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -#include "core/common/common.h" -#include "core/common/logging/capture.h" -#include "core/common/logging/severity.h" - -#include "core/common/logging/macros.h" - -/* - - Logging overview and expected usage: - - At program startup: - * Create one or more ISink instances. If multiple, combine using composite_sink. - * Create a LoggingManager instance with the sink/s with is_default_instance set to true - * Only one instance should be created in this way, and it should remain valid for - until the program no longer needs to produce log output. - - You can either use the static default Logger which LoggingManager will create when constructed - via LoggingManager::DefaultLogger(), or separate Logger instances each with different log ids - via LoggingManager::CreateLogger. - - The log id is passed to the ISink instance with the sink determining how the log id is used - in the output. 
- - LoggingManager - * creates the Logger instances used by the application - * provides a static default logger instance - * owns the log sink instance - * applies checks on severity and output of user data - - The log macros create a Capture instance to capture the information to log. - If the severity and/or user filtering settings would prevent logging, no evaluation - of the log arguments will occur, so no performance cost beyond the severity and user - filtering check. - - A sink can do further filter as needed. - -*/ - -namespace onnxruntime { -namespace profiling { - -enum EventCategory { - SESSION_EVENT = 0, - NODE_EVENT, - EVENT_CATEGORY_MAX -}; - -/* -Event descriptions for the above session events. -*/ -static constexpr const char* event_categor_names_[EVENT_CATEGORY_MAX] = { - "Session", - "Node"}; - -/* -Timing record for all events. -*/ -struct EventRecord { - EventRecord(EventCategory category, - int process_id, - int thread_id, - std::string event_name, - long long time_stamp, - long long duration, - std::unordered_map&& event_args) : cat(category), - pid(process_id), - tid(thread_id), - name(std::move(event_name)), - ts(time_stamp), - dur(duration), - args(event_args) {} - EventCategory cat; - int pid; - int tid; - std::string name; - long long ts; - long long dur; - std::unordered_map args; -}; -} // namespace profiling - -namespace logging { - -using Timestamp = std::chrono::time_point; - -#ifndef NDEBUG -ORT_ATTRIBUTE_UNUSED static bool vlog_enabled = true; // Set directly based on your needs. -#else -constexpr bool vlog_enabled = false; // no VLOG output -#endif - -enum class DataType { - SYSTEM = 0, ///< System data. - USER = 1 ///< Contains potentially sensitive user data. -}; - -// Internal log categories. -// Logging interface takes const char* so arbitrary values can also be used. 
-struct Category { - static const char* onnxruntime; ///< General output - static const char* System; ///< Log output regarding interactions with the host system - // TODO: What other high level categories are meaningful? Model? Optimizer? Execution? -}; - -class ISink; -class Logger; -class Capture; - -/// -/// The logging manager. -/// Owns the log sink and potentially provides a default Logger instance. -/// Provides filtering based on a minimum LogSeverity level, and of messages with DataType::User if enabled. -/// -class LoggingManager final { - public: - enum InstanceType { - Default, ///< Default instance of LoggingManager that should exist for the lifetime of the program - Temporal ///< Temporal instance. CreateLogger(...) should be used, however DefaultLogger() will NOT be provided via this instance. - }; - - /** - Initializes a new instance of the LoggingManager class. - @param sink The sink to write to. Use CompositeSink if you need to write to multiple places. - @param default_min_severity The default minimum severity. Messages with lower severity will be ignored unless - overridden in CreateLogger. - @param default_filter_user_data If set to true ignore messages with DataType::USER unless overridden in CreateLogger. - @param instance_type If InstanceType::Default, this is the default instance of the LoggingManager - and is expected to exist for the lifetime of the program. - It creates and owns the default logger that calls to the static DefaultLogger method return. - @param default_logger_id Logger Id to use for the default logger. nullptr/ignored if instance_type == Temporal. - @param default_max_vlog_level Default maximum level for VLOG messages to be created unless overridden in CreateLogger. - Requires a severity of kVERBOSE for VLOG messages to be logged. 
- */ - LoggingManager(std::unique_ptr sink, Severity default_min_severity, bool default_filter_user_data, - InstanceType instance_type, - const std::string* default_logger_id = nullptr, - int default_max_vlog_level = -1); - - /** - Creates a new logger instance which will use the provided logger_id and default severity and vlog levels. - @param logger_id The log identifier. - @returns A new Logger instance that the caller owns. - */ - std::unique_ptr CreateLogger(const std::string& logger_id); - - /** - Creates a new logger instance which will use the provided logger_id, severity and vlog levels. - @param logger_id The log identifier. - @param min_severity The minimum severity. Requests to create messages with lower severity will be ignored. - @param filter_user_data If set to true ignore messages with DataType::USER. - @param max_vlog_level Maximum level for VLOG messages to be created. - @returns A new Logger instance that the caller owns. - */ - std::unique_ptr CreateLogger(const std::string& logger_id, - Severity min_severity, bool filter_user_data, int max_vlog_level = -1); - - /** - Gets the default logger instance if set. Throws if no default logger is currently registered. - @remarks - Creating a LoggingManager instance with is_default_instance == true registers a default logger. - Note that the default logger is only valid until the LoggerManager that registered it is destroyed. - @returns The default logger if available. - */ - static const Logger& DefaultLogger(); - - /** - Change the minimum severity level for log messages to be output by the default logger. - @param severity The severity. - */ - static void SetDefaultLoggerSeverity(Severity severity); - - /** - Logs a FATAL level message and creates an exception that can be thrown with error information. - @param category The log category. - @param location The location the log message was generated. - @param format_str The printf format string. - @param ... The printf arguments. 
- @returns A new Logger instance that the caller owns. - */ - static std::exception LogFatalAndCreateException(const char* category, - const CodeLocation& location, - const char* format_str, ...); - - /** - Logs the message using the provided logger id. - @param logger_id The log identifier. - @param message The log message. - */ - void Log(const std::string& logger_id, const Capture& message) const; - - /** - Sends a Profiling Event Record to the sink. - @param Profiling Event Record - */ - void SendProfileEvent(profiling::EventRecord& eventRecord) const; - ~LoggingManager(); - - private: - ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(LoggingManager); - - Timestamp GetTimestamp() const noexcept; - void CreateDefaultLogger(const std::string& logger_id); - - std::unique_ptr sink_; - const Severity default_min_severity_; - const bool default_filter_user_data_; - const int default_max_vlog_level_; - bool owns_default_logger_; - - static Logger* s_default_logger_; - - struct Epochs { - const std::chrono::time_point high_res; - const std::chrono::time_point system; - const std::chrono::minutes localtime_offset_from_utc; - }; - - static const Epochs& GetEpochs() noexcept; -}; - -/** - Logger provides a per-instance log id. Everything else is passed back up to the LoggingManager -*/ -class Logger { - public: - /** - Initializes a new instance of the Logger class. - @param loggingManager The logging manager. - @param id The identifier for messages coming from this Logger. - @param severity Minimum severity for messages to be created and logged. - @param filter_user_data Should USER data be filtered from output. - @param vlog_level Minimum level for VLOG messages to be created. Note that a severity of kVERBOSE must be provided - for VLOG messages to be logged. 
- */ - Logger(const LoggingManager& loggingManager, std::string id, - Severity severity, bool filter_user_data, int vlog_level) - : logging_manager_{&loggingManager}, - id_{id}, - min_severity_{severity}, - filter_user_data_{filter_user_data}, - max_vlog_level_{severity > Severity::kVERBOSE ? -1 : vlog_level} { // disable unless logging VLOG messages - } - - /** - Get the minimum severity level for log messages to be output. - @returns The severity. - */ - Severity GetSeverity() const noexcept { return min_severity_; } - - /** - Change the minimum severity level for log messages to be output. - @param severity The severity. - */ - void SetSeverity(Severity severity) noexcept { min_severity_ = severity; } - - /** - Check if output is enabled for the provided LogSeverity and DataType values. - @param severity The severity. - @param data_type Type of the data. - @returns True if a message with these values will be logged. - */ - bool OutputIsEnabled(Severity severity, DataType data_type) const noexcept { - return (severity >= min_severity_ && (data_type != DataType::USER || !filter_user_data_)); - } - - /** - Return the maximum VLOG level allowed. - */ - int VLOGMaxLevel() const noexcept { - return max_vlog_level_; - } - - /** - Logs the captured message. - @param message The log message. - */ - void Log(const Capture& message) const { - logging_manager_->Log(id_, message); - } - - /** - Sends a Profiling Event Record to the sink. - @param Profiling Event Record - */ - void SendProfileEvent(profiling::EventRecord& eventRecord) const { - logging_manager_->SendProfileEvent(eventRecord); - } - - private: - const LoggingManager* logging_manager_; - const std::string id_; - Severity min_severity_; - const bool filter_user_data_; - const int max_vlog_level_; -}; - -inline const Logger& LoggingManager::DefaultLogger() { - if (s_default_logger_ == nullptr) { - // fail early for attempted misuse. don't use logging macros as we have no logger. 
- ORT_THROW("Attempt to use DefaultLogger but none has been registered."); - } - - return *s_default_logger_; -} - -inline void LoggingManager::SetDefaultLoggerSeverity(Severity severity) { - if (s_default_logger_ == nullptr) { - // fail early for attempted misuse. don't use logging macros as we have no logger. - ORT_THROW("Attempt to use DefaultLogger but none has been registered."); - } - - s_default_logger_->SetSeverity(severity); -} - -inline Timestamp LoggingManager::GetTimestamp() const noexcept { - static const Epochs& epochs = GetEpochs(); - - const auto high_res_now = std::chrono::high_resolution_clock::now(); - return std::chrono::time_point_cast( - epochs.system + (high_res_now - epochs.high_res) + epochs.localtime_offset_from_utc); -} - -/** - Return the current thread id. -*/ -unsigned int GetThreadId(); - -/** - Return the current process id. -*/ -unsigned int GetProcessId(); - -} // namespace logging -} // namespace onnxruntime diff --git a/onnxruntime/core/common/logging/macros.h b/onnxruntime/core/common/logging/macros.h deleted file mode 100644 index 570bc14f..00000000 --- a/onnxruntime/core/common/logging/macros.h +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once -// NOTE: Don't include this file directly. Include logging.h - -#define CREATE_MESSAGE(logger, severity, category, datatype) \ - ::onnxruntime::logging::Capture(logger, ::onnxruntime::logging::Severity::k##severity, category, datatype, ORT_WHERE) - -/* - Both printf and stream style logging are supported. - Not that printf currently has a 2K limit to the message size. - - LOGS_* macros are for stream style - LOGF_* macros are for printf style - - The Message class captures the log input, and pushes it through the logger in its destructor. - - Use the *FATAL* macros if you want a Severity::kFatal message to also throw. 
- - There are a few variants to minimize the length of the macro name required in the calling code. - They are optimized so the shortest names are for the (expected) most common usage. This can be - tweaked if needed. - - Explicit logger vs LoggingManager::DefaulLogger() - Default is for a logger instance to be explicitly passed in. - The logger instance provides an identifier so that log messages from different runs can be separated. - - Variants with DEFAULT in the macro name use the default logger provided by logging manager. This is - static so accessible from any code, provided a LoggingManager instance created with InstanceType::Default - exists somewhere. See logging.h for further explanation of the expected setup. - - DataType - Default uses DataType::SYSTEM. - - Variants with USER in the macro name use DataType::USER. This is data that could be PII, and may need to - be filtered from output. LoggingManager applies this filtering. - - Category - Default category is ::onnxruntime::Logging::Category::onnxruntime. - - If you wish to provide a different category, use variants with CATEGORY in the macro name - -*/ - -// Logging with explicit category - -// iostream style logging. Capture log info in Message, and push to the logger in ~Message. -#define LOGS_CATEGORY(logger, severity, category) \ - if ((logger).OutputIsEnabled(::onnxruntime::logging::Severity::k##severity, ::onnxruntime::logging::DataType::SYSTEM)) \ - CREATE_MESSAGE(logger, severity, category, ::onnxruntime::logging::DataType::SYSTEM).Stream() - -#define LOGS_USER_CATEGORY(logger, severity, category) \ - if ((logger).OutputIsEnabled(::onnxruntime::logging::Severity::k##severity, ::onnxruntime::logging::DataType::USER)) \ - CREATE_MESSAGE(logger, severity, category, ::onnxruntime::logging::DataType::USER).Stream() - - // printf style logging. Capture log info in Message, and push to the logger in ~Message. -#define LOGF_CATEGORY(logger, severity, category, format_str, ...) 
\ - if ((logger).OutputIsEnabled(::onnxruntime::logging::Severity::k##severity, ::onnxruntime::logging::DataType::SYSTEM)) \ - CREATE_MESSAGE(logger, severity, category, ::onnxruntime::logging::DataType::SYSTEM).CapturePrintf(format_str, ##__VA_ARGS__) - -#define LOGF_USER_CATEGORY(logger, severity, category, format_str, ...) \ - if ((logger).OutputIsEnabled(::onnxruntime::logging::Severity::k##severity, ::onnxruntime::logging::DataType::USER)) \ - CREATE_MESSAGE(logger, severity, category, ::onnxruntime::logging::DataType::USER).CapturePrintf(format_str, ##__VA_ARGS__) - - // Logging with category of "onnxruntime" - -#define LOGS(logger, severity) \ - LOGS_CATEGORY(logger, severity, ::onnxruntime::logging::Category::onnxruntime) - -#define LOGS_USER(logger, severity) \ - LOGS_USER_CATEGORY(logger, severity, ::onnxruntime::logging::Category::onnxruntime) - - // printf style logging. Capture log info in Message, and push to the logger in ~Message. -#define LOGF(logger, severity, format_str, ...) \ - LOGF_CATEGORY(logger, severity, ::onnxruntime::logging::Category::onnxruntime, format_str, ##__VA_ARGS__) - -#define LOGF_USER(logger, severity, format_str, ...) \ - LOGF_USER_CATEGORY(logger, severity, ::onnxruntime::logging::Category::onnxruntime, format_str, ##__VA_ARGS__) - - /* - - Macros that use the default logger. - A LoggingManager instance must be currently valid for the default logger to be available. - - */ - - // Logging with explicit category - -#define LOGS_DEFAULT_CATEGORY(severity, category) \ - LOGS_CATEGORY(::onnxruntime::logging::LoggingManager::DefaultLogger(), severity, category) - -#define LOGS_USER_DEFAULT_CATEGORY(severity, category) \ - LOGS_USER_CATEGORY(::onnxruntime::logging::LoggingManager::DefaultLogger(), severity, category) - -#define LOGF_DEFAULT_CATEGORY(severity, category, format_str, ...) 
\ - LOGF_CATEGORY(::onnxruntime::logging::LoggingManager::DefaultLogger(), severity, category, format_str, ##__VA_ARGS__) - -#define LOGF_USER_DEFAULT_CATEGORY(severity, category, format_str, ...) \ - LOGF_USER_CATEGORY(::onnxruntime::logging::LoggingManager::DefaultLogger(), severity, category, format_str, ##__VA_ARGS__) - -// Logging with category of "onnxruntime" - -#define LOGS_DEFAULT(severity) \ - LOGS_DEFAULT_CATEGORY(severity, ::onnxruntime::logging::Category::onnxruntime) - -#define LOGS_USER_DEFAULT(severity) \ - LOGS_USER_DEFAULT_CATEGORY(severity, ::onnxruntime::logging::Category::onnxruntime) - -#define LOGF_DEFAULT(severity, format_str, ...) \ - LOGF_DEFAULT_CATEGORY(severity, ::onnxruntime::logging::Category::onnxruntime, format_str, ##__VA_ARGS__) - -#define LOGF_USER_DEFAULT(severity, format_str, ...) \ - LOGF_USER_DEFAULT_CATEGORY(severity, ::onnxruntime::logging::Category::onnxruntime, format_str, ##__VA_ARGS__) - - /* - - Conditional logging - - */ - - // Logging with explicit category - -#define LOGS_CATEGORY_IF(boolean_expression, logger, severity, category) \ - if ((boolean_expression) == true) LOGS_CATEGORY(logger, severity, category) - -#define LOGS_DEFAULT_CATEGORY_IF(boolean_expression, severity, category) \ - if ((boolean_expression) == true) LOGS_DEFAULT_CATEGORY(severity, category) - -#define LOGS_USER_CATEGORY_IF(boolean_expression, logger, severity, category) \ - if ((boolean_expression) == true) LOGS_USER_CATEGORY(logger, severity, category) - -#define LOGS_USER_DEFAULT_CATEGORY_IF(boolean_expression, severity, category) \ - if ((boolean_expression) == true) LOGS_USER_DEFAULT_CATEGORY(severity, category) - -#define LOGF_CATEGORY_IF(boolean_expression, logger, severity, category, format_str, ...) \ - if ((boolean_expression) == true) LOGF_CATEGORY(logger, severity, category, format_str, ##__VA_ARGS__) - -#define LOGF_DEFAULT_CATEGORY_IF(boolean_expression, severity, category, format_str, ...) 
\ - if ((boolean_expression) == true) LOGF_DEFAULT_CATEGORY(severity, category, format_str, ##__VA_ARGS__) - -#define LOGF_USER_CATEGORY_IF(boolean_expression, logger, severity, category, format_str, ...) \ - if ((boolean_expression) == true) LOGF_USER_CATEGORY(logger, severity, category, format_str, ##__VA_ARGS__) - -#define LOGF_USER_DEFAULT_CATEGORY_IF(boolean_expression, severity, category, format_str, ...) \ - if ((boolean_expression) == true) LOGF_USER_DEFAULT_CATEGORY(severity, category, format_str, ##__VA_ARGS__) - - // Logging with category of "onnxruntime" - -#define LOGS_IF(boolean_expression, logger, severity) \ - LOGS_CATEGORY_IF(boolean_expression, logger, severity, ::onnxruntime::logging::Category::onnxruntime) - -#define LOGS_DEFAULT_IF(boolean_expression, severity) \ - LOGS_DEFAULT_CATEGORY_IF(boolean_expression, severity, ::onnxruntime::logging::Category::onnxruntime) - -#define LOGS_USER_IF(boolean_expression, logger, severity) \ - LOGS_USER_CATEGORY_IF(boolean_expression, logger, severity, ::onnxruntime::logging::Category::onnxruntime) - -#define LOGS_USER_DEFAULT_IF(boolean_expression, severity) \ - LOGS_USER_DEFAULT_CATEGORY_IF(boolean_expression, severity, ::onnxruntime::logging::Category::onnxruntime) - -#define LOGF_IF(boolean_expression, logger, severity, format_str, ...) \ - LOGF_CATEGORY_IF(boolean_expression, logger, severity, ::onnxruntime::logging::Category::onnxruntime, format_str, ##__VA_ARGS__) - -#define LOGF_DEFAULT_IF(boolean_expression, severity, format_str, ...) \ - LOGF_DEFAULT_CATEGORY_IF(boolean_expression, severity, ::onnxruntime::logging::Category::onnxruntime, format_str, ##__VA_ARGS__) - -#define LOGF_USER_IF(boolean_expression, logger, severity, format_str, ...) \ - LOGF_USER_CATEGORY_IF(boolean_expression, logger, severity, ::onnxruntime::logging::Category::onnxruntime, \ - format_str, ##__VA_ARGS__) - -#define LOGF_USER_DEFAULT_IF(boolean_expression, severity, format_str, ...) 
\ - LOGF_USER_DEFAULT_CATEGORY_IF(boolean_expression, severity, ::onnxruntime::logging::Category::onnxruntime, \ - format_str, ##__VA_ARGS__) - -/* - - Debug verbose logging of caller provided level. - Disabled in Release builds. - Use the _USER variants for VLOG statements involving user data that may need to be filtered. -*/ -#define VLOGS(logger, level) \ - if (::onnxruntime::logging::vlog_enabled && level <= (logger).VLOGMaxLevel()) \ - LOGS_CATEGORY(logger, VERBOSE, "VLOG" #level) - -#define VLOGS_USER(logger, level) \ - if (::onnxruntime::logging::vlog_enabled && level <= (logger).VLOGMaxLevel()) \ - LOGS_USER_CATEGORY(logger, VERBOSE, "VLOG" #level) - -#define VLOGF(logger, level, format_str, ...) \ - if (::onnxruntime::logging::vlog_enabled && level <= (logger).VLOGMaxLevel()) \ - LOGF_CATEGORY(logger, VERBOSE, "VLOG" #level, format_str, ##__VA_ARGS__) - -#define VLOGF_USER(logger, level, format_str, ...) \ - if (::onnxruntime::logging::vlog_enabled && level <= (logger).VLOGMaxLevel()) \ - LOGF_USER_CATEGORY(logger, VERBOSE, "VLOG" #level, format_str, ##__VA_ARGS__) - - // Default logger variants -#define VLOGS_DEFAULT(level) \ - VLOGS(::onnxruntime::logging::LoggingManager::DefaultLogger(), level) - -#define VLOGS_USER_DEFAULT(level) \ - VLOGS_USER(::onnxruntime::logging::LoggingManager::DefaultLogger(), level) - -#define VLOGF_DEFAULT(level, format_str, ...) \ - VLOGF(::onnxruntime::logging::LoggingManager::DefaultLogger(), level, format_str, ##__VA_ARGS__) - -#define VLOGF_USER_DEFAULT(level, format_str, ...) \ - VLOGF_USER(::onnxruntime::logging::LoggingManager::DefaultLogger(), level, format_str, ##__VA_ARGS__) diff --git a/onnxruntime/core/common/logging/severity.h b/onnxruntime/core/common/logging/severity.h deleted file mode 100644 index e43f192e..00000000 --- a/onnxruntime/core/common/logging/severity.h +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -#pragma once - -namespace onnxruntime { -namespace logging { -// mild violation of naming convention. the 'k' lets us use token concatenation in the macro -// ::onnxruntime::Logging::Severity::k##severity. It's not legal to have ::onnxruntime::Logging::Severity::##severity -// the uppercase makes the LOG macro usage look as expected for passing an enum value as it will be LOGS(logger, ERROR) -enum class Severity { - kVERBOSE = 0, - kINFO = 1, - kWARNING = 2, - kERROR = 3, - kFATAL = 4 -}; - -constexpr const char* SEVERITY_PREFIX = "VIWEF"; - -} // namespace logging -} // namespace onnxruntime diff --git a/onnxruntime/core/common/make_string.h b/onnxruntime/core/common/make_string.h deleted file mode 100644 index a6e421a6..00000000 --- a/onnxruntime/core/common/make_string.h +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright (c) 2016-present, Facebook, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Portions Copyright (c) Microsoft Corporation - -#pragma once - -#include -#include - -namespace onnxruntime { - -namespace detail { -inline void MakeStringImpl(std::ostringstream& /*ss*/) noexcept { -} - -template -inline void MakeStringImpl(std::ostringstream& ss, const T& t) noexcept { - ss << t; -} - -template -inline void MakeStringImpl(std::ostringstream& ss, const T& t, const Args&... 
args) noexcept { - MakeStringImpl(ss, t); - MakeStringImpl(ss, args...); -} -} // namespace detail - -/** - * Makes a string by concatenating string representations of the arguments. - * This version uses the current locale. - */ -template -std::string MakeString(const Args&... args) { - std::ostringstream ss; - detail::MakeStringImpl(ss, args...); - return ss.str(); -} - -/** - * Makes a string by concatenating string representations of the arguments. - * This version uses std::locale::classic(). - */ -template -std::string MakeStringWithClassicLocale(const Args&... args) { - std::ostringstream ss; - ss.imbue(std::locale::classic()); - detail::MakeStringImpl(ss, args...); - return ss.str(); -} - -// MakeString versions for already-a-string types. - -inline std::string MakeString(const std::string& str) { - return str; -} - -inline std::string MakeString(const char* cstr) { - return cstr; -} - -inline std::string MakeStringWithClassicLocale(const std::string& str) { - return str; -} - -inline std::string MakeStringWithClassicLocale(const char* cstr) { - return cstr; -} - -} // namespace onnxruntime diff --git a/onnxruntime/core/common/make_unique.h b/onnxruntime/core/common/make_unique.h deleted file mode 100644 index b401f0d4..00000000 --- a/onnxruntime/core/common/make_unique.h +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2017 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ----------------------------------------------------------------------------- -// File: make_unique.h -// ----------------------------------------------------------------------------- -// -// This header file contains utility functions for managing the creation and -// conversion of smart pointers. This file is an extension to the C++ -// standard library header file. -/* Modifications Copyright (c) Microsoft. */ - -#pragma once - -#include -#include -#include -#include -#include -#include - -namespace onnxruntime { - -template -using remove_extent_t = typename std::remove_extent::type; - -namespace memory_internal { - -// Traits to select proper overload and return type for `absl::make_unique<>`. -template -struct MakeUniqueResult { - using scalar = std::unique_ptr; -}; -template -struct MakeUniqueResult { - using array = std::unique_ptr; -}; -template -struct MakeUniqueResult { - using invalid = void; -}; - -} // namespace memory_internal - -// gcc 4.8 has __cplusplus at 201301 but doesn't define make_unique. Other -// supported compilers either just define __cplusplus as 201103 but have -// make_unique (msvc), or have make_unique whenever __cplusplus > 201103 (clang) -#if (__cplusplus > 201103L || defined(_MSC_VER)) && \ - !(defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8) -using std::make_unique; -#else -// ----------------------------------------------------------------------------- -// Function Template: make_unique() -// ----------------------------------------------------------------------------- -// -// Creates a `std::unique_ptr<>`, while avoiding issues creating temporaries -// during the construction process. `absl::make_unique<>` also avoids redundant -// type declarations, by avoiding the need to explicitly use the `new` operator. -// -// This implementation of `absl::make_unique<>` is designed for C++11 code and -// will be replaced in C++14 by the equivalent `std::make_unique<>` abstraction. 
-// `absl::make_unique<>` is designed to be 100% compatible with -// `std::make_unique<>` so that the eventual migration will involve a simple -// rename operation. -// -// For more background on why `std::unique_ptr(new T(a,b))` is problematic, -// see Herb Sutter's explanation on -// (Exception-Safe Function Calls)[https://herbsutter.com/gotw/_102/]. -// (In general, reviewers should treat `new T(a,b)` with scrutiny.) -// -// Example usage: -// -// auto p = make_unique(args...); // 'p' is a std::unique_ptr -// auto pa = make_unique(5); // 'pa' is a std::unique_ptr -// -// Three overloads of `absl::make_unique` are required: -// -// - For non-array T: -// -// Allocates a T with `new T(std::forward args...)`, -// forwarding all `args` to T's constructor. -// Returns a `std::unique_ptr` owning that object. -// -// - For an array of unknown bounds T[]: -// -// `absl::make_unique<>` will allocate an array T of type U[] with -// `new U[n]()` and return a `std::unique_ptr` owning that array. -// -// Note that 'U[n]()' is different from 'U[n]', and elements will be -// value-initialized. Note as well that `std::unique_ptr` will perform its -// own destruction of the array elements upon leaving scope, even though -// the array [] does not have a default destructor. -// -// NOTE: an array of unknown bounds T[] may still be (and often will be) -// initialized to have a size, and will still use this overload. E.g: -// -// auto my_array = absl::make_unique(10); -// -// - For an array of known bounds T[N]: -// -// `absl::make_unique<>` is deleted (like with `std::make_unique<>`) as -// this overload is not useful. -// -// NOTE: an array of known bounds T[N] is not considered a useful -// construction, and may cause undefined behavior in templates. 
E.g: -// -// auto my_array = absl::make_unique(); -// -// In those cases, of course, you can still use the overload above and -// simply initialize it to its desired size: -// -// auto my_array = absl::make_unique(10); - -// `absl::make_unique` overload for non-array types. -template -typename memory_internal::MakeUniqueResult::scalar make_unique( - Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); -} - -// `absl::make_unique` overload for an array T[] of unknown bounds. -// The array allocation needs to use the `new T[size]` form and cannot take -// element constructor arguments. The `std::unique_ptr` will manage destructing -// these array elements. -template -typename memory_internal::MakeUniqueResult::array make_unique(size_t n) { - return std::unique_ptr(new typename onnxruntime::remove_extent_t[n]()); -} - -// `absl::make_unique` overload for an array T[N] of known bounds. -// This construction will be rejected. -template -typename memory_internal::MakeUniqueResult::invalid make_unique( - Args&&... /* args */) = delete; -#endif - -} \ No newline at end of file diff --git a/onnxruntime/core/common/optional.h b/onnxruntime/core/common/optional.h deleted file mode 100644 index ab32cf6b..00000000 --- a/onnxruntime/core/common/optional.h +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include - -namespace onnxruntime { - -using nonstd::optional; - -#ifndef ORT_NO_EXCEPTIONS -using nonstd::bad_optional_access; -#endif - -using nonstd::nullopt; -using nonstd::nullopt_t; - -using nonstd::in_place; -using nonstd::in_place_t; - -using nonstd::make_optional; - -} // namespace onnxruntime diff --git a/onnxruntime/core/common/parse_string.h b/onnxruntime/core/common/parse_string.h deleted file mode 100644 index 1429859d..00000000 --- a/onnxruntime/core/common/parse_string.h +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) Microsoft Corporation. 
All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include -#include -#include - -#include "core/common/common.h" - -namespace onnxruntime { - -/** - * Tries to parse a value from an entire string. - */ -template -bool TryParseStringWithClassicLocale(const std::string& str, T& value) { - if (std::is_integral::value && std::is_unsigned::value) { - // if T is unsigned integral type, reject negative values which will wrap - if (!str.empty() && str[0] == '-') { - return false; - } - } - - // don't allow leading whitespace - if (!str.empty() && std::isspace(str[0], std::locale::classic())) { - return false; - } - - std::istringstream is{str}; - is.imbue(std::locale::classic()); - T parsed_value{}; - - const bool parse_successful = - is >> parsed_value && - is.get() == std::istringstream::traits_type::eof(); // don't allow trailing characters - if (!parse_successful) { - return false; - } - - value = std::move(parsed_value); - return true; -} - -inline bool TryParseStringWithClassicLocale(const std::string& str, std::string& value) { - value = str; - return true; -} - -inline bool TryParseStringWithClassicLocale(const std::string& str, bool& value) { - if (str == "0" || str == "False" || str == "false") { - value = false; - return true; - } - - if (str == "1" || str == "True" || str == "true") { - value = true; - return true; - } - - return false; -} - -/** - * Parses a value from an entire string. - */ -template -Status ParseStringWithClassicLocale(const std::string& s, T& value) { - ORT_RETURN_IF_NOT(TryParseStringWithClassicLocale(s, value), "Failed to parse value: \"", value, "\""); - return Status::OK(); -} - -/** - * Parses a value from an entire string. 
- */ -template -T ParseStringWithClassicLocale(const std::string& s) { - T value{}; - ORT_THROW_IF_ERROR(ParseStringWithClassicLocale(s, value)); - return value; -} - -} // namespace onnxruntime diff --git a/onnxruntime/core/common/spin_pause.h b/onnxruntime/core/common/spin_pause.h deleted file mode 100644 index 49b71e55..00000000 --- a/onnxruntime/core/common/spin_pause.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#if defined(_M_AMD64) -#include -#endif - -#if defined(__x86_64__) -#include -#endif - -namespace onnxruntime { - -namespace concurrency { - -// Intrinsic to use in spin-loops - -inline void SpinPause() { -#if defined(_M_AMD64) || defined(__x86_64__) - _mm_pause(); -#endif -} - -} // namespace concurrency - -} // namespace onnxruntime diff --git a/onnxruntime/core/common/status.h b/onnxruntime/core/common/status.h deleted file mode 100644 index c1076558..00000000 --- a/onnxruntime/core/common/status.h +++ /dev/null @@ -1,191 +0,0 @@ -/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -// Modifications Copyright (c) Microsoft. 
- -#pragma once - -#include -#include -#include -#ifdef _WIN32 -#include -#endif - -namespace onnxruntime { -namespace common { - -enum StatusCategory { - NONE = 0, - SYSTEM = 1, - ONNXRUNTIME = 2, -}; - -/** - Error code for ONNXRuntime. -*/ -enum StatusCode { - OK = 0, - FAIL = 1, - INVALID_ARGUMENT = 2, - NO_SUCHFILE = 3, - NO_MODEL = 4, - ENGINE_ERROR = 5, - RUNTIME_EXCEPTION = 6, - INVALID_PROTOBUF = 7, - MODEL_LOADED = 8, - NOT_IMPLEMENTED = 9, - INVALID_GRAPH = 10, - EP_FAIL = 11 -}; - -inline const char* StatusCodeToString(StatusCode status) noexcept { - switch (status) { - case StatusCode::OK: - return "SUCCESS"; - case StatusCode::FAIL: - return "FAIL"; - case StatusCode::INVALID_ARGUMENT: - return "INVALID_ARGUMENT"; - case StatusCode::NO_SUCHFILE: - return "NO_SUCHFILE"; - case StatusCode::NO_MODEL: - return "NO_MODEL"; - case StatusCode::ENGINE_ERROR: - return "ENGINE_ERROR"; - case StatusCode::RUNTIME_EXCEPTION: - return "RUNTIME_EXCEPTION"; - case StatusCode::INVALID_PROTOBUF: - return "INVALID_PROTOBUF"; - case StatusCode::MODEL_LOADED: - return "MODEL_LOADED"; - case StatusCode::NOT_IMPLEMENTED: - return "NOT_IMPLEMENTED"; - case StatusCode::INVALID_GRAPH: - return "INVALID_GRAPH"; - case StatusCode::EP_FAIL: - return "EP_FAIL"; - default: - return "GENERAL ERROR"; - } -} - -#ifdef _WIN32 -inline HRESULT StatusCodeToHRESULT(StatusCode status) noexcept { - switch (status) - { - case StatusCode::OK: - return S_OK; - case StatusCode::FAIL: - return E_FAIL; - case StatusCode::INVALID_ARGUMENT: - return E_INVALIDARG; - case StatusCode::NO_SUCHFILE: - return __HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND); - case StatusCode::NO_MODEL: - return __HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND); - case StatusCode::ENGINE_ERROR: - return E_FAIL; - case StatusCode::RUNTIME_EXCEPTION: - return E_FAIL; - case StatusCode::INVALID_PROTOBUF: - return __HRESULT_FROM_WIN32(ERROR_FILE_CORRUPT); - case StatusCode::MODEL_LOADED: - return 
__HRESULT_FROM_WIN32(ERROR_INTERNAL_ERROR); - case StatusCode::NOT_IMPLEMENTED: - return E_NOTIMPL; - case StatusCode::INVALID_GRAPH: - return __HRESULT_FROM_WIN32(ERROR_FILE_CORRUPT); - case StatusCode::EP_FAIL: - return __HRESULT_FROM_WIN32(ERROR_INTERNAL_ERROR); - default: - return E_FAIL; - } -} -#endif - -class Status { - public: - Status() noexcept = default; - - Status(StatusCategory category, int code, const std::string& msg); - - Status(StatusCategory category, int code, const char* msg); - - Status(StatusCategory category, int code); - - Status(const Status& other) - : state_((other.state_ == nullptr) ? nullptr : new State(*other.state_)) {} - - Status& operator=(const Status& other) { - if (state_ != other.state_) { - if (other.state_ == nullptr) { - state_.reset(); - } else { - state_.reset(new State(*other.state_)); - } - } - return *this; - } - - Status(Status&&) = default; - Status& operator=(Status&&) = default; - ~Status() = default; - - bool IsOK() const { - return (state_ == nullptr); - } - - int Code() const noexcept; - - StatusCategory Category() const noexcept; - - const std::string& ErrorMessage() const noexcept; - - std::string ToString() const; - - bool operator==(const Status& other) const { - return (this->state_ == other.state_) || (ToString() == other.ToString()); - } - - bool operator!=(const Status& other) const { - return !(*this == other); - } - - static Status OK() { - return Status(); - } - - private: - static const std::string& EmptyString() noexcept; - - struct State { - State(StatusCategory cat0, int code0, const std::string& msg0) - : category(cat0), code(code0), msg(msg0) {} - - State(StatusCategory cat0, int code0, const char* msg0) - : category(cat0), code(code0), msg(msg0) {} - - const StatusCategory category; - const int code; - const std::string msg; - }; - - // As long as Code() is OK, state_ == nullptr. 
- std::unique_ptr state_; -}; - -inline std::ostream& operator<<(std::ostream& out, const Status& status) { - return out << status.ToString(); -} - -} // namespace common -} // namespace onnxruntime diff --git a/onnxruntime/core/framework/alloc_kind.h b/onnxruntime/core/framework/alloc_kind.h deleted file mode 100644 index 4534d084..00000000 --- a/onnxruntime/core/framework/alloc_kind.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once -#include - -namespace onnxruntime { -// The ml-Values fall into the following categories with respect to their -// memory management: -// - inference inputs: owned (allocated and freed) by caller, and is by -// default read-only by the runtime. -// - inference outputs: allocated by runtime, ownership transferred to -// caller. TODO: Make sure this semantics is clear in InferenceSession API. -// - weights (constant tensors): can be allocated once (statically), and -// reused by all inference calls within an InferenceSession. -// - tensor values: The lifetimes of these tensor-values are statically -// determined, which is used for memory reuse/sharing optimizations. The -// runtime allocates/frees these values at the right time (as determined -// by the static allocation plan). Note that this is simplified since we -// do not try to optimize for "slice" like ops, where we may be able to -// conditionally reuse memory/data in some cases but not others. -// Generalizing this is future work. 
- -enum class AllocKind { - kNotSet = -1, - kAllocate = 0, - kReuse = 1, - kPreExisting = 2, - kAllocateStatically = 3, - kAllocateOutput = 4, - kShare = 5 -}; - -std::ostream& operator<<(std::ostream& out, AllocKind alloc_kind); -} // namespace onnxruntime diff --git a/onnxruntime/core/framework/allocator.h b/onnxruntime/core/framework/allocator.h deleted file mode 100644 index b66548fe..00000000 --- a/onnxruntime/core/framework/allocator.h +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include "core/common/common.h" -#include "core/framework/fence.h" -#include "core/session/onnxruntime_c_api.h" -#include "ortdevice.h" -#include "ortmemoryinfo.h" - -// This configures the arena based allocator used by ORT -// See docs/C_API.md for details on what these mean and how to choose these values -struct OrtArenaCfg { - size_t max_mem; // use 0 to allow ORT to choose the default - int arena_extend_strategy; // use -1 to allow ORT to choose the default, 0 = kNextPowerOfTwo, 1 = kSameAsRequested - int initial_chunk_size_bytes; // use -1 to allow ORT to choose the default - int max_dead_bytes_per_chunk; // use -1 to allow ORT to choose the default -}; - -namespace onnxruntime { -constexpr const char* CPU = "Cpu"; -constexpr const char* CUDA = "Cuda"; -constexpr const char* CUDA_PINNED = "CudaPinned"; -constexpr const char* MIGRAPHX = "MIGraphX"; -constexpr const char* MIGRAPHX_PINNED = "MIGraphXPinned"; - -constexpr size_t kAllocAlignment = 256; - -// forward declaration -class SessionState; - -template -using IAllocatorUniquePtr = std::unique_ptr>; - -class IAllocator { - public: - IAllocator(const OrtMemoryInfo& info) : memory_info_(info) {} - virtual ~IAllocator() = default; - /** - @remarks Use SafeInt when calculating the size of memory to allocate using Alloc. 
- */ - virtual void* Alloc(size_t size) = 0; - virtual void Free(void* p) = 0; - const OrtMemoryInfo& Info() const { return memory_info_; }; - - /** - optional CreateFence interface, as provider like DML has its own fence - */ - virtual FencePtr CreateFence(const SessionState* /*unused*/) { return nullptr; } - - static bool CalcMemSizeForArray(size_t nmemb, size_t size, size_t* out) noexcept { - return CalcMemSizeForArrayWithAlignment(nmemb, size, 0, out); - } - - /** - * Calculate the memory size for an array. The size is bounds checked using SafeInt. - * \tparam alignment must be power of 2 - * \param nmemb Number of members or elements in the array - * \param size Size of each element - * \param out Total size required after any alignment is applied - * \return true, successful. false, overflow - */ - static bool CalcMemSizeForArrayWithAlignment(size_t nmemb, size_t size, size_t alignment, size_t* out) noexcept ORT_MUST_USE_RESULT; - - /** - * https://cwe.mitre.org/data/definitions/190.html - * \param alignment must be power of 2 - * \param nmemb Number of members or elements in the array - * \param size Size of each element - * \param out Total size required after any alignment is applied - * \return true, successful. false, overflow - * \remarks This was the original API and was implemented in the header. Replaced with the above version - * implemented in the .cc file so that the SafeInt dependency is internal. 
- */ - template - static bool CalcMemSizeForArrayWithAlignment(size_t nmemb, size_t size, size_t* out) noexcept ORT_MUST_USE_RESULT; - - /** - * allocate memory for an array which has nmemb items of data, each size bytes long - */ - void* AllocArray(size_t nmemb, size_t size) { - size_t len; - if (!CalcMemSizeForArray(nmemb, size, &len)) - return nullptr; - return Alloc(len); - } - - /** - * allocate memory for an array which has nmemb items of data, each size bytes long - */ - template - void* AllocArrayWithAlignment(size_t nmemb, size_t size) { - size_t len; - if (!CalcMemSizeForArrayWithAlignment(nmemb, size, alignment, &len)) - return nullptr; - return Alloc(len); - } - - /** - Create a std::unique_ptr that is allocated and freed by the provided IAllocator. - @param allocator The allocator. - @param count_or_bytes The exact bytes to allocate if T is void, otherwise the number of elements to allocate. - @returns std::unique_ptr with allocated memory and deleter. - */ - template - static IAllocatorUniquePtr MakeUniquePtr(std::shared_ptr allocator, size_t count_or_bytes) { - if (allocator == nullptr) return nullptr; - // for now limit to fundamental types. we could support others, but to do so either we or the caller - // needs to call the dtor for the objects, for buffers allocated on device we don't have destructor - //static_assert(std::is_fundamental::value, "Fundamental type required as no destructors are called."); - - size_t alloc_size = count_or_bytes; - - // if T is not void, 'count_or_bytes' == number of items so allow for that - if (!std::is_void::value) { - // sizeof(void) isn't valid, but the compiler isn't smart enough to ignore that this line isn't - // reachable if T is void. 
use std::conditional to 'use' void* in the sizeof call - if (!CalcMemSizeForArray(count_or_bytes, - sizeof(typename std::conditional::value, void*, T>::type), - &alloc_size)) return nullptr; - } - - return IAllocatorUniquePtr{ - static_cast(allocator->Alloc(alloc_size)), // allocate - [=](T* ptr) { // capture 'allocator' by value so it's always valid - allocator->Free(ptr); - }}; - } - - private: - OrtMemoryInfo memory_info_; -}; - -template -bool IAllocator::CalcMemSizeForArrayWithAlignment(size_t nmemb, size_t size, size_t* out) noexcept { - return CalcMemSizeForArrayWithAlignment(nmemb, size, alignment, out); -} - -class CPUAllocator : public IAllocator { - public: - explicit CPUAllocator(const OrtMemoryInfo& memory_info) : IAllocator(memory_info) {} - - CPUAllocator() : IAllocator(OrtMemoryInfo(CPU, OrtAllocatorType::OrtDeviceAllocator)) {} - - void* Alloc(size_t size) override; - void Free(void* p) override; -}; - -#if defined(USE_MIMALLOC_ARENA_ALLOCATOR) -class MiMallocAllocator : public IAllocator { - public: - explicit MiMallocAllocator(const OrtMemoryInfo& memory_info) : IAllocator(memory_info) {} - MiMallocAllocator() : IAllocator(OrtMemoryInfo(CPU, OrtAllocatorType::OrtDeviceAllocator)) {} - - void* Alloc(size_t size) override; - void Free(void* p) override; -}; - -#endif - -#if defined(USE_MIMALLOC_ARENA_ALLOCATOR) -using TAllocator = MiMallocAllocator; -#else -using TAllocator = CPUAllocator; -#endif - -using AllocatorPtr = std::shared_ptr; - -} // namespace onnxruntime diff --git a/onnxruntime/core/framework/customregistry.h b/onnxruntime/core/framework/customregistry.h deleted file mode 100644 index 52f6169e..00000000 --- a/onnxruntime/core/framework/customregistry.h +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -#pragma once - -#include "core/common/status.h" -#include "core/common/logging/logging.h" -#include "core/framework/op_kernel.h" -#include "core/framework/kernel_def_builder.h" -#include "core/framework/kernel_registry.h" - -#if !defined(ORT_MINIMAL_BUILD) -#include "core/graph/schema_registry.h" -#endif - -namespace onnxruntime { - -/** - Represents a registry that contains both custom kernels and custom schemas. -*/ -class CustomRegistry final { - public: - CustomRegistry() - : kernel_registry_(std::make_shared()) -#if !defined(ORT_MINIMAL_BUILD) - , - opschema_registry_(std::make_shared()) -#endif - { - } - - /** - * Register a kernel definition together with kernel factory method to this session. - * If any conflict happened between registered kernel def and built-in kernel def, - * registered kernel will have higher priority. - * Call this before invoking Initialize(). - * @return OK if success. - */ - common::Status RegisterCustomKernel(KernelDefBuilder& kernel_def_builder, const KernelCreateFn& kernel_creator); - - common::Status RegisterCustomKernel(KernelCreateInfo&); - - const std::shared_ptr& GetKernelRegistry(); - -#if !defined(ORT_MINIMAL_BUILD) - common::Status RegisterOpSet(std::vector& schemas, const std::string& domain, - int baseline_opset_version, int opset_version); - - const std::shared_ptr& GetOpschemaRegistry(); -#endif - - private: - ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CustomRegistry); - std::shared_ptr kernel_registry_; -#if !defined(ORT_MINIMAL_BUILD) - std::shared_ptr opschema_registry_; -#endif -}; - -} // namespace onnxruntime diff --git a/onnxruntime/core/framework/data_types.h b/onnxruntime/core/framework/data_types.h deleted file mode 100644 index c71376d3..00000000 --- a/onnxruntime/core/framework/data_types.h +++ /dev/null @@ -1,1000 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "core/common/common.h" -#include "core/common/exceptions.h" -#include "core/framework/endian.h" -#include "core/graph/onnx_protobuf.h" - -struct OrtValue; - -namespace ONNX_NAMESPACE { -class TypeProto; -} // namespace ONNX_NAMESPACE - -namespace onnxruntime { -/// Predefined registered types - -#if !defined(DISABLE_ML_OPS) -//maps (only used by ML ops) -using MapStringToString = std::map; -using MapStringToInt64 = std::map; -using MapStringToFloat = std::map; -using MapStringToDouble = std::map; -using MapInt64ToString = std::map; -using MapInt64ToInt64 = std::map; -using MapInt64ToFloat = std::map; -using MapInt64ToDouble = std::map; -#endif - -//vectors/sequences -#if !defined(DISABLE_ML_OPS) -using VectorMapStringToFloat = std::vector; -using VectorMapInt64ToFloat = std::vector; -#endif -using VectorString = std::vector; -using VectorInt64 = std::vector; - -class DataTypeImpl; -class TensorTypeBase; -class SparseTensorTypeBase; -class SequenceTensorTypeBase; -class NonTensorTypeBase; -class PrimitiveDataTypeBase; - -// MLFloat16 -struct MLFloat16 { - uint16_t val; - - MLFloat16() : val(0) {} - explicit MLFloat16(uint16_t x) : val(x) {} - explicit MLFloat16(float f); - - float ToFloat() const; - - operator float() const { - return ToFloat(); - } -}; - -inline bool operator==(const MLFloat16& left, const MLFloat16& right) { - return left.val == right.val; -} - -inline bool operator!=(const MLFloat16& left, const MLFloat16& right) { - return left.val != right.val; -} - -inline bool operator<(const MLFloat16& left, const MLFloat16& right) { - return left.val < right.val; -} - -//BFloat16 -struct BFloat16 { - uint16_t val{0}; - explicit BFloat16() = default; - explicit BFloat16(uint16_t v) : val(v) {} - explicit BFloat16(float v) { - if (endian::native == endian::little) { - std::memcpy(&val, reinterpret_cast(&v) + sizeof(uint16_t), sizeof(uint16_t)); - } else { - 
std::memcpy(&val, &v, sizeof(uint16_t)); - } - } - - float ToFloat() const { - float result; - char* const first = reinterpret_cast(&result); - char* const second = first + sizeof(uint16_t); - if (endian::native == endian::little) { - std::memset(first, 0, sizeof(uint16_t)); - std::memcpy(second, &val, sizeof(uint16_t)); - } else { - std::memcpy(first, &val, sizeof(uint16_t)); - std::memset(second, 0, sizeof(uint16_t)); - } - return result; - } - - operator float() const { - return ToFloat(); - } -}; - -inline void BFloat16ToFloat(const BFloat16* blf, float* flt, size_t size) { - auto src = blf; - auto d = flt; - for (; size != 0; ++src, ++d, --size) { - *d = src->ToFloat(); - } -} - -inline void FloatToBFloat16(const float* flt, BFloat16* blf, size_t size) { - auto src = flt; - auto d = blf; - for (; size != 0; ++src, ++d, --size) { - new (d) BFloat16(*src); - } -} - -inline bool operator==(const BFloat16& left, const BFloat16& right) { - return left.val == right.val; -} - -inline bool operator!=(const BFloat16& left, const BFloat16& right) { - return left.val != right.val; -} - -inline bool operator<(const BFloat16& left, const BFloat16& right) { - return left.val < right.val; -} - -// DataTypeImpl pointer as unique DataTypeImpl identifier. -using MLDataType = const DataTypeImpl*; -// be used with class MLValue -using DeleteFunc = void (*)(void*); -using CreateFunc = void* (*)(); - -/** - * \brief Base class for MLDataType - * - */ -class DataTypeImpl { - public: - virtual ~DataTypeImpl() = default; - - /** - * \brief this API will be used to check type compatibility at runtime - * - * \param type_proto a TypeProto instance that is constructed for a specific type - * will be checked against a TypeProto instance contained within a corresponding - * MLDataType instance. 
- */ - virtual bool IsCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const = 0; - - virtual size_t Size() const = 0; - - virtual DeleteFunc GetDeleteFunc() const = 0; - - /** - * \brief Retrieves an instance of TypeProto for - * a given MLDataType - * \returns optional TypeProto. Only ONNX types - has type proto, non-ONNX types will return nullptr. - */ - virtual const ONNX_NAMESPACE::TypeProto* GetTypeProto() const = 0; - - virtual bool IsTensorType() const { - return false; - } - - virtual bool IsTensorSequenceType() const { - return false; - } - - virtual bool IsSparseTensorType() const { - return false; - } - - // Returns this if this is of tensor-type and null otherwise - virtual const TensorTypeBase* AsTensorType() const { - return nullptr; - } - - virtual const SequenceTensorTypeBase* AsSequenceTensorBase() const { - return nullptr; - } - - // Returns this if this is of sparse-tensor-type and null otherwise - virtual const SparseTensorTypeBase* AsSparseTensorType() const { - return nullptr; - } - - virtual const NonTensorTypeBase* AsNonTensorTypeBase() const { - return nullptr; - } - - // Returns this if this is one of the primitive data types (specialization of PrimitiveDataTypeBase) - // and null otherwise - virtual const PrimitiveDataTypeBase* AsPrimitiveDataType() const { - return nullptr; - } - - // Return the type meta that we are using in the runtime. - template - static MLDataType GetType(); - - // Return the types for a concrete tensor type, like Tensor_Float - template - static MLDataType GetTensorType(); - - template - static MLDataType GetSequenceTensorType(); - - // Return the MLDataType for a concrete sparse tensor type. - template - static MLDataType GetSparseTensorType(); - - /** - * Convert an ONNX TypeProto to onnxruntime DataTypeImpl. - * However, this conversion is lossy. Don't try to use 'this->GetTypeProto()' converting it back. 
- * Even though GetTypeProto() will not have the original information, it will still have enough to correctly - * map to MLDataType. - * \param proto - */ - static MLDataType TypeFromProto(const ONNX_NAMESPACE::TypeProto& proto); - - static const TensorTypeBase* TensorTypeFromONNXEnum(int type); - static const SparseTensorTypeBase* SparseTensorTypeFromONNXEnum(int type); - static const NonTensorTypeBase* SequenceTensorTypeFromONNXEnum(int type); - - static const char* ToString(MLDataType type); - static std::vector ToString(const std::vector& types); - // Registers ONNX_NAMESPACE::DataType (internalized string) with - // MLDataType. DataType is produced by internalizing an instance of - // TypeProto contained within MLDataType - static void RegisterDataType(MLDataType); - static MLDataType GetDataType(const std::string&); - - static const std::vector& AllTensorTypes(); - static const std::vector& AllFixedSizeTensorTypes(); - static const std::vector& AllSequenceTensorTypes(); - static const std::vector& AllFixedSizeSequenceTensorTypes(); - static const std::vector& AllNumericTensorTypes(); - static const std::vector& AllIEEEFloatTensorTypes(); - static const std::vector& AllFixedSizeTensorExceptHalfTypes(); - static const std::vector& AllIEEEFloatTensorExceptHalfTypes(); - static const std::vector& AllTensorAndSequenceTensorTypes(); - static const std::vector& AllFixedSizeTensorAndSequenceTensorTypes(); -}; - -std::ostream& operator<<(std::ostream& out, MLDataType data_type); - -/* - * Type registration helpers - */ -namespace data_types_internal { -/// TensorType helpers -/// - -template -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType(); - -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_FLOAT; -} -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_UINT8; -}; -template <> -constexpr 
ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_INT8; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_UINT16; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_INT16; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_INT32; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_INT64; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_STRING; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_BOOL; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_FLOAT16; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_DOUBLE; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_UINT32; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_UINT64; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorDataType() { - return ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16; -} - -// There is a specialization only for one -// type argument. -template -struct TensorElementTypeSetter { - static void SetTensorElementType(ONNX_NAMESPACE::TypeProto&); - static void SetMapKeyType(ONNX_NAMESPACE::TypeProto&); - static int32_t GetElementType(); -}; - -/// Is a given type on the list of types? 
-/// Accepts a list of types and the first argument is the type -/// We are checking if it is listed among those that follow -template -struct IsAnyOf; - -/// Two types remaining, end of the list -template -struct IsAnyOf : public std::is_same { -}; - -template -struct IsAnyOf { - static constexpr bool value = (std::is_same::value || - IsAnyOf::value); -}; - -/// Tells if the specified type is one of fundamental types -/// that can be contained within a tensor. -/// We do not have raw fundamental types, rather a subset -/// of fundamental types is contained within tensors. -template -struct IsTensorContainedType : public IsAnyOf { -}; - -/// Use "IsSparseTensorContainedType::value" to test if a type T -/// is permitted as the element-type of a sparse-tensor. - -template -struct IsSparseTensorContainedType : public IsAnyOf { -}; - -/// This template's Get() returns a corresponding MLDataType -/// It dispatches the call to either GetTensorType<>() or -/// GetType<>() -template -struct GetMLDataType; - -template -struct GetMLDataType { - static MLDataType Get() { - return DataTypeImpl::GetTensorType(); - } -}; - -template -struct GetMLDataType { - static MLDataType Get() { - return DataTypeImpl::GetType(); - } -}; - -#if !defined(DISABLE_ML_OPS) -/// MapTypes helper API -/// K should always be one of the primitive data types -/// V can be either a primitive type (in which case it is a tensor) -/// or other preregistered types - -void CopyMutableMapValue(const ONNX_NAMESPACE::TypeProto&, - ONNX_NAMESPACE::TypeProto&); - -template -struct SetMapTypes { - static void Set(ONNX_NAMESPACE::TypeProto& proto) { - TensorElementTypeSetter::SetMapKeyType(proto); - MLDataType dt = GetMLDataType::value>::Get(); - const auto* value_proto = dt->GetTypeProto(); -#ifdef ORT_NO_RTTI - ORT_ENFORCE(value_proto != nullptr, "expected a registered ONNX type"); -#else - ORT_ENFORCE(value_proto != nullptr, typeid(V).name(), - " expected to be a registered ONNX type"); -#endif - 
CopyMutableMapValue(*value_proto, proto); - } -}; -#endif - -/// Sequence helpers -/// -// Element type is a primitive type so we set it to a tensor -void CopyMutableSeqElement(const ONNX_NAMESPACE::TypeProto&, - ONNX_NAMESPACE::TypeProto&); - -template -struct SetSequenceType { - static void Set(ONNX_NAMESPACE::TypeProto& proto) { - MLDataType dt = GetMLDataType::value>::Get(); - const auto* elem_proto = dt->GetTypeProto(); -#ifdef ORT_NO_RTTI - ORT_ENFORCE(elem_proto != nullptr, "expected a registered ONNX type"); -#else - ORT_ENFORCE(elem_proto != nullptr, typeid(T).name(), - " expected to be a registered ONNX type"); -#endif - CopyMutableSeqElement(*elem_proto, proto); - } -}; - -/// OpaqueTypes helpers -/// -void AssignOpaqueDomainName(const char* domain, const char* name, - ONNX_NAMESPACE::TypeProto& proto); - -} // namespace data_types_internal - -/// All tensors base -class TensorTypeBase : public DataTypeImpl { - public: - static MLDataType Type(); - - /// We first compare type_proto pointers and then - /// if they do not match try to account for the case - /// where TypeProto was created ad-hoc and not queried from MLDataType - bool IsCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const override; - - bool IsTensorType() const override { - return true; - } - - const TensorTypeBase* AsTensorType() const override { - return this; - } - - size_t Size() const override; - - DeleteFunc GetDeleteFunc() const override; - - const ONNX_NAMESPACE::TypeProto* GetTypeProto() const override; - - virtual MLDataType GetElementType() const { - // should never reach here. - ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); - } - - TensorTypeBase(const TensorTypeBase&) = delete; - TensorTypeBase& operator=(const TensorTypeBase&) = delete; - - protected: - ONNX_NAMESPACE::TypeProto& mutable_type_proto(); - - TensorTypeBase(); - ~TensorTypeBase() override; - - private: - struct Impl; - Impl* impl_; -}; - -/** - * \brief Tensor type. 
This type does not have a C++ type associated with - * it at registration time except the element type. One of the types mentioned - * above at IsTensorContainedType<> list is acceptable. - * - * \details - * Usage: - * ORT_REGISTER_TENSOR(ELEMENT_TYPE) - * Currently all of the Tensors irrespective of the dimensions are mapped to Tensor - * type. IsCompatible() currently ignores shape. - */ - -template -class TensorType : public TensorTypeBase { - public: - static_assert(data_types_internal::IsTensorContainedType::value, - "Requires one of the tensor fundamental types"); - - static MLDataType Type(); - - /// Tensors only can contain basic data types - /// that have been previously registered with ONNXRuntime - MLDataType GetElementType() const override { - return DataTypeImpl::GetType(); - } - - private: - TensorType() { - using namespace data_types_internal; - TensorElementTypeSetter::SetTensorElementType(this->mutable_type_proto()); - } -}; - -/// Common base-class for all sparse-tensors (with different element types). -class SparseTensorTypeBase : public DataTypeImpl { - public: - static MLDataType Type(); - - bool IsSparseTensorType() const override { - return true; - } - - const SparseTensorTypeBase* AsSparseTensorType() const override { - return this; - } - - bool IsCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const override; - - size_t Size() const override; - - DeleteFunc GetDeleteFunc() const override; - - const ONNX_NAMESPACE::TypeProto* GetTypeProto() const override; - - virtual MLDataType GetElementType() const { - // should never reach here. 
- ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); - } - - SparseTensorTypeBase(const SparseTensorTypeBase&) = delete; - SparseTensorTypeBase& operator=(const SparseTensorTypeBase&) = delete; - - protected: - ONNX_NAMESPACE::TypeProto& mutable_type_proto(); - - SparseTensorTypeBase(); - ~SparseTensorTypeBase() override; - - private: - struct Impl; - Impl* impl_; -}; - -template -class SparseTensorType : public SparseTensorTypeBase { - public: - static_assert(data_types_internal::IsSparseTensorContainedType::value, - "Requires one of the sparse-tensor fundamental types"); - - static MLDataType Type(); - - /// Return a MLDataType representing the element-type - MLDataType GetElementType() const override { - return DataTypeImpl::GetType(); - } - - private: - SparseTensorType() { - using namespace data_types_internal; - TensorElementTypeSetter::SetSparseTensorElementType(mutable_type_proto()); - } -}; - -/** - * \brief Provide a specialization for your C++ Non-tensor type - * so your implementation FromDataTypeContainer/ToDataTypeContainer - * functions correctly. Otherwise you get a default implementation - * which may not be what you need/want. 
- * - * This class is used to create OrtValue, fetch data from OrtValue via - * C/C++ APIs - */ -template -struct NonTensorTypeConverter { - static void FromContainer(MLDataType /*dtype*/, const void* /*data*/, size_t /*data_size*/, OrtValue& /*output*/) { - ORT_THROW("Not implemented"); - } - static void ToContainer(const OrtValue& /*input*/, size_t /*data_size*/, void* /*data*/) { - ORT_THROW("Not implemented"); - } -}; - -/** - * \brief Base type for all non-tensors, maps, sequences and opaques - */ -class NonTensorTypeBase : public DataTypeImpl { - public: - size_t Size() const override = 0; - - DeleteFunc GetDeleteFunc() const override = 0; - - virtual CreateFunc GetCreateFunc() const = 0; - - const ONNX_NAMESPACE::TypeProto* GetTypeProto() const override; - - const NonTensorTypeBase* AsNonTensorTypeBase() const override { - return this; - } - - // \brief Override for Non-tensor types to initialize non-tensor CPP - // data representation from data. The caller of the interface - // should have a shared definition of the data which is used to initialize - // CPP data representation. This is used from C API. - // - // \param data - pointer to a data container structure non_tensor type specific - // \param data_size - size of the data container structure, used for rudimentary checks - // \param output - reference to a default constructed non-tensor type - // \returns OrtValue - // \throw if there is an error - virtual void FromDataContainer(const void* data, size_t data_size, OrtValue& output) const; - - // \brief Override for Non-tensor types to fetch data from the internal CPP data representation - // The caller of the interface should have a shared definition of the data which is used to initialize - // CPP data representation. This is used from C API. 
- // - // \param input - OrtValue containing data - // \param data_size - size of the structure that is being passed for receiving data, used for - // validation - // \param data - pointer to receiving data structure - virtual void ToDataContainer(const OrtValue& input, size_t data_size, void* data) const; - - NonTensorTypeBase(const NonTensorTypeBase&) = delete; - NonTensorTypeBase& operator=(const NonTensorTypeBase&) = delete; - - protected: - NonTensorTypeBase(); - ~NonTensorTypeBase() override; - - ONNX_NAMESPACE::TypeProto& mutable_type_proto(); - - bool IsMapCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const; - - bool IsSequenceCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const; - - bool IsOpaqueCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const; - - private: - struct Impl; - Impl* impl_; -}; - -// This is where T is the actual CPPRuntimeType -template -class NonTensorType : public NonTensorTypeBase { - private: - static void Delete(void* p) { - delete static_cast(p); - } - - public: - size_t Size() const override { - return sizeof(T); - } - - DeleteFunc GetDeleteFunc() const override { - return &Delete; - } - - CreateFunc GetCreateFunc() const override { - return []() -> void* { return new T(); }; - } - - protected: - NonTensorType() = default; -}; - -#if !defined(DISABLE_ML_OPS) -/** - * \brief MapType. Use this type to register - * mapping types. 
- * - * \param T - cpp type that you wish to register as runtime MapType - * - * \details Usage: ORT_REGISTER_MAP(C++Type) - * The type is required to have mapped_type and - * key_type defined - */ -template -class MapType : public NonTensorType { - public: - static_assert(data_types_internal::IsTensorContainedType::value, - "Requires one of the tensor fundamental types as key"); - - static MLDataType Type(); - - bool IsCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const override { - return this->IsMapCompatible(type_proto); - } - - private: - MapType() { - using namespace data_types_internal; - SetMapTypes::Set(this->mutable_type_proto()); - } -}; -#endif - -/** - * \brief SequenceType. Use to register sequence for non-tensor types. - * - * \param T - CPP type that you wish to register as Sequence - * runtime type. - * - * \details Usage: ORT_REGISTER_SEQ(C++Type) - * The type is required to have value_type defined - */ -template -class SequenceType : public NonTensorType { - public: - static MLDataType Type(); - - bool IsCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const override { - return this->IsSequenceCompatible(type_proto); - } - - private: - SequenceType() { - data_types_internal::SetSequenceType::Set(this->mutable_type_proto()); - } -}; - -/** - * \brief SequenceTensorTypeBase serves as a base type class for - * Tensor sequences. Akin TensorTypeBase. - * Runtime representation is always TensorSeq. - */ -class SequenceTensorTypeBase : public DataTypeImpl { - public: - static MLDataType Type(); - - bool IsCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const override; - - bool IsTensorSequenceType() const override { - return true; - } - - const SequenceTensorTypeBase* AsSequenceTensorBase() const override { - return this; - } - - virtual MLDataType GetElementType() const { - // should never reach here. 
- ORT_NOT_IMPLEMENTED(__FUNCTION__, " is not implemented"); - } - - size_t Size() const override; - - DeleteFunc GetDeleteFunc() const override; - - const ONNX_NAMESPACE::TypeProto* GetTypeProto() const override; - - SequenceTensorTypeBase(const SequenceTensorTypeBase&) = delete; - SequenceTensorTypeBase& operator=(const SequenceTensorTypeBase&) = delete; - - protected: - SequenceTensorTypeBase(); - ~SequenceTensorTypeBase(); - - ONNX_NAMESPACE::TypeProto& mutable_type_proto(); - - private: - struct Impl; - Impl* impl_; -}; - -/** - * \brief SequenceTensorType. Use to register sequence for non-tensor types. - * - * \param CPPRuntime - We always use TensorSeq - * - * \param TensorElemType - one of the primitive types - * - * \details Usage: ORT_REGISTER_SEQ_TENSOR_TYPE() - * The type is required to have value_type defined - */ -template -class SequenceTensorType : public SequenceTensorTypeBase { - public: - static MLDataType Type(); - - /// Return a MLDataType representing the element-type - MLDataType GetElementType() const override { - return DataTypeImpl::GetType(); - } - - private: - SequenceTensorType() { - data_types_internal::SetSequenceType::Set(this->mutable_type_proto()); - } -}; - -/** - * \brief OpaqueType - * - * \param T - cpp runtume that implements the Opaque type - * - * \param const char D[] - domain must be extern to be unique - * - * \param const char N[] - name must be extern to be unique - * - * \details Only one CPP type can be associated with a particular - * OpaqueType registration - * - */ -template -class OpaqueType : public NonTensorType { - public: - static MLDataType Type(); - - bool IsCompatible(const ONNX_NAMESPACE::TypeProto& type_proto) const override { - return this->IsOpaqueCompatible(type_proto); - } - - void FromDataContainer(const void* data, size_t data_size, OrtValue& output) const override { - NonTensorTypeConverter::FromContainer(this, data, data_size, output); - } - - void ToDataContainer(const OrtValue& input, size_t 
data_size, void* data) const override { - NonTensorTypeConverter::ToContainer(input, data_size, data); - } - - private: - OpaqueType() { - data_types_internal::AssignOpaqueDomainName(D, N, this->mutable_type_proto()); - } -}; - -/** - * \brief PrimitiveDataTypeBase - * Base class for primitive Tensor contained types - * - * \details This class contains an integer constant that can be - * used for input data type dispatching - * - */ -class PrimitiveDataTypeBase : public DataTypeImpl { - public: - bool IsCompatible(const ONNX_NAMESPACE::TypeProto&) const override { - return false; - } - - const PrimitiveDataTypeBase* AsPrimitiveDataType() const override final { - return this; - } - - const ONNX_NAMESPACE::TypeProto* GetTypeProto() const final { - return nullptr; - } - - int32_t GetDataType() const { - return data_type_; - } - - protected: - PrimitiveDataTypeBase() = default; - - void SetDataType(int32_t data_type) { - data_type_ = data_type; - } - - private: - int32_t data_type_; -}; - -/** - * \brief PrimitiveDataType - * Typed specialization for primitive types. - * Concrete instances of this class are used by Tensor. - * - * \param T - primitive data type - * - */ -template -class PrimitiveDataType : public PrimitiveDataTypeBase { - private: - static void Delete(void* p) { - delete static_cast(p); - } - - public: - static MLDataType Type(); - - size_t Size() const override { - return sizeof(T); - } - - DeleteFunc GetDeleteFunc() const override { - return &Delete; - } - - private: - PrimitiveDataType() { - this->SetDataType(data_types_internal::TensorElementTypeSetter::GetElementType()); - } -}; - -// Explicit specialization of base class template function -// is only possible within the enclosing namespace scope, -// thus a simple way to pre-instantiate a given template -// at a registration time does not currently work and the macro -// is needed. 
-#define ORT_REGISTER_TENSOR_TYPE(ELEM_TYPE) \ - template <> \ - MLDataType TensorType::Type() { \ - static TensorType tensor_type; \ - return &tensor_type; \ - } \ - template <> \ - MLDataType DataTypeImpl::GetTensorType() { \ - return TensorType::Type(); \ - } - -#define ORT_REGISTER_SPARSE_TENSOR_TYPE(ELEM_TYPE) \ - template <> \ - MLDataType SparseTensorType::Type() { \ - static SparseTensorType tensor_type; \ - return &tensor_type; \ - } \ - template <> \ - MLDataType DataTypeImpl::GetSparseTensorType() { \ - return SparseTensorType::Type(); \ - } - -#if !defined(DISABLE_ML_OPS) -#define ORT_REGISTER_MAP(TYPE) \ - template <> \ - MLDataType MapType::Type() { \ - static MapType map_type; \ - return &map_type; \ - } \ - template <> \ - MLDataType DataTypeImpl::GetType() { \ - return MapType::Type(); \ - } -#endif - -#define ORT_REGISTER_SEQ(TYPE) \ - template <> \ - MLDataType SequenceType::Type() { \ - static SequenceType sequence_type; \ - return &sequence_type; \ - } \ - template <> \ - MLDataType DataTypeImpl::GetType() { \ - return SequenceType::Type(); \ - } - -#define ORT_REGISTER_SEQ_TENSOR_TYPE(ELEM_TYPE) \ - template <> \ - MLDataType SequenceTensorType::Type() { \ - static SequenceTensorType sequence_tensor_type; \ - return &sequence_tensor_type; \ - } \ - template <> \ - MLDataType DataTypeImpl::GetSequenceTensorType() { \ - return SequenceTensorType::Type(); \ - } - -#define ORT_REGISTER_PRIM_TYPE(TYPE) \ - template <> \ - MLDataType PrimitiveDataType::Type() { \ - static PrimitiveDataType prim_data_type; \ - return &prim_data_type; \ - } \ - template <> \ - MLDataType DataTypeImpl::GetType() { \ - return PrimitiveDataType::Type(); \ - } - -#define ORT_REGISTER_OPAQUE_TYPE(CPPType, Domain, Name) \ - template <> \ - MLDataType OpaqueType::Type() { \ - static OpaqueType opaque_type; \ - return &opaque_type; \ - } \ - template <> \ - MLDataType DataTypeImpl::GetType() { \ - return OpaqueType::Type(); \ - } -} // namespace onnxruntime diff --git 
a/onnxruntime/core/framework/data_types_internal.h b/onnxruntime/core/framework/data_types_internal.h deleted file mode 100644 index c1ce7581..00000000 --- a/onnxruntime/core/framework/data_types_internal.h +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "boost/mp11.hpp" - -#include "core/common/common.h" -#include "core/common/type_list.h" -#include "core/framework/data_types.h" -#include "core/graph/onnx_protobuf.h" - -namespace onnxruntime { -namespace utils { - -template -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED; -} - -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_FLOAT; -} -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_UINT8; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_INT8; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_UINT16; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_INT16; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_INT32; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_INT64; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_STRING; -}; -template <> -constexpr 
ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_BOOL; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_FLOAT16; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_DOUBLE; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_UINT32; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_UINT64; -}; -template <> -constexpr ONNX_NAMESPACE::TensorProto_DataType ToTensorProtoElementType() { - return ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16; -}; - - // The following primitives are strongly recommended for switching on tensor input datatypes for - // kernel implementations. - // - // 1) If you need to handle all of the primitive tensor contained datatypes, the best choice would be macros - // DispatchOnTensorType or DispatchOnTensorTypeWithReturn. Use inline wrappers so your function can be invoked as function(). - // 2) if you have a few types, use Tensor.IsDataType()/IsDataTypeString() or use utils::IsPrimitiveDataType() - // if you have a standalone MLDatatType with a sequence of if/else statements. - // 3) For something in between, we suggest to use CallDispatcher pattern. - // - // Invoking DataTypeImpl::GetType() for switching on input types is discouraged and should be avoided. - // Every primitive type carries with it an integer constant that can be used for quick switching on types. - -#define DispatchOnTensorType(tensor_type, function, ...) 
\ - switch (tensor_type->AsPrimitiveDataType()->GetDataType()) { \ - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_BOOL: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_STRING: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_INT8: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_UINT8: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_INT16: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_UINT16: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_INT32: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_UINT32: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_INT64: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_UINT64: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: \ - function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16: \ - function(__VA_ARGS__); \ - break; \ - default: \ - ORT_ENFORCE(false, "Unknown tensor type of ", tensor_type); \ - } - -#define DispatchOnTensorTypeWithReturn(tensor_type, retval, function, ...) 
\ - switch (tensor_type->AsPrimitiveDataType()->GetDataType()) { \ - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_BOOL: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_STRING: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_INT8: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_UINT8: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_UINT16: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_INT16: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_INT32: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_UINT32: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_INT64: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_UINT64: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: \ - retval = function(__VA_ARGS__); \ - break; \ - case ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16: \ - retval = function(__VA_ARGS__); \ - break; \ - default: \ - ORT_ENFORCE(false, "Unknown tensor type of ", tensor_type); \ - } - -//////////////////////////////////////////////////////////////////////////////// -/// Use the following primitives if you have a few types to switch on so you -// can write a short sequence of if/else statements. - -// This is a frequently used check so we make a separate utility function. 
-inline bool IsDataTypeString(MLDataType dt_type) { - auto prim_type = dt_type->AsPrimitiveDataType(); - return (prim_type != nullptr && prim_type->GetDataType() == ONNX_NAMESPACE::TensorProto_DataType_STRING); -} - -// Test if MLDataType is a concrete type of PrimitiveDataTypeBase -// and it is T -template -inline bool IsPrimitiveDataType(MLDataType dt_type) { - auto prim_type = dt_type->AsPrimitiveDataType(); - return (prim_type != nullptr && prim_type->GetDataType() == ToTensorProtoElementType()); -} - -// Use after AsPrimitiveDataType() is successful -// Check if PrimitiveDataTypeBase is of type T -template -inline bool IsPrimitiveDataType(const PrimitiveDataTypeBase* prim_type) { - assert(prim_type != nullptr); - return prim_type->GetDataType() == ToTensorProtoElementType(); -} - -// This implementation contains a workaround for GCC bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47226 -// GCC until very recently does not support template parameter pack expansion within lambda context. -namespace mltype_dispatcher_internal { - -// T - type handled by this helper -class CallableDispatchableHelper { - int32_t dt_type_; // Type currently dispatched - size_t called_; - - public: - explicit CallableDispatchableHelper(int32_t dt_type) noexcept : dt_type_(dt_type), called_(0) {} - - // Must return integer to be in a expandable context - template - int Invoke(Fn&& fn, Args&&... args) { - if (utils::ToTensorProtoElementType() == dt_type_) { - std::forward(fn)(std::forward(args)...); - ++called_; - } - return 0; - } - - void CheckCalledOnce() { - ORT_ENFORCE(called_ == 1, "Unsupported data type: ", dt_type_); - } -}; - -// Default policy is to throw an exception. -// Other policies may set the second result argument accordingly. 
-template -struct UnsupportedTypeDefaultPolicy { - void operator()(int32_t dt_type, Ret& /*result*/) const { - ORT_THROW("Unsupported data type: ", dt_type); - } -}; - -// Helper with the result type -template -class CallableDispatchableRetHelper { - int32_t dt_type_; // Type currently dispatched - size_t called_; - Ret result_; - - public: - explicit CallableDispatchableRetHelper(int32_t dt_type) noexcept : dt_type_(dt_type), called_(0), result_() {} - - Ret Get() { - // No type was invoked - if (called_ == 0) { - UnsupportedPolicy()(dt_type_, result_); - } - return result_; - } - - // Must return integer to be in a expandable context - template - int Invoke(Fn&& fn, Args&&... args) { - if (utils::ToTensorProtoElementType() == dt_type_) { - result_ = std::forward(fn)(std::forward(args)...); - ++called_; - } - return 0; - } -}; - -template -using TensorProtoElementTypeConstant = - std::integral_constant()>; - -using UndefinedTensorProtoElementTypeConstant = - std::integral_constant; - -} // namespace mltype_dispatcher_internal - -/** - * This class helps to efficiently dispatch calls to implementation function - * objects with a tensor element type template argument. - * - * The constructor accepts a value corresponding to a tensor element type. - * For example, it can be obtained from: - * input_tensor->GetElementType() - * - * The Invoke member functions will instantiate and invoke the provided - * function object template, Fn. Fn must be default constructible. Fn must also - * have a tensor element type template argument. This type template argument - * will be the type that corresponds to the value given in the constructor. - * These functions accept and forward arbitrary function arguments. They ensure - * that Fn is called once with the type specified in the constructor. - * - * @tparam Types The types supported by the implementation. This should be a - * set of ONNX tensor element types that are supported by ORT. 
- */ -template -class MLTypeCallDispatcher { - using SupportedTypeList = TypeList; - using SupportedTensorProtoElementTypeList = - boost::mp11::mp_transform< - mltype_dispatcher_internal::TensorProtoElementTypeConstant, SupportedTypeList>; - - static_assert( - boost::mp11::mp_and< - boost::mp11::mp_is_set, - boost::mp11::mp_not< - boost::mp11::mp_set_contains< - SupportedTensorProtoElementTypeList, - mltype_dispatcher_internal::UndefinedTensorProtoElementTypeConstant>>>::value, - "Types must map to a unique set of ONNX tensor element data types supported by ORT."); - - int32_t dt_type_; - - public: - /** - * Constructor. - * @param dt_type The value corresponding to the tensor element type to be - * dispatched to. This can be obtained from - * input_tensor->GetElementType() or - * utils::ToTensorProtoElementType(). - */ - explicit MLTypeCallDispatcher(int32_t dt_type) noexcept : dt_type_(dt_type) {} - - /** - * Invokes Fn with the specified arguments. - * - * @tparam Fn The function object template. - * @tparam Args The argument types. - */ - template