diff --git a/README.md b/README.md index a1accd4c..a8e5bc9c 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,7 @@ One-step installation scripts are provided for the dependencies' installation. P * run object detection service sample code input from Image Run image processing service: ```bash - ros2 run dynamic_vino_sample image_object_server "image_object_server_oss.yaml" + ros2 launch dynamic_vino_sample image_object_server_oss.launch.py ``` Run example application with an absolute path of an image on another console: ```bash @@ -197,3 +197,4 @@ One-step installation scripts are provided for the dependencies' installation. P # More Information * ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw + diff --git a/data/images/car_vihecle.png b/data/images/car_vihecle.png new file mode 100644 index 00000000..2b2aa223 Binary files /dev/null and b/data/images/car_vihecle.png differ diff --git a/doc/BINARY_VERSION_README.md b/doc/BINARY_VERSION_README.md index 4ce6deba..3df484b4 100644 --- a/doc/BINARY_VERSION_README.md +++ b/doc/BINARY_VERSION_README.md @@ -129,24 +129,30 @@ This project is a ROS2 wrapper for CV API of [OpenVINO™](https://software.inte ## 5. Running the Demo * Preparation * download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model - ```bash - cd /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/install_prerequisites - sudo ./install_prerequisites.sh - mkdir -p ~/Downloads/models - cd ~/Downloads/models - wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - cd mask_rcnn_inception_v2_coco_2018_01_28 - python3 /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir ./output/ - sudo mkdir -p /opt/models - sudo ln -sf ~/Downloads/models/mask_rcnn_inception_v2_coco_2018_01_28 /opt/models/ - ``` + ```bash + #object segmentation model + cd /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/install_prerequisites + sudo ./install_prerequisites.sh + mkdir -p ~/Downloads/models + cd ~/Downloads/models + wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz + tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz + cd mask_rcnn_inception_v2_coco_2018_01_28 + python3 /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir ./output/ + sudo mkdir -p /opt/models + sudo ln -sf ~/Downloads/models/mask_rcnn_inception_v2_coco_2018_01_28 /opt/models/ + #object detection model + cd /opt/intel/computer_vision_sdk/deployment_tools/model_downloader + sudo python3 downloader.py --name ssd300 + sudo python3 /opt/intel/computer_vision_sdk/deployment_tools/model_optimizer/mo.py --input_model /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/ssd/300/caffe/ssd300.caffemodel 
--output_dir /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/ssd/300/caffe/output/ + ``` * copy label files (execute _once_)
```bash sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/intel/computer_vision_sdk/deployment_tools/intel_models/emotions-recognition-retail-0003/FP32 sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/intel/computer_vision_sdk/deployment_tools/intel_models/face-detection-adas-0001/FP32 sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/intel/computer_vision_sdk/deployment_tools/intel_models/face-detection-adas-0001/FP16 sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels ~/Downloads/models/mask_rcnn_inception_v2_coco_2018_01_28/output + sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/object_detection/ssd300.labels /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/ssd/300/caffe/output ``` * set ENV LD_LIBRARY_PATH ```bash @@ -164,10 +170,6 @@ This project is a ROS2 wrapper for CV API of [OpenVINO™](https://software.inte ```bash ros2 launch dynamic_vino_sample pipeline_object.launch.py ``` -* run object detection sample code input from RealSenseCameraTopic. - ```bash - ros2 launch dynamic_vino_sample pipeline_object_topic.launch.py - ``` * run object segmentation sample code input from RealSenseCameraTopic. ```bash ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py @@ -179,7 +181,7 @@ This project is a ROS2 wrapper for CV API of [OpenVINO™](https://software.inte * run object detection service sample code input from Image Run image processing service: ```bash - ros2 run dynamic_vino_sample image_object_server "image_object_server.yaml" + ros2 launch dynamic_vino_sample image_object_server.launch.py ``` Run example application with an absolute path of an image on another console: ```bash @@ -196,3 +198,4 @@ This project is a ROS2 wrapper for CV API of [OpenVINO™](https://software.inte ``` > solution - Please reboot while connecting Intel® Neural Compute Stick 2. + diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 6dfdb946..c26ae236 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -12,16 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-cmake_minimum_required (VERSION 3.5) +cmake_minimum_required(VERSION 3.5) project(dynamic_vino_lib) -list (APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) +list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) #################################### ## to use C++14 -set (CMAKE_CXX_STANDARD 14) -set (CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") #################################### @@ -32,7 +32,7 @@ set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") message(STATUS "Looking for inference engine configuration file at: ${CMAKE_PREFIX_PATH}") find_package(InferenceEngine 1.1) -if (NOT InferenceEngine_FOUND) +if(NOT InferenceEngine_FOUND) message(FATAL_ERROR "") endif() @@ -62,7 +62,7 @@ find_package(cv_bridge REQUIRED) find_package(vino_param_lib REQUIRED) find_package(yaml_cpp_vendor REQUIRED) -set (CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) +set(CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) add_library(cpu_extension SHARED IMPORTED) set_target_properties(cpu_extension PROPERTIES @@ -74,46 +74,46 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "") set(CMAKE_BUILD_TYPE "Release") endif() -if (NOT(BIN_FOLDER)) +if(NOT(BIN_FOLDER)) if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8") - set (ARCH intel64) + set(ARCH intel64) else() - set (ARCH ia32) + set(ARCH ia32) endif() - set (BIN_FOLDER ${ARCH}) + set(BIN_FOLDER ${ARCH}) endif() -if (NOT (IE_MAIN_SOURCE_DIR)) +if(NOT (IE_MAIN_SOURCE_DIR)) set(NEED_EXTENSIONS TRUE) - if (WIN32) - set (IE_MAIN_SOURCE_DIR ${CMAKE_SOURCE_DIR}/../bin/) + if(WIN32) + set(IE_MAIN_SOURCE_DIR ${CMAKE_SOURCE_DIR}/../bin/) else() - set (IE_MAIN_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + set(IE_MAIN_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}) endif() endif() if(NOT(UNIX)) - set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) - set (CMAKE_LIBRARY_PATH ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) - set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) - set (CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) - set (CMAKE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) - set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) - set (LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) - set (LIBRARY_OUTPUT_PATH ${LIBRARY_OUTPUT_DIRECTORY}) # compatibility issue: linux uses LIBRARY_OUTPUT_PATH, windows uses LIBRARY_OUTPUT_DIRECTORY -else () - set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib) - set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib) - set (CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) - set (CMAKE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) - set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) - set (LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib) - set (LIBRARY_OUTPUT_PATH ${LIBRARY_OUTPUT_DIRECTORY}/lib) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) + set(CMAKE_LIBRARY_PATH ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) + set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) + set(CMAKE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) + set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) + 
set(LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}) + set(LIBRARY_OUTPUT_PATH ${LIBRARY_OUTPUT_DIRECTORY}) # compatibility issue: linux uses LIBRARY_OUTPUT_PATH, windows uses LIBRARY_OUTPUT_DIRECTORY +else() + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib) + set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) + set(CMAKE_PDB_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) + set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) + set(LIBRARY_OUTPUT_DIRECTORY ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib) + set(LIBRARY_OUTPUT_PATH ${LIBRARY_OUTPUT_DIRECTORY}/lib) endif() -if (WIN32) +if(WIN32) if(NOT "${CMAKE_SIZEOF_VOID_P}" EQUAL "8") message(FATAL_ERROR "Only 64-bit supported on Windows") endif() @@ -122,16 +122,16 @@ if (WIN32) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS -DNOMINMAX") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") #no asynchronous structured exception handling set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE") - if (ENABLE_OMP) + if(ENABLE_OMP) find_package(OpenMP) - if (OPENMP_FOUND) - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") + if(OPENMP_FOUND) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") endif() endif() else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Werror=return-type ") - if (APPLE) + if(APPLE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-command-line-argument") elseif(UNIX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wuninitialized -Winit-self -Wmaybe-uninitialized") @@ -152,10 +152,14 @@ if(UNIX OR APPLE) endif() endif() -include(feature_defs OPTIONAL) +#The include below is commented out so that "colcon test" runs without errors. +#This may affect CPU inference tuning, although we tested on a Core i7-6700 and saw similar performance. +#If inference performance is a concern, you can get the extension source from the following URL and re-enable the line below.
+#https://github.com/opencv/dldt/tree/2018/inference-engine/src/extension/cmake +#include(feature_defs OPTIONAL) # Properties->C/C++->General->Additional Include Directories -include_directories ( +include_directories( # ${CMAKE_CURRENT_SOURCE_DIR}/common/format_reader ${CMAKE_CURRENT_SOURCE_DIR}/include ${InferenceEngine_INCLUDE_DIRS} @@ -165,8 +169,8 @@ include_directories ( ${librealsense2_INCLUDE_DIRS} ) -if (UNIX) - SET(LIB_DL dl) +if(UNIX) + set(LIB_DL dl) endif() set(DEPENDENCIES ${librealsense2_LIBRARIES} ${OpenCV_LIBS} cpu_extension) @@ -202,7 +206,7 @@ add_library(${PROJECT_NAME} SHARED src/outputs/rviz_output.cpp src/outputs/base_output.cpp src/outputs/ros_service_output.cpp - ) +) target_link_libraries(${PROJECT_NAME} ${DEPENDENCIES}) diff --git a/dynamic_vino_lib/cmake/CPUID.cmake b/dynamic_vino_lib/cmake/CPUID.cmake deleted file mode 100644 index 904a65cb..00000000 --- a/dynamic_vino_lib/cmake/CPUID.cmake +++ /dev/null @@ -1,353 +0,0 @@ -# the module will build and run cpuid utility, which store detected -# host processor features into cpuid.txt file in form: -# FEATURE [not] supported -# variable HAVE_CPUID_INFO set in case of success -# if variable HAVE_CPUID_INFO is set then it is possible -# to test HAVE_SSE42/HAVE_AVX2 variables - -include (CheckCXXSourceRuns) - -if(NOT WIN32) - set(CMAKE_REQUIRED_FLAGS "-std=c++11") -endif() - -check_cxx_source_runs( -" -// InstructionSet.cpp -// Compile by using: cl /EHsc /W4 InstructionSet.cpp -// processor: x86, x64 -// Uses the __cpuid intrinsic to get information about -// CPU extended instruction set support. -// -// source origin: -// https://msdn.microsoft.com/en-us/library/hskdteyh.aspx -// https://gcc.gnu.org/git/?p=gcc.git;a=blob_plain;f=gcc/config/i386/driver-i386.c - - -#include -#include -#include -#include -#include -#include -#ifdef WIN32 -#include -#else -#include -#include -#endif - -class InstructionSet -{ - // forward declarations - class InstructionSet_Internal; - -public: - // getters - static std::string Vendor(void) { return CPU_Rep.vendor_; } - static std::string Brand(void) { return CPU_Rep.brand_; } - - static bool SSE3(void) { return CPU_Rep.f_1_ECX_[0]; } - static bool PCLMULQDQ(void) { return CPU_Rep.f_1_ECX_[1]; } - static bool MONITOR(void) { return CPU_Rep.f_1_ECX_[3]; } - static bool SSSE3(void) { return CPU_Rep.f_1_ECX_[9]; } - static bool FMA(void) { return CPU_Rep.f_1_ECX_[12]; } - static bool CMPXCHG16B(void) { return CPU_Rep.f_1_ECX_[13]; } - static bool SSE41(void) { return CPU_Rep.f_1_ECX_[19]; } - static bool SSE42(void) { return CPU_Rep.f_1_ECX_[20]; } - static bool MOVBE(void) { return CPU_Rep.f_1_ECX_[22]; } - static bool POPCNT(void) { return CPU_Rep.f_1_ECX_[23]; } - static bool AES(void) { return CPU_Rep.f_1_ECX_[25]; } - static bool XSAVE(void) { return CPU_Rep.f_1_ECX_[26]; } - static bool OSXSAVE(void) { return CPU_Rep.f_1_ECX_[27]; } - static bool AVX(void) { return CPU_Rep.f_1_ECX_[28]; } - static bool F16C(void) { return CPU_Rep.f_1_ECX_[29]; } - static bool RDRAND(void) { return CPU_Rep.f_1_ECX_[30]; } - - static bool MSR(void) { return CPU_Rep.f_1_EDX_[5]; } - static bool CX8(void) { return CPU_Rep.f_1_EDX_[8]; } - static bool SEP(void) { return CPU_Rep.f_1_EDX_[11]; } - static bool CMOV(void) { return CPU_Rep.f_1_EDX_[15]; } - static bool CLFSH(void) { return CPU_Rep.f_1_EDX_[19]; } - static bool MMX(void) { return CPU_Rep.f_1_EDX_[23]; } - static bool FXSR(void) { return CPU_Rep.f_1_EDX_[24]; } - static bool SSE(void) { return CPU_Rep.f_1_EDX_[25]; } - static bool SSE2(void) { 
return CPU_Rep.f_1_EDX_[26]; } - - static bool FSGSBASE(void) { return CPU_Rep.f_7_EBX_[0]; } - static bool BMI1(void) { return CPU_Rep.f_7_EBX_[3]; } - static bool HLE(void) { return CPU_Rep.isIntel_ && CPU_Rep.f_7_EBX_[4]; } - static bool AVX2(void) { return CPU_Rep.f_7_EBX_[5]; } - static bool BMI2(void) { return CPU_Rep.f_7_EBX_[8]; } - static bool ERMS(void) { return CPU_Rep.f_7_EBX_[9]; } - static bool INVPCID(void) { return CPU_Rep.f_7_EBX_[10]; } - static bool RTM(void) { return CPU_Rep.isIntel_ && CPU_Rep.f_7_EBX_[11]; } - static bool AVX512F(void) { return CPU_Rep.f_7_EBX_[16]; } - static bool RDSEED(void) { return CPU_Rep.f_7_EBX_[18]; } - static bool ADX(void) { return CPU_Rep.f_7_EBX_[19]; } - static bool AVX512PF(void) { return CPU_Rep.f_7_EBX_[26]; } - static bool AVX512ER(void) { return CPU_Rep.f_7_EBX_[27]; } - static bool AVX512CD(void) { return CPU_Rep.f_7_EBX_[28]; } - static bool SHA(void) { return CPU_Rep.f_7_EBX_[29]; } - - static bool PREFETCHWT1(void) { return CPU_Rep.f_7_ECX_[0]; } - - static bool LAHF(void) { return CPU_Rep.f_81_ECX_[0]; } - static bool LZCNT(void) { return CPU_Rep.isIntel_ && CPU_Rep.f_81_ECX_[5]; } - static bool ABM(void) { return CPU_Rep.isAMD_ && CPU_Rep.f_81_ECX_[5]; } - static bool SSE4a(void) { return CPU_Rep.isAMD_ && CPU_Rep.f_81_ECX_[6]; } - static bool XOP(void) { return CPU_Rep.isAMD_ && CPU_Rep.f_81_ECX_[11]; } - static bool TBM(void) { return CPU_Rep.isAMD_ && CPU_Rep.f_81_ECX_[21]; } - - static bool SYSCALL(void) { return CPU_Rep.isIntel_ && CPU_Rep.f_81_EDX_[11]; } - static bool MMXEXT(void) { return CPU_Rep.isAMD_ && CPU_Rep.f_81_EDX_[22]; } - static bool RDTSCP(void) { return CPU_Rep.isIntel_ && CPU_Rep.f_81_EDX_[27]; } - static bool _3DNOWEXT(void) { return CPU_Rep.isAMD_ && CPU_Rep.f_81_EDX_[30]; } - static bool _3DNOW(void) { return CPU_Rep.isAMD_ && CPU_Rep.f_81_EDX_[31]; } - -private: - static const InstructionSet_Internal CPU_Rep; - - class InstructionSet_Internal - { - public: - InstructionSet_Internal() - : nIds_{ 0 }, - nExIds_{ 0 }, - isIntel_{ false }, - isAMD_{ false }, - f_1_ECX_{ 0 }, - f_1_EDX_{ 0 }, - f_7_EBX_{ 0 }, - f_7_ECX_{ 0 }, - f_81_ECX_{ 0 }, - f_81_EDX_{ 0 }, - data_{}, - extdata_{} - { -#ifdef WIN32 - std::array cpui; -#else - std::array cpui; -#endif - - // Calling __cpuid with 0x0 as the function_id argument - // gets the number of the highest valid function ID. -#ifdef WIN32 - __cpuid(cpui.data(), 0); -#else - cpui[0] = __get_cpuid_max(0, &cpui[1]); -#endif - nIds_ = cpui[0]; - - for (int i = 0; i <= nIds_; ++i) - { -#ifdef WIN32 - __cpuidex(cpui.data(), i, 0); -#else - __cpuid_count(i, 0, cpui[0], cpui[1], cpui[2], cpui[3]); -#endif - - data_.push_back(cpui); - } - - // Capture vendor string - char vendor[0x20]; - memset(vendor, 0, sizeof(vendor)); - *reinterpret_cast(vendor + 0) = data_[0][1]; - *reinterpret_cast(vendor + 4) = data_[0][3]; - *reinterpret_cast(vendor + 8) = data_[0][2]; - vendor_ = vendor; - if (vendor_ == \"GenuineIntel\") - { - isIntel_ = true; - } - else if (vendor_ == \"AuthenticAMD\") - { - isAMD_ = true; - } - - // load bitset with flags for function 0x00000001 - if (nIds_ >= 1) - { - f_1_ECX_ = data_[1][2]; - f_1_EDX_ = data_[1][3]; - } - - // load bitset with flags for function 0x00000007 - if (nIds_ >= 7) - { - f_7_EBX_ = data_[7][1]; - f_7_ECX_ = data_[7][2]; - } - - // Calling __cpuid with 0x80000000 as the function_id argument - // gets the number of the highest valid extended ID. 
-#ifdef WIN32 - __cpuid(cpui.data(), 0x80000000); -#else - __cpuid(0x80000000, cpui[0], cpui[1], cpui[2], cpui[3]); -#endif - nExIds_ = cpui[0]; - - char brand[0x40]; - memset(brand, 0, sizeof(brand)); - - for (int i = 0x80000000; i <= nExIds_; ++i) - { -#ifdef WIN32 - __cpuidex(cpui.data(), i, 0); -#else - __cpuid_count(i, 0, cpui[0], cpui[1], cpui[2], cpui[3]); -#endif - extdata_.push_back(cpui); - } - - // load bitset with flags for function 0x80000001 - if (nExIds_ >= 0x80000001) - { - f_81_ECX_ = extdata_[1][2]; - f_81_EDX_ = extdata_[1][3]; - } - - // Interpret CPU brand string if reported - if (nExIds_ >= 0x80000004) - { - memcpy(brand + 0, extdata_[2].data(), sizeof(cpui)); - memcpy(brand + 16, extdata_[3].data(), sizeof(cpui)); - memcpy(brand + 32, extdata_[4].data(), sizeof(cpui)); - brand_ = brand; - } - }; - - int nIds_; - int nExIds_; - std::string vendor_; - std::string brand_; - bool isIntel_; - bool isAMD_; - std::bitset<32> f_1_ECX_; - std::bitset<32> f_1_EDX_; - std::bitset<32> f_7_EBX_; - std::bitset<32> f_7_ECX_; - std::bitset<32> f_81_ECX_; - std::bitset<32> f_81_EDX_; -#ifdef WIN32 - std::vector> data_; - std::vector> extdata_; -#else - std::vector> data_; - std::vector> extdata_; -#endif - }; -}; - -// Initialize static member data -const InstructionSet::InstructionSet_Internal InstructionSet::CPU_Rep; - -// Print out supported instruction set extensions -int main() -{ - std::ofstream fo(\"cpuid.txt\"); - auto& outstream = fo;//std::cout; - - auto support_message = [&outstream](std::string isa_feature, bool is_supported) { - outstream << isa_feature << (is_supported ? \" supported\" : \" not supported\") << std::endl; - }; - - std::cout << InstructionSet::Vendor() << std::endl; - std::cout << InstructionSet::Brand() << std::endl; - - support_message(\"3DNOW\", InstructionSet::_3DNOW()); - support_message(\"3DNOWEXT\", InstructionSet::_3DNOWEXT()); - support_message(\"ABM\", InstructionSet::ABM()); - support_message(\"ADX\", InstructionSet::ADX()); - support_message(\"AES\", InstructionSet::AES()); - support_message(\"AVX\", InstructionSet::AVX()); - support_message(\"AVX2\", InstructionSet::AVX2()); - support_message(\"AVX512CD\", InstructionSet::AVX512CD()); - support_message(\"AVX512F\", InstructionSet::AVX512F()); - support_message(\"AVX512ER\", InstructionSet::AVX512ER()); - support_message(\"AVX512PF\", InstructionSet::AVX512PF()); - support_message(\"BMI1\", InstructionSet::BMI1()); - support_message(\"BMI2\", InstructionSet::BMI2()); - support_message(\"CLFSH\", InstructionSet::CLFSH()); - support_message(\"CMPXCHG16B\", InstructionSet::CMPXCHG16B()); - support_message(\"CX8\", InstructionSet::CX8()); - support_message(\"ERMS\", InstructionSet::ERMS()); - support_message(\"F16C\", InstructionSet::F16C()); - support_message(\"FMA\", InstructionSet::FMA()); - support_message(\"FSGSBASE\", InstructionSet::FSGSBASE()); - support_message(\"FXSR\", InstructionSet::FXSR()); - support_message(\"HLE\", InstructionSet::HLE()); - support_message(\"INVPCID\", InstructionSet::INVPCID()); - support_message(\"LAHF\", InstructionSet::LAHF()); - support_message(\"LZCNT\", InstructionSet::LZCNT()); - support_message(\"MMX\", InstructionSet::MMX()); - support_message(\"MMXEXT\", InstructionSet::MMXEXT()); - support_message(\"MONITOR\", InstructionSet::MONITOR()); - support_message(\"MOVBE\", InstructionSet::MOVBE()); - support_message(\"MSR\", InstructionSet::MSR()); - support_message(\"OSXSAVE\", InstructionSet::OSXSAVE()); - support_message(\"PCLMULQDQ\", 
InstructionSet::PCLMULQDQ()); - support_message(\"POPCNT\", InstructionSet::POPCNT()); - support_message(\"PREFETCHWT1\", InstructionSet::PREFETCHWT1()); - support_message(\"RDRAND\", InstructionSet::RDRAND()); - support_message(\"RDSEED\", InstructionSet::RDSEED()); - support_message(\"RDTSCP\", InstructionSet::RDTSCP()); - support_message(\"RTM\", InstructionSet::RTM()); - support_message(\"SEP\", InstructionSet::SEP()); - support_message(\"SHA\", InstructionSet::SHA()); - support_message(\"SSE\", InstructionSet::SSE()); - support_message(\"SSE2\", InstructionSet::SSE2()); - support_message(\"SSE3\", InstructionSet::SSE3()); - support_message(\"SSE4.1\", InstructionSet::SSE41()); - support_message(\"SSE4.2\", InstructionSet::SSE42()); - support_message(\"SSE4a\", InstructionSet::SSE4a()); - support_message(\"SSSE3\", InstructionSet::SSSE3()); - support_message(\"SYSCALL\", InstructionSet::SYSCALL()); - support_message(\"TBM\", InstructionSet::TBM()); - support_message(\"XOP\", InstructionSet::XOP()); - support_message(\"XSAVE\", InstructionSet::XSAVE()); - return 0; -} -" -HAVE_CPUID_INFO -) - -if(HAVE_CPUID_INFO) - set(_CPUID_INFO "${CMAKE_BINARY_DIR}/cpuid.txt") - set(HAVE_AVX512F FALSE) - set(HAVE_AVX2 FALSE) - set(HAVE_SSE42 FALSE) - - file(STRINGS ${_CPUID_INFO} _FEATURES) - - message(STATUS "Host CPU features:") - - foreach(FEATURE IN ITEMS ${_FEATURES}) - - message(STATUS " ${FEATURE}") - - string(COMPARE EQUAL "${FEATURE}" "AVX512F supported" _FEATURE_FOUND) - if(${_FEATURE_FOUND}) - if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9) - set(HAVE_AVX512F ${_FEATURE_FOUND}) - else() - message(WARNING "Compiler doesn't support AVX512 instructuion set") - endif() - endif() - string(COMPARE EQUAL "${FEATURE}" "AVX2 supported" _FEATURE_FOUND) - if(${_FEATURE_FOUND}) - set(HAVE_AVX2 ${_FEATURE_FOUND}) - endif() - string(COMPARE EQUAL "${FEATURE}" "SSE4.2 supported" _FEATURE_FOUND) - if(${_FEATURE_FOUND}) - set(HAVE_SSE42 ${_FEATURE_FOUND}) - endif() - endforeach(FEATURE) - - unset(_FEATURE_FOUND) - unset(_CPUID_INFO) - unset(_FEATURES) -endif() diff --git a/dynamic_vino_lib/cmake/OptimizationFlags.cmake b/dynamic_vino_lib/cmake/OptimizationFlags.cmake deleted file mode 100644 index 8e87ea46..00000000 --- a/dynamic_vino_lib/cmake/OptimizationFlags.cmake +++ /dev/null @@ -1,180 +0,0 @@ -# -# service functions: -# set_target_cpu_flags -# set_target_vectorizer_report_flags -# print_target_compiler_options - - -# set processor speicif compilation options, based either on -# externally defined ENABLE_SSE42/ENABLE_AVX2 options or -# based on detected host processor featured (HAVE_SSE/HAVE_AVX2) -# Note, when ENABLE_AVX2 option is on by any means then ENABLE_SSE42 option -# will be turned on if not set explicitely - - -function(set_target_cpu_flags TARGET_NAME) - # if have cpuid info and not requested specific cpu features externally - # turn on cpu specific compile options based on detected features - # of host processor - # if don't have cpuid info or cpu specific features explicitly requested - # set compile options based on requested features - if(${HAVE_CPUID_INFO}) - # ENABLE_SSE42, ENABLE_AVX2, ENABLE_AVX512 weren't set explicitly, - # so derive it from host cpu features - if( (NOT DEFINED ENABLE_SSE42) AND (NOT DEFINED ENABLE_AVX2) AND (NOT DEFINED ENABLE_AVX512F) ) - set(ENABLE_SSE42 ${HAVE_SSE42}) - set(ENABLE_AVX2 ${HAVE_AVX2}) - set(ENABLE_AVX512F ${HAVE_AVX512F}) - endif() - # ENABLE_SSE42 was set explicitly, ENABLE_AVX2 and 
ENABLE_AVX512F were not defined. - # Consider as request to build for Atom, turn off AVX2 and AVX512 - if( (${ENABLE_SSE42}) AND (NOT DEFINED ENABLE_AVX2) AND (NOT DEFINED ENABLE_AVX512F) ) - set(ENABLE_AVX2 OFF) - set(ENABLE_AVX512F OFF) - endif() - # ENABLE_AVX2 was set explicitly, ENABLE_SSE42 and ENABLE_AVX512F were not defined - # Consider as request to build for Core, turn on SSE42 as supported feature - if( (NOT DEFINED ENABLE_SSE42) AND (${ENABLE_AVX2}) AND (NOT DEFINED ENABLE_AVX512F) ) - set(ENABLE_SSE42 ON) - set(ENABLE_AVX512F OFF) - endif() - # ENABLE_AVX512 was set explicitly, ENABLE_SSE42 and ENABLE_AVX2 were not defined - # Consider as request to build for Xeon (Skylake server and later), turn on SSE42 and AVX2 as supported feature - if( (NOT DEFINED ENABLE_SSE42) AND (NOT DEFINED ENABLE_AVX2) AND (${ENABLE_AVX512F}) ) - set(ENABLE_SSE42 ON) - set(ENABLE_AVX2 ON) - endif() - endif() - - if(WIN32) - if(${ENABLE_AVX512F}) - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_SSE") - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_AVX2") - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_AVX512F") - if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${TARGET_NAME} PUBLIC "/QxCOMMON-AVX512") - target_compile_options(${TARGET_NAME} PUBLIC "/Qvc14") - endif() - if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - message(WARNING "MSVC Compiler doesn't support AVX512 instructuion set") - endif() - elseif(${ENABLE_AVX2}) - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_SSE") - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_AVX2") - if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${TARGET_NAME} PUBLIC "/QxCORE-AVX2") - target_compile_options(${TARGET_NAME} PUBLIC "/Qvc14") - endif() - if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - target_compile_options(${TARGET_NAME} PUBLIC "/arch:AVX2") - endif() - elseif(${ENABLE_SSE42}) - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_SSE") - if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${TARGET_NAME} PUBLIC "/arch:SSE4.2") - target_compile_options(${TARGET_NAME} PUBLIC "/QxSSE4.2") - target_compile_options(${TARGET_NAME} PUBLIC "/Qvc14") - endif() - if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - target_compile_options(${TARGET_NAME} PUBLIC "/arch:SSE4.2") - endif() - endif() - endif() - if(UNIX) - if(${ENABLE_AVX512F}) - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_SSE") - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_AVX2") - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_AVX512F") - if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${TARGET_NAME} PUBLIC "-xCOMMON-AVX512") - endif() - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - target_compile_options(${TARGET_NAME} PUBLIC "-mavx512f") - target_compile_options(${TARGET_NAME} PUBLIC "-mfma") - endif() - elseif(${ENABLE_AVX2}) - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_SSE") - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_AVX2") - if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${TARGET_NAME} PUBLIC "-march=core-avx2") - target_compile_options(${TARGET_NAME} PUBLIC "-xCORE-AVX2") - target_compile_options(${TARGET_NAME} PUBLIC "-mtune=core-avx2") - endif() - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - target_compile_options(${TARGET_NAME} PUBLIC "-mavx2") - target_compile_options(${TARGET_NAME} PUBLIC "-mfma") - endif() - if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - target_compile_options(${TARGET_NAME} 
PUBLIC "-mavx2") - target_compile_options(${TARGET_NAME} PUBLIC "-mfma") - endif() - elseif(${ENABLE_SSE42}) - target_compile_definitions(${TARGET_NAME} PUBLIC "-DHAVE_SSE") - if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${TARGET_NAME} PUBLIC "-msse4.2") - target_compile_options(${TARGET_NAME} PUBLIC "-xSSE4.2") - endif() - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - target_compile_options(${TARGET_NAME} PUBLIC "-msse4.2") - endif() - endif() - endif() -endfunction() - - -# function set vectorization report flags in case of -# Intel compiler (might be useful for analisys of which loops were not -# vectorized and why) -function(set_target_vectorizer_report_flags TARGET_NAME) - if(WIN32) - if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${TARGET_NAME} PUBLIC "/Qopt-report=3") - target_compile_options(${TARGET_NAME} PUBLIC "/Qopt-report-format=vs") - target_compile_options(${TARGET_NAME} PUBLIC "/Qopt-report-per-object") - endif() - endif() - if(UNIX) - if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - target_compile_options(${TARGET_NAME} PUBLIC "-qopt-report=3") - target_compile_options(${TARGET_NAME} PUBLIC "-qopt-report-format=text") - target_compile_options(${TARGET_NAME} PUBLIC "-qopt-report-per-object") - endif() - endif() -endfunction() - - -# function print target compiler options to console -function(print_target_compiler_options TARGET_NAME) - - if(NOT TARGET ${TARGET_NAME}) - message("There is no target named '${TARGET_NAME}'") - return() - endif() - - message(STATUS "Target ${TARGET_NAME}") - message(STATUS " compiler definitions:") - get_target_property(TARGET_COMPILE_DEFINITIONS ${TARGET_NAME} COMPILE_DEFINITIONS) - if(TARGET_COMPILE_DEFINITIONS) - message(STATUS " ${TARGET_COMPILE_DEFINITIONS}") - else() - message(STATUS " not set") - endif() - message(STATUS " compiler options:") - get_target_property(TARGET_COMPILE_OPTIONS ${TARGET_NAME} COMPILE_OPTIONS) - if(TARGET_COMPILE_OPTIONS) - message(STATUS " ${TARGET_COMPILE_OPTIONS}") - else() - message(STATUS " not set") - endif() - message(STATUS " compiler flags:") - get_target_property(TARGET_COMPILE_FLAGS ${TARGET_NAME} COMPILE_FLAGS) - if(TARGET_COMPILE_FLAGS) - message(STATUS " ${TARGET_COMPILE_FLAGS}") - else() - message(STATUS " not set") - endif() - - message(STATUS " CXX_FLAGS:") - message(STATUS " ${CMAKE_CXX_FLAGS}") - -endfunction() diff --git a/dynamic_vino_lib/cmake/feature_defs.cmake b/dynamic_vino_lib/cmake/feature_defs.cmake deleted file mode 100644 index 52eb5130..00000000 --- a/dynamic_vino_lib/cmake/feature_defs.cmake +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2018 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -cmake_minimum_required (VERSION 2.8) - -include(CPUID) -include(OptimizationFlags) - -set(OpenCV_STATIC OFF) - -find_package(OpenCV 3.3 COMPONENTS core imgproc highgui imgcodecs) -if(NOT(OpenCV_FOUND)) - find_package(OpenCV 3.3 REQUIRED world) -endif() -set (BUILD_VALIDATION_APP OFF) -if (OpenCV_FOUND) - set (BUILD_VALIDATION_APP ON) -endif() - -macro(enable_omp) - if(UNIX) # Linux - add_definitions(-fopenmp) - find_library(intel_omp_lib iomp5 - PATHS ${InferenceEngine_INCLUDE_DIRS}/../external/mkltiny_lnx/lib - ) - elseif(WIN32) # Windows - if(${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC) - set(OPENMP_FLAGS "/Qopenmp /openmp") - set(CMAKE_SHARED_LINKER_FLAGS " ${CMAKE_SHARED_LINKER_FLAGS} /nodefaultlib:vcomp") - elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL Intel) - set(OPENMP_FLAGS "/Qopenmp /openmp") - else() - message("Unknown compiler ID. OpenMP support is disabled.") - endif() - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENMP_FLAGS}") - find_library(intel_omp_lib - libiomp5md - PATHS "${InferenceEngine_INCLUDE_DIRS}/../lib/intel64/${CMAKE_BUILD_TYPE}" - ) - endif() -endmacro(enable_omp) diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/args_helper.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/args_helper.hpp index b99cb995..9b1e00b8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/args_helper.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/args_helper.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with common samples functionality @@ -37,29 +35,29 @@ /** * @brief This function check input args and find images in given folder */ -void readImagesArguments(std::vector& images, - const std::string& arg) { +void readImagesArguments(std::vector & images, const std::string & arg) +{ struct stat sb; if (stat(arg.c_str(), &sb) != 0) { - std::cout << "[ WARNING ] File " << arg << " cannot be opened!" - << std::endl; + std::cout << "[ WARNING ] File " << arg << " cannot be opened!" << std::endl; return; } if (S_ISDIR(sb.st_mode)) { - DIR* dp; + DIR * dp; dp = opendir(arg.c_str()); if (dp == nullptr) { - std::cout << "[ WARNING ] Directory " << arg << " cannot be opened!" - << std::endl; + std::cout << "[ WARNING ] Directory " << arg << " cannot be opened!" 
<< std::endl; return; } - struct dirent* ep; + struct dirent * ep; while (nullptr != (ep = readdir(dp))) { std::string fileName = ep->d_name; - if (fileName == "." || fileName == "..") continue; - std::cout << "[ INFO ] Add file " << ep->d_name << " from directory " - << arg << "." << std::endl; + if (fileName == "." || fileName == "..") { + continue; + } + std::cout << "[ INFO ] Add file " << ep->d_name << " from directory " << arg << "." << + std::endl; images.push_back(arg + "/" + ep->d_name); } } else { @@ -71,7 +69,8 @@ void readImagesArguments(std::vector& images, * @brief This function find -i/--images key in input args * It's necessary to process multiple values for single key */ -void parseImagesArguments(std::vector& images) { +void parseImagesArguments(std::vector & images) +{ std::vector args = gflags::GetArgvs(); bool readArguments = false; for (size_t i = 0; i < args.size(); i++) { diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/common.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/common.hpp index 1e2d542e..ad6548cb 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/common.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/common.hpp @@ -1,26 +1,31 @@ -/* // Copyright (c) 2018 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -*/ -/** - * @brief a header file with common samples functionality - * @file common.hpp - */ +// +// @brief a header file with common samples functionality +// @file common.hpp +// -#pragma once +#ifndef DYNAMIC_VINO_LIB__COMMON_HPP_ +#define DYNAMIC_VINO_LIB__COMMON_HPP_ +#include +#include +#include +#include +#include +#include #include #include #include @@ -29,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -38,18 +42,11 @@ #include #include -#include -#include -#include -#include -#include -#ifndef UNUSED - #ifdef WIN32 - #define UNUSED - #else - #define UNUSED __attribute__((unused)) - #endif +#ifdef WIN32 +#define UNUSED +#else +#define UNUSED __attribute__((unused)) #endif /** @@ -57,10 +54,14 @@ * @param s - string to trim * @return trimmed string */ -inline std::string &trim(std::string &s) { - s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); - s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), s.end()); - return s; +inline std::string & trim(std::string & s) +{ + s.erase(s.begin(), + std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); + s.erase( + std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), + s.end()); + return s; } /** @@ -69,8 +70,9 @@ inline std::string &trim(std::string &s) { * @return TargetDevice value that corresponds to input string. 
* eDefault in case no corresponding value was found */ -static InferenceEngine::TargetDevice getDeviceFromStr(const std::string &deviceName) { - return InferenceEngine::TargetDeviceInfo::fromStr(deviceName); +static InferenceEngine::TargetDevice getDeviceFromStr(const std::string & deviceName) +{ + return InferenceEngine::TargetDeviceInfo::fromStr(deviceName); } /** @@ -80,16 +82,18 @@ static InferenceEngine::TargetDevice getDeviceFromStr(const std::string &deviceN * @param device - device to infer on * @return Plugin pointer */ -static InferenceEngine::InferenceEnginePluginPtr selectPlugin(const std::vector &pluginDirs, - const std::string &plugin, - InferenceEngine::TargetDevice device) { - InferenceEngine::PluginDispatcher dispatcher(pluginDirs); - - if (!plugin.empty()) { - return dispatcher.getPluginByName(plugin); - } else { - return dispatcher.getSuitablePlugin(device); - } +static InferenceEngine::InferenceEnginePluginPtr +selectPlugin( + const std::vector & pluginDirs, const std::string & plugin, + InferenceEngine::TargetDevice device) +{ + InferenceEngine::PluginDispatcher dispatcher(pluginDirs); + + if (!plugin.empty()) { + return dispatcher.getPluginByName(plugin); + } else { + return dispatcher.getSuitablePlugin(device); + } } /** @@ -99,10 +103,12 @@ static InferenceEngine::InferenceEnginePluginPtr selectPlugin(const std::vector< * @param device - string representation of device to infer on * @return Plugin pointer */ -static UNUSED InferenceEngine::InferenceEnginePluginPtr selectPlugin(const std::vector &pluginDirs, - const std::string &plugin, - const std::string &device) { - return selectPlugin(pluginDirs, plugin, getDeviceFromStr(device)); +static UNUSED InferenceEngine::InferenceEnginePluginPtr +selectPlugin( + const std::vector & pluginDirs, const std::string & plugin, + const std::string & device) +{ + return selectPlugin(pluginDirs, plugin, getDeviceFromStr(device)); } /** @@ -110,10 +116,13 @@ static UNUSED InferenceEngine::InferenceEnginePluginPtr selectPlugin(const std:: * @param filepath - full file name * @return filename without extension */ -static UNUSED std::string fileNameNoExt(const std::string &filepath) { - auto pos = filepath.rfind('.'); - if (pos == std::string::npos) return filepath; - return filepath.substr(0, pos); +static UNUSED std::string fileNameNoExt(const std::string & filepath) +{ + auto pos = filepath.rfind('.'); + if (pos == std::string::npos) { + return filepath; + } + return filepath.substr(0, pos); } /** @@ -121,148 +130,170 @@ static UNUSED std::string fileNameNoExt(const std::string &filepath) { * @param filename - name of the file which extension should be extracted * @return string with extracted file extension */ -inline std::string fileExt(const std::string& filename) { - auto pos = filename.rfind('.'); - if (pos == std::string::npos) return ""; - return filename.substr(pos + 1); +inline std::string fileExt(const std::string & filename) +{ + auto pos = filename.rfind('.'); + if (pos == std::string::npos) { + return ""; + } + return filename.substr(pos + 1); } -static UNUSED std::ostream &operator<<(std::ostream &os, const InferenceEngine::Version *version) { - os << "\n\tAPI version ............ "; - if (nullptr == version) { - os << "UNKNOWN"; - } else { - os << version->apiVersion.major << "." << version->apiVersion.minor; - if (nullptr != version->buildNumber) { - os << "\n\t" << "Build .................. " << version->buildNumber; - } - if (nullptr != version->description) { - os << "\n\t" << "Description ....... 
" << version->description; - } +static UNUSED std::ostream & operator<<(std::ostream & os, const InferenceEngine::Version * version) +{ + os << "\n\tAPI version ............ "; + if (nullptr == version) { + os << "UNKNOWN"; + } else { + os << version->apiVersion.major << "." << version->apiVersion.minor; + if (nullptr != version->buildNumber) { + os << "\n\t" << + "Build .................. " << version->buildNumber; + } + if (nullptr != version->description) { + os << "\n\t" << + "Description ....... " << version->description; } - return os; + } + return os; } /** * @class PluginVersion * @brief A PluginVersion class stores plugin version and initialization status */ -struct PluginVersion : public InferenceEngine::Version { - bool initialized = false; - - explicit PluginVersion(const InferenceEngine::Version *ver) { - if (nullptr == ver) { - return; - } - InferenceEngine::Version::operator=(*ver); - initialized = true; - } - - operator bool() const noexcept { - return initialized; +struct PluginVersion : public InferenceEngine::Version +{ + bool initialized = false; + + explicit PluginVersion(const InferenceEngine::Version * ver) + { + if (nullptr == ver) { + return; } + InferenceEngine::Version::operator=(*ver); + initialized = true; + } + + operator bool() const noexcept + { + return initialized; + } }; -static UNUSED std::ostream &operator<<(std::ostream &os, const PluginVersion &version) { - os << "\tPlugin version ......... "; - if (!version) { - os << "UNKNOWN"; - } else { - os << version.apiVersion.major << "." << version.apiVersion.minor; - } - - os << "\n\tPlugin name ............ "; - if (!version || version.description == nullptr) { - os << "UNKNOWN"; - } else { - os << version.description; - } - - os << "\n\tPlugin build ........... "; - if (!version || version.buildNumber == nullptr) { - os << "UNKNOWN"; - } else { - os << version.buildNumber; - } - - return os; +static UNUSED std::ostream & operator<<(std::ostream & os, const PluginVersion & version) +{ + os << "\tPlugin version ......... "; + if (!version) { + os << "UNKNOWN"; + } else { + os << version.apiVersion.major << "." << version.apiVersion.minor; + } + + os << "\n\tPlugin name ............ "; + if (!version || version.description == nullptr) { + os << "UNKNOWN"; + } else { + os << version.description; + } + + os << "\n\tPlugin build ........... 
"; + if (!version || version.buildNumber == nullptr) { + os << "UNKNOWN"; + } else { + os << version.buildNumber; + } + + return os; } -inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, std::ostream& stream) { - const PluginVersion *pluginVersion; - ptr->GetVersion((const InferenceEngine::Version*&)pluginVersion); - stream << pluginVersion << std::endl; +inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, std::ostream & stream) +{ + const PluginVersion * pluginVersion; + ptr->GetVersion((const InferenceEngine::Version * &)pluginVersion); + stream << pluginVersion << std::endl; } -static UNUSED std::vector> blobToImageOutputArray(InferenceEngine::TBlob::Ptr output, - size_t *pWidth, size_t *pHeight, - size_t *pChannels) { - std::vector> outArray; - size_t W = output->dims().at(0); - size_t H = output->dims().at(1); - size_t C = output->dims().at(2); - - // Get classes - const float *outData = output->data(); - for (unsigned h = 0; h < H; h++) { - std::vector row; - for (unsigned w = 0; w < W; w++) { - float max_value = outData[h * W + w]; - size_t index = 0; - for (size_t c = 1; c < C; c++) { - size_t dataIndex = c * H * W + h * W + w; - if (outData[dataIndex] > max_value) { - index = c; - max_value = outData[dataIndex]; - } - } - row.push_back(index); +static UNUSED std::vector> blobToImageOutputArray( + InferenceEngine::TBlob::Ptr output, size_t * pWidth, size_t * pHeight, size_t * pChannels) +{ + std::vector> outArray; + size_t W = output->dims().at(0); + size_t H = output->dims().at(1); + size_t C = output->dims().at(2); + + // Get classes + const float * outData = output->data(); + for (unsigned h = 0; h < H; h++) { + std::vector row; + for (unsigned w = 0; w < W; w++) { + float max_value = outData[h * W + w]; + size_t index = 0; + for (size_t c = 1; c < C; c++) { + size_t dataIndex = c * H * W + h * W + w; + if (outData[dataIndex] > max_value) { + index = c; + max_value = outData[dataIndex]; } - outArray.push_back(row); + } + row.push_back(index); } - - if (pWidth != nullptr) *pWidth = W; - if (pHeight != nullptr) *pHeight = H; - if (pChannels != nullptr) *pChannels = C; - - return outArray; + outArray.push_back(row); + } + + if (pWidth != nullptr) { + *pWidth = W; + } + if (pHeight != nullptr) { + *pHeight = H; + } + if (pChannels != nullptr) { + *pChannels = C; + } + + return outArray; } /** * @class Color * @brief A Color class stores channels of a given color */ -class Color { +class Color +{ private: - unsigned char _r; - unsigned char _g; - unsigned char _b; + unsigned char _r; + unsigned char _g; + unsigned char _b; public: - /** - * A default constructor. - * @param r - value for red channel - * @param g - value for green channel - * @param b - value for blue channel - */ - Color(unsigned char r, - unsigned char g, - unsigned char b) : _r(r), _g(g), _b(b) {} - - inline unsigned char red() { - return _r; - } - - inline unsigned char blue() { - return _b; - } - - inline unsigned char green() { - return _g; - } + /** + * A default constructor. 
+ * @param r - value for red channel + * @param g - value for green channel + * @param b - value for blue channel + */ + Color(unsigned char r, unsigned char g, unsigned char b) + : _r(r), _g(g), _b(b) + { + } + + inline unsigned char red() + { + return _r; + } + + inline unsigned char blue() + { + return _b; + } + + inline unsigned char green() + { + return _g; + } }; -// TODO : keep only one version of writeOutputBMP +// TODO(image_name) : keep only one version of writeOutputBMP /** * @brief Writes output data to image @@ -271,109 +302,98 @@ class Color { * @param classesNum - the number of classes * @return false if error else true */ -static UNUSED void writeOutputBmp(std::vector> data, size_t classesNum, std::ostream &outFile) { - unsigned int seed = (unsigned int) time(NULL); - // Known colors for training classes from Cityscape dataset - static std::vector colors = { - {128, 64, 128}, - {232, 35, 244}, - {70, 70, 70}, - {156, 102, 102}, - {153, 153, 190}, - {153, 153, 153}, - {30, 170, 250}, - {0, 220, 220}, - {35, 142, 107}, - {152, 251, 152}, - {180, 130, 70}, - {60, 20, 220}, - {0, 0, 255}, - {142, 0, 0}, - {70, 0, 0}, - {100, 60, 0}, - {90, 0, 0}, - {230, 0, 0}, - {32, 11, 119}, - {0, 74, 111}, - {81, 0, 81} - }; - - while (classesNum > colors.size()) { - static std::mt19937 rng(seed); - std::uniform_int_distribution dist(0, 255); - Color color(dist(rng), dist(rng), dist(rng)); - colors.push_back(color); - } - - unsigned char file[14] = { - 'B', 'M', // magic - 0, 0, 0, 0, // size in bytes - 0, 0, // app data - 0, 0, // app data - 40 + 14, 0, 0, 0 // start of data offset - }; - unsigned char info[40] = { - 40, 0, 0, 0, // info hd size - 0, 0, 0, 0, // width - 0, 0, 0, 0, // height - 1, 0, // number color planes - 24, 0, // bits per pixel - 0, 0, 0, 0, // compression is none - 0, 0, 0, 0, // image bits size - 0x13, 0x0B, 0, 0, // horz resolution in pixel / m - 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) - 0, 0, 0, 0, // #colors in palette - 0, 0, 0, 0, // #important colors - }; - - auto height = data.size(); - auto width = data.at(0).size(); - - if (height > (size_t) std::numeric_limits::max || width > (size_t) std::numeric_limits::max) { - THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; - } - - int padSize = static_cast(4 - (width * 3) % 4) % 4; - int sizeData = static_cast(width * height * 3 + height * padSize); - int sizeAll = sizeData + sizeof(file) + sizeof(info); - - file[2] = (unsigned char) (sizeAll); - file[3] = (unsigned char) (sizeAll >> 8); - file[4] = (unsigned char) (sizeAll >> 16); - file[5] = (unsigned char) (sizeAll >> 24); - - info[4] = (unsigned char) (width); - info[5] = (unsigned char) (width >> 8); - info[6] = (unsigned char) (width >> 16); - info[7] = (unsigned char) (width >> 24); - - int32_t negativeHeight = -(int32_t) height; - info[8] = (unsigned char) (negativeHeight); - info[9] = (unsigned char) (negativeHeight >> 8); - info[10] = (unsigned char) (negativeHeight >> 16); - info[11] = (unsigned char) (negativeHeight >> 24); - - info[20] = (unsigned char) (sizeData); - info[21] = (unsigned char) (sizeData >> 8); - info[22] = (unsigned char) (sizeData >> 16); - info[23] = (unsigned char) (sizeData >> 24); - - outFile.write(reinterpret_cast(file), sizeof(file)); - outFile.write(reinterpret_cast(info), sizeof(info)); - - unsigned char pad[3] = {0, 0, 0}; - - for (size_t y = 0; y < height; y++) { - for (size_t x = 0; x < width; x++) { - unsigned char pixel[3]; - size_t index = data.at(y).at(x); - 
pixel[0] = colors.at(index).red(); - pixel[1] = colors.at(index).green(); - pixel[2] = colors.at(index).blue(); - outFile.write(reinterpret_cast(pixel), 3); - } - outFile.write(reinterpret_cast(pad), padSize); +static UNUSED void writeOutputBmp( + std::vector> data, size_t classesNum, + std::ostream & outFile) +{ + unsigned int seed = (unsigned int)time(NULL); + // Known colors for training classes from Cityscape dataset + static std::vector colors = { {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, + {156, 102, 102}, {153, 153, 190}, {153, 153, 153}, + {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, + {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, + {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, + {32, 11, 119}, {0, 74, 111}, {81, 0, 81}}; + + while (classesNum > colors.size()) { + static std::mt19937 rng(seed); + std::uniform_int_distribution dist(0, 255); + Color color(dist(rng), dist(rng), dist(rng)); + colors.push_back(color); + } + + unsigned char file[14] = { + 'B', 'M', // magic + 0, 0, 0, 0, // size in bytes + 0, 0, // app data + 0, 0, // app data + 40 + 14, 0, 0, 0 // start of data offset + }; + unsigned char info[40] = { + 40, 0, 0, 0, // info hd size + 0, 0, 0, 0, // width + 0, 0, 0, 0, // height + 1, 0, // number color planes + 24, 0, // bits per pixel + 0, 0, 0, 0, // compression is none + 0, 0, 0, 0, // image bits size + 0x13, 0x0B, 0, 0, // horz resolution in pixel / m + 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) + 0, 0, 0, 0, // #colors in palette + 0, 0, 0, 0, // #important colors + }; + + auto height = data.size(); + auto width = data.at(0).size(); + + if (height > (size_t)std::numeric_limits::max || + width > (size_t)std::numeric_limits::max) + { + THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; + } + + int padSize = static_cast(4 - (width * 3) % 4) % 4; + int sizeData = static_cast(width * height * 3 + height * padSize); + int sizeAll = sizeData + sizeof(file) + sizeof(info); + + file[2] = (unsigned char)(sizeAll); + file[3] = (unsigned char)(sizeAll >> 8); + file[4] = (unsigned char)(sizeAll >> 16); + file[5] = (unsigned char)(sizeAll >> 24); + + info[4] = (unsigned char)(width); + info[5] = (unsigned char)(width >> 8); + info[6] = (unsigned char)(width >> 16); + info[7] = (unsigned char)(width >> 24); + + int32_t negativeHeight = -(int32_t)height; + info[8] = (unsigned char)(negativeHeight); + info[9] = (unsigned char)(negativeHeight >> 8); + info[10] = (unsigned char)(negativeHeight >> 16); + info[11] = (unsigned char)(negativeHeight >> 24); + + info[20] = (unsigned char)(sizeData); + info[21] = (unsigned char)(sizeData >> 8); + info[22] = (unsigned char)(sizeData >> 16); + info[23] = (unsigned char)(sizeData >> 24); + + outFile.write(reinterpret_cast(file), sizeof(file)); + outFile.write(reinterpret_cast(info), sizeof(info)); + + unsigned char pad[3] = {0, 0, 0}; + + for (size_t y = 0; y < height; y++) { + for (size_t x = 0; x < width; x++) { + unsigned char pixel[3]; + size_t index = data.at(y).at(x); + pixel[0] = colors.at(index).red(); + pixel[1] = colors.at(index).green(); + pixel[2] = colors.at(index).blue(); + outFile.write(reinterpret_cast(pixel), 3); } + outFile.write(reinterpret_cast(pad), padSize); + } } /** @@ -384,83 +404,87 @@ static UNUSED void writeOutputBmp(std::vector> data, size_t * @param width - width of the target image * @return false if error else true */ -static UNUSED bool writeOutputBmp(std::string name, unsigned char *data, size_t height, size_t width) 
{ - std::ofstream outFile; - outFile.open(name, std::ofstream::binary); - if (!outFile.is_open()) { - return false; - } - - unsigned char file[14] = { - 'B', 'M', // magic - 0, 0, 0, 0, // size in bytes - 0, 0, // app data - 0, 0, // app data - 40 + 14, 0, 0, 0 // start of data offset - }; - unsigned char info[40] = { - 40, 0, 0, 0, // info hd size - 0, 0, 0, 0, // width - 0, 0, 0, 0, // height - 1, 0, // number color planes - 24, 0, // bits per pixel - 0, 0, 0, 0, // compression is none - 0, 0, 0, 0, // image bits size - 0x13, 0x0B, 0, 0, // horz resolution in pixel / m - 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) - 0, 0, 0, 0, // #colors in palette - 0, 0, 0, 0, // #important colors - }; - - if (height > (size_t)std::numeric_limits::max || width > (size_t)std::numeric_limits::max) { - THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; +static UNUSED bool writeOutputBmp( + std::string name, unsigned char * data, size_t height, + size_t width) +{ + std::ofstream outFile; + outFile.open(name, std::ofstream::binary); + if (!outFile.is_open()) { + return false; + } + + unsigned char file[14] = { + 'B', 'M', // magic + 0, 0, 0, 0, // size in bytes + 0, 0, // app data + 0, 0, // app data + 40 + 14, 0, 0, 0 // start of data offset + }; + unsigned char info[40] = { + 40, 0, 0, 0, // info hd size + 0, 0, 0, 0, // width + 0, 0, 0, 0, // height + 1, 0, // number color planes + 24, 0, // bits per pixel + 0, 0, 0, 0, // compression is none + 0, 0, 0, 0, // image bits size + 0x13, 0x0B, 0, 0, // horz resolution in pixel / m + 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) + 0, 0, 0, 0, // #colors in palette + 0, 0, 0, 0, // #important colors + }; + + if (height > (size_t)std::numeric_limits::max || + width > (size_t)std::numeric_limits::max) + { + THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; + } + + int padSize = static_cast(4 - (width * 3) % 4) % 4; + int sizeData = static_cast(width * height * 3 + height * padSize); + int sizeAll = sizeData + sizeof(file) + sizeof(info); + + file[2] = (unsigned char)(sizeAll); + file[3] = (unsigned char)(sizeAll >> 8); + file[4] = (unsigned char)(sizeAll >> 16); + file[5] = (unsigned char)(sizeAll >> 24); + + info[4] = (unsigned char)(width); + info[5] = (unsigned char)(width >> 8); + info[6] = (unsigned char)(width >> 16); + info[7] = (unsigned char)(width >> 24); + + int32_t negativeHeight = -(int32_t)height; + info[8] = (unsigned char)(negativeHeight); + info[9] = (unsigned char)(negativeHeight >> 8); + info[10] = (unsigned char)(negativeHeight >> 16); + info[11] = (unsigned char)(negativeHeight >> 24); + + info[20] = (unsigned char)(sizeData); + info[21] = (unsigned char)(sizeData >> 8); + info[22] = (unsigned char)(sizeData >> 16); + info[23] = (unsigned char)(sizeData >> 24); + + outFile.write(reinterpret_cast(file), sizeof(file)); + outFile.write(reinterpret_cast(info), sizeof(info)); + + unsigned char pad[3] = {0, 0, 0}; + + for (size_t y = 0; y < height; y++) { + for (size_t x = 0; x < width; x++) { + unsigned char pixel[3]; + pixel[0] = data[y * width * 3 + x * 3]; + pixel[1] = data[y * width * 3 + x * 3 + 1]; + pixel[2] = data[y * width * 3 + x * 3 + 2]; + + outFile.write(reinterpret_cast(pixel), 3); } - - int padSize = static_cast(4 - (width * 3) % 4) % 4; - int sizeData = static_cast(width * height * 3 + height * padSize); - int sizeAll = sizeData + sizeof(file) + sizeof(info); - - file[2] = (unsigned char)(sizeAll); - file[3] = (unsigned 
char)(sizeAll >> 8); - file[4] = (unsigned char)(sizeAll >> 16); - file[5] = (unsigned char)(sizeAll >> 24); - - info[4] = (unsigned char)(width); - info[5] = (unsigned char)(width >> 8); - info[6] = (unsigned char)(width >> 16); - info[7] = (unsigned char)(width >> 24); - - int32_t negativeHeight = -(int32_t)height; - info[8] = (unsigned char)(negativeHeight); - info[9] = (unsigned char)(negativeHeight >> 8); - info[10] = (unsigned char)(negativeHeight >> 16); - info[11] = (unsigned char)(negativeHeight >> 24); - - info[20] = (unsigned char)(sizeData); - info[21] = (unsigned char)(sizeData >> 8); - info[22] = (unsigned char)(sizeData >> 16); - info[23] = (unsigned char)(sizeData >> 24); - - outFile.write(reinterpret_cast(file), sizeof(file)); - outFile.write(reinterpret_cast(info), sizeof(info)); - - unsigned char pad[3] = { 0, 0, 0 }; - - for (size_t y = 0; y < height; y++) { - for (size_t x = 0; x < width; x++) { - unsigned char pixel[3]; - pixel[0] = data[y * width * 3 + x * 3]; - pixel[1] = data[y * width * 3 + x * 3 + 1]; - pixel[2] = data[y * width * 3 + x * 3 + 2]; - - outFile.write(reinterpret_cast(pixel), 3); - } - outFile.write(reinterpret_cast(pad), padSize); - } - return true; + outFile.write(reinterpret_cast(pad), padSize); + } + return true; } - /** * @brief Adds colored rectangles to the image * @param data - data where rectangles are put @@ -469,77 +493,80 @@ static UNUSED bool writeOutputBmp(std::string name, unsigned char *data, size_t * @param rectangles - vector points for the rectangle, should be 4x compared to num classes * @param classes - vector of classes */ -static UNUSED void addRectangles(unsigned char *data, size_t height, size_t width, std::vector rectangles, std::vector classes) { - std::vector colors = { - { 128, 64, 128 }, - { 232, 35, 244 }, - { 70, 70, 70 }, - { 156, 102, 102 }, - { 153, 153, 190 }, - { 153, 153, 153 }, - { 30, 170, 250 }, - { 0, 220, 220 }, - { 35, 142, 107 }, - { 152, 251, 152 }, - { 180, 130, 70 }, - { 60, 20, 220 }, - { 0, 0, 255 }, - { 142, 0, 0 }, - { 70, 0, 0 }, - { 100, 60, 0 }, - { 90, 0, 0 }, - { 230, 0, 0 }, - { 32, 11, 119 }, - { 0, 74, 111 }, - { 81, 0, 81 } - }; - if (rectangles.size() % 4 != 0 || rectangles.size() / 4 != classes.size()) { - return; +static UNUSED void addRectangles( + unsigned char * data, size_t height, size_t width, + std::vector rectangles, std::vector classes) +{ + std::vector colors = { {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, + {156, 102, 102}, {153, 153, 190}, {153, 153, 153}, + {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, + {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, + {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, + {32, 11, 119}, {0, 74, 111}, {81, 0, 81}}; + if (rectangles.size() % 4 != 0 || rectangles.size() / 4 != classes.size()) { + return; + } + + for (size_t i = 0; i < classes.size(); i++) { + int x = rectangles.at(i * 4); + int y = rectangles.at(i * 4 + 1); + int w = rectangles.at(i * 4 + 2); + int h = rectangles.at(i * 4 + 3); + + if (x < 0) { + x = 0; + } + if (y < 0) { + y = 0; + } + if (w < 0) { + w = 0; + } + if (h < 0) { + h = 0; } - for (size_t i = 0; i < classes.size(); i++) { - int x = rectangles.at(i * 4); - int y = rectangles.at(i * 4 + 1); - int w = rectangles.at(i * 4 + 2); - int h = rectangles.at(i * 4 + 3); - - if (x < 0) x = 0; - if (y < 0) y = 0; - if (w < 0) w = 0; - if (h < 0) h = 0; - - if (x >= width) { x = width - 1; w = 0; } - if (y >= height) { y = height - 1; h = 0; } - - if (x + w >= width) { w = width - x - 1; } - if (y 
+ h >= height) { h = height - y - 1; } - - size_t shift_first = y*width * 3; - size_t shift_second = (y + h)*width * 3; - int cls = classes.at(i) % colors.size(); - for (int i = x; i < x + w; i++) { - data[shift_first + i * 3] = colors.at(cls).red(); - data[shift_first + i * 3 + 1] = colors.at(cls).green(); - data[shift_first + i * 3 + 2] = colors.at(cls).blue(); - data[shift_second + i * 3] = colors.at(cls).red(); - data[shift_second + i * 3 + 1] = colors.at(cls).green(); - data[shift_second + i * 3 + 2] = colors.at(cls).blue(); - } + if (x >= width) { + x = width - 1; + w = 0; + } + if (y >= height) { + y = height - 1; + h = 0; + } - shift_first = x * 3; - shift_second = (x + w) * 3; - for (int i = y; i < y + h; i++) { - data[shift_first + i*width * 3] = colors.at(cls).red(); - data[shift_first + i*width * 3 + 1] = colors.at(cls).green(); - data[shift_first + i*width * 3 + 2] = colors.at(cls).blue(); - data[shift_second + i*width * 3] = colors.at(cls).red(); - data[shift_second + i*width * 3 + 1] = colors.at(cls).green(); - data[shift_second + i*width * 3 + 2] = colors.at(cls).blue(); - } + if (x + w >= width) { + w = width - x - 1; + } + if (y + h >= height) { + h = height - y - 1; } -} + size_t shift_first = y * width * 3; + size_t shift_second = (y + h) * width * 3; + int cls = classes.at(i) % colors.size(); + for (int i = x; i < x + w; i++) { + data[shift_first + i * 3] = colors.at(cls).red(); + data[shift_first + i * 3 + 1] = colors.at(cls).green(); + data[shift_first + i * 3 + 2] = colors.at(cls).blue(); + data[shift_second + i * 3] = colors.at(cls).red(); + data[shift_second + i * 3 + 1] = colors.at(cls).green(); + data[shift_second + i * 3 + 2] = colors.at(cls).blue(); + } + shift_first = x * 3; + shift_second = (x + w) * 3; + for (int i = y; i < y + h; i++) { + data[shift_first + i * width * 3] = colors.at(cls).red(); + data[shift_first + i * width * 3 + 1] = colors.at(cls).green(); + data[shift_first + i * width * 3 + 2] = colors.at(cls).blue(); + data[shift_second + i * width * 3] = colors.at(cls).red(); + data[shift_second + i * width * 3 + 1] = colors.at(cls).green(); + data[shift_second + i * width * 3 + 2] = colors.at(cls).blue(); + } + } +} /** * Write output data to image @@ -549,428 +576,488 @@ static UNUSED void addRectangles(unsigned char *data, size_t height, size_t widt * \return false if error else true */ -static UNUSED bool writeOutputBmp(unsigned char *data, size_t height, size_t width, std::ostream &outFile) { - unsigned char file[14] = { - 'B', 'M', // magic - 0, 0, 0, 0, // size in bytes - 0, 0, // app data - 0, 0, // app data - 40+14, 0, 0, 0 // start of data offset - }; - unsigned char info[40] = { - 40, 0, 0, 0, // info hd size - 0, 0, 0, 0, // width - 0, 0, 0, 0, // height - 1, 0, // number color planes - 24, 0, // bits per pixel - 0, 0, 0, 0, // compression is none - 0, 0, 0, 0, // image bits size - 0x13, 0x0B, 0, 0, // horz resolution in pixel / m - 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) - 0, 0, 0, 0, // #colors in palette - 0, 0, 0, 0, // #important colors - }; - - if (height > (size_t)std::numeric_limits::max || width > (size_t)std::numeric_limits::max) { - THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; +static UNUSED bool writeOutputBmp( + unsigned char * data, size_t height, size_t width, + std::ostream & outFile) +{ + unsigned char file[14] = { + 'B', 'M', // magic + 0, 0, 0, 0, // size in bytes + 0, 0, // app data + 0, 0, // app data + 40 + 14, 0, 0, 0 // start of data offset + }; + 
unsigned char info[40] = { + 40, 0, 0, 0, // info hd size + 0, 0, 0, 0, // width + 0, 0, 0, 0, // height + 1, 0, // number color planes + 24, 0, // bits per pixel + 0, 0, 0, 0, // compression is none + 0, 0, 0, 0, // image bits size + 0x13, 0x0B, 0, 0, // horz resolution in pixel / m + 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi) + 0, 0, 0, 0, // #colors in palette + 0, 0, 0, 0, // #important colors + }; + + if (height > (size_t)std::numeric_limits::max || + width > (size_t)std::numeric_limits::max) + { + THROW_IE_EXCEPTION << "File size is too big: " << height << " X " << width; + } + + int padSize = static_cast(4 - (width * 3) % 4) % 4; + int sizeData = static_cast(width * height * 3 + height * padSize); + int sizeAll = sizeData + sizeof(file) + sizeof(info); + + file[2] = (unsigned char)(sizeAll); + file[3] = (unsigned char)(sizeAll >> 8); + file[4] = (unsigned char)(sizeAll >> 16); + file[5] = (unsigned char)(sizeAll >> 24); + + info[4] = (unsigned char)(width); + info[5] = (unsigned char)(width >> 8); + info[6] = (unsigned char)(width >> 16); + info[7] = (unsigned char)(width >> 24); + + int32_t negativeHeight = -(int32_t)height; + info[8] = (unsigned char)(negativeHeight); + info[9] = (unsigned char)(negativeHeight >> 8); + info[10] = (unsigned char)(negativeHeight >> 16); + info[11] = (unsigned char)(negativeHeight >> 24); + + info[20] = (unsigned char)(sizeData); + info[21] = (unsigned char)(sizeData >> 8); + info[22] = (unsigned char)(sizeData >> 16); + info[23] = (unsigned char)(sizeData >> 24); + + outFile.write(reinterpret_cast(file), sizeof(file)); + outFile.write(reinterpret_cast(info), sizeof(info)); + + unsigned char pad[3] = {0, 0, 0}; + + for (size_t y = 0; y < height; y++) { + for (size_t x = 0; x < width; x++) { + unsigned char pixel[3]; + pixel[0] = data[y * width * 3 + x * 3]; + pixel[1] = data[y * width * 3 + x * 3 + 1]; + pixel[2] = data[y * width * 3 + x * 3 + 2]; + outFile.write(reinterpret_cast(pixel), 3); } + outFile.write(reinterpret_cast(pad), padSize); + } - int padSize = static_cast(4 - (width * 3) % 4) % 4; - int sizeData = static_cast(width * height * 3 + height * padSize); - int sizeAll = sizeData + sizeof(file) + sizeof(info); - - file[ 2] = (unsigned char)(sizeAll ); - file[ 3] = (unsigned char)(sizeAll >> 8); - file[ 4] = (unsigned char)(sizeAll >> 16); - file[ 5] = (unsigned char)(sizeAll >> 24); - - info[ 4] = (unsigned char)(width ); - info[ 5] = (unsigned char)(width >> 8); - info[ 6] = (unsigned char)(width >> 16); - info[ 7] = (unsigned char)(width >> 24); - - int32_t negativeHeight = -(int32_t)height; - info[ 8] = (unsigned char)(negativeHeight ); - info[ 9] = (unsigned char)(negativeHeight >> 8); - info[10] = (unsigned char)(negativeHeight >> 16); - info[11] = (unsigned char)(negativeHeight >> 24); - - info[20] = (unsigned char)(sizeData ); - info[21] = (unsigned char)(sizeData >> 8); - info[22] = (unsigned char)(sizeData >> 16); - info[23] = (unsigned char)(sizeData >> 24); - - outFile.write(reinterpret_cast(file), sizeof(file)); - outFile.write(reinterpret_cast(info), sizeof(info)); - - unsigned char pad[3] = {0, 0, 0}; - - for (size_t y = 0; y < height; y++) { - for (size_t x = 0; x < width; x++) { - unsigned char pixel[3]; - pixel[0] = data[y*width*3 + x*3]; - pixel[1] = data[y*width*3 + x*3 + 1]; - pixel[2] = data[y*width*3 + x*3 + 2]; - outFile.write(reinterpret_cast(pixel), 3); - } - outFile.write(reinterpret_cast(pad), padSize); - } - - return true; + return true; } -inline double getDurationOf(std::function 
func) { - auto t0 = std::chrono::high_resolution_clock::now(); - func(); - auto t1 = std::chrono::high_resolution_clock::now(); - std::chrono::duration fs = t1 - t0; - return std::chrono::duration_cast>>(fs).count(); +inline double getDurationOf(std::function func) +{ + auto t0 = std::chrono::high_resolution_clock::now(); + func(); + auto t1 = std::chrono::high_resolution_clock::now(); + std::chrono::duration fs = t1 - t0; + return std::chrono::duration_cast>>(fs).count(); } - -static UNUSED void printPerformanceCounts(const std::map& performanceMap, - std::ostream &stream, - bool bshowHeader = true) { - long long totalTime = 0; - // Print performance counts - if (bshowHeader) { - stream << std::endl << "performance counts:" << std::endl << std::endl; +static UNUSED void printPerformanceCounts( + const std::map & performanceMap, + std::ostream & stream, bool bshowHeader = true) +{ + int64_t totalTime = 0; + // Print performance counts + if (bshowHeader) { + stream << std::endl << "performance counts:" << std::endl << std::endl; + } + for (const auto & it : performanceMap) { + std::string toPrint(it.first); + const int maxLayerName = 30; + + if (it.first.length() >= maxLayerName) { + toPrint = it.first.substr(0, maxLayerName - 4); + toPrint += "..."; } - for (const auto & it : performanceMap) { - std::string toPrint(it.first); - const int maxLayerName = 30; - if (it.first.length() >= maxLayerName) { - toPrint = it.first.substr(0, maxLayerName - 4); - toPrint += "..."; - } - - - stream << std::setw(maxLayerName) << std::left << toPrint; - switch (it.second.status) { - case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: - stream << std::setw(15) << std::left << "EXECUTED"; - break; - case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: - stream << std::setw(15) << std::left << "NOT_RUN"; - break; - case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: - stream << std::setw(15) << std::left << "OPTIMIZED_OUT"; - break; - } - stream << std::setw(30) << std::left << "layerType: " + std::string(it.second.layer_type) + " "; - stream << std::setw(20) << std::left << "realTime: " + std::to_string(it.second.realTime_uSec); - stream << std::setw(20) << std::left << " cpu: " + std::to_string(it.second.cpu_uSec); - stream << " execType: " << it.second.exec_type << std::endl; - if (it.second.realTime_uSec > 0) { - totalTime += it.second.realTime_uSec; - } + stream << std::setw(maxLayerName) << std::left << toPrint; + switch (it.second.status) { + case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: + stream << std::setw(15) << std::left << "EXECUTED"; + break; + case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: + stream << std::setw(15) << std::left << "NOT_RUN"; + break; + case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: + stream << std::setw(15) << std::left << "OPTIMIZED_OUT"; + break; + } + stream << std::setw(30) << std::left << "layerType: " + std::string(it.second.layer_type) + " "; + stream << std::setw(20) << std::left << "realTime: " + std::to_string(it.second.realTime_uSec); + stream << std::setw(20) << std::left << " cpu: " + std::to_string(it.second.cpu_uSec); + stream << " execType: " << it.second.exec_type << std::endl; + if (it.second.realTime_uSec > 0) { + totalTime += it.second.realTime_uSec; } - stream << std::setw(20) << std::left << "Total time: " + std::to_string(totalTime) << " microseconds" << std::endl; + } + stream << std::setw(20) << std::left << "Total time: " + std::to_string(totalTime) << + " microseconds" << 
std::endl; } -static UNUSED void printPerformanceCounts(InferenceEngine::InferRequest request, std::ostream &stream) { - auto perfomanceMap = request.GetPerformanceCounts(); - printPerformanceCounts(perfomanceMap, stream); +static UNUSED void printPerformanceCounts( + InferenceEngine::InferRequest request, + std::ostream & stream) +{ + auto perfomanceMap = request.GetPerformanceCounts(); + printPerformanceCounts(perfomanceMap, stream); } /** * @deprecated */ -static UNUSED void printPerformanceCountsPlugin(InferenceEngine::InferenceEnginePluginPtr plugin, std::ostream &stream) { - std::map perfomanceMap; - plugin->GetPerformanceCounts(perfomanceMap, nullptr); - printPerformanceCounts(perfomanceMap, stream); +static UNUSED void printPerformanceCountsPlugin( + InferenceEngine::InferenceEnginePluginPtr plugin, + std::ostream & stream) +{ + std::map perfomanceMap; + plugin->GetPerformanceCounts(perfomanceMap, nullptr); + printPerformanceCounts(perfomanceMap, stream); } /** * @brief This class represents an object that is found by an object detection net */ -class DetectedObject { +class DetectedObject +{ public: - int objectType; - float xmin, xmax, ymin, ymax, prob; - bool difficult; + int objectType; + float xmin, xmax, ymin, ymax, prob; + bool difficult; + + DetectedObject( + int objectType, float xmin, float ymin, float xmax, float ymax, float prob, + bool difficult = false) + : objectType(objectType), + xmin(xmin), + xmax(xmax), + ymin(ymin), + ymax(ymax), + prob(prob), + difficult(difficult) + { + } + + DetectedObject(const DetectedObject & other) = default; + + static float ioU(const DetectedObject & detectedObject1_, const DetectedObject & detectedObject2_) + { + // Add small space to eliminate empty squares + float epsilon = 0; // 1e-5f; + + DetectedObject detectedObject1(detectedObject1_.objectType, (detectedObject1_.xmin - epsilon), + (detectedObject1_.ymin - epsilon), + (detectedObject1_.xmax - epsilon), + (detectedObject1_.ymax - epsilon), detectedObject1_.prob); + DetectedObject detectedObject2(detectedObject2_.objectType, (detectedObject2_.xmin + epsilon), + (detectedObject2_.ymin + epsilon), (detectedObject2_.xmax), + (detectedObject2_.ymax), detectedObject2_.prob); + + if (detectedObject1.objectType != detectedObject2.objectType) { + // objects are different, so the result is 0 + return 0.0f; + } - DetectedObject(int objectType, float xmin, float ymin, float xmax, float ymax, float prob, bool difficult = false) - : objectType(objectType), xmin(xmin), xmax(xmax), ymin(ymin), ymax(ymax), prob(prob), difficult(difficult) { + if (detectedObject1.xmax < detectedObject1.xmin) { + return 0.0; + } + if (detectedObject1.ymax < detectedObject1.ymin) { + return 0.0; + } + if (detectedObject2.xmax < detectedObject2.xmin) { + return 0.0; + } + if (detectedObject2.ymax < detectedObject2.ymin) { + return 0.0; } - DetectedObject(const DetectedObject& other) = default; - - static float ioU(const DetectedObject& detectedObject1_, const DetectedObject& detectedObject2_) { - // Add small space to eliminate empty squares - float epsilon = 0; // 1e-5f; - - DetectedObject detectedObject1(detectedObject1_.objectType, - (detectedObject1_.xmin - epsilon), - (detectedObject1_.ymin - epsilon), - (detectedObject1_.xmax- epsilon), - (detectedObject1_.ymax- epsilon), detectedObject1_.prob); - DetectedObject detectedObject2(detectedObject2_.objectType, - (detectedObject2_.xmin + epsilon), - (detectedObject2_.ymin + epsilon), - (detectedObject2_.xmax), - (detectedObject2_.ymax), detectedObject2_.prob); - - if 
(detectedObject1.objectType != detectedObject2.objectType) { - // objects are different, so the result is 0 - return 0.0f; - } + float xmin = (std::max)(detectedObject1.xmin, detectedObject2.xmin); + float ymin = (std::max)(detectedObject1.ymin, detectedObject2.ymin); + float xmax = (std::min)(detectedObject1.xmax, detectedObject2.xmax); + float ymax = (std::min)(detectedObject1.ymax, detectedObject2.ymax); - if (detectedObject1.xmax < detectedObject1.xmin) return 0.0; - if (detectedObject1.ymax < detectedObject1.ymin) return 0.0; - if (detectedObject2.xmax < detectedObject2.xmin) return 0.0; - if (detectedObject2.ymax < detectedObject2.ymin) return 0.0; - - - float xmin = (std::max)(detectedObject1.xmin, detectedObject2.xmin); - float ymin = (std::max)(detectedObject1.ymin, detectedObject2.ymin); - float xmax = (std::min)(detectedObject1.xmax, detectedObject2.xmax); - float ymax = (std::min)(detectedObject1.ymax, detectedObject2.ymax); - - // Caffe adds 1 to every length if the box isn't normalized. So do we... - float addendum; - if (xmax > 1 || ymax > 1) - addendum = 1; - else - addendum = 0; - - // intersection - float intr; - if ((xmax >= xmin) && (ymax >= ymin)) { - intr = (addendum + xmax - xmin) * (addendum + ymax - ymin); - } else { - intr = 0.0f; - } + // Caffe adds 1 to every length if the box isn't normalized. So do we... + float addendum; + if (xmax > 1 || ymax > 1) { + addendum = 1; + } else { + addendum = 0; + } + + // intersection + float intr; + if ((xmax >= xmin) && (ymax >= ymin)) { + intr = (addendum + xmax - xmin) * (addendum + ymax - ymin); + } else { + intr = 0.0f; + } - // union - float square1 = (addendum + detectedObject1.xmax - detectedObject1.xmin) * (addendum + detectedObject1.ymax - detectedObject1.ymin); - float square2 = (addendum + detectedObject2.xmax - detectedObject2.xmin) * (addendum + detectedObject2.ymax - detectedObject2.ymin); + // union + float square1 = (addendum + detectedObject1.xmax - detectedObject1.xmin) * + (addendum + detectedObject1.ymax - detectedObject1.ymin); + float square2 = (addendum + detectedObject2.xmax - detectedObject2.xmin) * + (addendum + detectedObject2.ymax - detectedObject2.ymin); - float unn = square1 + square2 - intr; + float unn = square1 + square2 - intr; - return static_cast(intr) / unn; - } + return static_cast(intr) / unn; + } - DetectedObject scale(float scale_x, float scale_y) const { - return DetectedObject(objectType, xmin * scale_x, ymin * scale_y, xmax * scale_x, ymax * scale_y, prob, difficult); - } + DetectedObject scale(float scale_x, float scale_y) const + { + return DetectedObject(objectType, xmin * scale_x, ymin * scale_y, xmax * scale_x, + ymax * scale_y, prob, difficult); + } }; -class ImageDescription { +class ImageDescription +{ public: - const std::list alist; - const bool check_probs; - - explicit ImageDescription(const std::list &alist, bool check_probs = false) - : alist(alist), check_probs(check_probs) { + const std::list alist; + const bool check_probs; + + explicit ImageDescription(const std::list & alist, bool check_probs = false) + : alist(alist), check_probs(check_probs) + { + } + + static float ioUMultiple( + const ImageDescription & detectedObjects, + const ImageDescription & desiredObjects) + { + const ImageDescription * detectedObjectsSmall, * detectedObjectsBig; + bool check_probs = desiredObjects.check_probs; + + if (detectedObjects.alist.size() < desiredObjects.alist.size()) { + detectedObjectsSmall = &detectedObjects; + detectedObjectsBig = &desiredObjects; + } else { + 
detectedObjectsSmall = &desiredObjects; + detectedObjectsBig = &detectedObjects; } - static float ioUMultiple(const ImageDescription &detectedObjects, const ImageDescription &desiredObjects) { - const ImageDescription *detectedObjectsSmall, *detectedObjectsBig; - bool check_probs = desiredObjects.check_probs; - - if (detectedObjects.alist.size() < desiredObjects.alist.size()) { - detectedObjectsSmall = &detectedObjects; - detectedObjectsBig = &desiredObjects; - } else { - detectedObjectsSmall = &desiredObjects; - detectedObjectsBig = &detectedObjects; + std::list doS = detectedObjectsSmall->alist; + std::list doB = detectedObjectsBig->alist; + + float fullScore = 0.0f; + while (doS.size() > 0) { + float score = 0.0f; + std::list::iterator bestJ = doB.end(); + for (auto j = doB.begin(); j != doB.end(); j++) { + float curscore = DetectedObject::ioU(*doS.begin(), *j); + if (score < curscore) { + score = curscore; + bestJ = j; } + } - std::list doS = detectedObjectsSmall->alist; - std::list doB = detectedObjectsBig->alist; - - float fullScore = 0.0f; - while (doS.size() > 0) { - float score = 0.0f; - std::list::iterator bestJ = doB.end(); - for (auto j = doB.begin(); j != doB.end(); j++) { - float curscore = DetectedObject::ioU(*doS.begin(), *j); - if (score < curscore) { - score = curscore; - bestJ = j; - } - } - - float coeff = 1.0; - if (check_probs) { - if (bestJ != doB.end()) { - DetectedObject test = *bestJ; - DetectedObject test1 = *doS.begin(); - float mn = std::min((*bestJ).prob, (*doS.begin()).prob); - float mx = std::max((*bestJ).prob, (*doS.begin()).prob); + float coeff = 1.0; + if (check_probs) { + if (bestJ != doB.end()) { + DetectedObject test = *bestJ; + DetectedObject test1 = *doS.begin(); + float mn = std::min((*bestJ).prob, (*doS.begin()).prob); + float mx = std::max((*bestJ).prob, (*doS.begin()).prob); - coeff = mn/mx; - } - } - - doS.pop_front(); - if (bestJ != doB.end()) doB.erase(bestJ); - fullScore += coeff * score; + coeff = mn / mx; } - fullScore /= detectedObjectsBig->alist.size(); - + } - return fullScore; + doS.pop_front(); + if (bestJ != doB.end()) { + doB.erase(bestJ); + } + fullScore += coeff * score; } + fullScore /= detectedObjectsBig->alist.size(); - ImageDescription scale(float scale_x, float scale_y) const { - std::list slist; - for (auto& dob : alist) { - slist.push_back(dob.scale(scale_x, scale_y)); - } - return ImageDescription(slist, check_probs); + return fullScore; + } + + ImageDescription scale(float scale_x, float scale_y) const + { + std::list slist; + for (auto & dob : alist) { + slist.push_back(dob.scale(scale_x, scale_y)); } + return ImageDescription(slist, check_probs); + } }; -struct AveragePrecisionCalculator { +struct AveragePrecisionCalculator +{ private: - enum MatchKind { - TruePositive, FalsePositive - }; + enum MatchKind + { + TruePositive, + FalsePositive + }; - /** - * Here we count all TP and FP matches for all the classes in all the images. - */ - std::map>> matches; + /** + * Here we count all TP and FP matches for all the classes in all the images. 
+ */ + std::map>> matches; - std::map N; + std::map N; - double threshold; + double threshold; - static bool SortBBoxDescend(const DetectedObject& bbox1, const DetectedObject& bbox2) { - return bbox1.prob > bbox2.prob; - } + static bool SortBBoxDescend(const DetectedObject & bbox1, const DetectedObject & bbox2) + { + return bbox1.prob > bbox2.prob; + } - static bool SortPairDescend(const std::pair& p1, const std::pair& p2) { - return p1.first > p2.first; - } + static bool SortPairDescend( + const std::pair & p1, + const std::pair & p2) + { + return p1.first > p2.first; + } public: - AveragePrecisionCalculator(double threshold) : threshold(threshold) { } - - // gt_bboxes -> des - // bboxes -> det - - void consumeImage(const ImageDescription &detectedObjects, const ImageDescription &desiredObjects) { - // Collecting IoU values - int tp = 0, fp = 0; - - std::vector visited(desiredObjects.alist.size(), false); - std::vector bboxes{ std::begin(detectedObjects.alist), std::end(detectedObjects.alist) }; - std::sort(bboxes.begin(), bboxes.end(), SortBBoxDescend); - - - for (auto&& detObj : bboxes) { - // Searching for the best match to this detection - - // Searching for desired object - float overlap_max = -1; - int jmax = -1; - auto desmax = desiredObjects.alist.end(); - - int j = 0; - for (auto desObj = desiredObjects.alist.begin(); desObj != desiredObjects.alist.end(); desObj++, j++) { - double iou = DetectedObject::ioU(detObj, *desObj); - if (iou > overlap_max) { - overlap_max = iou; - jmax = j; - desmax = desObj; - } - } - - MatchKind mk; - if (overlap_max >= threshold) { - if (!desmax->difficult) { - if (!visited[jmax]) { - mk = TruePositive; - visited[jmax] = true; - } else { - mk = FalsePositive; - } - matches[detObj.objectType].push_back(std::make_pair(detObj.prob, mk)); - } - } else { - mk = FalsePositive; - matches[detObj.objectType].push_back(std::make_pair(detObj.prob, mk)); - } + explicit AveragePrecisionCalculator(double threshold) + : threshold(threshold) + { + } + + // gt_bboxes -> des + // bboxes -> det + + void consumeImage( + const ImageDescription & detectedObjects, + const ImageDescription & desiredObjects) + { + // Collecting IoU values + int tp = 0, fp = 0; + + std::vector visited(desiredObjects.alist.size(), false); + std::vector bboxes{std::begin(detectedObjects.alist), + std::end(detectedObjects.alist)}; + std::sort(bboxes.begin(), bboxes.end(), SortBBoxDescend); + + for (auto && detObj : bboxes) { + // Searching for the best match to this detection + + // Searching for desired object + float overlap_max = -1; + int jmax = -1; + auto desmax = desiredObjects.alist.end(); + + int j = 0; + for (auto desObj = desiredObjects.alist.begin(); desObj != desiredObjects.alist.end(); + desObj++, j++) + { + double iou = DetectedObject::ioU(detObj, *desObj); + if (iou > overlap_max) { + overlap_max = iou; + jmax = j; + desmax = desObj; } - - for (auto desObj = desiredObjects.alist.begin(); desObj != desiredObjects.alist.end(); desObj++) { - if (!desObj->difficult) { - N[desObj->objectType]++; - } - } + } + + MatchKind mk; + if (overlap_max >= threshold) { + if (!desmax->difficult) { + if (!visited[jmax]) { + mk = TruePositive; + visited[jmax] = true; + } else { + mk = FalsePositive; + } + matches[detObj.objectType].push_back(std::make_pair(detObj.prob, mk)); } + } else { + mk = FalsePositive; + matches[detObj.objectType].push_back(std::make_pair(detObj.prob, mk)); + } + } - std::map calculateAveragePrecisionPerClass() const { - /** - * Precision-to-TP curve per class (a variation of 
precision-to-recall curve without dividing into N) - */ - std::map> precisionToTP; - + for (auto desObj = desiredObjects.alist.begin(); desObj != desiredObjects.alist.end(); + desObj++) + { + if (!desObj->difficult) { + N[desObj->objectType]++; + } + } + } - std::map res; + std::map calculateAveragePrecisionPerClass() const + { + /** + * Precision-to-TP curve per class (a variation of precision-to-recall curve without dividing + * into N) + */ + std::map> precisionToTP; - double AP = 0; - double q = 0; - for (auto m : matches) { - // Sorting - std::sort(m.second.begin(), m.second.end(), SortPairDescend); + std::map res; - int clazz = m.first; - int TP = 0, FP = 0; + double AP = 0; + double q = 0; + for (auto m : matches) { + // Sorting + std::sort(m.second.begin(), m.second.end(), SortPairDescend); - std::vector prec; - std::vector rec; + int clazz = m.first; + int TP = 0, FP = 0; - for (auto mm : m.second) { - // Here we are descending in a probability value - MatchKind mk = mm.second; - if (mk == TruePositive) TP++; - else if (mk == FalsePositive) FP++; + std::vector prec; + std::vector rec; - double precision = static_cast(TP) / (TP + FP); - double recall = 0; - if (N.find(clazz) != N.end()) { - recall = static_cast(TP) / N.at(clazz); - } + for (auto mm : m.second) { + // Here we are descending in a probability value + MatchKind mk = mm.second; + if (mk == TruePositive) { + TP++; + } else if (mk == FalsePositive) { + FP++; + } - prec.push_back(precision); - rec.push_back(recall); - } + double precision = static_cast(TP) / (TP + FP); + double recall = 0; + if (N.find(clazz) != N.end()) { + recall = static_cast(TP) / N.at(clazz); + } - int num = rec.size(); - - // 11point from Caffe - double ap = 0; - std::vector max_precs(11, 0.); - int start_idx = num - 1; - for (int j = 10; j >= 0; --j) { - for (int i = start_idx; i >= 0; --i) { - if (rec[i] < j / 10.) { - start_idx = i; - if (j > 0) { - max_precs[j-1] = max_precs[j]; - } - break; - } else { - if (max_precs[j] < prec[i]) { - max_precs[j] = prec[i]; - } - } - } + prec.push_back(precision); + rec.push_back(recall); + } + + int num = rec.size(); + + // 11point from Caffe + double ap = 0; + std::vector max_precs(11, 0.); + int start_idx = num - 1; + for (int j = 10; j >= 0; --j) { + for (int i = start_idx; i >= 0; --i) { + if (rec[i] < j / 10.) 
{ + start_idx = i; + if (j > 0) { + max_precs[j - 1] = max_precs[j]; } - for (int j = 10; j >= 0; --j) { - ap += max_precs[j] / 11; + break; + } else { + if (max_precs[j] < prec[i]) { + max_precs[j] = prec[i]; } - res[clazz] = ap; + } } - - return res; + } + for (int j = 10; j >= 0; --j) { + ap += max_precs[j] / 11; + } + res[clazz] = ap; } + + return res; + } }; /** @@ -980,59 +1067,48 @@ struct AveragePrecisionCalculator { * @param width - width of the rectangle * @param detectedObjects - vector of detected objects */ -static UNUSED void addRectangles(unsigned char *data, size_t height, size_t width, std::vector detectedObjects) { - std::vector colors = { - { 128, 64, 128 }, - { 232, 35, 244 }, - { 70, 70, 70 }, - { 156, 102, 102 }, - { 153, 153, 190 }, - { 153, 153, 153 }, - { 30, 170, 250 }, - { 0, 220, 220 }, - { 35, 142, 107 }, - { 152, 251, 152 }, - { 180, 130, 70 }, - { 60, 20, 220 }, - { 0, 0, 255 }, - { 142, 0, 0 }, - { 70, 0, 0 }, - { 100, 60, 0 }, - { 90, 0, 0 }, - { 230, 0, 0 }, - { 32, 11, 119 }, - { 0, 74, 111 }, - { 81, 0, 81 } - }; - - for (size_t i = 0; i < detectedObjects.size(); i++) { - int cls = detectedObjects[i].objectType % colors.size(); - - int xmin = detectedObjects[i].xmin * width; - int xmax = detectedObjects[i].xmax * width; - int ymin = detectedObjects[i].ymin * height; - int ymax = detectedObjects[i].ymax * height; - - size_t shift_first = ymin*width * 3; - size_t shift_second = ymax*width * 3; - for (int x = xmin; x < xmax; x++) { - data[shift_first + x * 3] = colors.at(cls).red(); - data[shift_first + x * 3 + 1] = colors.at(cls).green(); - data[shift_first + x * 3 + 2] = colors.at(cls).blue(); - data[shift_second + x * 3] = colors.at(cls).red(); - data[shift_second + x * 3 + 1] = colors.at(cls).green(); - data[shift_second + x * 3 + 2] = colors.at(cls).blue(); - } +static UNUSED void addRectangles( + unsigned char * data, size_t height, size_t width, + std::vector detectedObjects) +{ + std::vector colors = { {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, + {156, 102, 102}, {153, 153, 190}, {153, 153, 153}, + {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, + {152, 251, 152}, {180, 130, 70}, {60, 20, 220}, + {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, + {32, 11, 119}, {0, 74, 111}, {81, 0, 81}}; + + for (size_t i = 0; i < detectedObjects.size(); i++) { + int cls = detectedObjects[i].objectType % colors.size(); + + int xmin = detectedObjects[i].xmin * width; + int xmax = detectedObjects[i].xmax * width; + int ymin = detectedObjects[i].ymin * height; + int ymax = detectedObjects[i].ymax * height; + + size_t shift_first = ymin * width * 3; + size_t shift_second = ymax * width * 3; + for (int x = xmin; x < xmax; x++) { + data[shift_first + x * 3] = colors.at(cls).red(); + data[shift_first + x * 3 + 1] = colors.at(cls).green(); + data[shift_first + x * 3 + 2] = colors.at(cls).blue(); + data[shift_second + x * 3] = colors.at(cls).red(); + data[shift_second + x * 3 + 1] = colors.at(cls).green(); + data[shift_second + x * 3 + 2] = colors.at(cls).blue(); + } - shift_first = xmin * 3; - shift_second = xmax * 3; - for (int y = ymin; y < ymax; y++) { - data[shift_first + y*width * 3] = colors.at(cls).red(); - data[shift_first + y*width * 3 + 1] = colors.at(cls).green(); - data[shift_first + y*width * 3 + 2] = colors.at(cls).blue(); - data[shift_second + y*width * 3] = colors.at(cls).red(); - data[shift_second + y*width * 3 + 1] = colors.at(cls).green(); - data[shift_second + y*width * 3 + 2] = colors.at(cls).blue(); - } + shift_first = 
xmin * 3; + shift_second = xmax * 3; + for (int y = ymin; y < ymax; y++) { + data[shift_first + y * width * 3] = colors.at(cls).red(); + data[shift_first + y * width * 3 + 1] = colors.at(cls).green(); + data[shift_first + y * width * 3 + 2] = colors.at(cls).blue(); + data[shift_second + y * width * 3] = colors.at(cls).red(); + data[shift_second + y * width * 3 + 1] = colors.at(cls).green(); + data[shift_second + y * width * 3 + 2] = colors.at(cls).blue(); } + } } + +#endif // DYNAMIC_VINO_LIB__COMMON_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp index 88ac3c7b..d9b74534 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for NetworkEngine class @@ -31,9 +29,11 @@ * @brief This class is used to get the infer request * from a inference plugin and an inference network */ -namespace Engines { -class Engine { - public: +namespace Engines +{ +class Engine +{ +public: /** * @brief Create an NetworkEngine instance * from a inference plugin and an inference network. @@ -43,18 +43,22 @@ class Engine { * @brief Get the inference request this instance holds. * @return The inference request this instance holds. */ - inline InferenceEngine::InferRequest::Ptr& getRequest() { return request_; } + inline InferenceEngine::InferRequest::Ptr & getRequest() + { + return request_; + } /** * @brief Set a callback function for the infer request. * @param[in] callbackToSet A lambda function as callback function. * The callback function will be called when request is finished. 
   */
-  template <typename T>
-  void setCompletionCallback(const T& callbackToSet) {
+  template<typename T>
+  void setCompletionCallback(const T & callbackToSet)
+  {
     request_->SetCompletionCallback(callbackToSet);
   }
- private:
+private:
   InferenceEngine::InferRequest::Ptr request_;
 };
 }  // namespace Engines
diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/factory.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/factory.hpp
index ab35452e..ee87e1cf 100644
--- a/dynamic_vino_lib/include/dynamic_vino_lib/factory.hpp
+++ b/dynamic_vino_lib/include/dynamic_vino_lib/factory.hpp
@@ -1,18 +1,16 @@
-/*
- * Copyright (c) 2018 Intel Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
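
For context, here is a minimal usage sketch for the `Engines::Engine` wrapper refactored in engine.hpp above. It is illustrative only and not part of this patch: it assumes an already-constructed `Engine` holding a valid `InferenceEngine::InferRequest`, registers a completion callback through `setCompletionCallback()`, and starts the request asynchronously.

```cpp
// Minimal usage sketch for Engines::Engine (illustrative, not from this patch).
// Assumes `engine` already wraps the InferRequest of a loaded network.
#include <iostream>
#include <memory>
#include "dynamic_vino_lib/engines/engine.hpp"

void runAsyncOnce(const std::shared_ptr<Engines::Engine> & engine)
{
  // The callback fires when the asynchronous request finishes.
  engine->setCompletionCallback([]() {
      std::cout << "inference request completed" << std::endl;
    });

  // Start the request and block until the result is ready;
  // output blobs can then be read back from the same request.
  engine->getRequest()->StartAsync();
  engine->getRequest()->Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
}
```
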
/** * @brief a header file with declaration of Factory class @@ -22,7 +20,6 @@ #ifndef DYNAMIC_VINO_LIB__FACTORY_HPP_ #define DYNAMIC_VINO_LIB__FACTORY_HPP_ - #include #include @@ -38,8 +35,9 @@ * class corresponding to * the input string */ -class Factory { - public: +class Factory +{ +public: /** * @brief This function produces the derived input device class corresponding * to the input string @@ -49,7 +47,7 @@ class Factory { * @return the instance of derived input device referenced by a smart pointer */ static std::shared_ptr makeInputDeviceByName( - const std::string& input_device_name, const std::string& input_file_path=""); + const std::string & input_device_name, const std::string & input_file_path = ""); /** * @brief This function produces the derived inference plugin corresponding to * the input string @@ -61,10 +59,10 @@ class Factory { * @return the instance of derived inference plugin referenced by a smart * pointer */ - static std::unique_ptr makePluginByName( - const std::string& device_name, - const std::string& custom_cpu_library_message, - const std::string& custom_cldnn_message, bool performance_message); + static std::unique_ptr + makePluginByName( + const std::string & device_name, const std::string & custom_cpu_library_message, + const std::string & custom_cldnn_message, bool performance_message); }; #endif // DYNAMIC_VINO_LIB__FACTORY_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp index 4a6c787c..0cf35ede 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
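
As a rough illustration of how the `Factory` helpers refactored in factory.hpp above are meant to be combined, consider the sketch below. The argument values ("StandardCamera", "CPU", empty extension paths) are placeholders for illustration, not values mandated by this patch.

```cpp
// Illustrative sketch only: builds an input device and an inference plugin
// through the Factory declared above. Argument values are examples.
#include <memory>
#include <string>
#include "dynamic_vino_lib/factory.hpp"

void makePipelinePieces()
{
  // Pick the input source by name; the optional second argument is the
  // file path used by file-based inputs (left empty for live cameras).
  auto input_device = Factory::makeInputDeviceByName("StandardCamera");

  // Pick the target plugin: device name, optional custom CPU / clDNN
  // extension paths (empty here), and whether to print per-layer timings.
  auto plugin = Factory::makePluginByName("CPU", "", "", false);
}
```
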
/** * @brief A header file with declaration for AgeGenderDetection Class @@ -21,6 +19,8 @@ #ifndef DYNAMIC_VINO_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ #define DYNAMIC_VINO_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ +#include +#include #include #include #include @@ -28,33 +28,40 @@ #include "dynamic_vino_lib/engines/engine.hpp" #include "dynamic_vino_lib/inferences/base_inference.hpp" #include "dynamic_vino_lib/models/age_gender_detection_model.hpp" -#include -#include #include "inference_engine.hpp" #include "opencv2/opencv.hpp" -namespace Outputs { +namespace Outputs +{ class BaseOuput; } -namespace dynamic_vino_lib { +namespace dynamic_vino_lib +{ /** * @class AgeGenderResult * @brief Class for storing and processing age and gender detection result. */ -class AgeGenderResult : public Result { - public: - explicit AgeGenderResult(const cv::Rect& location); +class AgeGenderResult : public Result +{ +public: + explicit AgeGenderResult(const cv::Rect & location); /** * @brief Get the age of the detected person from the result. * @return The predictea age. */ - float getAge() const {return age_;} + float getAge() const + { + return age_; + } /** * @brief Get the possibility that the detected person is a * male from the result. * @return The possibility that the detected person is a male. */ - float getMaleProbability() const {return male_prob_;} + float getMaleProbability() const + { + return male_prob_; + } float age_ = -1; float male_prob_ = -1; @@ -62,11 +69,12 @@ class AgeGenderResult : public Result { /** * @class AgeGenderDetection - * @brief Class to load age and gender detection model and perform + * @brief Class to load age and gender detection model and perform age and gender detection. */ -class AgeGenderDetection : public BaseInference { - public: +class AgeGenderDetection : public BaseInference +{ +public: using Result = dynamic_vino_lib::AgeGenderResult; AgeGenderDetection(); ~AgeGenderDetection() override; @@ -82,7 +90,7 @@ class AgeGenderDetection : public BaseInference { * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat& frame, const cv::Rect&) override; + bool enqueue(const cv::Mat & frame, const cv::Rect &) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -105,7 +113,7 @@ class AgeGenderDetection : public BaseInference { * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. @@ -115,10 +123,9 @@ class AgeGenderDetection : public BaseInference { * @brief Show the observed detection result either through image window * or ROS topic. 
*/ - const void observeOutput( - const std::shared_ptr& output) override; + const void observeOutput(const std::shared_ptr & output) override; - private: +private: std::shared_ptr valid_model_; std::vector results_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp index 7e737cc3..3f2a1f79 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for BaseInference Class @@ -23,13 +21,15 @@ #include #include +#include #include "dynamic_vino_lib/engines/engine.hpp" #include "dynamic_vino_lib/slog.hpp" #include "inference_engine.hpp" #include "opencv2/opencv.hpp" -namespace Outputs { +namespace Outputs +{ class BaseOutput; } /** @@ -39,14 +39,16 @@ class BaseOutput; * @param[in] scale_factor Scale factor for loading. * @param[in] batch_index Indicates the batch index for the frame. */ -template -void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, - float scale_factor = 1.0, int batch_index = 0) { +template +void matU8ToBlob( + const cv::Mat & orig_image, InferenceEngine::Blob::Ptr & blob, + float scale_factor = 1.0, int batch_index = 0) +{ InferenceEngine::SizeVector blob_size = blob->getTensorDesc().getDims(); const size_t width = blob_size[3]; const size_t height = blob_size[2]; const size_t channels = blob_size[1]; - T* blob_data = blob->buffer().as(); + T * blob_data = blob->buffer().as(); cv::Mat resized_image(orig_image); if (width != orig_image.size().width || height != orig_image.size().height) { @@ -58,24 +60,29 @@ void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, for (size_t h = 0; h < height; h++) { for (size_t w = 0; w < width; w++) { blob_data[batchOffset + c * width * height + h * width + w] = - resized_image.at(h, w)[c] * scale_factor; + resized_image.at(h, w)[c] * scale_factor; } } } } -namespace dynamic_vino_lib { +namespace dynamic_vino_lib +{ /** * @class Result * @brief Base class for detection result. 
*/ -class Result { - public: +class Result +{ +public: friend class BaseInference; - explicit Result(const cv::Rect& location); - inline const cv::Rect getLocation() const { return location_; } + explicit Result(const cv::Rect & location); + inline const cv::Rect getLocation() const + { + return location_; + } - private: +private: cv::Rect location_; }; @@ -83,8 +90,9 @@ class Result { * @class BaseInference * @brief Base class for network inference. */ -class BaseInference { - public: +class BaseInference +{ +public: BaseInference(); virtual ~BaseInference(); /** @@ -96,14 +104,18 @@ class BaseInference { * @brief Get the loaded Engine instance. * @return The loaded Engine instance. */ - inline const std::shared_ptr getEngine() const { + inline const std::shared_ptr getEngine() const + { return engine_; } /** * @brief Get the number of enqueued frames to be infered. * @return The number of enqueued frames to be infered. */ - inline const int getEnqueuedNum() const { return enqueued_frames; } + inline const int getEnqueuedNum() const + { + return enqueued_frames; + } /** * @brief Enqueue a frame to this class. * The frame will be buffered but not infered yet. @@ -112,8 +124,7 @@ class BaseInference { * to the frame generated by the input device. * @return Whether this operation is successful. */ - virtual bool enqueue(const cv::Mat& frame, - const cv::Rect& input_frame_loc) = 0; + virtual bool enqueue(const cv::Mat & frame, const cv::Rect & input_frame_loc) = 0; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -121,8 +132,7 @@ class BaseInference { virtual bool submitRequest(); virtual bool SynchronousRequest(); - virtual const void observeOutput( - const std::shared_ptr& output) = 0; + virtual const void observeOutput(const std::shared_ptr & output) = 0; /** * @brief This function will fetch the results of the previous inference and @@ -140,29 +150,30 @@ class BaseInference { * to the frame generated by the input device. * @param[in] idx The index of the result. */ - virtual const dynamic_vino_lib::Result* getLocationResult(int idx) const = 0; + virtual const dynamic_vino_lib::Result * getLocationResult(int idx) const = 0; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ virtual const std::string getName() const = 0; - protected: +protected: /** * @brief Enqueue the fram into the input blob of the target calculation * device. Check OpenVINO document for detailed information. * @return Whether this operation is successful. 
*/ - template - bool enqueue(const cv::Mat& frame, const cv::Rect&, float scale_factor, - int batch_index, const std::string& input_name) { + template + bool enqueue( + const cv::Mat & frame, const cv::Rect &, float scale_factor, int batch_index, + const std::string & input_name) + { if (enqueued_frames == max_batch_size_) { - slog::warn << "Number of " << getName() << "input more than maximum(" - << max_batch_size_ << ") processed by inference" << slog::endl; + slog::warn << "Number of " << getName() << "input more than maximum(" << max_batch_size_ << + ") processed by inference" << slog::endl; return false; } - InferenceEngine::Blob::Ptr input_blob = - engine_->getRequest()->GetBlob(input_name); + InferenceEngine::Blob::Ptr input_blob = engine_->getRequest()->GetBlob(input_name); matU8ToBlob(frame, input_blob, scale_factor, batch_index); enqueued_frames += 1; return true; @@ -170,12 +181,13 @@ class BaseInference { /** * @brief Set the max batch size for one inference. */ - inline void setMaxBatchSize(int max_batch_size) { + inline void setMaxBatchSize(int max_batch_size) + { max_batch_size_ = max_batch_size; } std::vector results_; - private: +private: std::shared_ptr engine_; int max_batch_size_ = 1; int enqueued_frames = 0; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp index 55acc87c..564889ff 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp @@ -1,19 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for EmotionsDetection Class * @file emotions_detection.hpp @@ -31,25 +28,31 @@ #include "opencv2/opencv.hpp" #include "dynamic_vino_lib/models/emotion_detection_model.hpp" -namespace Outputs { +namespace Outputs +{ class BaseOuput; } -namespace dynamic_vino_lib { +namespace dynamic_vino_lib +{ /** * @class EmotionResult * @brief Class for storing and processing emotion detection result. 
*/ -class EmotionsResult : public Result { - public: +class EmotionsResult : public Result +{ +public: friend class EmotionsDetection; - explicit EmotionsResult(const cv::Rect& location); + explicit EmotionsResult(const cv::Rect & location); /** * @brief Get the emotion label of the detected person. * @return The predictea emotion label. */ - std::string getLabel() const { return label_; } + std::string getLabel() const + { + return label_; + } - private: +private: std::string label_ = ""; float confidence_ = -1; }; @@ -58,8 +61,9 @@ class EmotionsResult : public Result { * @class EmotionDetection * @brief Class to load emotion detection model and perform emotion detection. */ -class EmotionsDetection : public BaseInference { - public: +class EmotionsDetection : public BaseInference +{ +public: using Result = dynamic_vino_lib::EmotionsResult; EmotionsDetection(); ~EmotionsDetection() override; @@ -75,7 +79,7 @@ class EmotionsDetection : public BaseInference { * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat&, const cv::Rect&) override; + bool enqueue(const cv::Mat &, const cv::Rect &) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -98,7 +102,7 @@ class EmotionsDetection : public BaseInference { * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. @@ -108,12 +112,14 @@ class EmotionsDetection : public BaseInference { * @brief Show the observed detection result either through image window * or ROS topic. */ - const void observeOutput( - const std::shared_ptr& output) override; + const void observeOutput(const std::shared_ptr & output) override; - std::vector getResults() { return results_; }; + std::vector getResults() + { + return results_; + } - private: +private: std::shared_ptr valid_model_; std::vector results_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp index aabb0ea4..12899fe6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for FaceDetection Class @@ -37,22 +35,25 @@ #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib { +namespace dynamic_vino_lib +{ /** * @class FaceDetectionResult * @brief Class for storing and processing face detection result. */ -class FaceDetectionResult : public ObjectDetectionResult { - public: - explicit FaceDetectionResult(const cv::Rect& location); +class FaceDetectionResult : public ObjectDetectionResult +{ +public: + explicit FaceDetectionResult(const cv::Rect & location); }; /** * @class FaceDetection * @brief Class to load face detection model and perform face detection. */ -class FaceDetection : public ObjectDetection { - public: +class FaceDetection : public ObjectDetection +{ +public: explicit FaceDetection(double); }; } // namespace dynamic_vino_lib diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp index 8780af97..07173761 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for FaceDetection Class @@ -31,31 +29,43 @@ #include "inference_engine.hpp" #include "opencv2/opencv.hpp" -namespace dynamic_vino_lib { +namespace dynamic_vino_lib +{ /** * @class HeadPoseResult * @brief Class for storing and processing headpose detection result. */ -class HeadPoseResult : public Result { - public: +class HeadPoseResult : public Result +{ +public: friend class HeadPoseDetection; - explicit HeadPoseResult(const cv::Rect& location); + explicit HeadPoseResult(const cv::Rect & location); /** * @brief Get the yawl angle of the headpose. * @return The yawl value. 
*/ - float getAngleY() const {return angle_y_;} + float getAngleY() const + { + return angle_y_; + } /** * @brief Get the pitch angle of the headpose. * @return The pitch value. */ - float getAngleP() const {return angle_p_;} + float getAngleP() const + { + return angle_p_; + } /** * @brief Get the roll angle of the headpose. * @return The roll value. */ - float getAngleR() const {return angle_r_;} - private: + float getAngleR() const + { + return angle_r_; + } + +private: float angle_y_ = -1; float angle_p_ = -1; float angle_r_ = -1; @@ -65,8 +75,9 @@ class HeadPoseResult : public Result { * @class HeadPoseDetection * @brief Class to load headpose detection model and perform headpose detection. */ -class HeadPoseDetection : public BaseInference { - public: +class HeadPoseDetection : public BaseInference +{ +public: using Result = dynamic_vino_lib::HeadPoseResult; HeadPoseDetection(); ~HeadPoseDetection() override; @@ -82,7 +93,7 @@ class HeadPoseDetection : public BaseInference { * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat& frame, const cv::Rect&) override; + bool enqueue(const cv::Mat & frame, const cv::Rect &) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -105,7 +116,7 @@ class HeadPoseDetection : public BaseInference { * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. @@ -115,12 +126,14 @@ class HeadPoseDetection : public BaseInference { * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput( - const std::shared_ptr& output) override; - - std::vector getResults() { return results_; }; + const void observeOutput(const std::shared_ptr & output) override; + + std::vector getResults() + { + return results_; + } - private: +private: std::shared_ptr valid_model_; std::vector results_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp index 96f03884..416ea317 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp @@ -1,18 +1,17 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * @brief A header file with declaration for ObjectDetection Class * @file object_detection.hpp @@ -33,22 +32,31 @@ #include "inference_engine.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib { +namespace dynamic_vino_lib +{ /** * @class ObjectDetectionResult * @brief Class for storing and processing face detection result. */ -class ObjectDetectionResult : public Result { - public: +class ObjectDetectionResult : public Result +{ +public: friend class ObjectDetection; - explicit ObjectDetectionResult(const cv::Rect& location); - std::string getLabel() const { return label_; } + explicit ObjectDetectionResult(const cv::Rect & location); + std::string getLabel() const + { + return label_; + } /** * @brief Get the confidence that the detected area is a face. - * @return The confidence value. + * @return The confidence value. */ - float getConfidence() const { return confidence_; } - private: + float getConfidence() const + { + return confidence_; + } + +private: std::string label_ = ""; float confidence_ = -1; }; @@ -56,8 +64,9 @@ class ObjectDetectionResult : public Result { * @class ObjectDetection * @brief Class to load face detection model and perform face detection. */ -class ObjectDetection : public BaseInference { - public: +class ObjectDetection : public BaseInference +{ +public: using Result = dynamic_vino_lib::ObjectDetectionResult; explicit ObjectDetection(double); ~ObjectDetection() override; @@ -73,7 +82,7 @@ class ObjectDetection : public BaseInference { * to the frame generated by the input device. * @return Whether this operation is successful. */ - bool enqueue(const cv::Mat&, const cv::Rect&) override; + bool enqueue(const cv::Mat &, const cv::Rect &) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -96,19 +105,19 @@ class ObjectDetection : public BaseInference { * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr& output); + const void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
*/ const std::string getName() const override; - private: +private: std::shared_ptr valid_model_; std::vector results_; int width_ = 0; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp index 890d88c1..3fde54c6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp @@ -1,18 +1,17 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * @brief A header file with declaration for ObjectSegmentation Class * @file object_detection.hpp @@ -32,23 +31,35 @@ #include "inference_engine.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib { +namespace dynamic_vino_lib +{ /** * @class ObjectSegmentationResult * @brief Class for storing and processing object segmentation result. */ -class ObjectSegmentationResult : public Result { - public: +class ObjectSegmentationResult : public Result +{ +public: friend class ObjectSegmentation; - explicit ObjectSegmentationResult(const cv::Rect& location); - std::string getLabel() const { return label_; } + explicit ObjectSegmentationResult(const cv::Rect & location); + std::string getLabel() const + { + return label_; + } /** * @brief Get the confidence that the detected area is a face. - * @return The confidence value. + * @return The confidence value. */ - float getConfidence() const { return confidence_; } - cv::Mat getMask() const { return mask_; } - private: + float getConfidence() const + { + return confidence_; + } + cv::Mat getMask() const + { + return mask_; + } + +private: std::string label_ = ""; float confidence_ = -1; cv::Mat mask_; @@ -57,8 +68,9 @@ class ObjectSegmentationResult : public Result { * @class ObjectSegmentation * @brief Class to load object segmentation model and perform object segmentation. */ -class ObjectSegmentation : public BaseInference { - public: +class ObjectSegmentation : public BaseInference +{ +public: using Result = dynamic_vino_lib::ObjectSegmentationResult; explicit ObjectSegmentation(double); ~ObjectSegmentation() override; @@ -74,7 +86,7 @@ class ObjectSegmentation : public BaseInference { * to the frame generated by the input device. * @return Whether this operation is successful. 
*/ - bool enqueue(const cv::Mat&, const cv::Rect&) override; + bool enqueue(const cv::Mat &, const cv::Rect &) override; /** * @brief Start inference for all buffered frames. * @return Whether this operation is successful. @@ -97,18 +109,19 @@ class ObjectSegmentation : public BaseInference { * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result* getLocationResult(int idx) const override; + const dynamic_vino_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. */ - const void observeOutput(const std::shared_ptr& output); + const void observeOutput(const std::shared_ptr & output); /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. */ const std::string getName() const override; - private: + +private: std::shared_ptr valid_model_; std::vector results_; int width_ = 0; @@ -116,4 +129,4 @@ class ObjectSegmentation : public BaseInference { double show_output_thresh_ = 0; }; } // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp index 135a4714..5c707139 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
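The `BaseInference::enqueue()` path seen earlier in this diff buffers a `cv::Mat` into the engine's input blob via `matU8ToBlob`, with a scale factor and a batch index. As a rough illustration of the NCHW layout conversion that kind of helper performs, here is a self-contained sketch; it writes into a plain `std::vector<float>` instead of an `InferenceEngine::Blob`, and the function name and signature are hypothetical stand-ins, not the toolkit's actual helper.

```cpp
// Illustrative only: a standalone sketch of the NCHW copy that a
// matU8ToBlob-style helper performs when BaseInference::enqueue() buffers a
// frame. It fills a plain std::vector<float> so it compiles with OpenCV alone.
#include <opencv2/opencv.hpp>
#include <cstddef>
#include <vector>

// Copies `frame` (8-bit BGR) into `blob` at `batch_index`, assuming the blob
// holds images of channels x height x width floats in NCHW order.
// `scale_factor` mirrors the scaling argument passed to enqueue().
void matU8ToPlanarFloat(const cv::Mat & frame, std::vector<float> & blob,
                        int width, int height, int channels,
                        int batch_index, float scale_factor)
{
  cv::Mat resized;
  cv::resize(frame, resized, cv::Size(width, height));

  const std::size_t image_size =
    static_cast<std::size_t>(width) * height * channels;
  const std::size_t offset = batch_index * image_size;

  for (int c = 0; c < channels; ++c) {
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w) {
        blob[offset + c * width * height + h * width + w] =
          resized.at<cv::Vec3b>(h, w)[c] * scale_factor;
      }
    }
  }
}

int main()
{
  cv::Mat frame(480, 640, CV_8UC3, cv::Scalar(0, 128, 255));
  const int batch_size = 2, channels = 3, net_w = 300, net_h = 300;
  std::vector<float> blob(batch_size * channels * net_w * net_h);
  matU8ToPlanarFloat(frame, blob, net_w, net_h, channels, /*batch_index=*/0, 1.0f);
  return 0;
}
```

The batch offset is what lets `enqueue()` be called once per detected region before a single `submitRequest()`, up to `max_batch_size_`.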
/** * @brief A header file with declaration for BaseInput Class @@ -22,6 +20,8 @@ #define DYNAMIC_VINO_LIB__INPUTS__BASE_INPUT_HPP_ #include +#include +#include #include "dynamic_vino_lib/inputs/ros2_handler.hpp" /** @@ -29,9 +29,11 @@ * @brief This class is an interface for three kinds of * input devices: realsense camera, standard camera and video */ -namespace Input { -class BaseInputDevice : public Ros2Handler { - public: +namespace Input +{ +class BaseInputDevice : public Ros2Handler +{ +public: /** * @brief Initialize the input device, * for cameras, it will turn the camera on and get ready to read frames, @@ -54,8 +56,11 @@ class BaseInputDevice : public Ros2Handler { * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - virtual bool read(cv::Mat* frame) = 0; - virtual bool readService(cv::Mat* frame, std::string config_path) { return true;}; + virtual bool read(cv::Mat * frame) = 0; + virtual bool readService(cv::Mat * frame, std::string config_path) + { + return true; + } virtual void config() = 0; //< TODO virtual ~BaseInputDevice() = default; @@ -63,45 +68,68 @@ class BaseInputDevice : public Ros2Handler { * @brief Get the width of the frame read from input device. * @return The width of the frame read from input device. */ - inline size_t getWidth() { return width_; } + inline size_t getWidth() + { + return width_; + } /** * @brief Set the width of the frame read from input device. * @param[in] width Width to be set for the frame. */ - inline void setWidth(size_t width) { width_ = width; } + inline void setWidth(size_t width) + { + width_ = width; + } /** * @brief Get the height of the frame read from input device. * @return The height of the frame read from input device. */ - inline size_t getHeight() { return height_; } + inline size_t getHeight() + { + return height_; + } /** * @brief Set the height of the frame read from input device. * @param[in] width Width to be set for the frame. */ - inline void setHeight(size_t height) { height_ = height; } + inline void setHeight(size_t height) + { + height_ = height; + } /** * @brief Check whether the input device is successfully initiated. * @return Whether the input device is successfully initiated. */ - inline bool isInit() { return is_init_; } + inline bool isInit() + { + return is_init_; + } /** * @brief Set the initialization state for input device. * @param[in] is_init The initialization state to be set. */ - inline void setInitStatus(bool is_init) { is_init_ = is_init; } + inline void setInitStatus(bool is_init) + { + is_init_ = is_init; + } /** * @brief Set the frame_id of input device for ROSTopic outputs. * @param[in] frame_id The frame_id of input device. */ - inline void setFrameID(std::string frame_id) {frame_id_ = frame_id; } + inline void setFrameID(std::string frame_id) + { + frame_id_ = frame_id; + } /** * @brief Get the frame_id of input device. * @return Frame_id of input device. 
*/ - inline std::string getFrameID() { return frame_id_; } - + inline std::string getFrameID() + { + return frame_id_; + } - private: +private: size_t width_ = 0; size_t height_ = 0; bool is_init_ = false; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp index 193c228c..9b9f0832 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for Image class @@ -25,14 +23,16 @@ #include #include "dynamic_vino_lib/inputs/base_input.hpp" -namespace Input { +namespace Input +{ /** * @class Image * @brief Class for recieving an image file as input. */ -class Image : public BaseInputDevice { - public: - explicit Image(const std::string&); +class Image : public BaseInputDevice +{ +public: + explicit Image(const std::string &); /** * @brief Read an image file from the file path. * @param[in] An image file path. @@ -44,22 +44,28 @@ class Image : public BaseInputDevice { * No implementation for Image class. * @return Whether the input device is successfully turned on. */ - bool initialize(int t) override { return initialize(); }; + bool initialize(int t) override + { + return initialize(); + } /** * @brief Initialize the input device with given width and height. * No implementation for Image class. * @return Whether the input device is successfully turned on. */ - bool initialize(size_t width, size_t height) override { return initialize(); }; + bool initialize(size_t width, size_t height) override + { + return initialize(); + } /** * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. 
*/ - bool read(cv::Mat* frame) override; - bool readService(cv::Mat* frame, std::string config_path); + bool read(cv::Mat * frame) override; + bool readService(cv::Mat * frame, std::string config_path); void config() override; - private: +private: cv::Mat image_; std::string file_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp index 4f04fbb5..03d1bbc6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for RealSenseCamera class @@ -26,15 +24,17 @@ #include #include "dynamic_vino_lib/inputs/base_input.hpp" -namespace Input { +namespace Input +{ /** * @class RealSenseCamera * @brief Class for recieving a realsense camera as input. */ -class RealSenseCamera : public BaseInputDevice { - public: +class RealSenseCamera : public BaseInputDevice +{ +public: /** - * @brief Initialize the input device, turn the + * @brief Initialize the input device, turn the * camera on and get ready to read frames. * @return Whether the input device is successfully turned on. */ @@ -44,7 +44,10 @@ class RealSenseCamera : public BaseInputDevice { * Initialize camera by its index when multiple standard camera is connected. * @return Whether the input device is successfully turned on. */ - bool initialize(int t) override { return true; }; + bool initialize(int t) override + { + return true; + } /** * @brief Initialize the input device with given width and height. * @return Whether the input device is successfully turned on. @@ -54,10 +57,10 @@ class RealSenseCamera : public BaseInputDevice { * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. 
*/ - bool read(cv::Mat* frame) override; + bool read(cv::Mat * frame) override; void config() override; - private: +private: rs2::config cfg_; rs2::pipeline pipe_; bool first_read_ = true; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp index dd96bab9..b76a692c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for RealSenseCamera class @@ -30,21 +28,29 @@ #include "dynamic_vino_lib/inputs/base_input.hpp" -namespace Input { +namespace Input +{ /** * @class RealSenseCameraTopic * @brief Class for recieving a realsense camera topic as input. */ -class RealSenseCameraTopic : public BaseInputDevice, public rclcpp::Node{ - public: +class RealSenseCameraTopic : public BaseInputDevice, public rclcpp::Node +{ +public: RealSenseCameraTopic(); bool initialize() override; - bool initialize(int t) override { return true; }; - bool initialize(size_t width, size_t height) override { return true; }; - bool read(cv::Mat* frame) override; + bool initialize(int t) override + { + return true; + } + bool initialize(size_t width, size_t height) override + { + return true; + } + bool read(cv::Mat * frame) override; void config() override; - private: +private: rclcpp::Subscription::SharedPtr sub_; cv::Mat image; int image_count; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp index cdd2be7e..61e29b1f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for ROS/ROS2 handler @@ -23,23 +21,26 @@ #define DYNAMIC_VINO_LIB__INPUTS__ROS2_HANDLER_HPP_ #include +#include -namespace Input { - -class Ros2Handler { +namespace Input +{ +class Ros2Handler +{ public: - void setHandler(const std::shared_ptr& node) { + void setHandler(const std::shared_ptr & node) + { node_ = node; } - std::shared_ptr getHandler() const{ + std::shared_ptr getHandler() const + { return node_; } + private: std::shared_ptr node_; - }; - -} // namespace +} // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__ROS_HANDLER_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__INPUTS__ROS2_HANDLER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp index 9100b60f..0af9ee81 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
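`StandardCamera`, declared below, is the `cv::VideoCapture`-backed input device. As a rough illustration of the `initialize()`/`read()` contract that `BaseInputDevice` implementations follow, here is a minimal self-contained sketch; the class and member names are simplified stand-ins for illustration, not the project's actual code.

```cpp
// Illustrative sketch only: a minimal cv::VideoCapture-backed input device
// following the same initialize()/read() contract as the BaseInputDevice
// implementations in this diff. Names are simplified stand-ins.
#include <opencv2/opencv.hpp>
#include <iostream>

class MiniCamera
{
public:
  // Open the default camera and record the frame size, as initialize() does.
  bool initialize()
  {
    if (!cap_.open(0)) {
      return false;
    }
    width_ = static_cast<size_t>(cap_.get(cv::CAP_PROP_FRAME_WIDTH));
    height_ = static_cast<size_t>(cap_.get(cv::CAP_PROP_FRAME_HEIGHT));
    return true;
  }

  // Grab the next frame into *frame, returning whether the read succeeded.
  bool read(cv::Mat * frame)
  {
    return cap_.read(*frame) && !frame->empty();
  }

  size_t getWidth() const {return width_;}
  size_t getHeight() const {return height_;}

private:
  cv::VideoCapture cap_;
  size_t width_ = 0;
  size_t height_ = 0;
};

int main()
{
  MiniCamera camera;
  if (!camera.initialize()) {
    std::cerr << "No camera available" << std::endl;
    return 1;
  }
  cv::Mat frame;
  while (camera.read(&frame)) {
    cv::imshow("frame", frame);
    if (cv::waitKey(1) == 27) {  // Esc to quit
      break;
    }
  }
  return 0;
}
```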
/** * @brief A header file with declaration for StandardCamera class @@ -22,17 +20,18 @@ #ifndef DYNAMIC_VINO_LIB__INPUTS__STANDARD_CAMERA_HPP_ #define DYNAMIC_VINO_LIB__INPUTS__STANDARD_CAMERA_HPP_ - #include #include "dynamic_vino_lib/inputs/base_input.hpp" -namespace Input { +namespace Input +{ /** * @class StandardCamera * @brief Class for recieving a standard camera as input. */ -class StandardCamera : public BaseInputDevice { - public: +class StandardCamera : public BaseInputDevice +{ +public: /** * @brief Initialize the input device, * for cameras, it will turn the camera on and get ready to read frames, @@ -55,10 +54,10 @@ class StandardCamera : public BaseInputDevice { * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - bool read(cv::Mat* frame) override; + bool read(cv::Mat * frame) override; void config() override; - private: +private: cv::VideoCapture cap; }; } // namespace Input diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp index 8b730fa4..5ac76863 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for Video class @@ -25,14 +23,16 @@ #include #include "dynamic_vino_lib/inputs/base_input.hpp" -namespace Input { +namespace Input +{ /** * @class Video * @brief Class for recieving a video file as input. */ -class Video : public BaseInputDevice { - public: - explicit Video(const std::string&); +class Video : public BaseInputDevice +{ +public: + explicit Video(const std::string &); /** * @brief Read a video file from the file path. * @param[in] An video file path. @@ -44,7 +44,10 @@ class Video : public BaseInputDevice { * No implementation for Video class. * @return Whether the input device is successfully turned on. */ - bool initialize(int t) override { return initialize(); }; + bool initialize(int t) override + { + return initialize(); + } /** * @brief Initialize the input device with given width and height. * No implementation for Video class. 
@@ -55,10 +58,10 @@ class Video : public BaseInputDevice { * @brief Read next frame, and give the value to argument frame. * @return Whether the next frame is successfully read. */ - bool read(cv::Mat* frame) override; + bool read(cv::Mat * frame) override; void config() override; - private: +private: cv::VideoCapture cap; std::string video_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp index 2b6f43b7..bb096910 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for AgeGenderDetectionModel Class @@ -25,29 +23,38 @@ #include #include "dynamic_vino_lib/models/base_model.hpp" -namespace Models { +namespace Models +{ /** * @class AgeGenderDetectionModel * @brief This class generates the age gender detection model. */ -class AgeGenderDetectionModel : public BaseModel { - public: - AgeGenderDetectionModel(const std::string&, int, int, int); +class AgeGenderDetectionModel : public BaseModel +{ +public: + AgeGenderDetectionModel(const std::string &, int, int, int); /** * @brief Get the input name. * @return Input name. */ - inline const std::string getInputName() const { return input_; } + inline const std::string getInputName() const + { + return input_; + } /** * @brief Get the age from the detection reuslt. * @return Detected age. */ - inline const std::string getOutputAgeName() const { return output_age_; } + inline const std::string getOutputAgeName() const + { + return output_age_; + } /** * @brief Get the gender from the detection reuslt. * @return Detected gender. 
*/ - inline const std::string getOutputGenderName() const { + inline const std::string getOutputGenderName() const + { return output_gender_; } /** @@ -56,11 +63,11 @@ class AgeGenderDetectionModel : public BaseModel { */ const std::string getModelName() const override; - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - private: +private: std::string input_; std::string output_age_; std::string output_gender_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp index 54e6d0f8..7fe1681d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for BaseModel Class @@ -28,19 +26,22 @@ #include "inference_engine.hpp" -namespace Engines { +namespace Engines +{ class Engine; } -namespace Models { +namespace Models +{ /** * @class BaseModel * @brief This class represents the network given by .xml and .bin file */ -class BaseModel { +class BaseModel +{ using Ptr = std::shared_ptr; - public: +public: /** * @brief Initialize the class with given .xml, .bin and .labels file. It will * also check whether the number of input and output are fit. @@ -51,18 +52,23 @@ class BaseModel { * @param[in] batch_size The number of batch size the network should have. * @return Whether the input device is successfully turned on. */ - BaseModel(const std::string& model_loc, int input_num, int output_num, - int batch_size); + BaseModel(const std::string & model_loc, int input_num, int output_num, int batch_size); /** * @brief Get the label vector. * @return The label vector. */ - inline std::vector& getLabels() { return labels_; } + inline std::vector & getLabels() + { + return labels_; + } /** * @brief Get the maximum batch size of the model. * @return The maximum batch size of the model. */ - inline const int getMaxBatchSize() const { return max_batch_size_; } + inline const int getMaxBatchSize() const + { + return max_batch_size_; + } /** * @brief Initialize the model. 
During the process the class will check * the network input, output size, check layer property and @@ -75,23 +81,21 @@ class BaseModel { */ virtual const std::string getModelName() const = 0; - protected: +protected: /** * @brief Check whether the layer property * (output layer name, output layer type, etc.) is right * @param[in] network_reader The reader of the network to be checked. */ - virtual void checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& network_reader) = 0; + virtual void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr & network_reader) = 0; /** * @brief Set the layer property (layer layout, layer precision, etc.). * @param[in] network_reader The reader of the network to be set. */ - virtual void - setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; + virtual void setLayerProperty(InferenceEngine::CNNNetReader::Ptr network_reader) = 0; virtual void checkNetworkSize(int, int, InferenceEngine::CNNNetReader::Ptr); - private: +private: friend class Engines::Engine; InferenceEngine::CNNNetReader::Ptr net_reader_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp index 949152c9..0624ac51 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for EmotionDetectionModel Class @@ -25,27 +23,35 @@ #include #include "dynamic_vino_lib/models/base_model.hpp" -namespace Models { +namespace Models +{ /** * @class EmotionDetectionModel * @brief This class generates the emotion detection model. */ -class EmotionDetectionModel : public BaseModel { - public: - EmotionDetectionModel(const std::string&, int, int, int); - inline const std::string getInputName() { return input_; } - inline const std::string getOutputName() { return output_; } +class EmotionDetectionModel : public BaseModel +{ +public: + EmotionDetectionModel(const std::string &, int, int, int); + inline const std::string getInputName() + { + return input_; + } + inline const std::string getOutputName() + { + return output_; + } /** * @brief Get the name of this detection model. 
* @return Name of the model. */ const std::string getModelName() const override; - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - private: +private: std::string input_; std::string output_; }; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp index 02196de3..acd24142 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for FaceDetectionModel Class @@ -25,14 +23,16 @@ #include #include "dynamic_vino_lib/models/object_detection_model.hpp" -namespace Models { +namespace Models +{ /** * @class FaceDetectionModel * @brief This class generates the face detection model. */ -class FaceDetectionModel : public ObjectDetectionModel { - public: - FaceDetectionModel(const std::string&, int, int, int); +class FaceDetectionModel : public ObjectDetectionModel +{ +public: + FaceDetectionModel(const std::string &, int, int, int); /** * @brief Get the name of this detection model. * @return Name of the model. diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp index 412b8b19..0084bc31 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for HeadPoseDetectionModel Class @@ -25,34 +23,42 @@ #include #include "dynamic_vino_lib/models/base_model.hpp" -namespace Models { +namespace Models +{ /** * @class HeadPoseDetectionModel * @brief This class generates the headpose detection model. */ -class HeadPoseDetectionModel : public BaseModel { - public: - HeadPoseDetectionModel(const std::string&, int, int, int); - inline const std::string getInputName() const { return input_; } +class HeadPoseDetectionModel : public BaseModel +{ +public: + HeadPoseDetectionModel(const std::string &, int, int, int); + inline const std::string getInputName() const + { + return input_; + } /** * @brief Get the output angle roll. * @return Roll value. */ - inline const std::string getOutputOutputAngleR() const { + inline const std::string getOutputOutputAngleR() const + { return output_angle_r_; } /** * @brief Get the output angle pitch. * @return Pitch value. */ - inline const std::string getOutputOutputAngleP() const { + inline const std::string getOutputOutputAngleP() const + { return output_angle_p_; } /** * @brief Get the output angle yawl. * @return Yawl value. */ - inline const std::string getOutputOutputAngleY() const { + inline const std::string getOutputOutputAngleY() const + { return output_angle_y_; } /** @@ -61,11 +67,11 @@ class HeadPoseDetectionModel : public BaseModel { */ const std::string getModelName() const override; - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - private: +private: std::string input_; std::string output_angle_r_ = "angle_r_fc"; std::string output_angle_p_ = "angle_p_fc"; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_model.hpp index db1f5962..c2a3b8cd 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_model.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_model.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for ObjectDetectionModel Class * @file face_detection_model.h @@ -21,25 +19,40 @@ #define DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_MODEL_HPP_ #include #include "dynamic_vino_lib/models/base_model.hpp" -namespace Models { +namespace Models +{ /** * @class ObjectDetectionModel * @brief This class generates the face detection model. */ -class ObjectDetectionModel : public BaseModel { - public: - ObjectDetectionModel(const std::string&, int, int, int); - inline const int getMaxProposalCount() { return max_proposal_count_; } - inline const int getObjectSize() { return object_size_; } - inline const std::string getInputName() { return input_; } - inline const std::string getOutputName() { return output_; } +class ObjectDetectionModel : public BaseModel +{ +public: + ObjectDetectionModel(const std::string &, int, int, int); + inline const int getMaxProposalCount() + { + return max_proposal_count_; + } + inline const int getObjectSize() + { + return object_size_; + } + inline const std::string getInputName() + { + return input_; + } + inline const std::string getOutputName() + { + return output_; + } /** * @brief Get the name of this detection model. * @return Name of the model. */ const std::string getModelName() const override; - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; + +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; int max_proposal_count_; @@ -48,4 +61,4 @@ class ObjectDetectionModel : public BaseModel { std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_MODEL_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp index 1e07a508..196b82ac 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for ObjectSegmentationModel Class * @file face_detection_model.h @@ -21,30 +19,49 @@ #define DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ #include #include "dynamic_vino_lib/models/base_model.hpp" -namespace Models { +namespace Models +{ /** * @class ObjectSegmentationModel * @brief This class generates the object segmentation model. */ -class ObjectSegmentationModel : public BaseModel { - public: - ObjectSegmentationModel(const std::string&, int, int, int); - inline const int getMaxProposalCount() { return max_proposal_count_; } - inline const int getObjectSize() { return object_size_; } - inline const std::string getInputName() { return input_; } - inline const std::string getDetectionOutputName() { return detection_output_; } - inline const std::string getMaskOutputName() { return mask_output_; } +class ObjectSegmentationModel : public BaseModel +{ +public: + ObjectSegmentationModel(const std::string &, int, int, int); + inline const int getMaxProposalCount() + { + return max_proposal_count_; + } + inline const int getObjectSize() + { + return object_size_; + } + inline const std::string getInputName() + { + return input_; + } + inline const std::string getDetectionOutputName() + { + return detection_output_; + } + inline const std::string getMaskOutputName() + { + return mask_output_; + } /** * @brief Get the name of this segmentation model. * @return Name of the model. */ const std::string getModelName() const override; - protected: - void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr&) override; + +protected: + void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; void checkNetworkSize(int, int, InferenceEngine::CNNNetReader::Ptr) override; - private: + +private: int max_proposal_count_; int object_size_; std::string input_; @@ -52,4 +69,4 @@ class ObjectSegmentationModel : public BaseModel { std::string detection_output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ \ No newline at end of file +#endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp index 8d8bd02f..5a063e87 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for HeadPoseDetectionModel Class @@ -23,7 +21,7 @@ #define DYNAMIC_VINO_LIB__OUTPUTS__BASE_OUTPUT_HPP_ #include -#include +#include #include #include #include @@ -36,6 +34,7 @@ #include #include #include +#include #include "dynamic_vino_lib/inferences/age_gender_detection.hpp" #include "dynamic_vino_lib/inferences/base_inference.hpp" @@ -47,49 +46,62 @@ #include "opencv2/opencv.hpp" class Pipeline; -namespace Outputs { +namespace Outputs +{ /** * @class BaseOutput * @brief This class is a base class for various output devices. It employs * visitor pattern to perform different operations to different inference * result with different output device */ -class BaseOutput { - public: +class BaseOutput +{ +public: BaseOutput() = default; /** * @brief Generate output content according to the object segmentation result. */ - virtual void accept( - const std::vector&) {} + virtual void accept(const std::vector &) + { + } /** * @brief Generate output content according to the object detection result. */ - virtual void accept( - const std::vector&) {} + virtual void accept(const std::vector &) + { + } /** * @brief Generate output content according to the face detection result. */ - virtual void accept( - const std::vector&) {} + virtual void accept(const std::vector &) + { + } /** * @brief Generate output content according to the emotion detection result. */ - virtual void accept(const std::vector&) {} + virtual void accept(const std::vector &) + { + } /** * @brief Generate output content according to the age and gender detection * result. */ - virtual void accept(const std::vector&) {} + virtual void accept(const std::vector &) + { + } /** * @brief Generate output content according to the headpose detection result. */ - virtual void accept(const std::vector&) {} + virtual void accept(const std::vector &) + { + } /** * @brief Calculate the camera matrix of a frame for image window output, no implementation for ros topic output. */ - virtual void feedFrame(const cv::Mat&) {} + virtual void feedFrame(const cv::Mat &) + { + } /** * @brief Show all the contents generated by the accept functions. 
*/ @@ -99,17 +111,17 @@ class BaseOutput { */ int getFPS() const; - void setPipeline(Pipeline* const pipeline); - virtual void setResponse(std::shared_ptr response) {} ; - virtual void setResponse(std::shared_ptr response) {} ; - virtual void setResponse(std::shared_ptr response) {} ; - virtual void setResponse(std::shared_ptr response) {} ; - Pipeline* getPipeline() const; + void setPipeline(Pipeline * const pipeline); + virtual void setResponse(std::shared_ptr response) {} + virtual void setResponse(std::shared_ptr response) {} + virtual void setResponse(std::shared_ptr response) {} + virtual void setResponse(std::shared_ptr response) {} + Pipeline * getPipeline() const; cv::Mat getFrame() const; - protected: +protected: cv::Mat frame_; - Pipeline* pipeline_; + Pipeline * pipeline_; }; } // namespace Outputs #endif // DYNAMIC_VINO_LIB__OUTPUTS__BASE_OUTPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp index 892d2e9e..9adf33cc 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for ImageWindowOutput Class @@ -26,22 +24,23 @@ #include #include "dynamic_vino_lib/outputs/base_output.hpp" -namespace Outputs { +namespace Outputs +{ /** * @class ImageWindowOutput * @brief This class handles and shows the detection result with image window. */ -class ImageWindowOutput : public BaseOutput { - public: - explicit ImageWindowOutput(const std::string& window_name, - int focal_length = 950); +class ImageWindowOutput : public BaseOutput +{ +public: + explicit ImageWindowOutput(const std::string & window_name, int focal_length = 950); /** * @brief Calculate the camera matrix of a frame for image * window output. * @param[in] A frame. */ - void feedFrame(const cv::Mat&) override; - + void feedFrame(const cv::Mat &) override; + /** * @brief Decorate frame according to detection result */ @@ -55,99 +54,77 @@ class ImageWindowOutput : public BaseOutput { * the object segmentation result. * @param[in] An obejct segmentation result objetc. 
*/ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the face detection result. * @param[in] A face detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the object detection result. * @param[in] results A bundle of object detection results. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the emotion detection result. * @param[in] A emotion detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the age and gender detection result. * @param[in] A head pose detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the headpose detection result. * @param[in] An age gender detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; - private: - void initOutputs(unsigned size); - /** - * @brief Calculate the axises of the coordinates for showing - * the image window. - */ - cv::Point calcAxis(cv::Mat r, double cx, double cy, double cz, cv::Point cp); - /** - * @brief Calculte the rotation transform from the rotation pose. - * @param[in] yaw Yaw rotation value. - * @param[in] pitch Pitch rotation value. - * @param[in] roll Roll rotation value. - */ - cv::Mat getRotationTransform(double yaw, double pitch, double roll); +private: + void initOutputs(unsigned size); + /** + * @brief Calculate the axises of the coordinates for showing + * the image window. + */ + cv::Point calcAxis(cv::Mat r, double cx, double cy, double cz, cv::Point cp); + /** + * @brief Calculte the rotation transform from the rotation pose. + * @param[in] yaw Yaw rotation value. + * @param[in] pitch Pitch rotation value. + * @param[in] roll Roll rotation value. 
+ */ + cv::Mat getRotationTransform(double yaw, double pitch, double roll); - void mergeMask( - const std::vector&); + void mergeMask(const std::vector &); - struct OutputData { + struct OutputData + { std::string desc; cv::Rect rect; cv::Scalar scalar; - cv::Point hp_cp; // for headpose, center point - cv::Point hp_x; // for headpose, end point of xAxis - cv::Point hp_y; // for headpose, end point of yAxis - cv::Point hp_zs; // for headpose, start point of zAxis - cv::Point hp_ze; // for headpose, end point of zAxis + cv::Point hp_cp; // for headpose, center point + cv::Point hp_x; // for headpose, end point of xAxis + cv::Point hp_y; // for headpose, end point of yAxis + cv::Point hp_zs; // for headpose, start point of zAxis + cv::Point hp_ze; // for headpose, end point of zAxis }; std::vector outputs_; const std::string window_name_; float focal_length_; cv::Mat camera_matrix_; - std::vector> colors_ = { - {128, 64, 128}, - {232, 35, 244}, - {70, 70, 70}, - {156, 102, 102}, - {153, 153, 190}, - {153, 153, 153}, - {30, 170, 250}, - {0, 220, 220}, - {35, 142, 107}, - {152, 251, 152}, - {180, 130, 70}, - {60, 20, 220}, - {0, 0, 255}, - {142, 0, 0}, - {70, 0, 0}, - {100, 60, 0}, - {90, 0, 0}, - {230, 0, 0}, - {32, 11, 119}, - {0, 74, 111}, - {81, 0, 81} + std::vector> colors_ = { + {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, + {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, + {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, + {81, 0, 81} }; }; } // namespace Outputs diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp index 128362cf..4af36622 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief A header file with declaration for RosTopicOutput Class @@ -41,59 +39,59 @@ #include "dynamic_vino_lib/inferences/face_detection.hpp" #include "dynamic_vino_lib/outputs/base_output.hpp" -namespace Outputs { +namespace Outputs +{ /** * @class RosTopicOutput * @brief This class handles and publish the detection result with ros topic. */ -class RosServiceOutput : public BaseOutput { - public: - RosServiceOutput() { }; +class RosServiceOutput : public BaseOutput +{ +public: + RosServiceOutput() {} /** * @brief Calculate the camera matrix of a frame. * @param[in] A frame. */ - void feedFrame(const cv::Mat&) override; + void feedFrame(const cv::Mat &) override; /** * @brief Publish all the detected infomations generated by the accept * functions with ros topic. */ - void handleOutput() override { }; + void handleOutput() override {} /** * @brief Generate ros topic infomation according to * the object detection result. * @param[in] results a bundle of object detection results. */ - void accept( - const std::vector& results) override; + void accept(const std::vector & results) override; /** * @brief Generate ros topic infomation according to * the face detection result. * @param[in] An face detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the emotion detection result. * @param[in] An emotion detection result objetc. */ - void accept(const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the age gender detection result. * @param[in] An age gender detection result objetc. */ - void accept(const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the headpose detection result. * @param[in] An head pose detection result objetc. */ - void accept(const std::vector&) override; - void setResponse(std::shared_ptr response) ; + void accept(const std::vector &) override; + void setResponse(std::shared_ptr response); - private: +private: std_msgs::msg::Header getHeader(); const std::string service_name_; std::shared_ptr node_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp index 02650d65..8778208c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for RosTopicOutput Class @@ -43,19 +41,21 @@ #include "dynamic_vino_lib/inferences/face_detection.hpp" #include "dynamic_vino_lib/outputs/base_output.hpp" -namespace Outputs { +namespace Outputs +{ /** * @class RosTopicOutput * @brief This class handles and publish the detection result with ros topic. */ -class RosTopicOutput : public BaseOutput { - public: +class RosTopicOutput : public BaseOutput +{ +public: RosTopicOutput(); /** * @brief Calculate the camera matrix of a frame. * @param[in] A frame. */ - void feedFrame(const cv::Mat&) override; + void feedFrame(const cv::Mat &) override; /** * @brief Publish all the detected infomations generated by the accept * functions with ros topic. @@ -66,49 +66,46 @@ class RosTopicOutput : public BaseOutput { * the object segmentation result. * @param[in] results a bundle of object segmentation results. */ - void accept( - const std::vector& ) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the object detection result. * @param[in] results a bundle of object detection results. */ - void accept( - const std::vector& ) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the face detection result. * @param[in] An face detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the emotion detection result. * @param[in] An emotion detection result objetc. */ - void accept(const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the age gender detection result. * @param[in] An age gender detection result objetc. */ - void accept(const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the headpose detection result. * @param[in] An head pose detection result objetc. 
*/ - void accept(const std::vector&) override; + void accept(const std::vector &) override; - private: +private: std_msgs::msg::Header getHeader(); const std::string topic_name_; std::shared_ptr node_; rclcpp::Publisher::SharedPtr pub_segmented_object_; std::shared_ptr segmented_objects_topic_; rclcpp::Publisher::SharedPtr pub_detected_object_; - std::shared_ptr detected_objects_topic_; + std::shared_ptr detected_objects_topic_; rclcpp::Publisher::SharedPtr pub_face_; std::shared_ptr faces_topic_; rclcpp::Publisher::SharedPtr pub_emotion_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp index 093c9edd..515e1da0 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief A header file with declaration for RvizOutput Class @@ -22,27 +20,30 @@ #ifndef DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ #define DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ +#include +#include #include #include +#include -#include -#include #include "dynamic_vino_lib/outputs/base_output.hpp" #include "dynamic_vino_lib/outputs/image_window_output.hpp" -namespace Outputs { +namespace Outputs +{ /** * @class RvizOutput * @brief This class handles and shows the detection result with rviz. */ -class RvizOutput : public BaseOutput { - public: - explicit RvizOutput(); +class RvizOutput : public BaseOutput +{ +public: + RvizOutput(); /** * @brief Construct frame for rviz * @param[in] A frame. */ - void feedFrame(const cv::Mat&) override; + void feedFrame(const cv::Mat &) override; /** * @brief Show all the contents generated by the accept * functions with rviz. @@ -53,45 +54,39 @@ class RvizOutput : public BaseOutput { * the face detection result. * @param[in] A face detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the object detection result. * @param[in] results A bundle of object detection results. */ - void accept( - const std::vector&) override; - /** - * @brief Generate rviz output content according to - * the object segmentation result. 
- * @param[in] results A bundle of object segmentation results. - */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; + /** + * @brief Generate rviz output content according to + * the object segmentation result. + * @param[in] results A bundle of object segmentation results. + */ + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the emotion detection result. * @param[in] A emotion detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the age and gender detection result. * @param[in] A head pose detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the headpose detection result. * @param[in] An age gender detection result objetc. */ - void accept( - const std::vector&) override; + void accept(const std::vector &) override; - private: +private: std_msgs::msg::Header getHeader(); std::shared_ptr node_; rclcpp::Publisher::SharedPtr pub_image_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp index 19699e4d..44f21279 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of Pipeline class @@ -41,17 +39,17 @@ * the input device, output device and networks and make inference. One pipeline * should have only one input device. */ -class Pipeline { - public: - explicit Pipeline(const std::string& name = "pipeline"); +class Pipeline +{ +public: + explicit Pipeline(const std::string & name = "pipeline"); /** * @brief Add input device to the pipeline. * @param[in] name name of the current input device. * @param[in] input_device the input device instance to be added. * @return whether the add operation is successful */ - bool add(const std::string& name, - std::shared_ptr input_device); + bool add(const std::string & name, std::shared_ptr input_device); /** * @brief Add inference network to the pipeline. 
* @param[in] parent name of the parent device or inference. @@ -59,8 +57,9 @@ class Pipeline { * @param[in] inference the inference instance to be added. * @return whether the add operation is successful */ - bool add(const std::string& parent, const std::string& name, - std::shared_ptr inference); + bool add( + const std::string & parent, const std::string & name, + std::shared_ptr inference); /** * @brief Add output device to the pipeline. * @param[in] parent name of the parent inference. @@ -68,21 +67,20 @@ class Pipeline { * @param[in] output the output instance to be added. * @return whether the add operation is successful */ - bool add(const std::string& parent, const std::string& name, - std::shared_ptr output); + bool add( + const std::string & parent, const std::string & name, + std::shared_ptr output); - bool add(const std::string& name, - std::shared_ptr output); - void addConnect(const std::string& parent, const std::string& name); - bool add(const std::string& name, - std::shared_ptr inference); + bool add(const std::string & name, std::shared_ptr output); + void addConnect(const std::string & parent, const std::string & name); + bool add(const std::string & name, std::shared_ptr inference); /** * @brief Add inference network-output device edge to the pipeline. * @param[in] parent name of the parent inference. * @param[in] name name of the current output device. * @return whether the add operation is successful */ - bool add(const std::string& parent, const std::string& name); + bool add(const std::string & parent, const std::string & name); /** * @brief Do the inference once. * Data flow from input device to inference network, then to output device. @@ -93,7 +91,7 @@ class Pipeline { * pipeline. */ void runService(std::string config_path); - void callback(const std::string& detection_name); + void callback(const std::string & detection_name); /** * @brief Set the inference network to call the callback function as soon as * each inference is @@ -101,14 +99,24 @@ class Pipeline { */ void setCallback(); void printPipeline(); - std::map> getOutputHandle() { return name_to_output_map_; } ; - void setParams(PipelineParams pipeline_params) { + std::map> getOutputHandle() + { + return name_to_output_map_; + } + void setParams(PipelineParams pipeline_params) + { params_ = std::make_shared(pipeline_params); - }; - const std::shared_ptr getParameters() { return params_; }; - std::shared_ptr getInputDevice() { return input_device_; }; + } + const std::shared_ptr getParameters() + { + return params_; + } + std::shared_ptr getInputDevice() + { + return input_device_; + } - private: +private: void initInferenceCounter(); void increaseInferenceCounter(); void decreaseInferenceCounter(); @@ -125,10 +133,8 @@ class Pipeline { std::shared_ptr input_device_; std::string input_device_name_; std::multimap next_; - std::map> - name_to_detection_map_; - std::map> - name_to_output_map_; + std::map> name_to_detection_map_; + std::map> name_to_output_map_; int total_inference_ = 0; std::set output_names_; int width_ = 0; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp index d8aa6e31..da694616 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in 
compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of Pipeline Manager class @@ -21,6 +19,7 @@ #ifndef DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ #define DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ +#include #include #include #include @@ -28,45 +27,47 @@ #include #include #include - -#include +#include #include "dynamic_vino_lib/pipeline.hpp" /** * @class PipelineManager * @brief This class manages the lifecycles of pipelines. */ -class PipelineManager { - public: +class PipelineManager +{ +public: /** * @brief Get the singleton instance of PipelineManager class. * The instance will be created when first call. * @return The reference of PipelineManager instance. */ - static PipelineManager& getInstance() { + static PipelineManager & getInstance() + { static PipelineManager manager_; return manager_; - }; + } - std::shared_ptr createPipeline( - const Params::ParamManager::PipelineParams& params); + std::shared_ptr createPipeline(const Params::ParamManager::PipelineParams & params); - void removePipeline(const std::string& name); - PipelineManager& updatePipeline( - const std::string& name, - const Params::ParamManager::PipelineParams& params); + void removePipeline(const std::string & name); + PipelineManager & updatePipeline( + const std::string & name, + const Params::ParamManager::PipelineParams & params); void runAll(); void stopAll(); void joinAll(); - - enum PipelineState { + + enum PipelineState + { PipelineState_ThreadNotCreated, PipelineState_ThreadStopped, PipelineState_ThreadRunning, PipelineState_Error }; - struct PipelineData { + struct PipelineData + { Params::ParamManager::PipelineParams params; std::shared_ptr pipeline; std::vector> spin_nodes; @@ -74,31 +75,36 @@ class PipelineManager { PipelineState state; }; - std::map getPipelines() { return pipelines_; }; + std::map getPipelines() + { + return pipelines_; + } - private: - PipelineManager(){}; - PipelineManager(PipelineManager const&); - void operator=(PipelineManager const&); - void threadPipeline(const char* name); +private: + PipelineManager() + { + } + PipelineManager(PipelineManager const &); + void operator=(PipelineManager const &); + void threadPipeline(const char * name); std::map> - parseInputDevice(const Params::ParamManager::PipelineParams& params); - std::map> parseOutput( - const Params::ParamManager::PipelineParams& params); + parseInputDevice(const Params::ParamManager::PipelineParams & params); + std::map> + parseOutput(const Params::ParamManager::PipelineParams & params); 
std::map> - parseInference(const Params::ParamManager::PipelineParams& params); - std::shared_ptr createFaceDetection( - const Params::ParamManager::InferenceParams& infer); - std::shared_ptr createAgeGenderRecognition( - const Params::ParamManager::InferenceParams& infer); - std::shared_ptr createEmotionRecognition( - const Params::ParamManager::InferenceParams& infer); - std::shared_ptr createHeadPoseEstimation( - const Params::ParamManager::InferenceParams& infer); - std::shared_ptr createObjectDetection( - const Params::ParamManager::InferenceParams& infer); - std::shared_ptr createObjectSegmentation( - const Params::ParamManager::InferenceParams& infer); + parseInference(const Params::ParamManager::PipelineParams & params); + std::shared_ptr + createFaceDetection(const Params::ParamManager::InferenceParams & infer); + std::shared_ptr + createAgeGenderRecognition(const Params::ParamManager::InferenceParams & infer); + std::shared_ptr + createEmotionRecognition(const Params::ParamManager::InferenceParams & infer); + std::shared_ptr + createHeadPoseEstimation(const Params::ParamManager::InferenceParams & infer); + std::shared_ptr + createObjectDetection(const Params::ParamManager::InferenceParams & infer); + std::shared_ptr + createObjectSegmentation(const Params::ParamManager::InferenceParams & infer); std::map pipelines_; std::map plugins_for_devices_; diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp index 5dfacf6c..e38383cc 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp @@ -1,26 +1,25 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of Pipeline class * @file pipeline_params.hpp */ -#ifndef DYNAMIC_VINO_LIB__PIPELINE_PARAM_HPP_ -#define DYNAMIC_VINO_LIB__PIPELINE_PARAM_HPP_ +#ifndef DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ +#define DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ +#include #include #include #include @@ -29,50 +28,49 @@ #include #include -#include #include "dynamic_vino_lib/inferences/base_inference.hpp" #include "dynamic_vino_lib/inputs/standard_camera.hpp" #include "dynamic_vino_lib/outputs/base_output.hpp" #include "opencv2/opencv.hpp" -extern const std::string kInputType_Image; -extern const std::string kInputType_Video; -extern const std::string kInputType_StandardCamera; -extern const std::string kInputType_CameraTopic; -extern const std::string kInputType_RealSenseCamera; -extern const std::string kInputType_ServiceImage; +const char kInputType_Image[] = "Image"; +const char kInputType_Video[] = "Video"; +const char kInputType_StandardCamera[] = "StandardCamera"; +const char kInputType_CameraTopic[] = "RealSenseCameraTopic"; +const char kInputType_RealSenseCamera[] = "RealSenseCamera"; +const char kInputType_ServiceImage[] = "ServiceImage"; -extern const std::string kOutputTpye_RViz; -extern const std::string kOutputTpye_ImageWindow; -extern const std::string kOutputTpye_RosTopic; -extern const std::string kOutputTpye_RosService; +const char kOutputTpye_RViz[] = "RViz"; +const char kOutputTpye_ImageWindow[] = "ImageWindow"; +const char kOutputTpye_RosTopic[] = "RosTopic"; +const char kOutputTpye_RosService[] = "RosService"; -extern const std::string kInferTpye_FaceDetection; -extern const std::string kInferTpye_AgeGenderRecognition; -extern const std::string kInferTpye_EmotionRecognition; -extern const std::string kInferTpye_HeadPoseEstimation; -extern const std::string kInferTpye_ObjectDetection; -extern const std::string kInferTpye_ObjectSegmentation; +const char kInferTpye_FaceDetection[] = "FaceDetection"; +const char kInferTpye_AgeGenderRecognition[] = "AgeGenderRecognition"; +const char kInferTpye_EmotionRecognition[] = "EmotionRecognition"; +const char kInferTpye_HeadPoseEstimation[] = "HeadPoseEstimation"; +const char kInferTpye_ObjectDetection[] = "ObjectDetection"; +const char kInferTpye_ObjectSegmentation[] = "ObjectSegmentation"; /** * @class PipelineParams * @brief This class is a pipeline parameter management that stores parameters * of a given pipeline */ -class PipelineParams { - public: - PipelineParams(const std::string& name); - PipelineParams(const Params::ParamManager::PipelineParams& params); - static Params::ParamManager::PipelineParams getPipeline( - const std::string& name); - PipelineParams& operator=(const Params::ParamManager::PipelineParams& params); +class PipelineParams +{ +public: + explicit PipelineParams(const std::string & name); + explicit PipelineParams(const Params::ParamManager::PipelineParams & params); + static Params::ParamManager::PipelineParams getPipeline(const std::string & name); + PipelineParams & operator=(const Params::ParamManager::PipelineParams & params); void update(); - void update(const Params::ParamManager::PipelineParams& params); - bool isOutputTo(std::string& name); + void update(const Params::ParamManager::PipelineParams & params); + bool isOutputTo(std::string & name); bool isGetFps(); - private: +private: Params::ParamManager::PipelineParams params_; }; -#endif // DYNAMIC_VINO_LIB__PIPELINE_PARAM_HPP_ +#endif // DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ diff --git 
a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp index e7ffeedb..a157cf7b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp @@ -1,31 +1,27 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef DYNAMIC_VINO_LIB__FRAME_PROCESSING_SERVER_HPP_ -#define DYNAMIC_VINO_LIB__FRAME_PROCESSING_SERVER_HPP_ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#ifndef DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#define DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -34,28 +30,37 @@ #include #include +#include +#include +#include -namespace vino_service { - -class FrameProcessingServer : public rclcpp::Node { +namespace vino_service +{ +class FrameProcessingServer : public rclcpp::Node +{ public: explicit FrameProcessingServer(const std::string service_name, const std::string config_path); private: - void cbFaceDetection(const std::shared_ptr request, - std::shared_ptr response); + void cbFaceDetection( + const std::shared_ptr request, + std::shared_ptr response); - void cbAgeGenderRecognition(const std::shared_ptr request, - std::shared_ptr response); + void cbAgeGenderRecognition( + const std::shared_ptr request, + std::shared_ptr response); - void cbEmotionRecognition(const std::shared_ptr request, - std::shared_ptr response); + void cbEmotionRecognition( + const std::shared_ptr request, + std::shared_ptr response); - void cbHeadPoseRecognition(const std::shared_ptr request, - std::shared_ptr response); + void cbHeadPoseRecognition( + const std::shared_ptr request, + std::shared_ptr response); - void cbObjectDetection(const std::shared_ptr request, - std::shared_ptr response); + void cbObjectDetection( + const std::shared_ptr request, + std::shared_ptr response); rclcpp::Service::SharedPtr face_service_; rclcpp::Service::SharedPtr age_gender_service_; @@ -63,5 +68,5 @@ class FrameProcessingServer : public rclcpp::Node { rclcpp::Service::SharedPtr head_pose_service_; rclcpp::Service::SharedPtr object_service_; }; -} //namespace 
frame_processing_service -#endif // DYNAMIC_VINO_LIB__FRAME_PROCESSING_SERVER_HPP_ +} // namespace vino_service +#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp index 84381342..1d1ab434 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with logging facility for common samples @@ -26,13 +24,16 @@ #include #include -namespace slog { +namespace slog +{ /** * @class LogStreamEndLine * @brief The LogStreamEndLine class implements an end line marker for a log * stream */ -class LogStreamEndLine {}; +class LogStreamEndLine +{ +}; static constexpr LogStreamEndLine endl; @@ -40,18 +41,20 @@ static constexpr LogStreamEndLine endl; * @class LogStream * @brief The LogStream class implements a stream for sample logging */ -class LogStream { +class LogStream +{ std::string _prefix; - std::ostream* _log_stream; + std::ostream * _log_stream; bool _new_line; - public: +public: /** * @brief A constructor. 
Creates an LogStream object * @param prefix The prefix to print */ - LogStream(const std::string& prefix, std::ostream& log_stream) - : _prefix(prefix), _new_line(true) { + LogStream(const std::string & prefix, std::ostream & log_stream) + : _prefix(prefix), _new_line(true) + { _log_stream = &log_stream; } @@ -59,8 +62,9 @@ class LogStream { * @brief A stream output operator to be used within the logger * @param arg Object for serialization in the logger message */ - template - LogStream& operator<<(const T& arg) { + template + LogStream & operator<<(const T & arg) + { if (_new_line) { (*_log_stream) << "[ " << _prefix << " ] "; _new_line = false; @@ -71,7 +75,8 @@ class LogStream { } // Specializing for LogStreamEndLine to support slog::endl - LogStream& operator<<(const LogStreamEndLine& arg) { + LogStream & operator<<(const LogStreamEndLine & arg) + { _new_line = true; (*_log_stream) << std::endl; diff --git a/dynamic_vino_lib/package.xml b/dynamic_vino_lib/package.xml index 9933a199..4d954087 100644 --- a/dynamic_vino_lib/package.xml +++ b/dynamic_vino_lib/package.xml @@ -13,15 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. --> - + dynamic_vino_lib 0.3.0 a ROS2 wrapper package for Intel OpenVINO - Weizhi Liu - Chao Li - Hongkun Chen Weizhi Liu Chao Li + Hongkun Chen Apache License 2.0 ament_cmake diff --git a/dynamic_vino_lib/src/engines/engine.cpp b/dynamic_vino_lib/src/engines/engine.cpp index 18bcf57b..ac78d3de 100644 --- a/dynamic_vino_lib/src/engines/engine.cpp +++ b/dynamic_vino_lib/src/engines/engine.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with definition of Engine class @@ -20,8 +18,9 @@ */ #include "dynamic_vino_lib/engines/engine.hpp" -Engines::Engine::Engine(InferenceEngine::InferencePlugin plg, - const Models::BaseModel::Ptr base_model) { - request_ = (plg.LoadNetwork(base_model->net_reader_->getNetwork(), {})) - .CreateInferRequestPtr(); +Engines::Engine::Engine( + InferenceEngine::InferencePlugin plg, + const Models::BaseModel::Ptr base_model) +{ + request_ = (plg.LoadNetwork(base_model->net_reader_->getNetwork(), {})).CreateInferRequestPtr(); } diff --git a/dynamic_vino_lib/src/factory.cpp b/dynamic_vino_lib/src/factory.cpp index 0a19e509..89c0fc76 100644 --- a/dynamic_vino_lib/src/factory.cpp +++ b/dynamic_vino_lib/src/factory.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of Factory class @@ -28,12 +26,11 @@ #include "dynamic_vino_lib/inputs/video_input.hpp" #include "dynamic_vino_lib/inputs/realsense_camera_topic.hpp" #include "dynamic_vino_lib/inputs/image_input.hpp" - -using namespace InferenceEngine; +#include "inference_engine.hpp" std::shared_ptr Factory::makeInputDeviceByName( - const std::string& input_device_name, const std::string& input_file_path) { - + const std::string & input_device_name, const std::string & input_file_path) +{ std::cout << "InputDvice: " << input_device_name << std::endl; if (input_device_name == "RealSenseCamera") { return std::make_unique(); @@ -51,35 +48,38 @@ std::shared_ptr Factory::makeInputDeviceByName( } } -std::unique_ptr Factory::makePluginByName( - const std::string& device_name, - const std::string& custom_cpu_library_message, // FLAGS_l - const std::string& custom_cldnn_message, // FLAGS_c - bool performance_message) { // FLAGS_pc - InferencePlugin plugin = PluginDispatcher({"../../../lib/intel64", ""}) - .getPluginByDevice(device_name); +std::unique_ptr +Factory::makePluginByName( + const std::string & device_name, + const std::string & custom_cpu_library_message, // FLAGS_l + const std::string & custom_cldnn_message, // FLAGS_c + bool performance_message) +{ // FLAGS_pc + InferenceEngine::InferencePlugin plugin = + InferenceEngine::PluginDispatcher({"../../../lib/intel64", ""}) + .getPluginByDevice(device_name); /** Printing plugin version **/ printPluginVersion(plugin, std::cout); /** Load extensions for the CPU plugin **/ if ((device_name.find("CPU") != std::string::npos)) { - plugin.AddExtension(std::make_shared()); + plugin.AddExtension(std::make_shared()); if (!custom_cpu_library_message.empty()) { // CPU(MKLDNN) extensions are loaded as a shared library and passed as a // pointer to base // extension - auto extension_ptr = make_so_pointer( - custom_cpu_library_message); + auto extension_ptr = + InferenceEngine::make_so_pointer(custom_cpu_library_message); plugin.AddExtension(extension_ptr); } } else if (!custom_cldnn_message.empty()) { // Load Extensions for other plugins not CPU plugin.SetConfig( - {{PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message}}); + { {InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, custom_cldnn_message}}); } if (performance_message) { - plugin.SetConfig( - {{PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES}}); + plugin.SetConfig({ {InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, + InferenceEngine::PluginConfigParams::YES}}); } - return std::make_unique( - InferenceEngine::InferenceEnginePluginPtr(plugin)); + return std::make_unique( + InferenceEngine::InferenceEnginePluginPtr(plugin)); } diff --git a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp index f74e3260..a463ada5 100644 --- a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp +++ b/dynamic_vino_lib/src/inferences/age_gender_detection.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of AgeGenderResult class @@ -26,70 +24,84 @@ #include "dynamic_vino_lib/outputs/base_output.hpp" // AgeGenderResult -dynamic_vino_lib::AgeGenderResult::AgeGenderResult(const cv::Rect& location) - : Result(location){} +dynamic_vino_lib::AgeGenderResult::AgeGenderResult(const cv::Rect & location) +: Result(location) +{ +} // AgeGender Detection dynamic_vino_lib::AgeGenderDetection::AgeGenderDetection() - : dynamic_vino_lib::BaseInference(){} +: dynamic_vino_lib::BaseInference() +{ +} dynamic_vino_lib::AgeGenderDetection::~AgeGenderDetection() = default; void dynamic_vino_lib::AgeGenderDetection::loadNetwork( - std::shared_ptr network) { + std::shared_ptr network) +{ valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } bool dynamic_vino_lib::AgeGenderDetection::enqueue( - const cv::Mat& frame, const cv::Rect& input_frame_loc) { + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ if (getEnqueuedNum() == 0) { results_.clear(); } bool succeed = dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, getResultsLength(), - valid_model_->getInputName()); - if (!succeed) return false; + frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName()); + if (!succeed) { + return false; + } Result r(input_frame_loc); results_.emplace_back(r); return true; } -bool dynamic_vino_lib::AgeGenderDetection::submitRequest() { +bool dynamic_vino_lib::AgeGenderDetection::submitRequest() +{ return dynamic_vino_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::AgeGenderDetection::fetchResults() { +bool dynamic_vino_lib::AgeGenderDetection::fetchResults() +{ bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; + if (!can_fetch) { + return false; + } auto request = getEngine()->getRequest(); - InferenceEngine::Blob::Ptr genderBlob = - request->GetBlob(valid_model_->getOutputGenderName()); - InferenceEngine::Blob::Ptr ageBlob = - request->GetBlob(valid_model_->getOutputAgeName()); + InferenceEngine::Blob::Ptr genderBlob = request->GetBlob(valid_model_->getOutputGenderName()); + InferenceEngine::Blob::Ptr ageBlob = request->GetBlob(valid_model_->getOutputAgeName()); for (int i = 0; i < results_.size(); ++i) { - results_[i].age_ = ageBlob->buffer().as()[i] * 100; - results_[i].male_prob_ = genderBlob->buffer().as()[i * 2 + 1]; + results_[i].age_ = ageBlob->buffer().as()[i] * 100; + results_[i].male_prob_ = genderBlob->buffer().as()[i * 2 + 1]; } return true; } -const int dynamic_vino_lib::AgeGenderDetection::getResultsLength() const { +const int dynamic_vino_lib::AgeGenderDetection::getResultsLength() const +{ return static_cast(results_.size()); } -const dynamic_vino_lib::Result* -dynamic_vino_lib::AgeGenderDetection::getLocationResult(int idx) const { 
+const dynamic_vino_lib::Result * +dynamic_vino_lib::AgeGenderDetection::getLocationResult(int idx) const +{ return &(results_[idx]); } -const std::string dynamic_vino_lib::AgeGenderDetection::getName() const { +const std::string dynamic_vino_lib::AgeGenderDetection::getName() const +{ return valid_model_->getModelName(); } const void dynamic_vino_lib::AgeGenderDetection::observeOutput( - const std::shared_ptr& output) { + const std::shared_ptr & output) +{ if (output != nullptr) { output->accept(results_); } diff --git a/dynamic_vino_lib/src/inferences/base_inference.cpp b/dynamic_vino_lib/src/inferences/base_inference.cpp index e0bbc5b4..7e3a1d3f 100644 --- a/dynamic_vino_lib/src/inferences/base_inference.cpp +++ b/dynamic_vino_lib/src/inferences/base_inference.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of BaseInference class @@ -24,7 +22,8 @@ #include "dynamic_vino_lib/inferences/base_inference.hpp" // Result -dynamic_vino_lib::Result::Result(const cv::Rect& location) { +dynamic_vino_lib::Result::Result(const cv::Rect & location) +{ location_ = location; } @@ -33,33 +32,44 @@ dynamic_vino_lib::BaseInference::BaseInference() = default; dynamic_vino_lib::BaseInference::~BaseInference() = default; -void dynamic_vino_lib::BaseInference::loadEngine( - const std::shared_ptr engine) { +void dynamic_vino_lib::BaseInference::loadEngine(const std::shared_ptr engine) +{ engine_ = engine; } -bool dynamic_vino_lib::BaseInference::submitRequest() { - if (engine_->getRequest() == nullptr) return false; - if (!enqueued_frames) return false; +bool dynamic_vino_lib::BaseInference::submitRequest() +{ + if (engine_->getRequest() == nullptr) { + return false; + } + if (!enqueued_frames) { + return false; + } enqueued_frames = 0; results_fetched_ = false; engine_->getRequest()->StartAsync(); return true; } -bool dynamic_vino_lib::BaseInference::SynchronousRequest() { - - if (engine_->getRequest() == nullptr) return false; - if (!enqueued_frames) return false; +bool dynamic_vino_lib::BaseInference::SynchronousRequest() +{ + if (engine_->getRequest() == nullptr) { + return false; + } + if (!enqueued_frames) { + return false; + } enqueued_frames = 0; results_fetched_ = false; engine_->getRequest()->Infer(); return true; } -bool dynamic_vino_lib::BaseInference::fetchResults() { - if (results_fetched_) return false; +bool dynamic_vino_lib::BaseInference::fetchResults() +{ + if (results_fetched_) { + return false; + } results_fetched_ = true; return true; } - diff --git a/dynamic_vino_lib/src/inferences/emotions_detection.cpp b/dynamic_vino_lib/src/inferences/emotions_detection.cpp index b5b424f8..2fcb114c 100644 --- a/dynamic_vino_lib/src/inferences/emotions_detection.cpp +++ b/dynamic_vino_lib/src/inferences/emotions_detection.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of EmotionsDetection class and @@ -28,29 +26,35 @@ #include "dynamic_vino_lib/slog.hpp" // EmotionsResult -dynamic_vino_lib::EmotionsResult::EmotionsResult(const cv::Rect& location) - : Result(location) {} +dynamic_vino_lib::EmotionsResult::EmotionsResult(const cv::Rect & location) +: Result(location) +{ +} // Emotions Detection dynamic_vino_lib::EmotionsDetection::EmotionsDetection() - : dynamic_vino_lib::BaseInference(){} +: dynamic_vino_lib::BaseInference() +{ +} dynamic_vino_lib::EmotionsDetection::~EmotionsDetection() = default; void dynamic_vino_lib::EmotionsDetection::loadNetwork( - const std::shared_ptr network) { + const std::shared_ptr network) +{ valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } bool dynamic_vino_lib::EmotionsDetection::enqueue( - const cv::Mat& frame, const cv::Rect& input_frame_loc) { + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ if (getEnqueuedNum() == 0) { results_.clear(); } bool succeed = dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, getResultsLength(), - valid_model_->getInputName()); + frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName()); if (!succeed) { slog::err << "Failed enqueue Emotion frame." << slog::endl; // TODO(weizhi): throw an error here @@ -61,61 +65,66 @@ bool dynamic_vino_lib::EmotionsDetection::enqueue( return true; } -bool dynamic_vino_lib::EmotionsDetection::submitRequest() { +bool dynamic_vino_lib::EmotionsDetection::submitRequest() +{ return dynamic_vino_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::EmotionsDetection::fetchResults() { +bool dynamic_vino_lib::EmotionsDetection::fetchResults() +{ bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; + if (!can_fetch) { + return false; + } int label_length = static_cast(valid_model_->getLabels().size()); std::string output_name = valid_model_->getOutputName(); - InferenceEngine::Blob::Ptr emotions_blob = - getEngine()->getRequest()->GetBlob(output_name); + InferenceEngine::Blob::Ptr emotions_blob = getEngine()->getRequest()->GetBlob(output_name); /** emotions vector must have the same size as number of channels in model output. 
Default output format is NCHW so we check index 1 */ int64 num_of_channels = emotions_blob->getTensorDesc().getDims().at(1); if (num_of_channels != label_length) { - slog::err << "Output size (" << num_of_channels - << ") of the Emotions Recognition network is not equal " - << "to used emotions vector size (" << label_length << ")" - << slog::endl; + slog::err << "Output size (" << num_of_channels << + ") of the Emotions Recognition network is not equal " << + "to used emotions vector size (" << label_length << ")" << slog::endl; throw std::logic_error("Output size (" + std::to_string(num_of_channels) + - ") of the Emotions Recognition network is not equal " - "to used emotions vector size (" + - std::to_string(label_length) + ")"); + ") of the Emotions Recognition network is not equal " + "to used emotions vector size (" + + std::to_string(label_length) + ")"); } /** we identify an index of the most probable emotion in output array for idx image to return appropriate emotion name */ - auto emotions_values = emotions_blob->buffer().as(); + auto emotions_values = emotions_blob->buffer().as(); for (int idx = 0; idx < results_.size(); ++idx) { - auto output_idx_pos = emotions_values + label_length*idx; + auto output_idx_pos = emotions_values + label_length * idx; int64 max_prob_emotion_idx = - std::max_element(output_idx_pos, output_idx_pos + label_length) - - output_idx_pos; + std::max_element(output_idx_pos, output_idx_pos + label_length) - output_idx_pos; results_[idx].label_ = valid_model_->getLabels()[max_prob_emotion_idx]; } return true; } -const int dynamic_vino_lib::EmotionsDetection::getResultsLength() const { +const int dynamic_vino_lib::EmotionsDetection::getResultsLength() const +{ return static_cast(results_.size()); } -const dynamic_vino_lib::Result* -dynamic_vino_lib::EmotionsDetection::getLocationResult(int idx) const { +const dynamic_vino_lib::Result * +dynamic_vino_lib::EmotionsDetection::getLocationResult(int idx) const +{ return &(results_[idx]); } -const std::string dynamic_vino_lib::EmotionsDetection::getName() const { +const std::string dynamic_vino_lib::EmotionsDetection::getName() const +{ return valid_model_->getModelName(); } const void dynamic_vino_lib::EmotionsDetection::observeOutput( - const std::shared_ptr& output) { + const std::shared_ptr & output) +{ if (output != nullptr) { output->accept(results_); } diff --git a/dynamic_vino_lib/src/inferences/face_detection.cpp b/dynamic_vino_lib/src/inferences/face_detection.cpp index cc2baae8..686dc122 100644 --- a/dynamic_vino_lib/src/inferences/face_detection.cpp +++ b/dynamic_vino_lib/src/inferences/face_detection.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of FaceDetection class and @@ -29,11 +27,13 @@ #include "dynamic_vino_lib/slog.hpp" // FaceDetectionResult -dynamic_vino_lib::FaceDetectionResult::FaceDetectionResult( - const cv::Rect& location) - : ObjectDetectionResult(location){} +dynamic_vino_lib::FaceDetectionResult::FaceDetectionResult(const cv::Rect & location) +: ObjectDetectionResult(location) +{ +} // FaceDetection dynamic_vino_lib::FaceDetection::FaceDetection(double show_output_thresh) - : ObjectDetection(show_output_thresh){} - +: ObjectDetection(show_output_thresh) +{ +} diff --git a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp index f0765c09..c9da5cce 100644 --- a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp +++ b/dynamic_vino_lib/src/inferences/head_pose_detection.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
 /**
  * @brief a header file with declaration of HeadPoseDetection class and
@@ -26,73 +24,86 @@
 #include "dynamic_vino_lib/outputs/base_output.hpp"
 // HeadPoseResult
-dynamic_vino_lib::HeadPoseResult::HeadPoseResult(const cv::Rect& location)
-    : Result(location){}
+dynamic_vino_lib::HeadPoseResult::HeadPoseResult(const cv::Rect & location)
+: Result(location)
+{
+}
 // Head Pose Detection
 dynamic_vino_lib::HeadPoseDetection::HeadPoseDetection()
-    : dynamic_vino_lib::BaseInference(){}
+: dynamic_vino_lib::BaseInference()
+{
+}
 dynamic_vino_lib::HeadPoseDetection::~HeadPoseDetection() = default;
 void dynamic_vino_lib::HeadPoseDetection::loadNetwork(
-    std::shared_ptr<Models::HeadPoseDetectionModel> network) {
+  std::shared_ptr<Models::HeadPoseDetectionModel> network)
+{
   valid_model_ = network;
   setMaxBatchSize(network->getMaxBatchSize());
 }
 bool dynamic_vino_lib::HeadPoseDetection::enqueue(
-    const cv::Mat& frame, const cv::Rect& input_frame_loc) {
+  const cv::Mat & frame,
+  const cv::Rect & input_frame_loc)
+{
   if (getEnqueuedNum() == 0) {
     results_.clear();
   }
   bool succeed = dynamic_vino_lib::BaseInference::enqueue(
-      frame, input_frame_loc, 1, getResultsLength(),
-      valid_model_->getInputName());
-  if (!succeed) return false;
+    frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName());
+  if (!succeed) {
+    return false;
+  }
   Result r(input_frame_loc);
   results_.emplace_back(r);
   return true;
 }
-bool dynamic_vino_lib::HeadPoseDetection::submitRequest() {
+bool dynamic_vino_lib::HeadPoseDetection::submitRequest()
+{
   return dynamic_vino_lib::BaseInference::submitRequest();
 }
-bool dynamic_vino_lib::HeadPoseDetection::fetchResults() {
+bool dynamic_vino_lib::HeadPoseDetection::fetchResults()
+{
   bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults();
-  if (!can_fetch) return false;
+  if (!can_fetch) {
+    return false;
+  }
   auto request = getEngine()->getRequest();
-  InferenceEngine::Blob::Ptr angle_r =
-      request->GetBlob(valid_model_->getOutputOutputAngleR());
-  InferenceEngine::Blob::Ptr angle_p =
-      request->GetBlob(valid_model_->getOutputOutputAngleP());
-  InferenceEngine::Blob::Ptr angle_y =
-      request->GetBlob(valid_model_->getOutputOutputAngleY());
+  InferenceEngine::Blob::Ptr angle_r = request->GetBlob(valid_model_->getOutputOutputAngleR());
+  InferenceEngine::Blob::Ptr angle_p = request->GetBlob(valid_model_->getOutputOutputAngleP());
+  InferenceEngine::Blob::Ptr angle_y = request->GetBlob(valid_model_->getOutputOutputAngleY());
   for (int i = 0; i < getResultsLength(); ++i) {
-    results_[i].angle_r_ = angle_r->buffer().as<float*>()[i];
-    results_[i].angle_p_ = angle_p->buffer().as<float*>()[i];
-    results_[i].angle_y_ = angle_y->buffer().as<float*>()[i];
+    results_[i].angle_r_ = angle_r->buffer().as<float *>()[i];
+    results_[i].angle_p_ = angle_p->buffer().as<float *>()[i];
+    results_[i].angle_y_ = angle_y->buffer().as<float *>()[i];
   }
   return true;
 }
-const int dynamic_vino_lib::HeadPoseDetection::getResultsLength() const {
+const int dynamic_vino_lib::HeadPoseDetection::getResultsLength() const
+{
   return static_cast<int>(results_.size());
 }
-const dynamic_vino_lib::Result*
-dynamic_vino_lib::HeadPoseDetection::getLocationResult(int idx) const {
+const dynamic_vino_lib::Result *
+dynamic_vino_lib::HeadPoseDetection::getLocationResult(int idx) const
+{
   return &(results_[idx]);
 }
-const std::string dynamic_vino_lib::HeadPoseDetection::getName() const {
+const std::string dynamic_vino_lib::HeadPoseDetection::getName() const
+{
   return valid_model_->getModelName();
 }
 const void dynamic_vino_lib::HeadPoseDetection::observeOutput(
-    const std::shared_ptr<Outputs::BaseOutput>& output) {
+  const std::shared_ptr<Outputs::BaseOutput> & output)
+{ if (output != nullptr) { output->accept(results_); } diff --git a/dynamic_vino_lib/src/inferences/object_detection.cpp b/dynamic_vino_lib/src/inferences/object_detection.cpp index f631561d..8ac51404 100644 --- a/dynamic_vino_lib/src/inferences/object_detection.cpp +++ b/dynamic_vino_lib/src/inferences/object_detection.cpp @@ -1,18 +1,17 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * @brief a header file with declaration of ObjectDetection class and * ObjectDetectionResult class @@ -25,27 +24,33 @@ #include "dynamic_vino_lib/outputs/base_output.hpp" #include "dynamic_vino_lib/slog.hpp" // ObjectDetectionResult -dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult( - const cv::Rect& location) - : Result(location){} +dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult(const cv::Rect & location) +: Result(location) +{ +} // ObjectDetection dynamic_vino_lib::ObjectDetection::ObjectDetection(double show_output_thresh) - : show_output_thresh_(show_output_thresh), - dynamic_vino_lib::BaseInference(){} +: show_output_thresh_(show_output_thresh), dynamic_vino_lib::BaseInference() +{ +} dynamic_vino_lib::ObjectDetection::~ObjectDetection() = default; void dynamic_vino_lib::ObjectDetection::loadNetwork( - const std::shared_ptr network) { + const std::shared_ptr network) +{ valid_model_ = network; max_proposal_count_ = network->getMaxProposalCount(); object_size_ = network->getObjectSize(); setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::ObjectDetection::enqueue(const cv::Mat& frame, - const cv::Rect& input_frame_loc) { +bool dynamic_vino_lib::ObjectDetection::enqueue( + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ width_ = frame.cols; height_ = frame.rows; - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) + { return false; } Result r(input_frame_loc); @@ -54,45 +59,51 @@ bool dynamic_vino_lib::ObjectDetection::enqueue(const cv::Mat& frame, return true; } -bool dynamic_vino_lib::ObjectDetection::submitRequest() { +bool dynamic_vino_lib::ObjectDetection::submitRequest() +{ return dynamic_vino_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::ObjectDetection::fetchResults() { +bool 
dynamic_vino_lib::ObjectDetection::fetchResults() +{ bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; + if (!can_fetch) { + return false; + } bool found_result = false; results_.clear(); InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); std::string output = valid_model_->getOutputName(); - const float* detections = request->GetBlob(output)->buffer().as(); + const float * detections = request->GetBlob(output)->buffer().as(); for (int i = 0; i < max_proposal_count_; i++) { float image_id = detections[i * object_size_ + 0]; cv::Rect r; auto label_num = static_cast(detections[i * object_size_ + 1]); - std::vector& labels = valid_model_->getLabels(); + std::vector & labels = valid_model_->getLabels(); r.x = static_cast(detections[i * object_size_ + 3] * width_); - if (r.x < 0) + if (r.x < 0) { r.x = 0; + } r.y = static_cast(detections[i * object_size_ + 4] * height_); - if (r.y < 0) + if (r.y < 0) { r.y = 0; + } r.width = static_cast(detections[i * object_size_ + 5] * width_ - r.x); - if (r.width < 0) + if (r.width < 0) { r.width = 0; + } - r.height = - static_cast(detections[i * object_size_ + 6] * height_ - r.y); - if (r.height < 0) + r.height = static_cast(detections[i * object_size_ + 6] * height_ - r.y); + if (r.height < 0) { r.height = 0; + } Result result(r); - result.label_ = label_num < labels.size() - ? labels[label_num] - : std::string("label #") + std::to_string(label_num); + result.label_ = label_num < labels.size() ? labels[label_num] : + std::string("label #") + std::to_string(label_num); result.confidence_ = detections[i * object_size_ + 2]; if (result.confidence_ <= show_output_thresh_) { continue; @@ -103,22 +114,28 @@ bool dynamic_vino_lib::ObjectDetection::fetchResults() { found_result = true; results_.emplace_back(result); } - if (!found_result) results_.clear(); + if (!found_result) { + results_.clear(); + } return true; } -const int dynamic_vino_lib::ObjectDetection::getResultsLength() const { +const int dynamic_vino_lib::ObjectDetection::getResultsLength() const +{ return static_cast(results_.size()); } -const dynamic_vino_lib::Result* -dynamic_vino_lib::ObjectDetection::getLocationResult(int idx) const { +const dynamic_vino_lib::Result * dynamic_vino_lib::ObjectDetection::getLocationResult(int idx) const +{ return &(results_[idx]); } -const std::string dynamic_vino_lib::ObjectDetection::getName() const { +const std::string dynamic_vino_lib::ObjectDetection::getName() const +{ return valid_model_->getModelName(); } -const void dynamic_vino_lib::ObjectDetection::observeOutput( - const std::shared_ptr& output) { +const void +dynamic_vino_lib::ObjectDetection::observeOutput( + const std::shared_ptr & output) +{ if (output != nullptr) { output->accept(results_); } diff --git a/dynamic_vino_lib/src/inferences/object_segmentation.cpp b/dynamic_vino_lib/src/inferences/object_segmentation.cpp index 21d2e356..d86ce9f4 100644 --- a/dynamic_vino_lib/src/inferences/object_segmentation.cpp +++ b/dynamic_vino_lib/src/inferences/object_segmentation.cpp @@ -1,18 +1,17 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * @brief a header file with declaration of ObjectSegmentation class and * ObjectSegmentationResult class @@ -28,31 +27,37 @@ #include "dynamic_vino_lib/slog.hpp" // ObjectSegmentationResult -dynamic_vino_lib::ObjectSegmentationResult::ObjectSegmentationResult( - const cv::Rect& location) - : Result(location){} +dynamic_vino_lib::ObjectSegmentationResult::ObjectSegmentationResult(const cv::Rect & location) +: Result(location) +{ +} // ObjectSegmentation dynamic_vino_lib::ObjectSegmentation::ObjectSegmentation(double show_output_thresh) - : show_output_thresh_(show_output_thresh), - dynamic_vino_lib::BaseInference(){} +: show_output_thresh_(show_output_thresh), dynamic_vino_lib::BaseInference() +{ +} dynamic_vino_lib::ObjectSegmentation::~ObjectSegmentation() = default; void dynamic_vino_lib::ObjectSegmentation::loadNetwork( - const std::shared_ptr network) { + const std::shared_ptr network) +{ valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::ObjectSegmentation::enqueue(const cv::Mat& frame, - const cv::Rect& input_frame_loc) { +bool dynamic_vino_lib::ObjectSegmentation::enqueue( + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ if (width_ == 0 && height_ == 0) { width_ = frame.cols; height_ = frame.rows; } - if (!dynamic_vino_lib::BaseInference::enqueue( - frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { + if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) + { return false; } Result r(input_frame_loc); @@ -61,22 +66,26 @@ bool dynamic_vino_lib::ObjectSegmentation::enqueue(const cv::Mat& frame, return true; } -bool dynamic_vino_lib::ObjectSegmentation::submitRequest() { +bool dynamic_vino_lib::ObjectSegmentation::submitRequest() +{ return dynamic_vino_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::ObjectSegmentation::fetchResults() { +bool dynamic_vino_lib::ObjectSegmentation::fetchResults() +{ bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); - if (!can_fetch) return false; + if (!can_fetch) { + return false; + } bool found_result = false; results_.clear(); InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); std::string detection_output = valid_model_->getDetectionOutputName(); std::string mask_output = valid_model_->getMaskOutputName(); const auto do_blob = request->GetBlob(detection_output.c_str()); - const auto do_data = do_blob->buffer().as(); + const auto do_data = 
do_blob->buffer().as(); const auto masks_blob = request->GetBlob(mask_output.c_str()); - const auto masks_data = masks_blob->buffer().as(); + const auto masks_data = masks_blob->buffer().as(); // amount of elements in each detected box description (batch, label, prob, x1, y1, x2, y2) size_t box_num = masks_blob->dims().at(3); size_t label_num = masks_blob->dims().at(2); @@ -85,54 +94,60 @@ bool dynamic_vino_lib::ObjectSegmentation::fetchResults() { size_t W = masks_blob->dims().at(0); size_t box_stride = W * H * label_num; for (size_t box = 0; box < box_num; ++box) { - float* box_info = do_data + box * box_description_size; - float batch = box_info[0]; - if (batch < 0) break; - float prob = box_info[2]; - if (prob > show_output_thresh_) { - float x1 = std::min(std::max(0.0f, box_info[3] * width_), static_cast(width_)); - float y1 = std::min(std::max(0.0f, box_info[4] * height_), static_cast(height_)); - float x2 = std::min(std::max(0.0f, box_info[5] * width_), static_cast(width_)); - float y2 = std::min(std::max(0.0f, box_info[6] * height_), static_cast(height_)); - int box_width = std::min(static_cast(std::max(0.0f, x2 - x1)), width_); - int box_height = std::min(static_cast(std::max(0.0f, y2 - y1)), height_); - int class_id = static_cast(box_info[1] + 1e-6f); - float* mask_arr = masks_data + box_stride * box + H * W * (class_id - 1); - cv::Mat mask_mat(H, W, CV_32FC1, mask_arr); - cv::Rect roi = cv::Rect( - static_cast(x1), static_cast(y1), box_width, box_height); - cv::Mat resized_mask_mat(box_height, box_width, CV_32FC1); - cv::resize(mask_mat, resized_mask_mat, cv::Size(box_width, box_height)); - Result result(roi); - result.confidence_ = prob; - std::vector& labels = valid_model_->getLabels(); - result.label_ = class_id < labels.size() - ? labels[class_id] - : std::string("label #") + std::to_string(class_id); - result.mask_ = resized_mask_mat; - found_result = true; - results_.emplace_back(result); - } + float * box_info = do_data + box * box_description_size; + float batch = box_info[0]; + if (batch < 0) { + break; + } + float prob = box_info[2]; + if (prob > show_output_thresh_) { + float x1 = std::min(std::max(0.0f, box_info[3] * width_), static_cast(width_)); + float y1 = std::min(std::max(0.0f, box_info[4] * height_), static_cast(height_)); + float x2 = std::min(std::max(0.0f, box_info[5] * width_), static_cast(width_)); + float y2 = std::min(std::max(0.0f, box_info[6] * height_), static_cast(height_)); + int box_width = std::min(static_cast(std::max(0.0f, x2 - x1)), width_); + int box_height = std::min(static_cast(std::max(0.0f, y2 - y1)), height_); + int class_id = static_cast(box_info[1] + 1e-6f); + float * mask_arr = masks_data + box_stride * box + H * W * (class_id - 1); + cv::Mat mask_mat(H, W, CV_32FC1, mask_arr); + cv::Rect roi = cv::Rect(static_cast(x1), static_cast(y1), box_width, box_height); + cv::Mat resized_mask_mat(box_height, box_width, CV_32FC1); + cv::resize(mask_mat, resized_mask_mat, cv::Size(box_width, box_height)); + Result result(roi); + result.confidence_ = prob; + std::vector & labels = valid_model_->getLabels(); + result.label_ = class_id < labels.size() ? 
labels[class_id] : + std::string("label #") + std::to_string(class_id); + result.mask_ = resized_mask_mat; + found_result = true; + results_.emplace_back(result); + } + } + if (!found_result) { + results_.clear(); } - if (!found_result) results_.clear(); return true; } -const int dynamic_vino_lib::ObjectSegmentation::getResultsLength() const { +const int dynamic_vino_lib::ObjectSegmentation::getResultsLength() const +{ return static_cast(results_.size()); } -const dynamic_vino_lib::Result* -dynamic_vino_lib::ObjectSegmentation::getLocationResult(int idx) const { +const dynamic_vino_lib::Result * +dynamic_vino_lib::ObjectSegmentation::getLocationResult(int idx) const +{ return &(results_[idx]); } -const std::string dynamic_vino_lib::ObjectSegmentation::getName() const { +const std::string dynamic_vino_lib::ObjectSegmentation::getName() const +{ return valid_model_->getModelName(); } const void dynamic_vino_lib::ObjectSegmentation::observeOutput( - const std::shared_ptr& output) { + const std::shared_ptr & output) +{ if (output != nullptr) { output->accept(results_); } diff --git a/dynamic_vino_lib/src/inputs/image_input.cpp b/dynamic_vino_lib/src/inputs/image_input.cpp index da7f32a2..1b4e7609 100644 --- a/dynamic_vino_lib/src/inputs/image_input.cpp +++ b/dynamic_vino_lib/src/inputs/image_input.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of Image class @@ -22,9 +20,13 @@ #include #include "dynamic_vino_lib/inputs/image_input.hpp" -Input::Image::Image(const std::string& file) { file_.assign(file); } +Input::Image::Image(const std::string & file) +{ + file_.assign(file); +} -bool Input::Image::initialize() { +bool Input::Image::initialize() +{ setFrameID("image_frame"); image_ = cv::imread(file_); if (image_.data != NULL) { @@ -37,7 +39,8 @@ bool Input::Image::initialize() { return isInit(); } -bool Input::Image::read(cv::Mat* frame) { +bool Input::Image::read(cv::Mat * frame) +{ if (!isInit()) { return false; } @@ -45,8 +48,8 @@ bool Input::Image::read(cv::Mat* frame) { return true; } -bool Input::Image::readService(cv::Mat* frame, std::string config_path) { - +bool Input::Image::readService(cv::Mat * frame, std::string config_path) +{ image_ = cv::imread(config_path); if (image_.data != NULL) { setInitStatus(true); @@ -62,6 +65,7 @@ bool Input::Image::readService(cv::Mat* frame, std::string config_path) { return true; } -void Input::Image::config() { +void Input::Image::config() +{ // TODO(weizhi): config } diff --git a/dynamic_vino_lib/src/inputs/image_service_input.cpp b/dynamic_vino_lib/src/inputs/image_service_input.cpp index 23dfc667..de5700e5 100644 --- a/dynamic_vino_lib/src/inputs/image_service_input.cpp +++ b/dynamic_vino_lib/src/inputs/image_service_input.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of Image class @@ -23,9 +21,13 @@ #include "dynamic_vino_lib/inputs/image_service_input.hpp" // Image -Input::ServiceImage::ServiceImage(const std::string& file) { file_.assign(file); } +Input::ServiceImage::ServiceImage(const std::string & file) +{ + file_.assign(file); +} -bool Input::ServiceImage::initialize() { +bool Input::ServiceImage::initialize() +{ image_ = cv::imread(file_); if (image_.data != NULL) { setInitStatus(true); @@ -37,7 +39,8 @@ bool Input::ServiceImage::initialize() { return isInit(); } -bool Input::ServiceImage::read(cv::Mat* frame) { +bool Input::ServiceImage::read(cv::Mat * frame) +{ if (!isInit()) { return false; } @@ -48,6 +51,7 @@ bool Input::ServiceImage::read(cv::Mat* frame) { return true; } -void Input::ServiceImage::config() { +void Input::ServiceImage::config() +{ // TODO(weizhi): config } diff --git a/dynamic_vino_lib/src/inputs/realsense_camera.cpp b/dynamic_vino_lib/src/inputs/realsense_camera.cpp index b2e82a27..85db27bc 100644 --- a/dynamic_vino_lib/src/inputs/realsense_camera.cpp +++ b/dynamic_vino_lib/src/inputs/realsense_camera.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of RealSenseCamera class @@ -23,7 +21,8 @@ #include "dynamic_vino_lib/slog.hpp" // RealSenseCamera -bool Input::RealSenseCamera::initialize() { +bool Input::RealSenseCamera::initialize() +{ setFrameID("realsense_camera_frame"); cfg_.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_BGR8, 30); setInitStatus(pipe_.start(cfg_)); @@ -46,15 +45,15 @@ bool Input::RealSenseCamera::initialize() { } return true; } -bool Input::RealSenseCamera::initialize(size_t width, size_t height) { +bool Input::RealSenseCamera::initialize(size_t width, size_t height) +{ setFrameID("realsense_camera_frame"); if (3 * width != 4 * height) { - slog::err << "The aspect ratio must be 4:3 when using RealSense camera" - << slog::endl; + slog::err << "The aspect ratio must be 4:3 when using RealSense camera" << slog::endl; return false; } - cfg_.enable_stream(RS2_STREAM_COLOR, static_cast(width), - static_cast(height), RS2_FORMAT_BGR8, 30); + cfg_.enable_stream(RS2_STREAM_COLOR, static_cast(width), static_cast(height), + RS2_FORMAT_BGR8, 30); setInitStatus(pipe_.start(cfg_)); setWidth(width); setHeight(height); @@ -75,12 +74,12 @@ bool Input::RealSenseCamera::initialize(size_t width, size_t height) { } return true; } -bool Input::RealSenseCamera::read(cv::Mat* frame) { +bool Input::RealSenseCamera::read(cv::Mat * frame) +{ if (!isInit()) { return false; } - rs2::frameset data = - pipe_.wait_for_frames(); // Wait for next set of frames from the camera + rs2::frameset data = pipe_.wait_for_frames(); // Wait for next set of frames from the camera rs2::frame color_frame; try { color_frame = data.get_color_frame(); @@ -88,11 +87,11 @@ bool Input::RealSenseCamera::read(cv::Mat* frame) { return false; } cv::Mat(cv::Size(static_cast(getWidth()), static_cast(getHeight())), CV_8UC3, - (void*)color_frame.get_data(), cv::Mat::AUTO_STEP) - .copyTo(*frame); + const_cast(color_frame.get_data()), cv::Mat::AUTO_STEP) + .copyTo(*frame); return true; } -void Input::RealSenseCamera::config() { +void Input::RealSenseCamera::config() +{ // TODO(weizhi): config } - diff --git a/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp b/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp index 27c56834..e3129c6a 100644 --- a/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp +++ b/dynamic_vino_lib/src/inputs/realsense_camera_topic.cpp @@ -1,67 +1,69 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of RealSenseCamera class * @file realsense_camera_topic.cpp */ +#include +#include #include "dynamic_vino_lib/inputs/realsense_camera_topic.hpp" #include "dynamic_vino_lib/slog.hpp" -#include - #define INPUT_TOPIC "/openvino_toolkit/image_raw" -Input::RealSenseCameraTopic::RealSenseCameraTopic() : Node("realsense_topic") { - +Input::RealSenseCameraTopic::RealSenseCameraTopic() +: Node("realsense_topic") +{ } -bool Input::RealSenseCameraTopic::initialize() { +bool Input::RealSenseCameraTopic::initialize() +{ slog::info << "before cameraTOpic init" << slog::endl; std::shared_ptr node(this); setHandler(node); sub_ = this->create_subscription( - "/openvino_toolkit/image_raw", - std::bind(&RealSenseCameraTopic::cb, this, std::placeholders::_1)); + "/openvino_toolkit/image_raw", + std::bind(&RealSenseCameraTopic::cb, this, std::placeholders::_1)); image_count = 0; return true; } -void Input::RealSenseCameraTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg) { +void Input::RealSenseCameraTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg) +{ slog::info << "Receiving a new image from Camera topic." << slog::endl; setFrameID(image_msg->header.frame_id); image = cv_bridge::toCvCopy(image_msg, "bgr8")->image; ++image_count; } -bool Input::RealSenseCameraTopic::read(cv::Mat* frame) { - - if (image.empty() || image_count <= 0){ +bool Input::RealSenseCameraTopic::read(cv::Mat * frame) +{ + if (image.empty() || image_count <= 0) { slog::warn << "No data received in CameraTopic instance" << slog::endl; return false; } *frame = image; --image_count; - + return true; } -void Input::RealSenseCameraTopic::config() { +void Input::RealSenseCameraTopic::config() +{ // TODO(weizhi): config } diff --git a/dynamic_vino_lib/src/inputs/standard_camera.cpp b/dynamic_vino_lib/src/inputs/standard_camera.cpp index 8dddcc3e..ea96ddb0 100644 --- a/dynamic_vino_lib/src/inputs/standard_camera.cpp +++ b/dynamic_vino_lib/src/inputs/standard_camera.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of StandardCamera class @@ -21,7 +19,8 @@ #include "dynamic_vino_lib/inputs/standard_camera.hpp" // StandardCamera -bool Input::StandardCamera::initialize() { +bool Input::StandardCamera::initialize() +{ setFrameID("standard_camera_frame"); setInitStatus(cap.open(0)); setWidth((size_t)cap.get(CV_CAP_PROP_FRAME_WIDTH)); @@ -29,7 +28,8 @@ bool Input::StandardCamera::initialize() { return isInit(); } -bool Input::StandardCamera::initialize(int camera_num) { +bool Input::StandardCamera::initialize(int camera_num) +{ setFrameID("standard_camera_frame"); setInitStatus(cap.open(camera_num)); setWidth((size_t)cap.get(CV_CAP_PROP_FRAME_WIDTH)); @@ -37,7 +37,8 @@ bool Input::StandardCamera::initialize(int camera_num) { return isInit(); } -bool Input::StandardCamera::initialize(size_t width, size_t height) { +bool Input::StandardCamera::initialize(size_t width, size_t height) +{ setFrameID("standard_camera_frame"); setWidth(width); setHeight(height); @@ -49,7 +50,8 @@ bool Input::StandardCamera::initialize(size_t width, size_t height) { return isInit(); } -bool Input::StandardCamera::read(cv::Mat* frame) { +bool Input::StandardCamera::read(cv::Mat * frame) +{ if (!isInit()) { return false; } @@ -57,7 +59,7 @@ bool Input::StandardCamera::read(cv::Mat* frame) { return cap.retrieve(*frame); } -void Input::StandardCamera::config() { +void Input::StandardCamera::config() +{ // TODO(weizhi): config } - diff --git a/dynamic_vino_lib/src/inputs/video_input.cpp b/dynamic_vino_lib/src/inputs/video_input.cpp index 7aa11ceb..ac51e54c 100644 --- a/dynamic_vino_lib/src/inputs/video_input.cpp +++ b/dynamic_vino_lib/src/inputs/video_input.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of Video class @@ -24,9 +22,13 @@ #include "dynamic_vino_lib/inputs/video_input.hpp" // Video -Input::Video::Video(const std::string& video) { video_.assign(video); } +Input::Video::Video(const std::string & video) +{ + video_.assign(video); +} -bool Input::Video::initialize() { +bool Input::Video::initialize() +{ setFrameID("video_frame"); setInitStatus(cap.open(video_)); setWidth((size_t)cap.get(CV_CAP_PROP_FRAME_WIDTH)); @@ -34,7 +36,8 @@ bool Input::Video::initialize() { return isInit(); } -bool Input::Video::initialize(size_t width, size_t height) { +bool Input::Video::initialize(size_t width, size_t height) +{ setFrameID("video_frame"); setWidth(width); setHeight(height); @@ -46,7 +49,8 @@ bool Input::Video::initialize(size_t width, size_t height) { return isInit(); } -bool Input::Video::read(cv::Mat* frame) { +bool Input::Video::read(cv::Mat * frame) +{ if (!isInit()) { return false; } @@ -54,6 +58,7 @@ bool Input::Video::read(cv::Mat* frame) { return cap.retrieve(*frame); } -void Input::Video::config() { +void Input::Video::config() +{ // TODO(weizhi): config } diff --git a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp b/dynamic_vino_lib/src/models/age_gender_detection_model.cpp index eea30c1d..55008e95 100644 --- a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp +++ b/dynamic_vino_lib/src/models/age_gender_detection_model.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of AgeGenderDetectionModel class @@ -24,24 +22,25 @@ #include "dynamic_vino_lib/models/age_gender_detection_model.hpp" #include "dynamic_vino_lib/slog.hpp" - // Validated Age Gender Classification Network Models::AgeGenderDetectionModel::AgeGenderDetectionModel( - const std::string& model_loc, int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size){} + const std::string & model_loc, + int input_num, int output_num, + int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) +{ +} void Models::AgeGenderDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) { + InferenceEngine::CNNNetReader::Ptr net_reader) +{ // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::FP32); input_info->setLayout(InferenceEngine::Layout::NCHW); // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); auto it = output_info_map.begin(); InferenceEngine::DataPtr age_output_ptr = (it++)->second; InferenceEngine::DataPtr gender_output_ptr = (it++)->second; @@ -56,10 +55,10 @@ void Models::AgeGenderDetectionModel::setLayerProperty( } void Models::AgeGenderDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) { + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ slog::info << "Checking Age Gender Detection outputs" << slog::endl; - InferenceEngine::OutputsDataMap output_info( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); auto it = output_info.begin(); InferenceEngine::DataPtr age_output_ptr = (it++)->second; InferenceEngine::DataPtr gender_output_ptr = (it++)->second; @@ -68,23 +67,22 @@ void Models::AgeGenderDetectionModel::checkLayerProperty( std::swap(age_output_ptr, gender_output_ptr); } if (age_output_ptr->getCreatorLayer().lock()->type != "Convolution") { - throw std::logic_error("In Age Gender network, age layer (" + - age_output_ptr->getCreatorLayer().lock()->name + - ") should be a Convolution, but was: " + - age_output_ptr->getCreatorLayer().lock()->type); + throw std::logic_error( + "In Age Gender network, age layer (" + age_output_ptr->getCreatorLayer().lock()->name + + ") should be a Convolution, but was: " + + age_output_ptr->getCreatorLayer().lock()->type); } if (gender_output_ptr->getCreatorLayer().lock()->type != "SoftMax") { throw std::logic_error("In Age Gender network, gender layer (" + - gender_output_ptr->getCreatorLayer().lock()->name + - ") should be a SoftMax, but was: " + - gender_output_ptr->getCreatorLayer().lock()->type); + gender_output_ptr->getCreatorLayer().lock()->name + + ") should be a SoftMax, but was: " + + gender_output_ptr->getCreatorLayer().lock()->type); } - slog::info << "Age layer: " << age_output_ptr->getCreatorLayer().lock()->name - << slog::endl; - slog::info << "Gender layer: " - << gender_output_ptr->getCreatorLayer().lock()->name << slog::endl; + slog::info << "Age layer: " << age_output_ptr->getCreatorLayer().lock()->name << slog::endl; + slog::info 
<< "Gender layer: " << gender_output_ptr->getCreatorLayer().lock()->name << slog::endl; } -const std::string Models::AgeGenderDetectionModel::getModelName() const { +const std::string Models::AgeGenderDetectionModel::getModelName() const +{ return "Age Gender Detection"; } diff --git a/dynamic_vino_lib/src/models/base_model.cpp b/dynamic_vino_lib/src/models/base_model.cpp index 9a0f35cc..dab29ef9 100644 --- a/dynamic_vino_lib/src/models/base_model.cpp +++ b/dynamic_vino_lib/src/models/base_model.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of BaseModel class @@ -28,19 +26,22 @@ #include "dynamic_vino_lib/slog.hpp" // Validated Base Network -Models::BaseModel::BaseModel(const std::string& model_loc, int input_num, - int output_num, int max_batch_size) - : input_num_(input_num), - output_num_(output_num), - model_loc_(model_loc), - max_batch_size_(max_batch_size) { +Models::BaseModel::BaseModel( + const std::string & model_loc, int input_num, int output_num, + int max_batch_size) +: input_num_(input_num), + output_num_(output_num), + model_loc_(model_loc), + max_batch_size_(max_batch_size) +{ if (model_loc.empty()) { throw std::logic_error("model file name is empty!"); } net_reader_ = std::make_shared(); } -void Models::BaseModel::modelInit() { +void Models::BaseModel::modelInit() +{ slog::info << "Loading network files" << slog::endl; // Read network model net_reader_->ReadNetwork(model_loc_); @@ -56,32 +57,31 @@ void Models::BaseModel::modelInit() { // Read labels (if any) std::string label_file_name = raw_name + ".labels"; std::ifstream input_file(label_file_name); - std::copy(std::istream_iterator(input_file), - std::istream_iterator(), std::back_inserter(labels_)); + std::copy(std::istream_iterator(input_file), std::istream_iterator(), + std::back_inserter(labels_)); checkNetworkSize(input_num_, output_num_, net_reader_); checkLayerProperty(net_reader_); setLayerProperty(net_reader_); } void Models::BaseModel::checkNetworkSize( - int input_size, int output_size, - InferenceEngine::CNNNetReader::Ptr net_reader) { + int input_size, int output_size, + InferenceEngine::CNNNetReader::Ptr net_reader) +{ // TODO(Houk): Repeat, better removed! 
// check input size slog::info << "Checking input size" << slog::endl; - InferenceEngine::InputsDataMap input_info( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info(net_reader->getNetwork().getInputsInfo()); if (input_info.size() != input_size) { - throw std::logic_error(getModelName() + - " should have " + std::to_string(input_size)+ " input"); + throw std::logic_error(getModelName() + " should have " + std::to_string(input_size) + " inpu" + "t"); } // check output size slog::info << "Checking output size" << slog::endl; - InferenceEngine::OutputsDataMap output_info( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); if (output_info.size() != output_size) { - throw std::logic_error(getModelName() + - " should have " + std::to_string(output_size) + " output"); + throw std::logic_error(getModelName() + " should have " + std::to_string(output_size) + " outpu" + "t"); } // InferenceEngine::DataPtr& output_data_ptr = output_info.begin()->second; } diff --git a/dynamic_vino_lib/src/models/emotion_detection_model.cpp b/dynamic_vino_lib/src/models/emotion_detection_model.cpp index e76881d2..455a9097 100644 --- a/dynamic_vino_lib/src/models/emotion_detection_model.cpp +++ b/dynamic_vino_lib/src/models/emotion_detection_model.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of EmotionDetectionModel class @@ -23,25 +21,24 @@ #include "dynamic_vino_lib/models/emotion_detection_model.hpp" #include "dynamic_vino_lib/slog.hpp" - // Validated Emotions Detection Network Models::EmotionDetectionModel::EmotionDetectionModel( - const std::string& model_loc, int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size){} + const std::string & model_loc, int input_num, + int output_num, int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) +{ +} -void Models::EmotionDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) { +void Models::EmotionDetectionModel::setLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) +{ // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::FP32); input_info->setLayout(InferenceEngine::Layout::NCHW); // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); // set input and output layer name @@ -50,23 +47,23 @@ void Models::EmotionDetectionModel::setLayerProperty( } void Models::EmotionDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) { + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ slog::info << "Checking Emotions Detection outputs" << slog::endl; - InferenceEngine::OutputsDataMap output_info( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); InferenceEngine::DataPtr emotions_output_ptr = output_info.begin()->second; // output layer should be SoftMax type if (emotions_output_ptr->getCreatorLayer().lock()->type != "SoftMax") { throw std::logic_error("In Emotions Recognition network, Emotion layer (" + - emotions_output_ptr->getCreatorLayer().lock()->name + - ") should be a SoftMax, but was: " + - emotions_output_ptr->getCreatorLayer().lock()->type); + emotions_output_ptr->getCreatorLayer().lock()->name + + ") should be a SoftMax, but was: " + + emotions_output_ptr->getCreatorLayer().lock()->type); } - slog::info << "Emotions layer: " - << emotions_output_ptr->getCreatorLayer().lock()->name - << slog::endl; + slog::info << "Emotions layer: " << emotions_output_ptr->getCreatorLayer().lock()->name << + slog::endl; } -const std::string Models::EmotionDetectionModel::getModelName() const { +const std::string Models::EmotionDetectionModel::getModelName() const +{ return "Emotions Detection"; } diff --git a/dynamic_vino_lib/src/models/face_detection_model.cpp b/dynamic_vino_lib/src/models/face_detection_model.cpp index fc9f1932..07945289 100644 --- a/dynamic_vino_lib/src/models/face_detection_model.cpp +++ b/dynamic_vino_lib/src/models/face_detection_model.cpp @@ -1,19 +1,16 @@ - -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed 
under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of FaceDetectionModel class @@ -26,11 +23,14 @@ #include "dynamic_vino_lib/slog.hpp" // Validated Face Detection Network -Models::FaceDetectionModel::FaceDetectionModel(const std::string& model_loc, - int input_num, int output_num, - int max_batch_size) - : ObjectDetectionModel(model_loc, input_num, output_num, max_batch_size){} +Models::FaceDetectionModel::FaceDetectionModel( + const std::string & model_loc, int input_num, + int output_num, int max_batch_size) +: ObjectDetectionModel(model_loc, input_num, output_num, max_batch_size) +{ +} -const std::string Models::FaceDetectionModel::getModelName() const { +const std::string Models::FaceDetectionModel::getModelName() const +{ return "Face Detection"; } diff --git a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp b/dynamic_vino_lib/src/models/head_pose_detection_model.cpp index b41d52f3..0b26cae1 100644 --- a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp +++ b/dynamic_vino_lib/src/models/head_pose_detection_model.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of HeadPoseDetectionModel class @@ -25,51 +23,48 @@ #include "dynamic_vino_lib/models/head_pose_detection_model.hpp" #include "dynamic_vino_lib/slog.hpp" - // Validated Head Pose Network Models::HeadPoseDetectionModel::HeadPoseDetectionModel( - const std::string& model_loc, int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size){} + const std::string & model_loc, int input_num, + int output_num, int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) +{ +} void Models::HeadPoseDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) { + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ slog::info << "Checking Head Pose network outputs" << slog::endl; - InferenceEngine::OutputsDataMap outputInfo( - net_reader->getNetwork().getOutputsInfo()); - std::map layerNames = {{output_angle_r_, false}, - {output_angle_p_, false}, - {output_angle_y_, false}}; + InferenceEngine::OutputsDataMap outputInfo(net_reader->getNetwork().getOutputsInfo()); + std::map layerNames = { {output_angle_r_, false}, + {output_angle_p_, false}, + {output_angle_y_, false}}; - for (auto&& output : outputInfo) { - InferenceEngine::CNNLayerPtr layer = - output.second->getCreatorLayer().lock(); + for (auto && output : outputInfo) { + InferenceEngine::CNNLayerPtr layer = output.second->getCreatorLayer().lock(); if (layerNames.find(layer->name) == layerNames.end()) { - throw std::logic_error("Head Pose network output layer unknown: " + - layer->name + ", should be " + output_angle_r_ + - " or " + output_angle_p_ + " or " + - output_angle_y_); + throw std::logic_error("Head Pose network output layer unknown: " + layer->name + + ", should be " + output_angle_r_ + " or " + output_angle_p_ + " or " + + output_angle_y_); } if (layer->type != "FullyConnected") { throw std::logic_error("Head Pose network output layer (" + layer->name + - ") has invalid type: " + layer->type + - ", should be FullyConnected"); + ") has invalid type: " + layer->type + ", should be FullyConnected"); } - auto fc = dynamic_cast(layer.get()); + auto fc = dynamic_cast(layer.get()); if (fc->_out_num != 1) { throw std::logic_error("Head Pose network output layer (" + layer->name + - ") has invalid out-size=" + - std::to_string(fc->_out_num) + ", should be 1"); + ") has invalid out-size=" + std::to_string(fc->_out_num) + + ", should be 1"); } layerNames[layer->name] = true; } } -void Models::HeadPoseDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) { +void Models::HeadPoseDetectionModel::setLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) +{ // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); @@ -77,15 +72,15 @@ void Models::HeadPoseDetectionModel::setLayerProperty( input_ = input_info_map.begin()->first; // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); - for (auto& output : output_info_map){ + for (auto & output : output_info_map) { output.second->setPrecision(InferenceEngine::Precision::FP32); 
output.second->setLayout(InferenceEngine::Layout::NC); } } -const std::string Models::HeadPoseDetectionModel::getModelName() const { +const std::string Models::HeadPoseDetectionModel::getModelName() const +{ return "Head Pose Network"; } diff --git a/dynamic_vino_lib/src/models/object_detection_model.cpp b/dynamic_vino_lib/src/models/object_detection_model.cpp index fba7904b..48eaba25 100644 --- a/dynamic_vino_lib/src/models/object_detection_model.cpp +++ b/dynamic_vino_lib/src/models/object_detection_model.cpp @@ -1,18 +1,17 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * @brief a header file with declaration of ObjectDetectionModel class * @file object_detection_model.cpp @@ -21,15 +20,16 @@ #include "dynamic_vino_lib/models/object_detection_model.hpp" #include "dynamic_vino_lib/slog.hpp" // Validated Object Detection Network -Models::ObjectDetectionModel::ObjectDetectionModel(const std::string& model_loc, - int input_num, int output_num, - int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size){} -void Models::ObjectDetectionModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) { +Models::ObjectDetectionModel::ObjectDetectionModel( + const std::string & model_loc, int input_num, + int output_num, int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) +{ +} +void Models::ObjectDetectionModel::setLayerProperty(InferenceEngine::CNNNetReader::Ptr net_reader) +{ // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); if (input_info_map.size() != 1) { throw std::logic_error("This sample accepts networks having only one input"); } @@ -37,12 +37,11 @@ void Models::ObjectDetectionModel::setLayerProperty( input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); if (output_info_map.size() != 1) { throw std::logic_error("This sample accepts networks having only one output"); } - InferenceEngine::DataPtr& output_data_ptr = 
output_info_map.begin()->second; + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); // set input and output layer name @@ -50,25 +49,25 @@ void Models::ObjectDetectionModel::setLayerProperty( output_ = output_info_map.begin()->first; } void Models::ObjectDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) { + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ slog::info << "Checking Object Detection outputs" << slog::endl; - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); slog::info << "Checking Object Detection outputs ..." << slog::endl; if (output_info_map.size() != 1) { throw std::logic_error("This sample accepts networks having only one output"); } - InferenceEngine::DataPtr& output_data_ptr = output_info_map.begin()->second; + InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; output_ = output_info_map.begin()->first; slog::info << "Checking Object Detection output ... Name=" << output_ << slog::endl; const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_.c_str()); + net_reader->getNetwork().getLayerByName(output_.c_str()); // output layer should have attribute called num_classes slog::info << "Checking Object Detection num_classes" << slog::endl; if (output_layer->params.find("num_classes") == output_layer->params.end()) { throw std::logic_error("Object Detection network output layer (" + output_ + - ") should have num_classes integer attribute"); + ") should have num_classes integer attribute"); } // class number should be equal to size of label vector // if network has default "background" class, fake is used @@ -83,23 +82,22 @@ void Models::ObjectDetectionModel::checkLayerProperty( } } // last dimension of output layer should be 7 - const InferenceEngine::SizeVector output_dims = - output_data_ptr->getTensorDesc().getDims(); + const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); max_proposal_count_ = static_cast<int>(output_dims[2]); slog::info << "max proposal count is: " << max_proposal_count_ << slog::endl; object_size_ = static_cast<int>(output_dims[3]); if (object_size_ != 7) { - throw std::logic_error( - "Object Detection network output layer should have 7 as a last " - "dimension"); + throw std::logic_error("Object Detection network output layer should have 7 as a last " + "dimension"); } if (output_dims.size() != 4) { - throw std::logic_error( - "Object Detection network output dimensions not compatible shoulld be 4, " - "but was " + - std::to_string(output_dims.size())); + throw std::logic_error("Object Detection network output dimensions not compatible, should be " + "4, " + "but was " + + std::to_string(output_dims.size())); } } -const std::string Models::ObjectDetectionModel::getModelName() const { +const std::string Models::ObjectDetectionModel::getModelName() const +{ return "Object Detection"; } diff --git a/dynamic_vino_lib/src/models/object_segmentation_model.cpp b/dynamic_vino_lib/src/models/object_segmentation_model.cpp index 36fd3e2b..4d67258e 100644 --- a/dynamic_vino_lib/src/models/object_segmentation_model.cpp +++ b/dynamic_vino_lib/src/models/object_segmentation_model.cpp @@ -1,18 +1,17 @@ -/* - * Copyright (c) 2018 Intel
Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * @brief a header file with declaration of ObjectSegmentationModel class * @file object_detection_model.cpp @@ -22,51 +21,50 @@ #include "dynamic_vino_lib/slog.hpp" // Validated Object Detection Network Models::ObjectSegmentationModel::ObjectSegmentationModel( - const std::string& model_loc, int input_num, int output_num, int max_batch_size) - : BaseModel(model_loc, input_num, output_num, max_batch_size){} + const std::string & model_loc, + int input_num, int output_num, + int max_batch_size) +: BaseModel(model_loc, input_num, output_num, max_batch_size) +{ +} void Models::ObjectSegmentationModel::checkNetworkSize( - int input_size, int output_size, - InferenceEngine::CNNNetReader::Ptr net_reader) { + int input_size, int output_size, InferenceEngine::CNNNetReader::Ptr net_reader) +{ slog::info << "Checking input size" << slog::endl; - InferenceEngine::InputsDataMap input_info( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info(net_reader->getNetwork().getInputsInfo()); if (input_info.size() != input_size) { - throw std::logic_error(getModelName() + - " should have " + std::to_string(input_size)+ " input"); + throw std::logic_error(getModelName() + " should have " + std::to_string(input_size) + " inpu" + "t"); } // check output size slog::info << "Checking output size" << slog::endl; - InferenceEngine::OutputsDataMap output_info( - net_reader->getNetwork().getOutputsInfo()); - if (output_info.size() != output_size && output_info.size() != (output_size-1)) { - throw std::logic_error(getModelName() + - " should have " + std::to_string(output_size) + " output"); + InferenceEngine::OutputsDataMap output_info(net_reader->getNetwork().getOutputsInfo()); + if (output_info.size() != output_size && output_info.size() != (output_size - 1)) { + throw std::logic_error(getModelName() + " should have " + std::to_string(output_size) + " outpu" + "t"); } } void Models::ObjectSegmentationModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) { + InferenceEngine::CNNNetReader::Ptr net_reader) +{ // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + InferenceEngine::InputsDataMap input_info_map(net_reader->getNetwork().getInputsInfo()); auto inputInfoItem = *input_info_map.begin(); 
inputInfoItem.second->setPrecision(InferenceEngine::Precision::U8); auto network = net_reader->getNetwork(); try { network.addOutput(std::string("detection_output"), 0); - } - catch (std::exception& error) { - throw std::logic_error(getModelName() + - "is failed when adding detection_output laryer."); + } catch (std::exception & error) { + throw std::logic_error(getModelName() + " failed when adding detection_output layer."); } network.setBatchSize(1); - slog::info << "Batch size is " - << std::to_string(net_reader->getNetwork().getBatchSize()) << slog::endl; - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + slog::info << "Batch size is " << std::to_string(net_reader->getNetwork().getBatchSize()) << + slog::endl; + InferenceEngine::OutputsDataMap output_info_map(net_reader->getNetwork().getOutputsInfo()); for (auto & item : output_info_map) { - item.second->setPrecision(InferenceEngine::Precision::FP32); + item.second->setPrecision(InferenceEngine::Precision::FP32); } auto output_ptr = output_info_map.begin(); input_ = input_info_map.begin()->first; @@ -75,9 +73,10 @@ void Models::ObjectSegmentationModel::setLayerProperty( } void Models::ObjectSegmentationModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr& net_reader) { + const InferenceEngine::CNNNetReader::Ptr & net_reader) +{ const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName("detection_output"); + net_reader->getNetwork().getLayerByName("detection_output"); const int num_classes = output_layer->GetParamAsInt("num_classes"); slog::info << "Checking Object Segmentation output ... num_classes=" << num_classes << slog::endl; if (getLabels().size() != num_classes) { @@ -89,6 +88,7 @@ void Models::ObjectSegmentationModel::checkLayerProperty( } } -const std::string Models::ObjectSegmentationModel::getModelName() const { +const std::string Models::ObjectSegmentationModel::getModelName() const +{ return "Object Segmentation"; } diff --git a/dynamic_vino_lib/src/outputs/base_output.cpp b/dynamic_vino_lib/src/outputs/base_output.cpp index 600acef1..35be522f 100644 --- a/dynamic_vino_lib/src/outputs/base_output.cpp +++ b/dynamic_vino_lib/src/outputs/base_output.cpp @@ -1,23 +1,22 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
#include "dynamic_vino_lib/outputs/base_output.hpp" #include "dynamic_vino_lib/pipeline.hpp" -int Outputs::BaseOutput::getFPS() const { +int Outputs::BaseOutput::getFPS() const +{ static int fps = 0; static auto t_start = std::chrono::high_resolution_clock::now(); @@ -29,8 +28,7 @@ int Outputs::BaseOutput::getFPS() const { typedef std::chrono::duration> ms; ms secondDetection = std::chrono::duration_cast(t_end - t_start); - if (secondDetection.count() > 1000) - { + if (secondDetection.count() > 1000) { fps = frame_cnt; frame_cnt = 0; t_start = t_end; @@ -39,14 +37,17 @@ int Outputs::BaseOutput::getFPS() const { return fps; } -void Outputs::BaseOutput::setPipeline(Pipeline* const pipeline){ +void Outputs::BaseOutput::setPipeline(Pipeline * const pipeline) +{ pipeline_ = pipeline; } -Pipeline* Outputs::BaseOutput::getPipeline() const { +Pipeline * Outputs::BaseOutput::getPipeline() const +{ return pipeline_; } -cv::Mat Outputs::BaseOutput::getFrame() const { +cv::Mat Outputs::BaseOutput::getFrame() const +{ return frame_; } diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/dynamic_vino_lib/src/outputs/image_window_output.cpp index 28a12045..af8c813b 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/dynamic_vino_lib/src/outputs/image_window_output.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of ImageWindowOutput class @@ -27,12 +25,13 @@ #include "dynamic_vino_lib/outputs/image_window_output.hpp" #include "dynamic_vino_lib/pipeline.hpp" -Outputs::ImageWindowOutput::ImageWindowOutput(const std::string& window_name, - int focal_length) - : window_name_(window_name), focal_length_(focal_length) { +Outputs::ImageWindowOutput::ImageWindowOutput(const std::string & window_name, int focal_length) +: window_name_(window_name), focal_length_(focal_length) +{ } -void Outputs::ImageWindowOutput::feedFrame(const cv::Mat& frame) { +void Outputs::ImageWindowOutput::feedFrame(const cv::Mat & frame) +{ // frame_ = frame; frame_ = frame.clone(); if (camera_matrix_.empty()) { @@ -48,13 +47,15 @@ void Outputs::ImageWindowOutput::feedFrame(const cv::Mat& frame) { } void Outputs::ImageWindowOutput::mergeMask( -const std::vector& results) { + const std::vector & results) +{ std::map class_color; for (unsigned i = 0; i < results.size(); i++) { std::string class_label = results[i].getLabel(); - if (class_color.find(class_label) == class_color.end()) + if (class_color.find(class_label) == class_color.end()) { class_color[class_label] = class_color.size(); - auto& color = colors_[class_color[class_label]]; + } + auto & color = colors_[class_color[class_label]]; const float alpha = 0.7f; const float MASK_THRESHOLD = 0.5; @@ -63,24 +64,27 @@ const std::vector& results) { cv::Mat mask = results[i].getMask(); cv::Mat colored_mask(location.height, location.width, frame_.type()); - for (int h = 0; h < mask.size().height; ++h) - for (int w = 0; w < mask.size().width; ++w) - for (int ch = 0; ch < colored_mask.channels(); ++ch) - colored_mask.at(h, w)[ch] = mask.at(h, w) > MASK_THRESHOLD - ? 255 * color[ch] - : roi_img.at(h, w)[ch]; + for (int h = 0; h < mask.size().height; ++h) { + for (int w = 0; w < mask.size().width; ++w) { + for (int ch = 0; ch < colored_mask.channels(); ++ch) { + colored_mask.at(h, w)[ch] = mask.at(h, w) > MASK_THRESHOLD ? + 255 * color[ch] : + roi_img.at(h, w)[ch]; + } + } + } cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); } } void Outputs::ImageWindowOutput::accept( - const std::vector& results) { + const std::vector & results) +{ if (outputs_.size() == 0) { initOutputs(results.size()); } if (outputs_.size() != results.size()) { - slog::err << "the size of Object Segmentation and Output Vector is not equal!" - << slog::endl; + slog::err << "the size of Object Segmentation and Output Vector is not equal!" << slog::endl; return; } for (unsigned i = 0; i < results.size(); i++) { @@ -98,14 +102,14 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector& results) { + const std::vector & results) +{ if (outputs_.size() == 0) { initOutputs(results.size()); } if (outputs_.size() != results.size()) { // throw std::logic_error("size is not equal!"); - slog::err << "the size of Face Detection and Output Vector is not equal!" - << slog::endl; + slog::err << "the size of Face Detection and Output Vector is not equal!" 
<< slog::endl; return; } for (unsigned i = 0; i < results.size(); i++) { @@ -122,14 +126,14 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector& results) { + const std::vector & results) +{ if (outputs_.size() == 0) { initOutputs(results.size()); } if (outputs_.size() != results.size()) { // throw std::logic_error("size is not equal!"); - slog::err << "the size of Face Detection and Output Vector is not equal!" - << slog::endl; + slog::err << "the size of Face Detection and Output Vector is not equal!" << slog::endl; return; } for (unsigned i = 0; i < results.size(); i++) { @@ -148,14 +152,14 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector& results) { + const std::vector & results) +{ if (outputs_.size() == 0) { initOutputs(results.size()); } if (outputs_.size() != results.size()) { // throw std::logic_error("size is not equal!"); - slog::err << "the size of Emotion Detection and Output Vector is not equal!" - << slog::endl; + slog::err << "the size of Emotion Detection and Output Vector is not equal!" << slog::endl; return; } for (unsigned i = 0; i < results.size(); i++) { @@ -166,22 +170,20 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector& results) { + const std::vector & results) +{ if (outputs_.size() == 0) { initOutputs(results.size()); } if (outputs_.size() != results.size()) { // throw std::logic_error("size is not equal!"); - slog::err - << "the size of AgeGender Detection and Output Vector is not equal!" - << slog::endl; + slog::err << "the size of AgeGender Detection and Output Vector is not equal!" << slog::endl; return; } for (unsigned i = 0; i < results.size(); i++) { std::ostringstream ostream; // auto age = results[i].getAge(); - ostream << "[Y" << std::fixed << std::setprecision(0) << results[i].getAge() - << "]"; + ostream << "[Y" << std::fixed << std::setprecision(0) << results[i].getAge() << "]"; outputs_[i].desc += ostream.str(); auto male_prob = results[i].getMaleProbability(); @@ -191,8 +193,10 @@ void Outputs::ImageWindowOutput::accept( } } -cv::Point Outputs::ImageWindowOutput::calcAxis(cv::Mat r, double cx, double cy, - double cz, cv::Point cp) { +cv::Point Outputs::ImageWindowOutput::calcAxis( + cv::Mat r, double cx, double cy, double cz, + cv::Point cp) +{ cv::Mat Axis(3, 1, CV_32F); Axis.at(0) = cx; Axis.at(1) = cy; @@ -201,23 +205,19 @@ cv::Point Outputs::ImageWindowOutput::calcAxis(cv::Mat r, double cx, double cy, o.at(2) = camera_matrix_.at(0); Axis = r * Axis + o; cv::Point point; - point.x = static_cast( - (Axis.at(0) / Axis.at(2) * camera_matrix_.at(0)) + - cp.x); - point.y = static_cast( - (Axis.at(1) / Axis.at(2) * camera_matrix_.at(4)) + - cp.y); + point.x = static_cast((Axis.at(0) / Axis.at(2) * camera_matrix_.at(0)) + + cp.x); + point.y = static_cast((Axis.at(1) / Axis.at(2) * camera_matrix_.at(4)) + + cp.y); return point; } -cv::Mat Outputs::ImageWindowOutput::getRotationTransform(double yaw, - double pitch, - double roll) { +cv::Mat Outputs::ImageWindowOutput::getRotationTransform(double yaw, double pitch, double roll) +{ pitch *= CV_PI / 180.0; yaw *= CV_PI / 180.0; roll *= CV_PI / 180.0; - cv::Matx33f Rx(1, 0, 0, 0, cos(pitch), -sin(pitch), 0, sin(pitch), - cos(pitch)); + cv::Matx33f Rx(1, 0, 0, 0, cos(pitch), -sin(pitch), 0, sin(pitch), cos(pitch)); cv::Matx33f Ry(cos(yaw), 0, -sin(yaw), 0, 1, 0, sin(yaw), 0, cos(yaw)); cv::Matx33f Rz(cos(roll), -sin(roll), 0, sin(roll), 
cos(roll), 0, 0, 0, 1); auto r = cv::Mat(Rz * Ry * Rx); @@ -225,15 +225,14 @@ cv::Mat Outputs::ImageWindowOutput::getRotationTransform(double yaw, } void Outputs::ImageWindowOutput::accept( - const std::vector& results) { + const std::vector & results) +{ if (outputs_.size() == 0) { initOutputs(results.size()); } if (outputs_.size() != results.size()) { // throw std::logic_error("size is not equal!"); - slog::err - << "the size of HeadPose Detection and Output Vector is not equal!" - << slog::endl; + slog::err << "the size of HeadPose Detection and Output Vector is not equal!" << slog::endl; return; } for (unsigned i = 0; i < results.size(); i++) { @@ -245,8 +244,7 @@ void Outputs::ImageWindowOutput::accept( feedFrame(frame_); cv::Mat r = getRotationTransform(yaw, pitch, roll); cv::Rect location = result.getLocation(); - auto cp = cv::Point(location.x + location.width / 2, - location.y + location.height / 2); + auto cp = cv::Point(location.x + location.width / 2, location.y + location.height / 2); outputs_[i].hp_cp = cp; outputs_[i].hp_x = calcAxis(r, scale, 0, 0, cp); outputs_[i].hp_y = calcAxis(r, 0, -scale, 0, cp); @@ -255,18 +253,19 @@ void Outputs::ImageWindowOutput::accept( } } -void Outputs::ImageWindowOutput::decorateFrame() { +void Outputs::ImageWindowOutput::decorateFrame() +{ if (getPipeline()->getParameters()->isGetFps()) { int fps = getFPS(); std::stringstream ss; ss << "FPS: " << fps; - cv::putText(frame_, ss.str(), cv::Point2f(0, 65), cv::FONT_HERSHEY_TRIPLEX, - 0.5, cv::Scalar(255, 0, 0)); + cv::putText(frame_, ss.str(), cv::Point2f(0, 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, + cv::Scalar(255, 0, 0)); } for (auto o : outputs_) { auto new_y = std::max(15, o.rect.y - 15); - cv::putText(frame_, o.desc, cv::Point2f(o.rect.x, new_y), - cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, o.scalar); + cv::putText(frame_, o.desc, cv::Point2f(o.rect.x, new_y), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, + o.scalar); cv::rectangle(frame_, o.rect, o.scalar, 1); if (o.hp_cp != o.hp_x) { cv::line(frame_, o.hp_cp, o.hp_x, cv::Scalar(0, 0, 255), 2); @@ -282,14 +281,16 @@ void Outputs::ImageWindowOutput::decorateFrame() { outputs_.clear(); } -void Outputs::ImageWindowOutput::handleOutput() { +void Outputs::ImageWindowOutput::handleOutput() +{ cv::namedWindow(window_name_, cv::WINDOW_AUTOSIZE); decorateFrame(); cv::imshow(window_name_, frame_); cv::waitKey(1); } -void Outputs::ImageWindowOutput::initOutputs(unsigned size) { +void Outputs::ImageWindowOutput::initOutputs(unsigned size) +{ outputs_.resize(size); for (unsigned i = 0; i < size; i++) { outputs_[i].desc = ""; diff --git a/dynamic_vino_lib/src/outputs/ros_service_output.cpp b/dynamic_vino_lib/src/outputs/ros_service_output.cpp index 52960edc..bd21812d 100644 --- a/dynamic_vino_lib/src/outputs/ros_service_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_service_output.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of RosTopicOutput class @@ -25,15 +23,18 @@ #include "dynamic_vino_lib/outputs/ros_service_output.hpp" #include "cv_bridge/cv_bridge.h" -//Outputs::RosServiceOutput::RosServiceOutput() +// Outputs::RosServiceOutput::RosServiceOutput() -void Outputs::RosServiceOutput::feedFrame(const cv::Mat& frame) {frame_ = frame.clone();} +void Outputs::RosServiceOutput::feedFrame(const cv::Mat & frame) +{ + frame_ = frame.clone(); +} void Outputs::RosServiceOutput::accept( - const std::vector& results) { - + const std::vector & results) +{ objects_.clear(); - for (auto& r : results) { + for (auto & r : results) { auto loc = r.getLocation(); object_.roi.x_offset = loc.x; object_.roi.y_offset = loc.y; @@ -46,8 +47,8 @@ void Outputs::RosServiceOutput::accept( } void Outputs::RosServiceOutput::accept( - const std::vector& results) { - + const std::vector & results) +{ for (auto r : results) { // slog::info << ">"; auto loc = r.getLocation(); @@ -57,14 +58,14 @@ void Outputs::RosServiceOutput::accept( face_.roi.height = loc.height; face_.object.object_name = r.getLabel(); face_.object.probability = r.getConfidence(); - //faces_topic_->objects_vector.push_back(face); - //objects_topic_->objects_vector.push_back(face); + // faces_topic_->objects_vector.push_back(face); + // objects_topic_->objects_vector.push_back(face); } } void Outputs::RosServiceOutput::accept( - const std::vector& results) { - + const std::vector & results) +{ for (auto r : results) { auto loc = r.getLocation(); emotion_.roi.x_offset = loc.x; @@ -72,13 +73,13 @@ void Outputs::RosServiceOutput::accept( emotion_.roi.width = loc.width; emotion_.roi.height = loc.height; emotion_.emotion = r.getLabel(); - //emotions_topic_->emotions.push_back(emotion); + // emotions_topic_->emotions.push_back(emotion); } } void Outputs::RosServiceOutput::accept( - const std::vector& results) { - + const std::vector & results) +{ for (auto r : results) { auto loc = r.getLocation(); ag_.roi.x_offset = loc.x; @@ -87,20 +88,20 @@ void Outputs::RosServiceOutput::accept( ag_.roi.height = loc.height; ag_.age = r.getAge(); auto male_prob = r.getMaleProbability(); - if (male_prob > 0.5){ + if (male_prob > 0.5) { ag_.gender = "Male"; ag_.gender_confidence = male_prob; } else { ag_.gender = "Female"; ag_.gender_confidence = 1.0 - male_prob; } - //age_gender_topic_->objects.push_back(ag); + // age_gender_topic_->objects.push_back(ag); } } void Outputs::RosServiceOutput::accept( - const std::vector& results) { - + const std::vector & results) +{ for (auto r : results) { auto loc = r.getLocation(); hp_.roi.x_offset = loc.x; @@ -110,26 +111,29 @@ void Outputs::RosServiceOutput::accept( hp_.yaw = r.getAngleY(); hp_.pitch = r.getAngleP(); hp_.roll = r.getAngleR(); - //headpose_topic_->headposes.push_back(hp); + // headpose_topic_->headposes.push_back(hp); } } -//void Outputs::RosServiceOutput::handleOutput() +// void 
Outputs::RosServiceOutput::handleOutput() -void Outputs::RosServiceOutput::setResponse(std::shared_ptr response) { +void Outputs::RosServiceOutput::setResponse( + std::shared_ptr response) +{ response->objects.objects_vector = objects_; } /** * TODO: implement the value gain */ -std_msgs::msg::Header Outputs::RosServiceOutput::getHeader() { +std_msgs::msg::Header Outputs::RosServiceOutput::getHeader() +{ std_msgs::msg::Header header; header.frame_id = "default_camera"; std::chrono::high_resolution_clock::time_point tp = std::chrono::high_resolution_clock::now(); int64 ns = tp.time_since_epoch().count(); - header.stamp.sec = ns/1000000000; - header.stamp.nanosec = ns%1000000000; + header.stamp.sec = ns / 1000000000; + header.stamp.nanosec = ns % 1000000000; return header; } diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp index f2e8a579..c3fc62bd 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/dynamic_vino_lib/src/outputs/ros_topic_output.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of RosTopicOutput class @@ -27,8 +25,8 @@ #include "dynamic_vino_lib/pipeline.hpp" #include "cv_bridge/cv_bridge.h" - -Outputs::RosTopicOutput::RosTopicOutput() { +Outputs::RosTopicOutput::RosTopicOutput() +{ // rmw_qos_profile_t qos = rmw_qos_profile_default; // qos.depth = 10; // qos.reliability = RMW_QOS_POLICY_RELIABILITY_RELIABLE; @@ -38,14 +36,14 @@ Outputs::RosTopicOutput::RosTopicOutput() { "/openvino_toolkit/segmented_obejcts", 16); pub_detected_object_ = node_->create_publisher( "/openvino_toolkit/detected_objects", 16); - pub_face_ = node_->create_publisher( - "/openvino_toolkit/faces", 16); - pub_emotion_ = node_->create_publisher( - "/openvino_toolkit/emotions", 16); + pub_face_ = + node_->create_publisher("/openvino_toolkit/faces", 16); + pub_emotion_ = + node_->create_publisher("/openvino_toolkit/emotions", 16); pub_age_gender_ = node_->create_publisher( "/openvino_toolkit/age_genders", 16); - pub_headpose_ = node_->create_publisher( - "/openvino_toolkit/headposes", 16); + pub_headpose_ = + node_->create_publisher("/openvino_toolkit/headposes", 16); emotions_topic_ = nullptr; detected_objects_topic_ = nullptr; faces_topic_ = nullptr; @@ -54,14 +52,17 @@ Outputs::RosTopicOutput::RosTopicOutput() { segmented_objects_topic_ = nullptr; } -void Outputs::RosTopicOutput::feedFrame(const cv::Mat& frame) {frame_ = frame.clone();} - +void Outputs::RosTopicOutput::feedFrame(const cv::Mat & frame) +{ + frame_ = frame.clone(); +} void Outputs::RosTopicOutput::accept( - const std::vector& results) { + const std::vector & results) +{ segmented_objects_topic_ = std::make_shared(); people_msgs::msg::ObjectInMask object; - for (auto& r : results) { + for (auto & r : results) { // slog::info << ">"; auto loc = r.getLocation(); object.roi.x_offset = loc.x; @@ -71,19 +72,21 @@ void Outputs::RosTopicOutput::accept( object.object_name = r.getLabel(); object.probability = r.getConfidence(); cv::Mat mask = r.getMask(); - for (int h = 0; h < mask.size().height; ++h) - for (int w = 0; w < mask.size().width; ++w) - object.mask_array.push_back(mask.at(h, w)); + for (int h = 0; h < mask.size().height; ++h) { + for (int w = 0; w < mask.size().width; ++w) { + object.mask_array.push_back(mask.at(h, w)); + } + } segmented_objects_topic_->objects_vector.push_back(object); } } - void Outputs::RosTopicOutput::accept( - const std::vector& results) { + const std::vector & results) +{ detected_objects_topic_ = std::make_shared(); object_msgs::msg::ObjectInBox object; - for (auto& r : results) { + for (auto & r : results) { // slog::info << ">"; auto loc = r.getLocation(); object.roi.x_offset = loc.x; @@ -96,9 +99,9 @@ void Outputs::RosTopicOutput::accept( } } - void Outputs::RosTopicOutput::accept( - const std::vector& results) { + const std::vector & results) +{ faces_topic_ = std::make_shared(); object_msgs::msg::ObjectInBox face; @@ -116,8 +119,8 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector& results) { +void Outputs::RosTopicOutput::accept(const std::vector & results) +{ emotions_topic_ = std::make_shared(); people_msgs::msg::Emotion emotion; @@ -133,8 +136,8 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector& results) { +void Outputs::RosTopicOutput::accept(const std::vector & results) +{ age_gender_topic_ = std::make_shared(); people_msgs::msg::AgeGender ag; @@ -147,7 +150,7 @@ void Outputs::RosTopicOutput::accept( ag.roi.height = loc.height; ag.age = 
r.getAge(); auto male_prob = r.getMaleProbability(); - if (male_prob > 0.5){ + if (male_prob > 0.5) { ag.gender = "Male"; ag.gender_confidence = male_prob; } else { @@ -158,8 +161,8 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept( - const std::vector& results) { +void Outputs::RosTopicOutput::accept(const std::vector & results) +{ headpose_topic_ = std::make_shared(); people_msgs::msg::HeadPose hp; @@ -176,7 +179,8 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::handleOutput() { +void Outputs::RosTopicOutput::handleOutput() +{ auto header = getHeader(); if (segmented_objects_topic_ != nullptr) { // slog::info << "publishing faces outputs." << slog::endl; @@ -218,12 +222,13 @@ void Outputs::RosTopicOutput::handleOutput() { /** * TODO: implement the value gain */ -std_msgs::msg::Header Outputs::RosTopicOutput::getHeader() { +std_msgs::msg::Header Outputs::RosTopicOutput::getHeader() +{ std_msgs::msg::Header header; header.frame_id = getPipeline()->getInputDevice()->getFrameID(); std::chrono::high_resolution_clock::time_point tp = std::chrono::high_resolution_clock::now(); int64 ns = tp.time_since_epoch().count(); - header.stamp.sec = ns/1000000000; - header.stamp.nanosec = ns%1000000000; + header.stamp.sec = ns / 1000000000; + header.stamp.nanosec = ns % 1000000000; return header; } diff --git a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/dynamic_vino_lib/src/outputs/rviz_output.cpp index 7b5b191c..6ee1a51f 100644 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ b/dynamic_vino_lib/src/outputs/rviz_output.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of ImageWindowOutput class @@ -27,67 +25,71 @@ #include "dynamic_vino_lib/pipeline.hpp" #include "dynamic_vino_lib/outputs/rviz_output.hpp" -Outputs::RvizOutput::RvizOutput() { +Outputs::RvizOutput::RvizOutput() +{ node_ = rclcpp::Node::make_shared("image_publisher"); image_topic_ = nullptr; - pub_image_ = node_->create_publisher( - "/openvino_toolkit/images", 16); - image_window_output_ = std::make_shared( - "WindowForRviz", 950); + pub_image_ = node_->create_publisher("/openvino_toolkit/images", 16); + image_window_output_ = std::make_shared("WindowForRviz", 950); } -void Outputs::RvizOutput::feedFrame(const cv::Mat& frame) { +void Outputs::RvizOutput::feedFrame(const cv::Mat & frame) +{ image_window_output_->feedFrame(frame); } -void Outputs::RvizOutput::accept( - const std::vector& results) { +void Outputs::RvizOutput::accept(const std::vector & results) +{ image_window_output_->accept(results); } void Outputs::RvizOutput::accept( - const std::vector& results) { + const std::vector & results) +{ image_window_output_->accept(results); } void Outputs::RvizOutput::accept( - const std::vector& results) { + const std::vector & results) +{ image_window_output_->accept(results); } -void Outputs::RvizOutput::accept( - const std::vector& results) { +void Outputs::RvizOutput::accept(const std::vector & results) +{ image_window_output_->accept(results); } -void Outputs::RvizOutput::accept( - const std::vector& results) { +void Outputs::RvizOutput::accept(const std::vector & results) +{ image_window_output_->accept(results); } -void Outputs::RvizOutput::accept( - const std::vector& results) { +void Outputs::RvizOutput::accept(const std::vector & results) +{ image_window_output_->accept(results); } -void Outputs::RvizOutput::handleOutput() { +void Outputs::RvizOutput::handleOutput() +{ image_window_output_->setPipeline(getPipeline()); image_window_output_->decorateFrame(); cv::Mat frame = image_window_output_->getFrame(); std_msgs::msg::Header header = getHeader(); - std::shared_ptr - cv_ptr = std::make_shared(header, "bgr8", frame); + std::shared_ptr cv_ptr = + std::make_shared(header, "bgr8", frame); image_topic_ = cv_ptr->toImageMsg(); pub_image_->publish(image_topic_); } -std_msgs::msg::Header Outputs::RvizOutput::getHeader() { +std_msgs::msg::Header Outputs::RvizOutput::getHeader() +{ std_msgs::msg::Header header; header.frame_id = getPipeline()->getInputDevice()->getFrameID(); std::chrono::high_resolution_clock::time_point tp = std::chrono::high_resolution_clock::now(); int64 ns = tp.time_since_epoch().count(); - header.stamp.sec = ns/1000000000; - header.stamp.nanosec = ns%1000000000; + header.stamp.sec = ns / 1000000000; + header.stamp.nanosec = ns % 1000000000; return header; } diff --git a/dynamic_vino_lib/src/pipeline.cpp b/dynamic_vino_lib/src/pipeline.cpp index 6e042a0f..0392cc8c 100644 --- a/dynamic_vino_lib/src/pipeline.cpp +++ b/dynamic_vino_lib/src/pipeline.cpp @@ -1,44 +1,42 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of Pipeline class * @file pipeline.cpp */ +#include #include #include #include +#include -#include #include "dynamic_vino_lib/inputs/base_input.hpp" #include "dynamic_vino_lib/inputs/image_input.hpp" #include "dynamic_vino_lib/pipeline.hpp" -using namespace InferenceEngine; - -Pipeline::Pipeline(const std::string& name) { +Pipeline::Pipeline(const std::string & name) +{ if (!name.empty()) { params_ = std::make_shared(name); } counter_ = 0; } -bool Pipeline::add(const std::string& name, - std::shared_ptr input_device) { +bool Pipeline::add(const std::string & name, std::shared_ptr input_device) +{ if (name.empty()) { slog::err << "Item name can't be empty!" << slog::endl; return false; @@ -51,10 +49,11 @@ bool Pipeline::add(const std::string& name, return true; } -bool Pipeline::add(const std::string& parent, const std::string& name, - std::shared_ptr output) { - if (parent.empty() || name.empty() || !isLegalConnect(parent, name) || - output == nullptr) { +bool Pipeline::add( + const std::string & parent, const std::string & name, + std::shared_ptr output) +{ + if (parent.empty() || name.empty() || !isLegalConnect(parent, name) || output == nullptr) { slog::err << "ARGuments ERROR when adding output instance!" << slog::endl; return false; } @@ -68,7 +67,8 @@ bool Pipeline::add(const std::string& parent, const std::string& name, return false; } -bool Pipeline::add(const std::string& parent, const std::string& name) { +bool Pipeline::add(const std::string & parent, const std::string & name) +{ if (isLegalConnect(parent, name)) { addConnect(parent, name); return true; @@ -77,19 +77,18 @@ bool Pipeline::add(const std::string& parent, const std::string& name) { return false; } -bool Pipeline::add(const std::string& name, - std::shared_ptr output) { +bool Pipeline::add(const std::string & name, std::shared_ptr output) +{ if (name.empty()) { slog::err << "Item name can't be empty!" << slog::endl; return false; } std::map>::iterator it = - name_to_output_map_.find(name); + name_to_output_map_.find(name); if (it != name_to_output_map_.end()) { - slog::warn << "inferance instance for [" << name - << "] already exists, update it with new instance." - << slog::endl; + slog::warn << "inferance instance for [" << name << + "] already exists, update it with new instance." 
<< slog::endl; } name_to_output_map_[name] = output; output_names_.insert(name); @@ -99,30 +98,31 @@ bool Pipeline::add(const std::string& name, return true; } -void Pipeline::addConnect(const std::string& parent, const std::string& name) { +void Pipeline::addConnect(const std::string & parent, const std::string & name) +{ std::pair::iterator, - std::multimap::iterator> - ret; + std::multimap::iterator> + ret; ret = next_.equal_range(parent); - for (std::multimap::iterator it = ret.first; - it != ret.second; ++it) { + for (std::multimap::iterator it = ret.first; it != ret.second; ++it) { if (it->second == name) { - slog::warn << "The connect [" << parent << "<-->" << name - << "] already exists." << slog::endl; + slog::warn << "The connect [" << parent << "<-->" << name << "] already exists." << + slog::endl; return; } } - slog::info << "Adding connection into pipeline:[" << parent << "<-->" << name - << "]" << slog::endl; + slog::info << "Adding connection into pipeline:[" << parent << "<-->" << name << "]" << + slog::endl; next_.insert({parent, name}); } -bool Pipeline::add(const std::string& parent, const std::string& name, - std::shared_ptr inference) { +bool Pipeline::add( + const std::string & parent, const std::string & name, + std::shared_ptr inference) +{ if (parent.empty() || name.empty() || !isLegalConnect(parent, name)) { - slog::err << "ARGuments ERROR when adding inference instance!" - << slog::endl; + slog::err << "ARGuments ERROR when adding inference instance!" << slog::endl; return false; } @@ -134,20 +134,20 @@ bool Pipeline::add(const std::string& parent, const std::string& name, return false; } -bool Pipeline::add(const std::string& name, - std::shared_ptr inference) { +bool Pipeline::add( + const std::string & name, + std::shared_ptr inference) +{ if (name.empty()) { slog::err << "Item name can't be empty!" << slog::endl; return false; } - std::map>::iterator it = - name_to_detection_map_.find(name); + std::map>::iterator it = + name_to_detection_map_.find(name); if (it != name_to_detection_map_.end()) { - slog::warn << "inferance instance for [" << name - << "] already exists, update it with new instance." - << slog::endl; + slog::warn << "inferance instance for [" << name << + "] already exists, update it with new instance." 
<< slog::endl; } else { ++total_inference_; } @@ -156,25 +156,23 @@ bool Pipeline::add(const std::string& name, return true; } -bool Pipeline::isLegalConnect(const std::string parent, - const std::string child) { +bool Pipeline::isLegalConnect(const std::string parent, const std::string child) +{ int parent_order = getCatagoryOrder(parent); int child_order = getCatagoryOrder(child); - slog::info << "Checking connection into pipeline:[" << parent << "(" - << parent_order << ")" - << "<-->" << child << "(" << child_order << ")" - << "]" << slog::endl; - return (parent_order != kCatagoryOrder_Unknown) && - (child_order != kCatagoryOrder_Unknown) && + slog::info << "Checking connection into pipeline:[" << parent << "(" << parent_order << ")" << + "<-->" << child << "(" << child_order << ")" << + "]" << slog::endl; + return (parent_order != kCatagoryOrder_Unknown) && (child_order != kCatagoryOrder_Unknown) && (parent_order <= child_order); } -int Pipeline::getCatagoryOrder(const std::string name) { +int Pipeline::getCatagoryOrder(const std::string name) +{ int order = kCatagoryOrder_Unknown; if (name == input_device_name_) { order = kCatagoryOrder_Input; - } else if (name_to_detection_map_.find(name) != - name_to_detection_map_.end()) { + } else if (name_to_detection_map_.find(name) != name_to_detection_map_.end()) { order = kCatagoryOrder_Inference; } else if (name_to_output_map_.find(name) != name_to_output_map_.end()) { order = kCatagoryOrder_Output; @@ -183,7 +181,8 @@ int Pipeline::getCatagoryOrder(const std::string name) { return order; } -void Pipeline::runOnce() { +void Pipeline::runOnce() +{ initInferenceCounter(); if (!input_device_->read(&frame_)) { @@ -193,37 +192,34 @@ void Pipeline::runOnce() { } width_ = frame_.cols; height_ = frame_.rows; - for (auto& pair : name_to_output_map_) { + for (auto & pair : name_to_output_map_) { pair.second->feedFrame(frame_); } auto t0 = std::chrono::high_resolution_clock::now(); - for (auto pos = next_.equal_range(input_device_name_); - pos.first != pos.second; ++pos.first) { + for (auto pos = next_.equal_range(input_device_name_); pos.first != pos.second; ++pos.first) { std::string detection_name = pos.first->second; auto detection_ptr = name_to_detection_map_[detection_name]; - detection_ptr->enqueue(frame_, - cv::Rect(width_ / 2, height_ / 2, width_, height_)); - increaseInferenceCounter(); + detection_ptr->enqueue(frame_, cv::Rect(width_ / 2, height_ / 2, width_, height_)); + increaseInferenceCounter(); detection_ptr->submitRequest(); } std::unique_lock lock(counter_mutex_); - cv_.wait(lock, [self = this]() { return self->counter_ == 0; }); + cv_.wait(lock, [self = this]() {return self->counter_ == 0;}); auto t1 = std::chrono::high_resolution_clock::now(); typedef std::chrono::duration> ms; - for (auto& pair : name_to_output_map_) { + for (auto & pair : name_to_output_map_) { // slog::info << "Handling Output ..." 
<< pair.first << slog::endl; pair.second->handleOutput(); } } -void Pipeline::runService(std::string config_path) { - +void Pipeline::runService(std::string config_path) +{ std::cout << "run service once" << std::endl; initInferenceCounter(); - if (!frame_.empty()) - { + if (!frame_.empty()) { frame_.release(); } @@ -233,7 +229,7 @@ void Pipeline::runService(std::string config_path) { } width_ = frame_.cols; height_ = frame_.rows; - for (auto& pair : name_to_output_map_) { + for (auto & pair : name_to_output_map_) { pair.second->feedFrame(frame_); } @@ -241,53 +237,54 @@ void Pipeline::runService(std::string config_path) { std::string detection_name = pos.first->second; auto detection_ptr = name_to_detection_map_[detection_name]; - detection_ptr->enqueue(frame_, - cv::Rect(width_ / 2, height_ / 2, width_, height_)); + detection_ptr->enqueue(frame_, cv::Rect(width_ / 2, height_ / 2, width_, height_)); detection_ptr->SynchronousRequest(); bool fetch_or_not = detection_ptr->fetchResults(); - for (auto& pair : name_to_output_map_) { + for (auto & pair : name_to_output_map_) { detection_ptr->observeOutput(pair.second); } } -void Pipeline::printPipeline() { - for (auto& current_node : next_) { - printf("%s --> %s\n", current_node.first.c_str(), - current_node.second.c_str()); +void Pipeline::printPipeline() +{ + for (auto & current_node : next_) { + printf("%s --> %s\n", current_node.first.c_str(), current_node.second.c_str()); } } -void Pipeline::setCallback() { +void Pipeline::setCallback() +{ #if 0 if (!input_device_->read(&frame_)) { throw std::logic_error("Failed to get frame from cv::VideoCapture"); } width_ = frame_.cols; height_ = frame_.rows; - for (auto& pair : name_to_output_map_) { + for (auto & pair : name_to_output_map_) { pair.second->feedFrame(frame_); } #endif - for (auto& pair : name_to_detection_map_) { + for (auto & pair : name_to_detection_map_) { std::string detection_name = pair.first; std::function callb; - callb = [ detection_name, self = this ]() { - self->callback(detection_name); - return; - }; + callb = [detection_name, self = this]() + { + self->callback(detection_name); + return; + }; pair.second->getEngine()->getRequest()->SetCompletionCallback(callb); } } -void Pipeline::callback(const std::string& detection_name) { +void Pipeline::callback(const std::string & detection_name) +{ // slog::info<<"Hello callback ----> " << detection_name <fetchResults(); // set output - for (auto pos = next_.equal_range(detection_name); pos.first != pos.second; - ++pos.first) { + for (auto pos = next_.equal_range(detection_name); pos.first != pos.second; ++pos.first) { std::string next_name = pos.first->second; // if next is output, then print if (output_names_.find(next_name) != output_names_.end()) { @@ -299,10 +296,8 @@ void Pipeline::callback(const std::string& detection_name) { if (detection_ptr_iter != name_to_detection_map_.end()) { auto next_detection_ptr = detection_ptr_iter->second; for (size_t i = 0; i < detection_ptr->getResultsLength(); ++i) { - const dynamic_vino_lib::Result* prev_result = - detection_ptr->getLocationResult(i); - auto clippedRect = - prev_result->getLocation() & cv::Rect(0, 0, width_, height_); + const dynamic_vino_lib::Result * prev_result = detection_ptr->getLocationResult(i); + auto clippedRect = prev_result->getLocation() & cv::Rect(0, 0, width_, height_); cv::Mat next_input = frame_(clippedRect); next_detection_ptr->enqueue(next_input, prev_result->getLocation()); } @@ -318,17 +313,20 @@ void Pipeline::callback(const std::string& detection_name) { 
cv_.notify_all(); } -void Pipeline::initInferenceCounter() { +void Pipeline::initInferenceCounter() +{ std::lock_guard lk(counter_mutex_); counter_ = 0; cv_.notify_all(); } -void Pipeline::increaseInferenceCounter() { +void Pipeline::increaseInferenceCounter() +{ std::lock_guard lk(counter_mutex_); ++counter_; // slog::info << "counter = " << counter_ << slog::endl; } -void Pipeline::decreaseInferenceCounter() { +void Pipeline::decreaseInferenceCounter() +{ std::lock_guard lk(counter_mutex_); --counter_; // slog::info << "counter = " << counter_ << slog::endl; diff --git a/dynamic_vino_lib/src/pipeline_manager.cpp b/dynamic_vino_lib/src/pipeline_manager.cpp index 570e37a9..71d85e26 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/dynamic_vino_lib/src/pipeline_manager.cpp @@ -1,29 +1,28 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with declaration of Pipeline Manager class * @file pipeline_manager.cpp */ +#include #include #include #include +#include -#include #include "dynamic_vino_lib/factory.hpp" #include "dynamic_vino_lib/inferences/age_gender_detection.hpp" #include "dynamic_vino_lib/inferences/emotions_detection.hpp" @@ -49,8 +48,9 @@ #include "dynamic_vino_lib/pipeline_manager.hpp" #include "dynamic_vino_lib/pipeline_params.hpp" -std::shared_ptr PipelineManager::createPipeline( - const Params::ParamManager::PipelineParams& params) { +std::shared_ptr +PipelineManager::createPipeline(const Params::ParamManager::PipelineParams & params) +{ if (params.name == "") { throw std::logic_error("The name of pipeline won't be empty!"); } @@ -61,8 +61,7 @@ std::shared_ptr PipelineManager::createPipeline( auto inputs = parseInputDevice(params); if (inputs.size() != 1) { - slog::err << "currently one pipeline only supports ONE input." - << slog::endl; + slog::err << "currently one pipeline only supports ONE input." 
<< slog::endl; return nullptr; } for (auto it = inputs.begin(); it != inputs.end(); ++it) { @@ -99,12 +98,11 @@ std::shared_ptr PipelineManager::createPipeline( return pipeline; } - std::map> -PipelineManager::parseInputDevice( - const Params::ParamManager::PipelineParams& params) { +PipelineManager::parseInputDevice(const Params::ParamManager::PipelineParams & params) +{ std::map> inputs; - for (auto& name : params.inputs) { + for (auto & name : params.inputs) { slog::info << "Parsing InputDvice: " << name << slog::endl; std::shared_ptr device = nullptr; if (name == kInputType_RealSenseCamera) { @@ -136,10 +134,10 @@ PipelineManager::parseInputDevice( } std::map> -PipelineManager::parseOutput( - const Params::ParamManager::PipelineParams& params) { +PipelineManager::parseOutput(const Params::ParamManager::PipelineParams & params) +{ std::map> outputs; - for (auto& name : params.outputs) { + for (auto & name : params.outputs) { slog::info << "Parsing Output: " << name << slog::endl; std::shared_ptr object = nullptr; if (name == kOutputTpye_RosTopic) { @@ -163,17 +161,16 @@ PipelineManager::parseOutput( } std::map> -PipelineManager::parseInference( - const Params::ParamManager::PipelineParams& params) { +PipelineManager::parseInference(const Params::ParamManager::PipelineParams & params) +{ /**< update plugins for devices >**/ auto pcommon = Params::ParamManager::getInstance().getCommon(); std::string FLAGS_l = pcommon.custom_cpu_library; std::string FLAGS_c = pcommon.custom_cldnn_library; bool FLAGS_pc = pcommon.enable_performance_count; - std::map> - inferences; - for (auto& infer : params.infers) { + std::map> inferences; + for (auto & infer : params.infers) { if (infer.name.empty() || infer.model.empty()) { continue; } @@ -181,27 +178,21 @@ PipelineManager::parseInference( std::shared_ptr object = nullptr; if (plugins_for_devices_.find(infer.engine) == plugins_for_devices_.end()) { plugins_for_devices_[infer.engine] = - *Factory::makePluginByName(infer.engine, FLAGS_l, FLAGS_c, FLAGS_pc); + *Factory::makePluginByName(infer.engine, FLAGS_l, FLAGS_c, FLAGS_pc); } if (infer.name == kInferTpye_FaceDetection) { object = createFaceDetection(infer); - } else if (infer.name == kInferTpye_AgeGenderRecognition) { object = createAgeGenderRecognition(infer); - } else if (infer.name == kInferTpye_EmotionRecognition) { object = createEmotionRecognition(infer); - } else if (infer.name == kInferTpye_HeadPoseEstimation) { object = createHeadPoseEstimation(infer); - } else if (infer.name == kInferTpye_ObjectDetection) { object = createObjectDetection(infer); - } else if (infer.name == kInferTpye_ObjectSegmentation) { object = createObjectSegmentation(infer); - } else { slog::err << "Invalid inference name: " << infer.name << slog::endl; } @@ -216,16 +207,15 @@ PipelineManager::parseInference( } std::shared_ptr -PipelineManager::createFaceDetection( - const Params::ParamManager::InferenceParams& infer) { - // TODO: add batch size in param_manager - auto face_detection_model = - std::make_shared(infer.model, 1, 1, 1); +PipelineManager::createFaceDetection(const Params::ParamManager::InferenceParams & infer) +{ + // TODO(batch_size): add batch size in param_manager + auto face_detection_model = std::make_shared(infer.model, 1, 1, 1); face_detection_model->modelInit(); - auto face_detection_engine = std::make_shared( - plugins_for_devices_[infer.engine], face_detection_model); + auto face_detection_engine = + std::make_shared(plugins_for_devices_[infer.engine], face_detection_model); auto face_inference_ptr 
= std::make_shared( - 0.5); // TODO: add output_threshold in param_manager + 0.5); // TODO(output_threshold): add output_threshold in param_manager face_inference_ptr->loadNetwork(face_detection_model); face_inference_ptr->loadEngine(face_detection_engine); @@ -233,13 +223,11 @@ PipelineManager::createFaceDetection( } std::shared_ptr -PipelineManager::createAgeGenderRecognition( - const Params::ParamManager::InferenceParams& param) { - auto model = - std::make_shared(param.model, 1, 2, 16); +PipelineManager::createAgeGenderRecognition(const Params::ParamManager::InferenceParams & param) +{ + auto model = std::make_shared(param.model, 1, 2, 16); model->modelInit(); - auto engine = std::make_shared( - plugins_for_devices_[param.engine], model); + auto engine = std::make_shared(plugins_for_devices_[param.engine], model); auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); @@ -248,13 +236,11 @@ PipelineManager::createAgeGenderRecognition( } std::shared_ptr -PipelineManager::createEmotionRecognition( - const Params::ParamManager::InferenceParams& param) { - auto model = - std::make_shared(param.model, 1, 1, 16); +PipelineManager::createEmotionRecognition(const Params::ParamManager::InferenceParams & param) +{ + auto model = std::make_shared(param.model, 1, 1, 16); model->modelInit(); - auto engine = std::make_shared( - plugins_for_devices_[param.engine], model); + auto engine = std::make_shared(plugins_for_devices_[param.engine], model); auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); @@ -263,13 +249,11 @@ PipelineManager::createEmotionRecognition( } std::shared_ptr -PipelineManager::createHeadPoseEstimation( - const Params::ParamManager::InferenceParams& param) { - auto model = - std::make_shared(param.model, 1, 3, 16); +PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceParams & param) +{ + auto model = std::make_shared(param.model, 1, 3, 16); model->modelInit(); - auto engine = std::make_shared( - plugins_for_devices_[param.engine], model); + auto engine = std::make_shared(plugins_for_devices_[param.engine], model); auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); @@ -278,33 +262,33 @@ PipelineManager::createHeadPoseEstimation( } std::shared_ptr -PipelineManager::createObjectDetection( - const Params::ParamManager::InferenceParams& infer) { - // TODO: not implemented yet +PipelineManager::createObjectDetection(const Params::ParamManager::InferenceParams & infer) +{ + // TODO(TBD): not implemented yet return createFaceDetection(infer); } std::shared_ptr -PipelineManager::createObjectSegmentation( - const Params::ParamManager::InferenceParams& infer) { +PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceParams & infer) +{ auto obejct_segmentation_model = - std::make_shared(infer.model, 1, 2, 1); + std::make_shared(infer.model, 1, 2, 1); obejct_segmentation_model->modelInit(); auto obejct_segmentation_engine = std::make_shared( - plugins_for_devices_[infer.engine], obejct_segmentation_model); - auto segmentation_inference_ptr = - std::make_shared(0.5); + plugins_for_devices_[infer.engine], obejct_segmentation_model); + auto segmentation_inference_ptr = std::make_shared(0.5); segmentation_inference_ptr->loadNetwork(obejct_segmentation_model); segmentation_inference_ptr->loadEngine(obejct_segmentation_engine); return segmentation_inference_ptr; } -void PipelineManager::threadPipeline(const char* name) { - PipelineData& p = 
pipelines_[name]; +void PipelineManager::threadPipeline(const char * name) +{ + PipelineData & p = pipelines_[name]; while (p.state == PipelineState_ThreadRunning && p.pipeline != nullptr) { - for (auto& node : p.spin_nodes) { + for (auto & node : p.spin_nodes) { rclcpp::spin_some(node); } p.pipeline->runOnce(); @@ -312,20 +296,21 @@ void PipelineManager::threadPipeline(const char* name) { } } -void PipelineManager::runAll() { +void PipelineManager::runAll() +{ for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { if (it->second.state != PipelineState_ThreadRunning) { it->second.state = PipelineState_ThreadRunning; } if (it->second.thread == nullptr) { - it->second.thread = - std::make_shared(&PipelineManager::threadPipeline, this, - it->second.params.name.c_str()); + it->second.thread = std::make_shared(&PipelineManager::threadPipeline, this, + it->second.params.name.c_str()); } } } -void PipelineManager::stopAll() { +void PipelineManager::stopAll() +{ for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { if (it->second.state == PipelineState_ThreadRunning) { it->second.state = PipelineState_ThreadStopped; @@ -333,10 +318,10 @@ void PipelineManager::stopAll() { } } -void PipelineManager::joinAll() { +void PipelineManager::joinAll() +{ for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - if (it->second.thread != nullptr && - it->second.state == PipelineState_ThreadRunning) { + if (it->second.thread != nullptr && it->second.state == PipelineState_ThreadRunning) { it->second.thread->join(); } } diff --git a/dynamic_vino_lib/src/pipeline_params.cpp b/dynamic_vino_lib/src/pipeline_params.cpp index d64d8112..e5d112ce 100644 --- a/dynamic_vino_lib/src/pipeline_params.cpp +++ b/dynamic_vino_lib/src/pipeline_params.cpp @@ -1,59 +1,40 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/** * @brief a header file with declaration of Pipeline class * @file pipeline.cpp */ +#include #include #include #include - -#include #include "dynamic_vino_lib/pipeline_params.hpp" -const std::string kInputType_Image = "Image"; -const std::string kInputType_Video = "Video"; -const std::string kInputType_StandardCamera = "StandardCamera"; -const std::string kInputType_CameraTopic = "RealSenseCameraTopic"; -const std::string kInputType_RealSenseCamera = "RealSenseCamera"; -const std::string kInputType_ServiceImage = "ServiceImage"; - -const std::string kOutputTpye_RViz = "RViz"; -const std::string kOutputTpye_ImageWindow = "ImageWindow"; -const std::string kOutputTpye_RosTopic = "RosTopic"; -const std::string kOutputTpye_RosService = "RosService"; - -const std::string kInferTpye_FaceDetection = "FaceDetection"; -const std::string kInferTpye_AgeGenderRecognition = "AgeGenderRecognition"; -const std::string kInferTpye_EmotionRecognition = "EmotionRecognition"; -const std::string kInferTpye_HeadPoseEstimation = "HeadPoseEstimation"; -const std::string kInferTpye_ObjectDetection = "ObjectDetection"; -const std::string kInferTpye_ObjectSegmentation = "ObjectSegmentation"; - -PipelineParams::PipelineParams(const std::string& name) { params_.name = name; } +PipelineParams::PipelineParams(const std::string & name) +{ + params_.name = name; +} -PipelineParams::PipelineParams( - const Params::ParamManager::PipelineParams& params) { +PipelineParams::PipelineParams(const Params::ParamManager::PipelineParams & params) +{ params_ = params; } -PipelineParams& PipelineParams::operator=( - const Params::ParamManager::PipelineParams& params) { +PipelineParams & PipelineParams::operator=(const Params::ParamManager::PipelineParams & params) +{ params_.name = params.name; params_.infers = params.infers; params_.inputs = params.inputs; @@ -63,35 +44,37 @@ PipelineParams& PipelineParams::operator=( return *this; } -Params::ParamManager::PipelineParams PipelineParams::getPipeline( - const std::string& name) { +Params::ParamManager::PipelineParams PipelineParams::getPipeline(const std::string & name) +{ return Params::ParamManager::getInstance().getPipeline(name); } -void PipelineParams::update() { +void PipelineParams::update() +{ if (!params_.name.empty()) { params_ = getPipeline(params_.name); } } -void PipelineParams::update( - const Params::ParamManager::PipelineParams& params) { +void PipelineParams::update(const Params::ParamManager::PipelineParams & params) +{ params_ = params; } -bool PipelineParams::isOutputTo(std::string& output) { - if (std::find(params_.outputs.begin(), params_.outputs.end(), output) != - params_.outputs.end()) { +bool PipelineParams::isOutputTo(std::string & output) +{ + if (std::find(params_.outputs.begin(), params_.outputs.end(), output) != params_.outputs.end()) { return true; } return false; } -bool PipelineParams::isGetFps() { +bool PipelineParams::isGetFps() +{ /**< Only "Image" input can't computing FPS >**/ if (params_.inputs.size() == 0) { return false; } - return std::find(params_.inputs.begin(), params_.inputs.end(), - kInputType_Image) == params_.inputs.end(); + return std::find(params_.inputs.begin(), params_.inputs.end(), kInputType_Image) == + params_.inputs.end(); } diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/dynamic_vino_lib/src/services/frame_processing_server.cpp index e84d84bf..14fc80d9 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/dynamic_vino_lib/src/services/frame_processing_server.cpp @@ -1,18 +1,16 
@@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "dynamic_vino_lib/services/frame_processing_server.hpp" @@ -20,6 +18,8 @@ #include #include #include +#include +#include #include "dynamic_vino_lib/pipeline_manager.hpp" #include "dynamic_vino_lib/pipeline.hpp" @@ -27,9 +27,13 @@ #include "dynamic_vino_lib/inputs/image_input.hpp" #include "dynamic_vino_lib/slog.hpp" -namespace vino_service { - -FrameProcessingServer::FrameProcessingServer(const std::string service_name, const std::string config_path) : Node("node_with_service") { +namespace vino_service +{ +FrameProcessingServer::FrameProcessingServer( + const std::string service_name, + const std::string config_path) +: Node("node_with_service") +{ Params::ParamManager::getInstance().parse(config_path); Params::ParamManager::getInstance().print(); auto pcommon = Params::ParamManager::getInstance().getCommon(); @@ -39,123 +43,144 @@ FrameProcessingServer::FrameProcessingServer(const std::string service_name, con throw std::logic_error("Pipeline parameters should be set!"); } - for (auto& p : pipelines) { + for (auto & p : pipelines) { PipelineManager::getInstance().createPipeline(p); } - for (auto& p : pipelines) { + for (auto & p : pipelines) { for (unsigned int i = 0; i < p.infers.size(); i++) { - if (!p.infers[i].name.compare("FaceDetection")) - { - face_service_ = create_service("/detect_face", std::bind(&FrameProcessingServer::cbFaceDetection, this, std::placeholders::_1, std::placeholders::_2)); + if (!p.infers[i].name.compare("FaceDetection")) { + face_service_ = create_service( + "/detect_face", std::bind(&FrameProcessingServer::cbFaceDetection, this, + std::placeholders::_1, std::placeholders::_2)); } else if (!p.infers[i].name.compare("AgeGenderRecognition")) { - age_gender_service_ = create_service("/detect_age_gender", std::bind(&FrameProcessingServer::cbAgeGenderRecognition, this, std::placeholders::_1, std::placeholders::_2)); + age_gender_service_ = create_service( + "/detect_age_gender", std::bind(&FrameProcessingServer::cbAgeGenderRecognition, this, + std::placeholders::_1, std::placeholders::_2)); } else if (!p.infers[i].name.compare("EmotionRecognition")) { - emotion_service_ = create_service("/detect_emotion", std::bind(&FrameProcessingServer::cbEmotionRecognition, this, std::placeholders::_1, std::placeholders::_2)); + emotion_service_ = create_service( + 
"/detect_emotion", std::bind(&FrameProcessingServer::cbEmotionRecognition, this, + std::placeholders::_1, std::placeholders::_2)); } else if (!p.infers[i].name.compare("HeadPoseEstimation")) { - head_pose_service_ = create_service("/detect_head_pose", std::bind(&FrameProcessingServer::cbHeadPoseRecognition, this, std::placeholders::_1, std::placeholders::_2)); + head_pose_service_ = create_service( + "/detect_head_pose", std::bind(&FrameProcessingServer::cbHeadPoseRecognition, this, + std::placeholders::_1, std::placeholders::_2)); } else if (!p.infers[i].name.compare("ObjectDetection")) { - object_service_ = create_service("/detect_object", std::bind(&FrameProcessingServer::cbObjectDetection, this, std::placeholders::_1, std::placeholders::_2)); + object_service_ = create_service( + "/detect_object", std::bind(&FrameProcessingServer::cbObjectDetection, this, + std::placeholders::_1, std::placeholders::_2)); } } } - } -void FrameProcessingServer::cbFaceDetection(const std::shared_ptr request, std::shared_ptr response) { - - /* - std::map pipelines_ = PipelineManager::getInstance().getPipelines(); - for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - PipelineManager::PipelineData& p = pipelines_[it->second.params.name.c_str()]; - //p.pipeline->runService(request->image_path); - //auto output_handle = p.pipeline->getOutputHandle(); - - for (auto& pair : output_handle) { - if (pair.first.compare("FaceDetection")) { - pair.second -> setResponse(response); - response->objects.inference_time_ms = 11.11; - } +void FrameProcessingServer::cbFaceDetection( + const std::shared_ptr request, + std::shared_ptr response) +{ + /* +std::map pipelines_ = +PipelineManager::getInstance().getPipelines(); +for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { + PipelineManager::PipelineData& p = pipelines_[it->second.params.name.c_str()]; + //p.pipeline->runService(request->image_path); + //auto output_handle = p.pipeline->getOutputHandle(); + + for (auto& pair : output_handle) { + if (pair.first.compare("FaceDetection")) { + pair.second -> setResponse(response); + response->objects.inference_time_ms = 11.11; } - */ - //p.pipeline->runService(request->image_path); - response->objects.inference_time_ms = 11.11; + } + */ + // p.pipeline->runService(request->image_path); + response->objects.inference_time_ms = 11.11; //} } -void FrameProcessingServer::cbAgeGenderRecognition(const std::shared_ptr request, std::shared_ptr response) { - +void FrameProcessingServer::cbAgeGenderRecognition( + const std::shared_ptr request, + std::shared_ptr response) +{ std::cout << "inside cb" << std::endl; - std::map pipelines_ = PipelineManager::getInstance().getPipelines(); + std::map pipelines_ = + PipelineManager::getInstance().getPipelines(); for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - PipelineManager::PipelineData& p = pipelines_[it->second.params.name.c_str()]; - p.pipeline->runService(request->image_path); + PipelineManager::PipelineData & p = pipelines_[it->second.params.name.c_str()]; + p.pipeline->runService(request->image_path); auto output_handle = p.pipeline->getOutputHandle(); - for (auto& pair : output_handle) { + for (auto & pair : output_handle) { if (pair.first.compare("AgeGenderRecognition")) { - pair.second -> setResponse(response); + pair.second->setResponse(response); } } - //p.pipeline->runService(request->image_path); + // p.pipeline->runService(request->image_path); } - } -void FrameProcessingServer::cbEmotionRecognition(const std::shared_ptr request, 
std::shared_ptr response) { - +void FrameProcessingServer::cbEmotionRecognition( + const std::shared_ptr request, + std::shared_ptr response) +{ std::cout << "inside cb" << std::endl; - std::map pipelines_ = PipelineManager::getInstance().getPipelines(); + std::map pipelines_ = + PipelineManager::getInstance().getPipelines(); for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - PipelineManager::PipelineData& p = pipelines_[it->second.params.name.c_str()]; - p.pipeline->runService(request->image_path); + PipelineManager::PipelineData & p = pipelines_[it->second.params.name.c_str()]; + p.pipeline->runService(request->image_path); auto output_handle = p.pipeline->getOutputHandle(); - for (auto& pair : output_handle) { + for (auto & pair : output_handle) { if (pair.first.compare("EmotionRecognition")) { - pair.second -> setResponse(response); + pair.second->setResponse(response); } } - //p.pipeline->runService(request->image_path); + // p.pipeline->runService(request->image_path); } - } -void FrameProcessingServer::cbHeadPoseRecognition(const std::shared_ptr request, std::shared_ptr response) { - +void FrameProcessingServer::cbHeadPoseRecognition( + const std::shared_ptr request, + std::shared_ptr response) +{ std::cout << "inside cb" << std::endl; - std::map pipelines_ = PipelineManager::getInstance().getPipelines(); + std::map pipelines_ = + PipelineManager::getInstance().getPipelines(); for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - PipelineManager::PipelineData& p = pipelines_[it->second.params.name.c_str()]; - p.pipeline->runService(request->image_path); + PipelineManager::PipelineData & p = pipelines_[it->second.params.name.c_str()]; + p.pipeline->runService(request->image_path); auto output_handle = p.pipeline->getOutputHandle(); - for (auto& pair : output_handle) { + for (auto & pair : output_handle) { if (pair.first.compare("HeadPoseEstimation")) { - pair.second -> setResponse(response); + pair.second->setResponse(response); } } - //p.pipeline->runService(request->image_path); + // p.pipeline->runService(request->image_path); } - } -void FrameProcessingServer::cbObjectDetection(const std::shared_ptr request, std::shared_ptr response) { - std::map pipelines_ = PipelineManager::getInstance().getPipelines(); +void FrameProcessingServer::cbObjectDetection( + const std::shared_ptr request, + std::shared_ptr response) +{ + std::map pipelines_ = + PipelineManager::getInstance().getPipelines(); for (auto it = pipelines_.begin(); it != pipelines_.end(); ++it) { - PipelineManager::PipelineData& p = pipelines_[it->second.params.name.c_str()]; + PipelineManager::PipelineData & p = pipelines_[it->second.params.name.c_str()]; p.pipeline->runService(request->image_path); auto output_handle = p.pipeline->getOutputHandle(); - for (auto& pair : output_handle) { + for (auto & pair : output_handle) { if (!pair.first.compare("RosService")) { - pair.second -> setResponse(response); + pair.second->setResponse(response); } } } } -} // namespace frame_processing_service +} // namespace vino_service diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index cc54f8c7..ebe494a9 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -1,12 +1,14 @@ # Copyright (c) 2018 Intel Corporation - -# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 - +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +#http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + cmake_minimum_required(VERSION 3.5) project(dynamic_vino_sample) @@ -20,7 +22,7 @@ if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wall -Wextra -Wpedantic) endif() -if (CMAKE_BUILD_TYPE EQUAL "RELEASE") +if(CMAKE_BUILD_TYPE EQUAL "RELEASE") message(STATUS "Create Release Build.") set(CMAKE_CXX_FLAGS "-O2 ${CMAKE_CXX_FLAGS}") else() @@ -74,44 +76,44 @@ if( BUILD_SAMPLE_NAME AND NOT ${BUILD_SAMPLE_NAME} STREQUAL ${PROJECT_NAME} ) return() endif() -set (CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) +set(CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) add_library(cpu_extension SHARED IMPORTED) set_target_properties(cpu_extension PROPERTIES IMPORTED_LOCATION $ENV{CPU_EXTENSION_LIB}) -set (Gflags_lib $ENV{GFLAGS_LIB}) +set(Gflags_lib $ENV{GFLAGS_LIB}) add_library(gflags STATIC IMPORTED) set_target_properties(gflags PROPERTIES IMPORTED_LOCATION $ENV{GFLAGS_LIB}) -file (GLOB MAIN_SRC - ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp - ) +file(GLOB MAIN_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp +) -file (GLOB MAIN_HEADERS - ${CMAKE_CURRENT_SOURCE_DIR}/*.h - ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h - ) +file(GLOB MAIN_HEADERS + ${CMAKE_CURRENT_SOURCE_DIR}/*.h + ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h +) # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj source_group("src" FILES ${MAIN_SRC}) source_group("include" FILES ${MAIN_HEADERS}) -include_directories (${OpenCV_INCLUDE_DIRS}) -include_directories (${PROJECT_SOURCE_DIR}/include) -include_directories (${dynamic_vino_lib_INCLUDE_DIRS}) -include_directories (${vino_param_lib_INCLUDE_DIRS}) -include_directories (${InferenceEngine_INCLUDE_DIRS}) -include_directories (${InferenceEngine_INCLUDE_DIRS}/../samples) -include_directories (${InferenceEngine_INCLUDE_DIRS}/../samples/extension) -include_directories (${InferenceEngine_INCLUDE_DIRS}/../src) -include_directories (${InferenceEngine_INCLUDE_DIRS}/../samples/build/thirdparty/gflags/include) -include_directories (${InferenceEngine_INCLUDE_DIRS}/../build/samples/thirdparty/gflags/include) - -include_directories (${librealsense2_INCLUDE_DIRS}) +include_directories(${OpenCV_INCLUDE_DIRS}) +include_directories(${PROJECT_SOURCE_DIR}/include) +include_directories(${dynamic_vino_lib_INCLUDE_DIRS}) +include_directories(${vino_param_lib_INCLUDE_DIRS}) +include_directories(${InferenceEngine_INCLUDE_DIRS}) +include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples) +include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples/extension) +include_directories(${InferenceEngine_INCLUDE_DIRS}/../src) +include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples/build/thirdparty/gflags/include) +include_directories(${InferenceEngine_INCLUDE_DIRS}/../build/samples/thirdparty/gflags/include) + +include_directories(${librealsense2_INCLUDE_DIRS}) #include_directories (/opt/ros2_openvino/include) # Create library file 
from sources. @@ -128,7 +130,6 @@ ament_target_dependencies(vino_param_sample "vino_param_lib" "dynamic_vino_lib" "rviz_yaml_cpp_vendor" - ) add_executable(pipeline_with_params @@ -208,21 +209,81 @@ install(TARGETS image_object_server install(TARGETS image_object_client DESTINATION lib/${PROJECT_NAME}) -if(BUILD_TESTING) - find_package(ament_lint_auto REQUIRED) - ament_lint_auto_find_test_dependencies() -endif() - # Install param files. -install(DIRECTORY +install(DIRECTORY param DESTINATION share/${PROJECT_NAME}/ ) +# Install param files. +install(DIRECTORY + tests/testParam + DESTINATION share/${PROJECT_NAME}/ +) + # Install launch files. -install(DIRECTORY +install(DIRECTORY launch DESTINATION share/${PROJECT_NAME}/ ) +if(BUILD_TESTING) + find_package(ament_lint_auto REQUIRED) + find_package(InferenceEngine REQUIRED) + find_package(rclcpp REQUIRED) + find_package(vino_param_lib REQUIRED) + find_package(OpenCV REQUIRED) + find_package(object_msgs REQUIRED) + find_package(people_msgs REQUIRED) + find_package(ament_index_cpp REQUIRED) + find_package(yaml_cpp_vendor REQUIRED) + find_package(class_loader REQUIRED) + find_package(dynamic_vino_lib REQUIRED) + ament_lint_auto_find_test_dependencies() + + macro(custom_gtest target) + ament_add_gtest(${target} ${ARGN}) + if(TARGET ${target}) + target_include_directories(${target} PUBLIC + ${${PROJECT_NAME}_INCLUDE_DIRS} + ) + target_link_libraries(${target} + dl + cpu_extension + gflags) + ament_target_dependencies(${target} + "rclcpp" + "InferenceEngine" + "vino_param_lib" + "OpenCV" + "object_msgs" + "people_msgs" + "ament_index_cpp" + "yaml_cpp_vendor" + "class_loader" + "dynamic_vino_lib") + endif() + endmacro() + + custom_gtest(unittest_createPipelineCheck + "tests/lib/unittest_createPipelineCheck.cpp" + TIMEOUT 300) + custom_gtest(unittest_faceDetection + "tests/topic/unittest_faceDetectionCheck.cpp" + TIMEOUT 300) + custom_gtest(unittest_objectDetection + "tests/topic/unittest_objectDetectionCheck.cpp" + TIMEOUT 300) + custom_gtest(unittest_imageDetection + "tests/topic/unittest_imageCheck.cpp" + TIMEOUT 300) + custom_gtest(unittest_segmentation + "tests/topic/unittest_segmentationCheck.cpp" + TIMEOUT 300) + custom_gtest(unittest_objectService + "tests/service/unittest_objectService.cpp" + TIMEOUT 300) + +endif() + ament_package() diff --git a/sample/include/utility.hpp b/sample/include/utility.hpp index 154931e7..9aa1da65 100644 --- a/sample/include/utility.hpp +++ b/sample/include/utility.hpp @@ -1,20 +1,19 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef UTILITY_HPP_ +#define UTILITY_HPP_ #include @@ -33,83 +32,79 @@ static const char help_message[] = "Print a usage message."; /// @brief message for images argument static const char input_choice[] = - "Optional. Input choice (RealSenseCamera, StandardCamera, Video, Image). " \ - "Default value is StandardCamera."; + "Optional. Input choice (RealSenseCamera, StandardCamera, Video, Image). " + "Default value is StandardCamera."; /// @brief message for model argument -static const char face_detection_model_message[] = - "Required. Path to an .xml file with a trained face detection model."; -static const char age_gender_model_message[] = - "Optional. Path to an .xml file with a trained age gender model."; -static const char head_pose_model_message[] = - "Optional. Path to an .xml file with a trained head pose model."; -static const char emotions_model_message[] = - "Optional. Path to an .xml file with a trained emotions model."; +static const char face_detection_model_message[] = "Required. Path to an .xml file with a trained " + "face detection model."; +static const char age_gender_model_message[] = "Optional. Path to an .xml file with a trained age " + "gender model."; +static const char head_pose_model_message[] = "Optional. Path to an .xml file with a trained head " + "pose model."; +static const char emotions_model_message[] = "Optional. Path to an .xml file with a trained " + "emotions model."; static const char object_model_message[] = "Required. Path to an .xml file with a trained model."; /// @brief message for plugin argument static const char plugin_message[] = - "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " \ -"the sample will look for this plugin only."; + "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " + "the sample will look for this plugin only."; /// @brief message for assigning face detection calculation to device static const char target_device_message[] = - "Specify the target device for Face Detection (CPU, GPU, FPGA, or MYRIAD). " \ -"Sample will look for a suitable plugin for device specified."; + "Specify the target device for Face Detection (CPU, GPU, FPGA, or MYRIAD). " + "Sample will look for a suitable plugin for device specified."; /// @brief message for assigning age gender calculation to device static const char target_device_message_ag[] = - "Specify the target device for Age Gender Detection (CPU, GPU, FPGA, or MYRIAD). " \ -"Sample will look for a suitable plugin for device specified."; + "Specify the target device for Age Gender Detection (CPU, GPU, FPGA, or MYRIAD). " + "Sample will look for a suitable plugin for device specified."; /// @brief message for assigning age gender calculation to device static const char target_device_message_hp[] = - "Specify the target device for Head Pose Detection (CPU, GPU, FPGA, or MYRIAD). " \ -"Sample will look for a suitable plugin for device specified."; + "Specify the target device for Head Pose Detection (CPU, GPU, FPGA, or MYRIAD). 
" + "Sample will look for a suitable plugin for device specified."; static const char target_device_message_em[] = - "Specify the target device for Emotions Detection (CPU, GPU, FPGA, or MYRIAD). " \ -"Sample will look for a suitable plugin for device specified."; + "Specify the target device for Emotions Detection (CPU, GPU, FPGA, or MYRIAD). " + "Sample will look for a suitable plugin for device specified."; /// @brief message for number of simultaneously age gender detections using dynamic batch static const char num_batch_ag_message[] = - "Specify number of maximum simultaneously processed faces for Age Gender Detection" \ - "(default is 16)."; + "Specify number of maximum simultaneously processed faces for Age Gender Detection" + "(default is 16)."; /// @brief message for number of simultaneously age gender detections using dynamic batch static const char num_batch_hp_message[] = - "Specify number of maximum simultaneously processed faces for Head Pose Detection" \ - "(default is 16)."; + "Specify number of maximum simultaneously processed faces for Head Pose Detection" + "(default is 16)."; /// @brief message for number of simultaneously age gender detections using dynamic batch static const char num_batch_em_message[] = - "Specify number of maximum simultaneously processed faces for Emotions Detection" \ - "(default is 16)."; + "Specify number of maximum simultaneously processed faces for Emotions Detection" + "(default is 16)."; /// @brief message for performance counters -static const char - performance_counter_message[] = "Enables per-layer performance report."; +static const char performance_counter_message[] = "Enables per-layer performance report."; /// @brief message for clDNN custom kernels desc -static const char - custom_cldnn_message[] = "Required for clDNN (GPU)-targeted custom kernels."\ -"Absolute path to the xml file with the kernels desc."; +static const char custom_cldnn_message[] = "Required for clDNN (GPU)-targeted custom kernels." + "Absolute path to the xml file with the kernels desc."; /// @brief message for user library argument static const char custom_cpu_library_message[] = - "Required for MKLDNN (CPU)-targeted custom layers." \ -"Absolute path to a shared library with the kernels impl."; + "Required for MKLDNN (CPU)-targeted custom layers." 
+ "Absolute path to a shared library with the kernels impl."; /// @brief message for probability threshold argument -static const char - thresh_output_message[] = "Probability threshold for detections."; +static const char thresh_output_message[] = "Probability threshold for detections."; /// @brief message raw output flag static const char raw_output_message[] = "Inference results as raw values."; /// @brief message no wait for keypress after input stream completed -static const char - no_wait_for_keypress_message[] = "No wait for key press in the end."; +static const char no_wait_for_keypress_message[] = "No wait for key press in the end."; /// @brief message no show processed video static const char no_show_processed_video[] = "No show processed video."; @@ -202,7 +197,8 @@ DEFINE_string(config, "", parameter_file_message); /** * \brief This function show a help message */ -static void showUsage() { +static void showUsage() +{ std::cout << std::endl; std::cout << "interactive_face_detection [OPTION]" << std::endl; std::cout << "Options:" << std::endl; @@ -210,60 +206,42 @@ static void showUsage() { std::cout << " -h " << help_message << std::endl; std::cout << " -i " << input_choice << std::endl; std::cout << " -i_path " << input_file_path << std::endl; - std::cout << " -m \"\" " - << face_detection_model_message << std::endl; - std::cout << " -m_ag \"\" " << age_gender_model_message - << std::endl; - std::cout << " -m_hp \"\" " << head_pose_model_message - << std::endl; - std::cout << " -m_em \"\" " << emotions_model_message - << std::endl; - std::cout << " -l \"\" " << custom_cpu_library_message - << std::endl; + std::cout << " -m \"\" " << face_detection_model_message << std::endl; + std::cout << " -m_ag \"\" " << age_gender_model_message << std::endl; + std::cout << " -m_hp \"\" " << head_pose_model_message << std::endl; + std::cout << " -m_em \"\" " << emotions_model_message << std::endl; + std::cout << " -l \"\" " << custom_cpu_library_message << std::endl; std::cout << " Or" << std::endl; - std::cout << " -c \"\" " << custom_cldnn_message - << std::endl; - std::cout << " -d \"\" " << target_device_message - << std::endl; - std::cout << " -d_ag \"\" " << target_device_message_ag - << std::endl; - std::cout << " -d_hp \"\" " << target_device_message_hp - << std::endl; - std::cout << " -d_em \"\" " << target_device_message_em - << std::endl; - std::cout << " -n_ag \"\" " << num_batch_ag_message - << std::endl; - std::cout << " -n_hp \"\" " << num_batch_hp_message - << std::endl; - std::cout << " -n_em \"\" " << num_batch_em_message - << std::endl; - std::cout << " -no_wait " << no_wait_for_keypress_message - << std::endl; - std::cout << " -no_show " << no_show_processed_video - << std::endl; - std::cout << " -pc " << performance_counter_message - << std::endl; - std::cout << " -r " << raw_output_message - << std::endl; - std::cout << " -t " << thresh_output_message - << std::endl; + std::cout << " -c \"\" " << custom_cldnn_message << std::endl; + std::cout << " -d \"\" " << target_device_message << std::endl; + std::cout << " -d_ag \"\" " << target_device_message_ag << std::endl; + std::cout << " -d_hp \"\" " << target_device_message_hp << std::endl; + std::cout << " -d_em \"\" " << target_device_message_em << std::endl; + std::cout << " -n_ag \"\" " << num_batch_ag_message << std::endl; + std::cout << " -n_hp \"\" " << num_batch_hp_message << std::endl; + std::cout << " -n_em \"\" " << num_batch_em_message << std::endl; + std::cout << " -no_wait " << no_wait_for_keypress_message << 
std::endl; + std::cout << " -no_show " << no_show_processed_video << std::endl; + std::cout << " -pc " << performance_counter_message << std::endl; + std::cout << " -r " << raw_output_message << std::endl; + std::cout << " -t " << thresh_output_message << std::endl; } -static void showUsageForParam() { +static void showUsageForParam() +{ std::cout << std::endl; std::cout << "vino_param_sample [OPTION]" << std::endl; std::cout << "Options:" << std::endl; std::cout << std::endl; std::cout << " -h " << help_message << std::endl; - std::cout << " -config \"\" " << parameter_file_message - << std::endl; + std::cout << " -config \"\" " << parameter_file_message << std::endl; } - /** * \brief This function show a help message */ -static void showUsageForObjectDetection() { +static void showUsageForObjectDetection() +{ std::cout << std::endl; std::cout << "async_object_detection_ssd [OPTION]" << std::endl; std::cout << "Options:" << std::endl; @@ -278,4 +256,6 @@ static void showUsageForObjectDetection() { std::cout << " -pc " << performance_counter_message << std::endl; std::cout << " -r " << raw_output_message << std::endl; std::cout << " -t " << thresh_output_message << std::endl; -} \ No newline at end of file +} + +#endif // UTILITY_HPP_ diff --git a/sample/launch/image_object_server.launch.py b/sample/launch/image_object_server.launch.py new file mode 100644 index 00000000..2d3bbbba --- /dev/null +++ b/sample/launch/image_object_server.launch.py @@ -0,0 +1,33 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + + +def generate_launch_description(): + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'image_object_server.yaml') + return LaunchDescription([ + # Openvino detection + launch_ros.actions.Node( + package='dynamic_vino_sample', node_executable='image_object_server', + arguments=['-config', default_yaml], + output='screen'), + ]) diff --git a/sample/launch/image_object_server_oss.launch.py b/sample/launch/image_object_server_oss.launch.py new file mode 100644 index 00000000..35d2edef --- /dev/null +++ b/sample/launch/image_object_server_oss.launch.py @@ -0,0 +1,33 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + + +def generate_launch_description(): + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'image_object_server_oss.yaml') + return LaunchDescription([ + # Openvino detection + launch_ros.actions.Node( + package='dynamic_vino_sample', node_executable='image_object_server', + arguments=['-config', default_yaml], + output='screen'), + ]) diff --git a/sample/launch/pipeline_image.launch.py b/sample/launch/pipeline_image.launch.py index 52dfaf02..27f9bdec 100644 --- a/sample/launch/pipeline_image.launch.py +++ b/sample/launch/pipeline_image.launch.py @@ -22,10 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_image.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_image.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], @@ -33,11 +35,12 @@ def generate_launch_description(): ('/openvino_toolkit/faces', '/ros2_openvino_toolkit/face_detection'), ('/openvino_toolkit/emotions', '/ros2_openvino_toolkit/emotions_recognition'), ('/openvino_toolkit/headposes', '/ros2_openvino_toolkit/headposes_estimation'), - ('/openvino_toolkit/age_genders', '/ros2_openvino_toolkit/age_genders_Recognition'), + ('/openvino_toolkit/age_genders', + '/ros2_openvino_toolkit/age_genders_Recognition'), ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/launch/pipeline_image_oss.launch.py b/sample/launch/pipeline_image_oss.launch.py index 7ca5cd87..f4bdb383 100644 --- a/sample/launch/pipeline_image_oss.launch.py +++ b/sample/launch/pipeline_image_oss.launch.py @@ -22,10 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_image_oss.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_image_oss.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], @@ -33,11 +35,12 @@ def generate_launch_description(): ('/openvino_toolkit/faces', '/ros2_openvino_toolkit/face_detection'), ('/openvino_toolkit/emotions', '/ros2_openvino_toolkit/emotions_recognition'), ('/openvino_toolkit/headposes', '/ros2_openvino_toolkit/headposes_estimation'), - ('/openvino_toolkit/age_genders', '/ros2_openvino_toolkit/age_genders_Recognition'), + 
('/openvino_toolkit/age_genders', + '/ros2_openvino_toolkit/age_genders_Recognition'), ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/launch/pipeline_object.launch.py b/sample/launch/pipeline_object.launch.py index b8e3f43d..52c5a466 100644 --- a/sample/launch/pipeline_object.launch.py +++ b/sample/launch/pipeline_object.launch.py @@ -22,10 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_object.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_object.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], @@ -34,7 +36,7 @@ def generate_launch_description(): ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/launch/pipeline_object_oss.launch.py b/sample/launch/pipeline_object_oss.launch.py index 8519c298..509b4113 100644 --- a/sample/launch/pipeline_object_oss.launch.py +++ b/sample/launch/pipeline_object_oss.launch.py @@ -22,10 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_object_oss.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_object_oss.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], @@ -34,7 +36,7 @@ def generate_launch_description(): ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/launch/pipeline_object_oss_topic.launch.py b/sample/launch/pipeline_object_oss_topic.launch.py index 8158da7c..47557a31 100644 --- a/sample/launch/pipeline_object_oss_topic.launch.py +++ b/sample/launch/pipeline_object_oss_topic.launch.py @@ -22,12 +22,15 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_object_oss_topic.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_object_oss_topic.yaml') + default_rviz = 
os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ # Realsense launch_ros.actions.Node( - package='realsense_ros2_camera', node_executable='realsense_ros2_camera',output='screen'), + package='realsense_ros2_camera', node_executable='realsense_ros2_camera', + output='screen'), # Openvino Detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', diff --git a/sample/launch/pipeline_object_topic.launch.py b/sample/launch/pipeline_object_topic.launch.py index 0da02d1d..cd8e9889 100644 --- a/sample/launch/pipeline_object_topic.launch.py +++ b/sample/launch/pipeline_object_topic.launch.py @@ -22,14 +22,17 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_object_topic.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_object_topic.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #realsense + # Realsense launch_ros.actions.Node( - package='realsense_ros2_camera', node_executable='realsense_ros2_camera',output='screen'), + package='realsense_ros2_camera', node_executable='realsense_ros2_camera', + output='screen'), - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], @@ -39,7 +42,7 @@ def generate_launch_description(): ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/launch/pipeline_people_myriad.launch.py b/sample/launch/pipeline_people_myriad.launch.py index 00b0e790..ad0ed619 100644 --- a/sample/launch/pipeline_people_myriad.launch.py +++ b/sample/launch/pipeline_people_myriad.launch.py @@ -22,10 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_people_myriad.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_people_myriad.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], @@ -33,11 +35,12 @@ def generate_launch_description(): ('/openvino_toolkit/faces', '/ros2_openvino_toolkit/face_detection'), ('/openvino_toolkit/emotions', '/ros2_openvino_toolkit/emotions_recognition'), ('/openvino_toolkit/headposes', '/ros2_openvino_toolkit/headposes_estimation'), - ('/openvino_toolkit/age_genders', '/ros2_openvino_toolkit/age_genders_Recognition'), + ('/openvino_toolkit/age_genders', + '/ros2_openvino_toolkit/age_genders_Recognition'), ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( 
package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/launch/pipeline_people_oss.launch.py b/sample/launch/pipeline_people_oss.launch.py index 6f8b3d9d..79afc1ec 100644 --- a/sample/launch/pipeline_people_oss.launch.py +++ b/sample/launch/pipeline_people_oss.launch.py @@ -22,10 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_people_oss.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_people_oss.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], @@ -33,11 +35,12 @@ def generate_launch_description(): ('/openvino_toolkit/faces', '/ros2_openvino_toolkit/face_detection'), ('/openvino_toolkit/emotions', '/ros2_openvino_toolkit/emotions_recognition'), ('/openvino_toolkit/headposes', '/ros2_openvino_toolkit/headposes_estimation'), - ('/openvino_toolkit/age_genders', '/ros2_openvino_toolkit/age_genders_Recognition'), + ('/openvino_toolkit/age_genders', + '/ros2_openvino_toolkit/age_genders_Recognition'), ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/launch/pipeline_segmentation.launch.py b/sample/launch/pipeline_segmentation.launch.py index a779a664..9aaddc38 100644 --- a/sample/launch/pipeline_segmentation.launch.py +++ b/sample/launch/pipeline_segmentation.launch.py @@ -22,24 +22,28 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_segmentation.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_segmentation.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #realsense + # Realsense launch_ros.actions.Node( - package='realsense_ros2_camera', node_executable='realsense_ros2_camera',output='screen'), + package='realsense_ros2_camera', node_executable='realsense_ros2_camera', + output='screen'), - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), - ('/openvino_toolkit/segmented_obejcts', '/ros2_openvino_toolkit/segmented_obejcts'), + ('/openvino_toolkit/segmented_obejcts', + '/ros2_openvino_toolkit/segmented_obejcts'), ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/launch/pipeline_video.launch.py 
b/sample/launch/pipeline_video.launch.py index e031693e..5db5ccec 100644 --- a/sample/launch/pipeline_video.launch.py +++ b/sample/launch/pipeline_video.launch.py @@ -22,19 +22,22 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', 'pipeline_video.yaml'); - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', 'rviz/default.rviz'); + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 'pipeline_video.yaml') + default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 'rviz/default.rviz') return LaunchDescription([ - #openvino detection + # Openvino detection launch_ros.actions.Node( package='dynamic_vino_sample', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ - ('/openvino_toolkit/segmented_obejcts', '/ros2_openvino_toolkit/segmented_obejcts'), + ('/openvino_toolkit/segmented_obejcts', + '/ros2_openvino_toolkit/segmented_obejcts'), ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], output='screen'), - #rviz + # Rviz launch_ros.actions.Node( package='rviz2', node_executable='rviz2', output='screen', arguments=['--display-config', default_rviz]), diff --git a/sample/package.xml b/sample/package.xml index d2e139b5..5a734053 100644 --- a/sample/package.xml +++ b/sample/package.xml @@ -13,13 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. --> - + dynamic_vino_sample 0.3.0 a ROS2 wrapper package for Intel OpenVINO - Weizhi Liu - Chao Li - Hongkun Chen Weizhi Liu Chao Li Apache License 2.0 @@ -56,6 +53,7 @@ limitations under the License. ament_lint_auto ament_lint_common + ament_cmake_gtest ament_cmake diff --git a/sample/param/image_object_server.yaml b/sample/param/image_object_server.yaml index aacd8e67..ace5d1c6 100644 --- a/sample/param/image_object_server.yaml +++ b/sample/param/image_object_server.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [Image] infers: - name: ObjectDetection - model: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/person-vehicle-bike-detection-crossroad-0078/FP32/person-vehicle-bike-detection-crossroad-0078.xml + model: /opt/intel/computer_vision_sdk/deployment_tools/model_downloader/object_detection/common/ssd/300/caffe/output/ssd300.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/src/image_object_client.cpp b/sample/src/image_object_client.cpp index e3863e8d..e97ab81a 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -1,39 +1,39 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -#include "dynamic_vino_lib/services/frame_processing_server.hpp" #include #include #include #include +#include +#include + +#include "dynamic_vino_lib/services/frame_processing_server.hpp" -int main(int argc, char ** argv) { +int main(int argc, char ** argv) +{ rclcpp::init(argc, argv); auto node = rclcpp::Node::make_shared("service_example"); if (argc != 2) { - RCLCPP_INFO(node->get_logger(), - "Usage: ros2 run dynamic_vino_sample image_object_client" + RCLCPP_INFO(node->get_logger(), "Usage: ros2 run dynamic_vino_sample image_object_client" ""); return -1; } std::string image_path = argv[1]; - auto client = - node->create_client("/detect_object"); + auto client = node->create_client("/detect_object"); auto request = std::make_shared(); request->image_path = image_path; @@ -47,7 +47,8 @@ int main(int argc, char ** argv) { } auto result = client->async_send_request(request); - if (rclcpp::spin_until_future_complete(node, result) == rclcpp::executor::FutureReturnCode::SUCCESS) + if (rclcpp::spin_until_future_complete(node, result) == + rclcpp::executor::FutureReturnCode::SUCCESS) { auto srv = result.get(); diff --git a/sample/src/image_object_server.cpp b/sample/src/image_object_server.cpp index a6aae111..2ea5fa7b 100644 --- a/sample/src/image_object_server.cpp +++ b/sample/src/image_object_server.cpp @@ -1,23 +1,22 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#include #include #include #include +#include #include "dynamic_vino_lib/pipeline_manager.hpp" #include "dynamic_vino_lib/services/frame_processing_server.hpp" @@ -28,23 +27,44 @@ #include "gflags/gflags.h" #include "inference_engine.hpp" #include "extension/ext_list.hpp" +#include "utility.hpp" + +bool parseAndCheckCommandLine(int argc, char ** argv) +{ + // -----Parsing and validation of input args--------------------------- + gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true); + if (FLAGS_h) { + showUsageForParam(); + return false; + } + + return true; +} + +std::string getConfigPath(int argc, char * argv[]) +{ + if (parseAndCheckCommandLine(argc, argv)) { + if (!FLAGS_config.empty()) { + return FLAGS_config; + } + } -std::string getConfigPath(int argc, char * argv[]) { std::string content; std::string prefix_path; - ament_index_cpp::get_resource("packages", "dynamic_vino_sample", content, - &prefix_path); - //return prefix_path + "/share/dynamic_vino_sample/param/image_object_server.yaml"; - return prefix_path + "/share/dynamic_vino_sample/param/" + argv[1]; + ament_index_cpp::get_resource("packages", "dynamic_vino_sample", content, &prefix_path); + // slog::info << "prefix_path=" << prefix_path << slog::endl; + return prefix_path + "/share/dynamic_vino_sample/param/image_object_server.yaml"; } -int main(int argc, char ** argv) { +int main(int argc, char ** argv) +{ rclcpp::init(argc, argv); std::string config_path = getConfigPath(argc, argv); try { - auto node = std::make_shared("frame_processing_server", config_path); + auto node = std::make_shared("frame_processing_server", + config_path); rclcpp::spin(node); } catch (...) { std::cout << "[ERROR] [frame_processing_server]: " << diff --git a/sample/src/image_people_client.cpp b/sample/src/image_people_client.cpp index c45a7599..604f37a3 100644 --- a/sample/src/image_people_client.cpp +++ b/sample/src/image_people_client.cpp @@ -1,47 +1,47 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
-#include "dynamic_vino_lib/services/frame_processing_server.hpp" #include #include #include +#include +#include + +#include "dynamic_vino_lib/services/frame_processing_server.hpp" -int main(int argc, char ** argv) { +int main(int argc, char ** argv) +{ rclcpp::init(argc, argv); std::cout << "object client" << std::endl; auto node = rclcpp::Node::make_shared("service_example"); - + std::string image_path = "/home/intel/Pictures/timg.jpeg"; std::cout << "******0000" << std::endl; - auto client = - node->create_client("/detect_face"); + auto client = node->create_client("/detect_face"); std::cout << "******face client created" << std::endl; auto request = std::make_shared(); std::cout << "******face request created" << std::endl; - auto client = - node->create_client("/detect_face"); + auto client = node->create_client("/detect_face"); std::cout << "******face client created" << std::endl; auto request = std::make_shared(); std::cout << "******face request created" << std::endl; - //request->image_path = argv[1]; + // request->image_path = argv[1]; request->image_path = image_path; while (!client->wait_for_service(std::chrono::seconds(1))) { @@ -53,16 +53,16 @@ int main(int argc, char ** argv) { } auto result = client->async_send_request(request); - + std::cout << "******result got" << std::endl; - if (rclcpp::spin_until_future_complete(node, result) == rclcpp::executor::FutureReturnCode::SUCCESS) + if (rclcpp::spin_until_future_complete(node, result) == + rclcpp::executor::FutureReturnCode::SUCCESS) { auto srv = result.get(); + std::cout << "******srv got" << std::endl; - std::cout << "******srv got" << std::endl; - - std::cout << "***********" << srv->objects.inference_time_ms << std::endl; + std::cout << "***********" << srv->objects.inference_time_ms << std::endl; } } diff --git a/sample/src/image_server.cpp b/sample/src/image_server.cpp index 500f08d4..b1f7a346 100644 --- a/sample/src/image_server.cpp +++ b/sample/src/image_server.cpp @@ -1,26 +1,24 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "dynamic_vino_lib/services/frame_processing_server.hpp" +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#include #include #include #include +#include +#include "dynamic_vino_lib/services/frame_processing_server.hpp" #include "dynamic_vino_lib/pipeline_manager.hpp" #include "dynamic_vino_lib/pipeline.hpp" #include "dynamic_vino_lib/slog.hpp" @@ -30,22 +28,24 @@ #include "inference_engine.hpp" #include "extension/ext_list.hpp" -std::string getConfigPath(int argc, char * argv[]) { +std::string getConfigPath(int argc, char * argv[]) +{ std::string content; std::string prefix_path; - ament_index_cpp::get_resource("packages", "dynamic_vino_sample", content, - &prefix_path); + ament_index_cpp::get_resource("packages", "dynamic_vino_sample", content, &prefix_path); return prefix_path + "/share/dynamic_vino_sample/param/image_object_server.yaml"; } -int main(int argc, char ** argv) { +int main(int argc, char ** argv) +{ rclcpp::init(argc, argv); std::string config_path = getConfigPath(argc, argv); std::cout << "***config path is " << config_path << std::endl; try { - auto node = std::make_shared("frame_processing_server", config_path); + auto node = std::make_shared("frame_processing_server", + config_path); rclcpp::spin(node); } catch (...) { std::cout << "[ERROR] [frame_processing_server]: " << diff --git a/sample/src/main.cpp b/sample/src/main.cpp deleted file mode 100644 index c8478a1c..00000000 --- a/sample/src/main.cpp +++ /dev/null @@ -1,260 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** -* \brief A sample for this library. This sample performs face detection, - * emotions detection, age gender detection and head pose estimation. 
-* \file sample/main.cpp -*/ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dynamic_vino_lib/common.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/factory.hpp" -#include "dynamic_vino_lib/inferences/age_gender_detection.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/emotions_detection.hpp" -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/inferences/head_pose_detection.hpp" -#include "dynamic_vino_lib/inputs/realsense_camera_topic.hpp" -#include "dynamic_vino_lib/outputs/image_window_output.hpp" -#include "dynamic_vino_lib/outputs/ros_topic_output.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "extension/ext_list.hpp" -#include "gflags/gflags.h" -#include "inference_engine.hpp" -#include "librealsense2/rs.hpp" -#include "opencv2/opencv.hpp" -#include "utility.hpp" - -using namespace InferenceEngine; -using namespace rs2; - -bool parseAndCheckCommandLine(int argc, char** argv) { - // ---------------------------Parsing and validation of input - // args----------------------------- - gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true); - if (FLAGS_h) { - showUsage(); - return false; - } - slog::info << "Parsing input parameters" << slog::endl; - if (FLAGS_i.empty()) { - throw std::logic_error("Parameter -i is not set"); - } - if (!FLAGS_i.compare("Video") && FLAGS_i_path.empty()) { - throw std::logic_error("Parameter -i_path is not set"); - } - if (!FLAGS_i.compare("Image") && FLAGS_i_path.empty()) { - throw std::logic_error("Parameter -i_path is not set"); - } - if (FLAGS_m.empty()) { - throw std::logic_error("Parameter -m is not set"); - } - if (FLAGS_n_ag < 1) { - throw std::logic_error("Parameter -n_ag cannot be 0"); - } - if (FLAGS_n_hp < 1) { - throw std::logic_error("Parameter -n_hp cannot be 0"); - } - return true; -} - -void thread_op(std::shared_ptr node, bool& enable) { - slog::info << "In Input Spin Thread (enable: " << enable << ")" << slog::endl; - while (enable) { - // slog::info << "."; - rclcpp::spin_some(node); - } -} - -int main(int argc, char* argv[]) { - rclcpp::init(argc, argv); - // std::shared_ptr node = - // rclcpp::Node::make_shared("dynamic_vino_sample"); - - try { - std::cout << "InferenceEngine: " << GetInferenceEngineVersion() - << std::endl; - - // ------------------------------ Parsing and validation of input args - // ------------------------- - if (!parseAndCheckCommandLine(argc, argv)) { - return 0; - } - - // --------------------------- 1. 
Load Plugin for inference engine - // ----------------------------- - std::map plugins_for_devices; - std::vector> cmd_options = { - {FLAGS_d, FLAGS_m}, - {FLAGS_d_ag, FLAGS_m_ag}, - {FLAGS_d_hp, FLAGS_m_hp}, - {FLAGS_d_em, FLAGS_m_em}}; - slog::info << "device_FACE:" << FLAGS_d << slog::endl; - slog::info << "model_FACE:" << FLAGS_m << slog::endl; - slog::info << "device_AG:" << FLAGS_d_ag << slog::endl; - slog::info << "model_AG:" << FLAGS_m_ag << slog::endl; - slog::info << "model_HeadPose:" << FLAGS_m_hp << slog::endl; - slog::info << "device_HeadPose:" << FLAGS_d_hp << slog::endl; - - for (auto&& option : cmd_options) { - auto device_name = option.first; - auto network_name = option.second; - if (device_name.empty() || network_name.empty()) { - continue; - } - if (plugins_for_devices.find(device_name) != plugins_for_devices.end()) { - continue; - } - plugins_for_devices[device_name] = - *Factory::makePluginByName(device_name, FLAGS_l, FLAGS_c, FLAGS_pc); - } - - // --------------------------- 2. Generate Input Device and Output - // Device----------------------- - slog::info << "Reading input" << slog::endl; - auto input_ptr = Factory::makeInputDeviceByName(FLAGS_i, FLAGS_i_path); - // auto input_ptr = std::make_shared(); - - // add node handler to input device instance - // input_ptr->setHandler(node); - if (!input_ptr->initialize()) { - throw std::logic_error("Cannot open input file or camera: " + FLAGS_i); - } - std::string window_name = "Results"; - auto output_ptr = std::make_shared(window_name); - - // --------------------------- 3. Generate Inference - // Instance----------------------------------- - // generate face detection inference - auto face_detection_model = - std::make_shared(FLAGS_m, 1, 1, 1); - face_detection_model->modelInit(); - auto face_detection_engine = std::make_shared( - plugins_for_devices[FLAGS_d], face_detection_model); - auto face_inference_ptr = - std::make_shared(FLAGS_t); - face_inference_ptr->loadNetwork(face_detection_model); - face_inference_ptr->loadEngine(face_detection_engine); - - // generate emotions detection inference - auto emotions_detection_model = - std::make_shared(FLAGS_m_em, 1, 1, 16); - emotions_detection_model->modelInit(); - auto emotions_detection_engine = std::make_shared( - plugins_for_devices[FLAGS_d_em], emotions_detection_model); - auto emotions_inference_ptr = - std::make_shared(); - emotions_inference_ptr->loadNetwork(emotions_detection_model); - emotions_inference_ptr->loadEngine(emotions_detection_engine); - - // generate age gender detection inference - auto agegender_detection_model = - std::make_shared(FLAGS_m_ag, 1, 2, 16); - agegender_detection_model->modelInit(); - auto agegender_detection_engine = std::make_shared( - plugins_for_devices[FLAGS_d_ag], agegender_detection_model); - auto agegender_inference_ptr = - std::make_shared(); - agegender_inference_ptr->loadNetwork(agegender_detection_model); - agegender_inference_ptr->loadEngine(agegender_detection_engine); - - // generate head pose estimation inference - - auto headpose_detection_network = - std::make_shared(FLAGS_m_hp, 1, 3, 16); - headpose_detection_network->modelInit(); - auto headpose_detection_engine = std::make_shared( - plugins_for_devices[FLAGS_d_hp], headpose_detection_network); - auto headpose_inference_ptr = - std::make_shared(); - headpose_inference_ptr->loadNetwork(headpose_detection_network); - headpose_inference_ptr->loadEngine(headpose_detection_engine); - - // --------------------------- 4. 
Build Pipeline - // -------------------------------------- - Pipeline pipe; - // pipe.add("video_input", std::move(input_ptr)); - pipe.add("video_input", input_ptr); - pipe.add("video_input", "face_detection", face_inference_ptr); - pipe.add("face_detection", "emotions_detection", emotions_inference_ptr); - pipe.add("face_detection", "age_gender_detection", agegender_inference_ptr); - pipe.add("face_detection", "headpose_detection", headpose_inference_ptr); - pipe.add("emotions_detection", "video_output", output_ptr); - pipe.add("age_gender_detection", "video_output", output_ptr); - pipe.add("headpose_detection", "video_output", output_ptr); - pipe.add("face_detection", "video_output", output_ptr); - auto ros_topic_output_ptr = std::make_shared(); - pipe.add("face_detection", "ros_output", ros_topic_output_ptr); - pipe.add("emotions_detection", "ros_output", ros_topic_output_ptr); - pipe.add("age_gender_detection", "ros_output", ros_topic_output_ptr); - pipe.add("headpose_detection", "ros_output", ros_topic_output_ptr); - pipe.setCallback(); - pipe.printPipeline(); - - // auto node = rclcpp::Node::make_shared("sample"); - // bool enable = true; - // std::thread input_spin(thread_op, input_ptr->getHandler(), - // std::ref(enable)); - - slog::info << "Wait 3s before entering data process..." << slog::endl; - sleep(3); - slog::info << "DONE!" << slog::endl; - - // --------------------------- 5. Run Pipeline - // ----------------------------------------- - auto node = input_ptr->getHandler(); - while (cv::waitKey(1) < 0 && cvGetWindowHandle(window_name.c_str())) { - if (node != nullptr) { - rclcpp::spin_some(node); - } - pipe.runOnce(); - if (!FLAGS_i.compare("Image")) { - cv::waitKey(0); - } - } - - // enable = false; - // input_spin.join(); - - slog::info << "Execution successful" << slog::endl; - return 0; - } catch (const std::exception& error) { - slog::err << error.what() << slog::endl; - return 1; - } catch (...) { - slog::err << "Unknown/internal exception happened." << slog::endl; - return 1; - } -} diff --git a/sample/src/object_detection_with_params.cpp b/sample/src/object_detection_with_params.cpp deleted file mode 100644 index dcc77316..00000000 --- a/sample/src/object_detection_with_params.cpp +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** -* \brief A sample for this library. This sample performs face detection, - * emotions detection, age gender detection and head pose estimation. 
-* \file sample/main.cpp -*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "dynamic_vino_lib/common.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/factory.hpp" -#include "dynamic_vino_lib/inferences/age_gender_detection.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/emotions_detection.hpp" -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/inferences/head_pose_detection.hpp" -#include "dynamic_vino_lib/inputs/realsense_camera_topic.hpp" -#include "dynamic_vino_lib/outputs/image_window_output.hpp" -#include "dynamic_vino_lib/outputs/ros_topic_output.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "extension/ext_list.hpp" -#include "gflags/gflags.h" -#include "inference_engine.hpp" -#include "librealsense2/rs.hpp" -#include "opencv2/opencv.hpp" -#include "utility.hpp" -using namespace InferenceEngine; -using namespace rs2; -bool parseAndCheckCommandLine(int argc, char** argv) { - // ---------------------------Parsing and validation of input - // args----------------------------- - gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true); - if (FLAGS_h) { - showUsageForObjectDetection(); - return false; - } -/* return true; - slog::info << "Parsing input parameters" << slog::endl; - if (FLAGS_i.empty()) { - throw std::logic_error("Parameter -i is not set"); - } - - if (FLAGS_m.empty()) { - throw std::logic_error("Parameter -m is not set"); - } - */ - return true; -} -int main(int argc, char* argv[]) { - rclcpp::init(argc, argv); - std::string content; - std::string prefix_path; - ament_index_cpp::get_resource("packages", "dynamic_vino_sample", content, - &prefix_path); - slog::info << "prefix_path=" << prefix_path << slog::endl; - - try { - std::cout << "InferenceEngine: " << GetInferenceEngineVersion() - << std::endl; - // ------------------------------ Parsing and validation of input args - // ------------------------- - if (!parseAndCheckCommandLine(argc, argv)) { - return 0; - } - if (FLAGS_config.empty()) { - FLAGS_config = - prefix_path + "/share/dynamic_vino_sample/param/pipeline_object.yaml"; - } - Params::ParamManager::getInstance().parse(FLAGS_config); - Params::ParamManager::getInstance().print(); - auto pipelines = Params::ParamManager::getInstance().getPipelines(); - if (pipelines.size() < 1) { - throw std::logic_error("Pipeline parameters should be set!"); - } - - FLAGS_i = pipelines[0].inputs[0]; - FLAGS_m = pipelines[0].infers[0].model; - - // ----------- 1. Load Plugin for inference engine - std::unique_ptr plugin = Factory::makePluginByName( - FLAGS_d, FLAGS_l, FLAGS_c, FLAGS_pc); - - // --------------------------- 2. Generate Input Device and Output - // Device----------------------- - slog::info << "Reading input" << slog::endl; - auto input_ptr = Factory::makeInputDeviceByName(FLAGS_i); - - // add node handler to input device instance - // input_ptr->setHandler(node); - if (!input_ptr->initialize()) { - throw std::logic_error("Cannot open input file or camera: " + FLAGS_i); - } - std::string window_name = "Results"; - auto output_ptr = std::make_shared(window_name); - // --------------------------- 3. 
Generate Inference - // Instance----------------------------------- - // generate face detection inference - auto model = - std::make_shared(FLAGS_m, 1, 1, 1); - model->modelInit(); - auto engine = std::make_shared(*plugin, model); - auto object_detection_ptr = - std::make_shared(FLAGS_t); - object_detection_ptr->loadNetwork(model); - object_detection_ptr->loadEngine(engine); - slog::info << "Pass Inference Prep." << slog::endl; - // ------- 4. Build Pipeline ------------- - Pipeline pipe("object"); - auto pipeline_params = Params::ParamManager::getInstance().getPipeline("object"); - pipe.setParams(pipeline_params); - pipe.add("video_input", input_ptr); - pipe.add("video_input", "object_detection", object_detection_ptr); - pipe.add("object_detection", "video_output", output_ptr); - auto ros_topic_output_ptr = std::make_shared(); - pipe.add("object_detection", "ros_output", ros_topic_output_ptr); - pipe.setCallback(); - pipe.printPipeline(); - - slog::info << "Pass Pipeline Init." << slog::endl; - - // ------- 5. Run Pipeline ----------- - auto node = input_ptr->getHandler(); - while (cv::waitKey(1) < 0 && cvGetWindowHandle(window_name.c_str())) { - if (node != nullptr) { - rclcpp::spin_some(node); - } - pipe.runOnce(); - if (!FLAGS_i.compare("Image")) { - cv::waitKey(0); - } - } - // enable = false; - // input_spin.join(); - slog::info << "Execution successful" << slog::endl; - return 0; - } catch (const std::exception& error) { - slog::err << error.what() << slog::endl; - return 1; - } catch (...) { - slog::err << "Unknown/internal exception happened." << slog::endl; - return 1; - } -} diff --git a/sample/src/parameters.cpp b/sample/src/parameters.cpp index 669a8469..e7f8ff21 100644 --- a/sample/src/parameters.cpp +++ b/sample/src/parameters.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * \brief A sample for vino_param_manager library. 
This sample performs @@ -21,8 +19,9 @@ * \file sample/parameters.cpp */ -#include +#include #include +#include #include #include #include @@ -31,10 +30,10 @@ #include #include #include -#include #include "utility.hpp" -bool parseAndCheckCommandLine(int argc, char** argv) { +bool parseAndCheckCommandLine(int argc, char ** argv) +{ // ------Parsing and validation of inpuu args---------------------- gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true); if (FLAGS_h) { @@ -50,7 +49,8 @@ bool parseAndCheckCommandLine(int argc, char** argv) { return true; } -int main(int argc, char* argv[]) { +int main(int argc, char * argv[]) +{ try { // ------Parsing and validation of input args--------- if (!parseAndCheckCommandLine(argc, argv)) { @@ -62,8 +62,7 @@ int main(int argc, char* argv[]) { slog::info << "print again, should same as above....." << slog::endl; Params::ParamManager::getInstance().print(); - - } catch (const std::exception& error) { + } catch (const std::exception & error) { slog::err << error.what() << slog::endl; return 1; } catch (...) { diff --git a/sample/src/pipeline_with_params.cpp b/sample/src/pipeline_with_params.cpp index 4a09dbd7..f5d1b4bc 100644 --- a/sample/src/pipeline_with_params.cpp +++ b/sample/src/pipeline_with_params.cpp @@ -1,18 +1,16 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * \brief A sample for this library. This sample performs face detection, @@ -20,6 +18,9 @@ * \file sample/main.cpp */ +#include +#include +#include #include #include #include @@ -31,14 +32,11 @@ #include #include #include -#include #include #include #include #include -#include -#include #include "dynamic_vino_lib/pipeline.hpp" #include "dynamic_vino_lib/pipeline_manager.hpp" #include "dynamic_vino_lib/slog.hpp" @@ -49,12 +47,9 @@ #include "opencv2/opencv.hpp" #include "utility.hpp" -using namespace InferenceEngine; -using namespace rs2; - -void signalHandler(int signum) { - slog::warn << "!!!!!!!!!!!Interrupt signal (" << signum - << ") received!!!!!!!!!!!!" << slog::endl; +void signalHandler(int signum) +{ + slog::warn << "!!!!!!!!!!!Interrupt signal (" << signum << ") received!!!!!!!!!!!!" 
<< slog::endl; // cleanup and close up stuff here // terminate program @@ -62,7 +57,8 @@ void signalHandler(int signum) { // exit(signum); } -bool parseAndCheckCommandLine(int argc, char** argv) { +bool parseAndCheckCommandLine(int argc, char ** argv) +{ // -----Parsing and validation of input args--------------------------- gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true); if (FLAGS_h) { @@ -73,7 +69,8 @@ bool parseAndCheckCommandLine(int argc, char** argv) { return true; } -std::string getConfigPath(int argc, char* argv[]) { +std::string getConfigPath(int argc, char * argv[]) +{ if (parseAndCheckCommandLine(argc, argv)) { if (!FLAGS_config.empty()) { return FLAGS_config; @@ -82,21 +79,20 @@ std::string getConfigPath(int argc, char* argv[]) { std::string content; std::string prefix_path; - ament_index_cpp::get_resource("packages", "dynamic_vino_sample", content, - &prefix_path); + ament_index_cpp::get_resource("packages", "dynamic_vino_sample", content, &prefix_path); // slog::info << "prefix_path=" << prefix_path << slog::endl; return prefix_path + "/share/dynamic_vino_sample/param/pipeline_people.yaml"; } -int main(int argc, char* argv[]) { +int main(int argc, char * argv[]) +{ rclcpp::init(argc, argv); // register signal SIGINT and signal handler signal(SIGINT, signalHandler); try { - std::cout << "InferenceEngine: " << GetInferenceEngineVersion() - << std::endl; + std::cout << "InferenceEngine: " << InferenceEngine::GetInferenceEngineVersion() << std::endl; // ----- Parsing and validation of input args----------------------- @@ -111,14 +107,13 @@ int main(int argc, char* argv[]) { throw std::logic_error("Pipeline parameters should be set!"); } // auto createPipeline = PipelineManager::getInstance().createPipeline; - for (auto& p : pipelines) { + for (auto & p : pipelines) { PipelineManager::getInstance().createPipeline(p); } PipelineManager::getInstance().runAll(); PipelineManager::getInstance().joinAll(); - - } catch (const std::exception& error) { + } catch (const std::exception & error) { slog::err << error.what() << slog::endl; return 1; } catch (...) { diff --git a/sample/tests/lib/unittest_createPipelineCheck.cpp b/sample/tests/lib/unittest_createPipelineCheck.cpp new file mode 100644 index 00000000..efe0a64c --- /dev/null +++ b/sample/tests/lib/unittest_createPipelineCheck.cpp @@ -0,0 +1,73 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dynamic_vino_lib/pipeline.hpp" +#include "dynamic_vino_lib/pipeline_manager.hpp" +#include "dynamic_vino_lib/slog.hpp" +#include "extension/ext_list.hpp" +#include "gflags/gflags.h" +#include "inference_engine.hpp" +#include "librealsense2/rs.hpp" +#include "opencv2/opencv.hpp" +#include "utility.hpp" + +std::string getConfigPath() +{ + std::string content; + std::string prefix_path; + ament_index_cpp::get_resource("packages", "dynamic_vino_sample", content, &prefix_path); + return prefix_path + "/share/dynamic_vino_sample/testParam/pipeline_face_test.yaml"; +} + +TEST(UnitTestCheckPipeline, testPipeline) +{ + std::string config_file = getConfigPath(); + EXPECT_TRUE(std::ifstream(config_file).is_open()); + ASSERT_NO_THROW({ + Params::ParamManager::getInstance().parse(config_file); + auto pipelines = Params::ParamManager::getInstance().getPipelines(); + EXPECT_GT(pipelines.size(), 0); + + for (auto & p : pipelines) { + PipelineManager::getInstance().createPipeline(p); + } + }); +} + +int main(int argc, char * argv[]) +{ + testing::InitGoogleTest(&argc, argv); + rclcpp::init(argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/sample/tests/service/unittest_objectService.cpp b/sample/tests/service/unittest_objectService.cpp new file mode 100644 index 00000000..e020e1f2 --- /dev/null +++ b/sample/tests/service/unittest_objectService.cpp @@ -0,0 +1,86 @@ +// Copyright (c) 2017 Intel Corporation. All Rights Reserved +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dynamic_vino_lib/services/frame_processing_server.hpp" + +std::string generate_file_path(std::string path) +{ + std::string base_path = __FILE__; + const std::string filename = "sample/tests/service/unittest_objectService.cpp"; + base_path = base_path.substr(0, base_path.length() - filename.length() - 1); + return base_path + "/" + path; +} + +TEST(UnitTestObject, testObject) +{ + auto node = rclcpp::Node::make_shared("openvino_object_service_test"); + + auto client = node->create_client("detect_object"); + auto request = std::make_shared(); + + std::string buffer = generate_file_path("data/images/car_vihecle.png"); + std::cout << buffer << std::endl; + request->image_path = buffer; + + if (!client->wait_for_service(std::chrono::seconds(20))) { + ASSERT_TRUE(false) << "service not available after waiting"; + } + + auto result = client->async_send_request(request); + + auto ret = rclcpp::spin_until_future_complete(node, result, std::chrono::seconds(5)); + EXPECT_EQ(ret, rclcpp::executor::FutureReturnCode::SUCCESS); + + auto srv = result.get(); + + EXPECT_TRUE(srv->objects.objects_vector.size()); + + for (unsigned int i = 0; i < srv->objects.objects_vector.size(); i++) { + EXPECT_EQ(srv->objects.objects_vector[i].object.object_name, "Car"); + } + + EXPECT_TRUE(srv->objects.objects_vector[0].roi.x_offset > 1100 && + srv->objects.objects_vector[0].roi.x_offset < 1795 && + srv->objects.objects_vector[0].roi.y_offset > 215 && + srv->objects.objects_vector[0].roi.y_offset < 480); + EXPECT_TRUE(srv->objects.objects_vector[1].roi.x_offset > 310 && + srv->objects.objects_vector[1].roi.x_offset < 785 && + srv->objects.objects_vector[1].roi.y_offset > 225 && + srv->objects.objects_vector[1].roi.y_offset < 460); + EXPECT_TRUE(srv->objects.objects_vector[2].roi.x_offset > 195 && + srv->objects.objects_vector[2].roi.x_offset < 405 && + srv->objects.objects_vector[2].roi.y_offset > 220 && + srv->objects.objects_vector[2].roi.y_offset < 345); +} + +int main(int argc, char ** argv) +{ + rclcpp::init(argc, argv); + testing::InitGoogleTest(&argc, argv); + system("ros2 launch dynamic_vino_sample image_object_service_test.launch.py &"); + int ret = RUN_ALL_TESTS(); + system("killall -s SIGINT image_object_server &"); + rclcpp::shutdown(); + return ret; +} diff --git a/sample/tests/testParam/image_object_service_test.launch.py b/sample/tests/testParam/image_object_service_test.launch.py new file mode 100644 index 00000000..df75f103 --- /dev/null +++ b/sample/tests/testParam/image_object_service_test.launch.py @@ -0,0 +1,33 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + + +def generate_launch_description(): + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'testParam', + 'image_object_service_test.yaml') + return LaunchDescription([ + # Openvino detection + launch_ros.actions.Node( + package='dynamic_vino_sample', node_executable='image_object_server', + arguments=['-config', default_yaml], + output='screen'), + ]) diff --git a/sample/tests/testParam/image_object_service_test.yaml b/sample/tests/testParam/image_object_service_test.yaml new file mode 100644 index 00000000..229bf2da --- /dev/null +++ b/sample/tests/testParam/image_object_service_test.yaml @@ -0,0 +1,19 @@ +Pipelines: +- name: object + inputs: [Image] + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/ssd/300/caffe/output/ssd300.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [RosService] + confidence_threshold: 0.2 + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [RosService] + input_path: "/home/intel/Pictures/car.png" + +Common: diff --git a/sample/tests/testParam/pipeline_face_test.launch.py b/sample/tests/testParam/pipeline_face_test.launch.py new file mode 100644 index 00000000..67f2916e --- /dev/null +++ b/sample/tests/testParam/pipeline_face_test.launch.py @@ -0,0 +1,40 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + + +def generate_launch_description(): + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'testParam', + 'pipeline_face_test.yaml') + return LaunchDescription([ + # Openvino detection + launch_ros.actions.Node( + package='dynamic_vino_sample', node_executable='pipeline_with_params', + arguments=['-config', default_yaml], + remappings=[ + ('/openvino_toolkit/faces', '/ros2_openvino_toolkit/face_detection'), + ('/openvino_toolkit/emotions', '/ros2_openvino_toolkit/emotions_recognition'), + ('/openvino_toolkit/headposes', '/ros2_openvino_toolkit/headposes_estimation'), + ('/openvino_toolkit/age_genders', + '/ros2_openvino_toolkit/age_genders_Recognition'), + ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + ]) diff --git a/sample/tests/testParam/pipeline_face_test.yaml b/sample/tests/testParam/pipeline_face_test.yaml new file mode 100644 index 00000000..7de0bcf1 --- /dev/null +++ b/sample/tests/testParam/pipeline_face_test.yaml @@ -0,0 +1,39 @@ +Pipelines: +- name: people + inputs: [StandardCamera] + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml + engine: CPU + label: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/face-detection-adas-0001/FP32/face-detection-adas-0001.labels + batch: 1 + - name: AgeGenderRecognition + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [RosTopic] + confidence_threshold: 0.2 + connects: + - left: StandardCamera + right: [FaceDetection] + - left: FaceDetection + right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, RosTopic] + - left: AgeGenderRecognition + right: [RosTopic] + - left: EmotionRecognition + right: [RosTopic] + - left: HeadPoseEstimation + right: [RosTopic] + +Common: diff --git a/sample/tests/testParam/pipeline_image_test.launch.py b/sample/tests/testParam/pipeline_image_test.launch.py new file mode 100644 index 00000000..f3bbf384 --- /dev/null +++ b/sample/tests/testParam/pipeline_image_test.launch.py @@ -0,0 +1,40 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + + +def generate_launch_description(): + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'testParam', + 'pipeline_image_test.yaml') + return LaunchDescription([ + # Openvino detection + launch_ros.actions.Node( + package='dynamic_vino_sample', node_executable='pipeline_with_params', + arguments=['-config', default_yaml], + remappings=[ + ('/openvino_toolkit/faces', '/ros2_openvino_toolkit/face_detection'), + ('/openvino_toolkit/emotions', '/ros2_openvino_toolkit/emotions_recognition'), + ('/openvino_toolkit/headposes', '/ros2_openvino_toolkit/headposes_estimation'), + ('/openvino_toolkit/age_genders', + '/ros2_openvino_toolkit/age_genders_Recognition'), + ('/openvino_toolkit/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + ]) diff --git a/sample/tests/testParam/pipeline_image_test.yaml b/sample/tests/testParam/pipeline_image_test.yaml new file mode 100644 index 00000000..be19e01d --- /dev/null +++ b/sample/tests/testParam/pipeline_image_test.yaml @@ -0,0 +1,40 @@ +Pipelines: +- name: people + inputs: [Image] + input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml + engine: CPU + label: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/face-detection-adas-0001/FP32/face-detection-adas-0001.labels + batch: 1 + - name: AgeGenderRecognition + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/intel/computer_vision_sdk/deployment_tools/intel_models/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [RosTopic] + confidence_threshold: 0.2 + connects: + - left: Image + right: [FaceDetection] + - left: FaceDetection + right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, RosTopic] + - left: AgeGenderRecognition + right: [RosTopic] + - left: EmotionRecognition + right: [RosTopic] + - left: HeadPoseEstimation + right: [RosTopic] + +Common: diff --git a/sample/tests/testParam/pipeline_object_test.launch.py b/sample/tests/testParam/pipeline_object_test.launch.py new file mode 100644 index 00000000..9f6556bc --- /dev/null +++ 
b/sample/tests/testParam/pipeline_object_test.launch.py @@ -0,0 +1,35 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch the object detection pipeline.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + + +def generate_launch_description(): + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'testParam', + 'pipeline_object_test.yaml') + return LaunchDescription([ + # Openvino detection + launch_ros.actions.Node( + package='dynamic_vino_sample', node_executable='pipeline_with_params', + arguments=['-config', default_yaml], + remappings=[ + ('/openvino_toolkit/detected_objects', '/ros2_openvino_toolkit/detected_objects')], + output='screen'), + ]) diff --git a/sample/tests/testParam/pipeline_object_test.yaml b/sample/tests/testParam/pipeline_object_test.yaml new file mode 100644 index 00000000..1666674e --- /dev/null +++ b/sample/tests/testParam/pipeline_object_test.yaml @@ -0,0 +1,19 @@ +Pipelines: +- name: object + inputs: [StandardCamera] + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/open_model_zoo/model_downloader/object_detection/common/ssd/300/caffe/output/ssd300.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [RosTopic] + confidence_threshold: 0.2 + connects: + - left: StandardCamera + right: [ObjectDetection] + - left: ObjectDetection + right: [RosTopic] + +OpenvinoCommon: + diff --git a/sample/tests/testParam/pipeline_segmentation_test.launch.py b/sample/tests/testParam/pipeline_segmentation_test.launch.py new file mode 100644 index 00000000..d6de7dfd --- /dev/null +++ b/sample/tests/testParam/pipeline_segmentation_test.launch.py @@ -0,0 +1,36 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + + +def generate_launch_description(): + default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'testParam', + 'pipeline_segmentation_test.yaml') + return LaunchDescription([ + # Openvino detection + launch_ros.actions.Node( + package='dynamic_vino_sample', node_executable='pipeline_with_params', + arguments=['-config', default_yaml], + remappings=[ + ('/openvino_toolkit/segmented_obejcts', + '/ros2_openvino_toolkit/segmented_obejcts')], + output='screen'), + ]) diff --git a/sample/tests/testParam/pipeline_segmentation_test.yaml b/sample/tests/testParam/pipeline_segmentation_test.yaml new file mode 100644 index 00000000..980287e4 --- /dev/null +++ b/sample/tests/testParam/pipeline_segmentation_test.yaml @@ -0,0 +1,19 @@ +Pipelines: +- name: segmentation + inputs: [StandardCamera] + infers: + - name: ObjectSegmentation + model: /opt/models/mask_rcnn_inception_v2_coco_2018_01_28/output/frozen_inference_graph.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + outputs: [RosTopic] + confidence_threshold: 0.2 + connects: + - left: StandardCamera + right: [ObjectSegmentation] + - left: ObjectSegmentation + right: [RosTopic] + +OpenvinoCommon: + diff --git a/sample/tests/topic/unittest_faceDetectionCheck.cpp b/sample/tests/topic/unittest_faceDetectionCheck.cpp new file mode 100644 index 00000000..baded03b --- /dev/null +++ b/sample/tests/topic/unittest_faceDetectionCheck.cpp @@ -0,0 +1,201 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dynamic_vino_lib/pipeline.hpp" +#include "dynamic_vino_lib/pipeline_manager.hpp" +#include "dynamic_vino_lib/slog.hpp" +#include "extension/ext_list.hpp" +#include "gflags/gflags.h" +#include "inference_engine.hpp" +#include "librealsense2/rs.hpp" +#include "opencv2/opencv.hpp" +#include "utility.hpp" +static bool face_test_pass = false; +static bool emotion_test_pass = false; +static bool ageGender_test_pass = false; +static bool headPose_test_pass = false; + +template +void wait_for_future( + rclcpp::executor::Executor & executor, std::shared_future & future, + const DurationT & timeout) +{ + using rclcpp::executor::FutureReturnCode; + rclcpp::executor::FutureReturnCode future_ret; + auto start_time = std::chrono::steady_clock::now(); + future_ret = executor.spin_until_future_complete(future, timeout); + auto elapsed_time = std::chrono::steady_clock::now() - start_time; + EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) << + "the usb camera don't publish data to topic\n" << + "future failed to be set after: " << + std::chrono::duration_cast(elapsed_time).count() << + " milliseconds\n"; +} + +TEST(UnitTestFaceDetection, testFaceDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_face_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_faceDetection_callback = + [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { + face_test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub1 = node->create_subscription( + "/openvino_toolkit/detected_objects", openvino_faceDetection_callback, custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(face_test_pass); + } +} + +TEST(UnitTestFaceDetection, testEmotionDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_emotion_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_emotionRecognition_callback = + [&sub_called](const people_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { + emotion_test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub2 = node->create_subscription( + "/ros2_openvino_toolkit/emotions_recognition", openvino_emotionRecognition_callback, + custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(emotion_test_pass); + } +} + +TEST(UnitTestFaceDetection, testageGenderDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_ageGender_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_ageGender_callback = + [&sub_called](const 
people_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { + ageGender_test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub3 = node->create_subscription( + "/ros2_openvino_toolkit/age_genders_Recognition", openvino_ageGender_callback, + custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(ageGender_test_pass); + } +} + +TEST(UnitTestFaceDetection, testheadPoseDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_headPose_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_headPose_callback = + [&sub_called](const people_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { + headPose_test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub4 = node->create_subscription( + "/ros2_openvino_toolkit/headposes_estimation", openvino_headPose_callback, + custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(headPose_test_pass); + } +} + +int main(int argc, char * argv[]) +{ + testing::InitGoogleTest(&argc, argv); + rclcpp::init(argc, argv); + auto offset = std::chrono::seconds(30); + system("ros2 launch dynamic_vino_sample pipeline_face_test.launch.py &"); + int ret = RUN_ALL_TESTS(); + rclcpp::sleep_for(offset); + system("killall -s SIGINT pipeline_with_params &"); + rclcpp::shutdown(); + return ret; +} diff --git a/sample/tests/topic/unittest_imageCheck.cpp b/sample/tests/topic/unittest_imageCheck.cpp new file mode 100644 index 00000000..badc7a3e --- /dev/null +++ b/sample/tests/topic/unittest_imageCheck.cpp @@ -0,0 +1,201 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dynamic_vino_lib/pipeline.hpp" +#include "dynamic_vino_lib/pipeline_manager.hpp" +#include "dynamic_vino_lib/slog.hpp" +#include "extension/ext_list.hpp" +#include "gflags/gflags.h" +#include "inference_engine.hpp" +#include "librealsense2/rs.hpp" +#include "opencv2/opencv.hpp" +#include "utility.hpp" +static bool face_test_pass = false; +static bool emotion_test_pass = false; +static bool ageGender_test_pass = false; +static bool headPose_test_pass = false; + +template +void wait_for_future( + rclcpp::executor::Executor & executor, std::shared_future & future, + const DurationT & timeout) +{ + using rclcpp::executor::FutureReturnCode; + rclcpp::executor::FutureReturnCode future_ret; + auto start_time = std::chrono::steady_clock::now(); + future_ret = executor.spin_until_future_complete(future, timeout); + auto elapsed_time = std::chrono::steady_clock::now() - start_time; + EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) << + "the usb camera don't publish data to topic\n" << + "future failed to be set after: " << + std::chrono::duration_cast(elapsed_time).count() << + " milliseconds\n"; +} + +TEST(UnitTestFaceDetection, testFaceDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_face_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_faceDetection_callback = + [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { + face_test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub1 = node->create_subscription( + "/openvino_toolkit/detected_objects", openvino_faceDetection_callback, custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(face_test_pass); + } +} + +TEST(UnitTestFaceDetection, testEmotionDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_emotion_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_emotionRecognition_callback = + [&sub_called](const people_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { + emotion_test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub2 = node->create_subscription( + "/ros2_openvino_toolkit/emotions_recognition", openvino_emotionRecognition_callback, + custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(emotion_test_pass); + } +} + +TEST(UnitTestFaceDetection, testageGenderDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_ageGender_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_ageGender_callback = + [&sub_called](const 
people_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { + ageGender_test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub3 = node->create_subscription( + "/ros2_openvino_toolkit/age_genders_Recognition", openvino_ageGender_callback, + custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(ageGender_test_pass); + } +} + +TEST(UnitTestFaceDetection, testheadPoseDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_headPose_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_headPose_callback = + [&sub_called](const people_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { + headPose_test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub4 = node->create_subscription( + "/ros2_openvino_toolkit/headposes_estimation", openvino_headPose_callback, + custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(headPose_test_pass); + } +} + +int main(int argc, char * argv[]) +{ + testing::InitGoogleTest(&argc, argv); + rclcpp::init(argc, argv); + auto offset = std::chrono::seconds(30); + system("ros2 launch dynamic_vino_sample pipeline_image_test.launch.py &"); + int ret = RUN_ALL_TESTS(); + rclcpp::sleep_for(offset); + system("killall -s SIGINT pipeline_with_params &"); + rclcpp::shutdown(); + return ret; +} diff --git a/sample/tests/topic/unittest_objectDetectionCheck.cpp b/sample/tests/topic/unittest_objectDetectionCheck.cpp new file mode 100644 index 00000000..5c6377d4 --- /dev/null +++ b/sample/tests/topic/unittest_objectDetectionCheck.cpp @@ -0,0 +1,104 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dynamic_vino_lib/pipeline.hpp" +#include "dynamic_vino_lib/pipeline_manager.hpp" +#include "dynamic_vino_lib/slog.hpp" +#include "extension/ext_list.hpp" +#include "gflags/gflags.h" +#include "inference_engine.hpp" +#include "librealsense2/rs.hpp" +#include "opencv2/opencv.hpp" +#include "utility.hpp" +static bool test_pass = false; + +template +void wait_for_future( + rclcpp::executor::Executor & executor, std::shared_future & future, + const DurationT & timeout) +{ + using rclcpp::executor::FutureReturnCode; + rclcpp::executor::FutureReturnCode future_ret; + auto start_time = std::chrono::steady_clock::now(); + future_ret = executor.spin_until_future_complete(future, timeout); + auto elapsed_time = std::chrono::steady_clock::now() - start_time; + EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) << + "the usb camera don't publish data to topic\n" << + "future failed to be set after: " << + std::chrono::duration_cast(elapsed_time).count() << + " milliseconds\n"; +} + +TEST(UnitTestObjectDetection, testObjectDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_objectDetection_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_faceDetection_callback = + [&sub_called](const object_msgs::msg::ObjectsInBoxes::SharedPtr msg) -> void { + test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub1 = node->create_subscription( + "/ros2_openvino_toolkit/detected_objects", openvino_faceDetection_callback, + custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(test_pass); + } +} + +int main(int argc, char * argv[]) +{ + testing::InitGoogleTest(&argc, argv); + rclcpp::init(argc, argv); + auto offset = std::chrono::seconds(20); + system("ros2 launch dynamic_vino_sample pipeline_object_test.launch.py &"); + int ret = RUN_ALL_TESTS(); + rclcpp::sleep_for(offset); + system("killall -s SIGINT pipeline_with_params &"); + rclcpp::shutdown(); + return ret; +} diff --git a/sample/tests/topic/unittest_segmentationCheck.cpp b/sample/tests/topic/unittest_segmentationCheck.cpp new file mode 100644 index 00000000..3aac18c5 --- /dev/null +++ b/sample/tests/topic/unittest_segmentationCheck.cpp @@ -0,0 +1,105 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dynamic_vino_lib/pipeline.hpp" +#include "dynamic_vino_lib/pipeline_manager.hpp" +#include "dynamic_vino_lib/slog.hpp" +#include "extension/ext_list.hpp" +#include "gflags/gflags.h" +#include "inference_engine.hpp" +#include "librealsense2/rs.hpp" +#include "opencv2/opencv.hpp" +#include "utility.hpp" +static bool test_pass = false; + +template +void wait_for_future( + rclcpp::executor::Executor & executor, std::shared_future & future, + const DurationT & timeout) +{ + using rclcpp::executor::FutureReturnCode; + rclcpp::executor::FutureReturnCode future_ret; + auto start_time = std::chrono::steady_clock::now(); + future_ret = executor.spin_until_future_complete(future, timeout); + auto elapsed_time = std::chrono::steady_clock::now() - start_time; + EXPECT_EQ(FutureReturnCode::SUCCESS, future_ret) << + "the usb camera don't publish data to topic\n" << + "future failed to be set after: " << + std::chrono::duration_cast(elapsed_time).count() << + " milliseconds\n"; +} + +TEST(UnitTestObjectDetection, testObjectDetection) +{ + auto node = rclcpp::Node::make_shared("openvino_segmentation_test"); + rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default; + custom_qos_profile.depth = 16; + std::promise sub_called; + std::shared_future sub_called_future(sub_called.get_future()); + + auto openvino_faceDetection_callback = + [&sub_called](const people_msgs::msg::ObjectsInMasks::SharedPtr msg) -> void { + test_pass = true; + sub_called.set_value(true); + }; + + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(node); + + { + auto sub1 = node->create_subscription( + "/ros2_openvino_toolkit/segmented_obejcts", openvino_faceDetection_callback, + custom_qos_profile); + + executor.spin_once(std::chrono::seconds(0)); + + wait_for_future(executor, sub_called_future, std::chrono::seconds(10)); + + EXPECT_TRUE(test_pass); + } +} + +int main(int argc, char * argv[]) +{ + testing::InitGoogleTest(&argc, argv); + rclcpp::init(argc, argv); + auto offset = std::chrono::seconds(10); + system("ros2 launch dynamic_vino_sample pipeline_segmentation_test.launch.py &"); + int ret = RUN_ALL_TESTS(); + rclcpp::sleep_for(offset); + system("killall -s SIGINT pipeline_with_params &"); + rclcpp::shutdown(); + return ret; +} diff --git a/vino_param_lib/CMakeLists.txt b/vino_param_lib/CMakeLists.txt index f6f2353a..c3cddd8f 100644 --- a/vino_param_lib/CMakeLists.txt +++ b/vino_param_lib/CMakeLists.txt @@ -23,11 +23,11 @@ include_directories(include add_library(${PROJECT_NAME} SHARED src/param_manager.cpp - ) +) ament_target_dependencies(${PROJECT_NAME} "yaml_cpp_vendor" - ) +) if(BUILD_TESTING) find_package(ament_lint_auto REQUIRED) diff --git a/vino_param_lib/include/vino_param_lib/param_manager.hpp b/vino_param_lib/include/vino_param_lib/param_manager.hpp index 02971039..62cebabb 100644 --- a/vino_param_lib/include/vino_param_lib/param_manager.hpp +++ b/vino_param_lib/include/vino_param_lib/param_manager.hpp @@ -1,18 +1,17 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * @brief A header file with declaration for parameter management * @file param_manager.hpp @@ -25,7 +24,8 @@ #include #include -namespace Params { +namespace Params +{ /** * @class ParamManager * @brief This class implements yaml-based parameter management. @@ -40,13 +40,14 @@ namespace Params { */ class ParamManager // singleton { - public: +public: /** * @brief Get the singleton instance of ParamManager class. * The instance will be created when first call. * @return The reference of paramManager class. */ - static ParamManager& getInstance() { + static ParamManager & getInstance() + { static ParamManager manager_; return manager_; } @@ -57,13 +58,15 @@ class ParamManager // singleton */ void print() const; - struct InferenceParams { + struct InferenceParams + { std::string name; std::string engine; std::string model; std::string label; }; - struct PipelineParams { + struct PipelineParams + { std::string name; std::vector infers; std::vector inputs; @@ -71,7 +74,8 @@ class ParamManager // singleton std::multimap connects; std::string input_meta; }; - struct CommonParams { + struct CommonParams + { std::string custom_cpu_library; std::string custom_cldnn_library; bool enable_performance_count = false; @@ -96,7 +100,10 @@ class ParamManager // singleton * @brief Retrieve pipeline parameters. * @return A list of struct PipelineParams storing all pipeline parameters. */ - std::vector getPipelines() const { return pipelines_; } + std::vector getPipelines() const + { + return pipelines_; + } /** * @brief Retrieve the specific pipeline parameters by the given pipeline @@ -104,18 +111,23 @@ class ParamManager // singleton * @param[in] name: the name of the pipeline to be retrieved. * @return The pipeline paratmeters, or throw a loginc error. */ - PipelineParams getPipeline(const std::string& name) const; + PipelineParams getPipeline(const std::string & name) const; /** * @brief Retrieve common parameters. * @return struct CommonParams storing all common parameters. 
*/ - CommonParams getCommon() const { return common_; } + CommonParams getCommon() const + { + return common_; + } - private: - ParamManager() {} - ParamManager(ParamManager const&); - void operator=(ParamManager const&); +private: + ParamManager() + { + } + ParamManager(ParamManager const &); + void operator=(ParamManager const &); std::vector pipelines_; CommonParams common_; diff --git a/vino_param_lib/include/vino_param_lib/slog.hpp b/vino_param_lib/include/vino_param_lib/slog.hpp index 84381342..8017191b 100644 --- a/vino_param_lib/include/vino_param_lib/slog.hpp +++ b/vino_param_lib/include/vino_param_lib/slog.hpp @@ -1,38 +1,39 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /** * @brief a header file with logging facility for common samples * @file log.hpp */ -#ifndef DYNAMIC_VINO_LIB__SLOG_HPP_ -#define DYNAMIC_VINO_LIB__SLOG_HPP_ +#ifndef VINO_PARAM_LIB__SLOG_HPP_ +#define VINO_PARAM_LIB__SLOG_HPP_ #pragma once #include #include -namespace slog { +namespace slog +{ /** * @class LogStreamEndLine * @brief The LogStreamEndLine class implements an end line marker for a log * stream */ -class LogStreamEndLine {}; +class LogStreamEndLine +{ +}; static constexpr LogStreamEndLine endl; @@ -40,18 +41,20 @@ static constexpr LogStreamEndLine endl; * @class LogStream * @brief The LogStream class implements a stream for sample logging */ -class LogStream { +class LogStream +{ std::string _prefix; - std::ostream* _log_stream; + std::ostream * _log_stream; bool _new_line; - public: +public: /** * @brief A constructor. 
Creates an LogStream object * @param prefix The prefix to print */ - LogStream(const std::string& prefix, std::ostream& log_stream) - : _prefix(prefix), _new_line(true) { + LogStream(const std::string & prefix, std::ostream & log_stream) + : _prefix(prefix), _new_line(true) + { _log_stream = &log_stream; } @@ -59,8 +62,9 @@ class LogStream { * @brief A stream output operator to be used within the logger * @param arg Object for serialization in the logger message */ - template - LogStream& operator<<(const T& arg) { + template + LogStream & operator<<(const T & arg) + { if (_new_line) { (*_log_stream) << "[ " << _prefix << " ] "; _new_line = false; @@ -71,7 +75,8 @@ class LogStream { } // Specializing for LogStreamEndLine to support slog::endl - LogStream& operator<<(const LogStreamEndLine& arg) { + LogStream & operator<<(const LogStreamEndLine & arg) + { _new_line = true; (*_log_stream) << std::endl; @@ -84,4 +89,4 @@ static LogStream warn("WARNING", std::cout); static LogStream err("ERROR", std::cerr); } // namespace slog -#endif // DYNAMIC_VINO_LIB__SLOG_HPP_ +#endif // VINO_PARAM_LIB__SLOG_HPP_ diff --git a/vino_param_lib/package.xml b/vino_param_lib/package.xml index 7a201aaa..4563b945 100644 --- a/vino_param_lib/package.xml +++ b/vino_param_lib/package.xml @@ -20,11 +20,9 @@ limitations under the License. vino_param_lib 0.3.0 Library for ROS2 OpenVINO parameter management - Weizhi Liu - Chao Li - Hongkun Chen Weizhi Liu Chao Li + Hongkun Chen Apache 2.0 diff --git a/vino_param_lib/src/param_manager.cpp b/vino_param_lib/src/param_manager.cpp index 6e9ba9a6..caf3bf4e 100644 --- a/vino_param_lib/src/param_manager.cpp +++ b/vino_param_lib/src/param_manager.cpp @@ -1,52 +1,53 @@ -/* - * Copyright (c) 2018 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#include "vino_param_lib/param_manager.hpp" +#include #include #include #include #include #include #include -#include -namespace Params { - -void operator>>(const YAML::Node& node, ParamManager::PipelineParams& pipeline); -void operator>>(const YAML::Node& node, - std::vector& list); -void operator>>(const YAML::Node& node, ParamManager::InferenceParams& infer); -void operator>>(const YAML::Node& node, std::vector& list); -void operator>>(const YAML::Node& node, - std::multimap& connect); -void operator>>(const YAML::Node& node, std::string& str); -void operator>>(const YAML::Node& node, bool& val); -void operator>>(const YAML::Node& node, ParamManager::CommonParams& common); - -#define YAML_PARSE(node, key, val) \ - try { \ - node[key] >> val; \ - } catch (YAML::Exception e) { \ - slog::warn << e.msg << slog::endl; \ - } catch (...) { \ +namespace Params +{ +void operator>>(const YAML::Node & node, ParamManager::PipelineParams & pipeline); +void operator>>(const YAML::Node & node, std::vector & list); +void operator>>(const YAML::Node & node, ParamManager::InferenceParams & infer); +void operator>>(const YAML::Node & node, std::vector & list); +void operator>>(const YAML::Node & node, std::multimap & connect); +void operator>>(const YAML::Node & node, std::string & str); +void operator>>(const YAML::Node & node, bool & val); +void operator>>(const YAML::Node & node, ParamManager::CommonParams & common); + +#define YAML_PARSE(node, key, val) \ + try \ + { \ + node[key] >> val; \ + } \ + catch (YAML::Exception e) \ + { \ + slog::warn << e.msg << slog::endl; \ + } \ + catch (...) \ + { \ slog::warn << "Exception occurs when parsing string." << slog::endl; \ } -void operator>>(const YAML::Node& node, - std::vector& list) { +void operator>>(const YAML::Node & node, std::vector & list) +{ slog::info << "Pipeline size: " << node.size() << slog::endl; for (unsigned i = 0; i < node.size(); i++) { ParamManager::PipelineParams temp; @@ -55,15 +56,16 @@ void operator>>(const YAML::Node& node, } } -void operator>>(const YAML::Node& node, ParamManager::CommonParams& common) { +void operator>>(const YAML::Node & node, ParamManager::CommonParams & common) +{ YAML_PARSE(node, "camera_topic", common.camera_topic) YAML_PARSE(node, "custom_cpu_library", common.custom_cpu_library) YAML_PARSE(node, "custom_cldnn_library", common.custom_cldnn_library) YAML_PARSE(node, "enable_performance_count", common.enable_performance_count) } -void operator>>(const YAML::Node& node, - ParamManager::PipelineParams& pipeline) { +void operator>>(const YAML::Node & node, ParamManager::PipelineParams & pipeline) +{ YAML_PARSE(node, "name", pipeline.name) YAML_PARSE(node, "inputs", pipeline.inputs) YAML_PARSE(node, "infers", pipeline.infers) @@ -73,8 +75,8 @@ void operator>>(const YAML::Node& node, slog::info << "Pipeline Params:name=" << pipeline.name << slog::endl; } -void operator>>(const YAML::Node& node, - std::vector& list) { +void operator>>(const YAML::Node & node, std::vector & list) +{ slog::info << "Inferences size: " << node.size() << slog::endl; for (unsigned i = 0; i < node.size(); i++) { ParamManager::InferenceParams temp_inf; @@ -83,7 +85,8 @@ void operator>>(const YAML::Node& node, } } -void operator>>(const YAML::Node& node, ParamManager::InferenceParams& infer) { +void operator>>(const YAML::Node & node, ParamManager::InferenceParams & infer) +{ YAML_PARSE(node, "name", infer.name) YAML_PARSE(node, "model", infer.model) YAML_PARSE(node, "engine", infer.engine) @@ -91,7 +94,8 @@ void operator>>(const YAML::Node& 
node, ParamManager::InferenceParams& infer) { slog::info << "Inference Params:name=" << infer.name << slog::endl; } -void operator>>(const YAML::Node& node, std::vector& list) { +void operator>>(const YAML::Node & node, std::vector & list) +{ for (unsigned i = 0; i < node.size(); i++) { std::string temp_i; node[i] >> temp_i; @@ -99,43 +103,48 @@ void operator>>(const YAML::Node& node, std::vector& list) { } } -void operator>>(const YAML::Node& node, - std::multimap& connect) { +void operator>>(const YAML::Node & node, std::multimap & connect) +{ for (unsigned i = 0; i < node.size(); i++) { std::string left; node[i]["left"] >> left; std::vector rights; node[i]["right"] >> rights; - for (auto& r : rights) { + for (auto & r : rights) { connect.insert({left, r}); } } } -void operator>>(const YAML::Node& node, std::string& str) { +void operator>>(const YAML::Node & node, std::string & str) +{ str = node.as(); } -void operator>>(const YAML::Node& node, bool& val) { val = node.as(); } +void operator>>(const YAML::Node & node, bool & val) +{ + val = node.as(); +} -void ParamManager::print() const { +void ParamManager::print() const +{ slog::info << "--------parameters DUMP---------------------" << slog::endl; - for (auto& pipeline : pipelines_) { + for (auto & pipeline : pipelines_) { slog::info << "Pipeline: " << pipeline.name << slog::endl; slog::info << "\tInputs: "; - for (auto& i : pipeline.inputs) { + for (auto & i : pipeline.inputs) { slog::info << i.c_str() << ", "; } slog::info << slog::endl; slog::info << "\tOutputs: "; - for (auto& i : pipeline.outputs) { + for (auto & i : pipeline.outputs) { slog::info << i.c_str() << ", "; } slog::info << slog::endl; slog::info << "\tInferences: " << slog::endl; - for (auto& infer : pipeline.infers) { + for (auto & infer : pipeline.infers) { slog::info << "\t\tName: " << infer.name << slog::endl; slog::info << "\t\tModel: " << infer.model << slog::endl; slog::info << "\t\tEngine: " << infer.engine << slog::endl; @@ -143,7 +152,7 @@ void ParamManager::print() const { } slog::info << "\tConnections: " << slog::endl; - for (auto& c : pipeline.connects) { + for (auto & c : pipeline.connects) { slog::info << "\t\t" << c.first << "->" << c.second << slog::endl; } } @@ -151,15 +160,13 @@ void ParamManager::print() const { // Pring Common Info slog::info << "Common:" << slog::endl; slog::info << "\tcamera_topic: " << common_.camera_topic << slog::endl; - slog::info << "\tcustom_cpu_library: " << common_.custom_cpu_library - << slog::endl; - slog::info << "\tcustom_cldnn_library: " << common_.custom_cldnn_library - << slog::endl; - slog::info << "\tenable_performance_count: " - << common_.enable_performance_count << slog::endl; + slog::info << "\tcustom_cpu_library: " << common_.custom_cpu_library << slog::endl; + slog::info << "\tcustom_cldnn_library: " << common_.custom_cldnn_library << slog::endl; + slog::info << "\tenable_performance_count: " << common_.enable_performance_count << slog::endl; } -void ParamManager::parse(std::string path) { +void ParamManager::parse(std::string path) +{ std::ifstream fin(path); if (fin.fail()) { slog::err << "Could not open config file:" << path << slog::endl; @@ -171,18 +178,19 @@ void ParamManager::parse(std::string path) { YAML_PARSE(doc, "Common", common_) } -std::vector ParamManager::getPipelineNames() const { +std::vector ParamManager::getPipelineNames() const +{ std::vector names; - for (auto& p : pipelines_) { + for (auto & p : pipelines_) { names.push_back(p.name); } return names; } -ParamManager::PipelineParams 
ParamManager::getPipeline( - const std::string& name) const { - for (auto& p : pipelines_) { +ParamManager::PipelineParams ParamManager::getPipeline(const std::string & name) const +{ + for (auto & p : pipelines_) { if (p.name == name) { return p; }