diff --git a/.gitignore b/.gitignore
index 4afe45317a..32d750fcd7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,4 @@ modules/java/\.idea/
.scannerwork
build
*.blend1
+**/results/**
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 452ab4c4aa..433a2e4868 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -272,6 +272,28 @@ if(NOT IOS)
include(cmake/VISPDetectPython.cmake)
endif()
+# --- Python Bindings requirements ---
+
+# this avoids non-active conda from getting picked anyway on Windows
+#set(Python_FIND_REGISTRY LAST)
+# Use environment variable PATH to decide preference for Python
+#set(Python_FIND_VIRTUALENV FIRST)
+#set(Python_FIND_STRATEGY LOCATION)
+
+#find_package(Python 3.7 COMPONENTS Interpreter Development) # TODO: use visp function to find python?
+#if(Python_FOUND)
+# set(VISP_PYTHON_BINDINGS_EXECUTABLE "${Python_EXECUTABLE}")
+#endif()
+#find_package(pybind11)
+VP_OPTION(USE_PYBIND11 pybind11 QUIET "Include pybind11 to create Python bindings" "" ON)
+
+#if(pybind11_FOUND)
+# set(VISP_PYBIND11_DIR "${pybind11_DIR}")
+#endif()
+#message("${pybind11_FOUND}")
+# ---
+
+
include_directories(${VISP_INCLUDE_DIR})
#----------------------------------------------------------------------
@@ -413,6 +435,11 @@ VP_OPTION(BUILD_ANDROID_PROJECTS "" "" "Build Android projects providing .apk f
VP_OPTION(BUILD_ANDROID_EXAMPLES "" "" "Build examples for Android platform" "" ON IF ANDROID )
VP_OPTION(INSTALL_ANDROID_EXAMPLES "" "" "Install Android examples" "" OFF IF ANDROID )
+# Build python bindings as an option
+VP_OPTION(BUILD_PYTHON_BINDINGS "" "" "Build Python bindings" "" ON IF (PYTHON3INTERP_FOUND AND USE_PYBIND11) )
+VP_OPTION(BUILD_PYTHON_BINDINGS_DOC "" "" "Build the documentation for the Python bindings" "" ON IF BUILD_PYTHON_BINDINGS )
+
+
# Build demos as an option.
VP_OPTION(BUILD_DEMOS "" "" "Build ViSP demos" "" ON)
# Build tutorials as an option.
@@ -730,10 +757,38 @@ if(DOXYGEN_FOUND)
set(DOXYGEN_USE_MATHJAX "NO")
endif()
+ # HTML version of the doc
+ set(DOXYGEN_GENERATE_HTML "YES")
+ set(DOXYGEN_GENERATE_XML "NO")
+ set(DOXYGEN_GENERATE_TEST_LIST "YES")
+ set(DOXYGEN_QUIET "NO")
+ set(DOXYGEN_INPUTS
+ "${VISP_SOURCE_DIR}/modules"
+ "${VISP_SOURCE_DIR}/example"
+ "${VISP_SOURCE_DIR}/tutorial"
+ "${VISP_SOURCE_DIR}/demo"
+ "${VISP_SOURCE_DIR}/doc"
+ "${VISP_BINARY_DIR}/doc"
+ "${VISP_CONTRIB_MODULES_PATH}"
+ )
+ string (REPLACE ";" " " DOXYGEN_INPUTS "${DOXYGEN_INPUTS}")
configure_file(${VISP_SOURCE_DIR}/doc/config-doxygen.in
${VISP_DOC_DIR}/config-doxygen
@ONLY )
+ # XML version of the doc
+ set(DOXYGEN_GENERATE_HTML "NO")
+ set(DOXYGEN_GENERATE_XML "YES")
+ set(DOXYGEN_GENERATE_TEST_LIST "NO")
+ set(DOXYGEN_QUIET "YES")
+ set(DOXYGEN_INPUTS
+ "${VISP_SOURCE_DIR}/modules"
+ )
+ string (REPLACE ";" " " DOXYGEN_INPUTS "${DOXYGEN_INPUTS}")
+ configure_file(${VISP_SOURCE_DIR}/doc/config-doxygen.in
+ ${VISP_DOC_DIR}/config-doxygen-xml
+ @ONLY )
+
# set vars used in mainpage.dox.in
# - VISP_MAINPAGE_EXTENSION
set(VISP_MAINPAGE_EXTENSION "")
@@ -826,6 +881,8 @@ if(BUILD_JAVA)
endif()
endif()
+
+
if(ANDROID AND ANDROID_EXECUTABLE AND ANT_EXECUTABLE AND (ANT_VERSION VERSION_GREATER 1.7) AND (ANDROID_TOOLS_Pkg_Revision GREATER 13))
SET(CAN_BUILD_ANDROID_PROJECTS TRUE)
else()
@@ -1172,6 +1229,14 @@ if(BUILD_TUTORIALS)
add_subdirectory(tutorial)
vp_add_subdirectories(VISP_CONTRIB_MODULES_PATH tutorial)
endif()
+if(BUILD_APPS)
+ vp_add_subdirectories(VISP_CONTRIB_MODULES_PATH apps)
+endif()
+if(BUILD_PYTHON_BINDINGS)
+ add_subdirectory(modules/python)
+endif()
+
+
# ----------------------------------------------------------------------------
# Make some cmake vars advanced
@@ -1442,7 +1507,8 @@ endif()
# ========================== java ==========================
status("")
-status(" Python (for build):" PYTHON_DEFAULT_AVAILABLE THEN "${PYTHON_DEFAULT_EXECUTABLE}" ELSE "no")
+status(" Python 3:")
+status(" Interpreter:" PYTHON3INTERP_FOUND THEN "${PYTHON3_EXECUTABLE} (ver ${PYTHON3_VERSION_STRING})" ELSE "no")
if(BUILD_JAVA OR BUILD_visp_java)
status("")
@@ -1453,6 +1519,17 @@ if(BUILD_JAVA OR BUILD_visp_java)
endif()
endif()
+# ======================= Python bindings ========================
+status("")
+status(" Python3 bindings:" BUILD_PYTHON_BINDINGS THEN "yes" ELSE "no")
+if(BUILD_PYTHON_BINDINGS)
+ status(" Python3 interpreter:" PYTHON3INTERP_FOUND THEN "${PYTHON3_EXECUTABLE} (ver ${PYTHON3_VERSION_STRING})" ELSE "no")
+ status(" Pybind11:" USE_PYBIND11 THEN "${pybind11_DIR} (${pybind11_VERSION})" ELSE "no")
+ status(" Package version:" "${VISP_PYTHON_PACKAGE_VERSION}")
+ status(" Wrapped modules:" "${VISP_PYTHON_BOUND_MODULES}")
+ status(" Generated input config:" "${VISP_PYTHON_GENERATED_CONFIG_FILE}")
+endif()
+
# ============================ Options ===========================
status("")
status(" Build options: ")
diff --git a/cmake/VISPDetectPython.cmake b/cmake/VISPDetectPython.cmake
index ee0489fee9..7601715c3c 100644
--- a/cmake/VISPDetectPython.cmake
+++ b/cmake/VISPDetectPython.cmake
@@ -85,11 +85,7 @@ if(NOT ${found})
endif()
vp_clear_vars(PYTHONINTERP_FOUND PYTHON_EXECUTABLE PYTHON_VERSION_STRING PYTHON_VERSION_MAJOR PYTHON_VERSION_MINOR PYTHON_VERSION_PATCH)
if(NOT CMAKE_VERSION VERSION_LESS "3.12")
- if(_python_version_major STREQUAL "2")
- set(__PYTHON_PREFIX Python2)
- else()
- set(__PYTHON_PREFIX Python3)
- endif()
+ set(__PYTHON_PREFIX Python3)
find_host_package(${__PYTHON_PREFIX} "${preferred_version}" COMPONENTS Interpreter)
if(${__PYTHON_PREFIX}_EXECUTABLE)
set(PYTHON_EXECUTABLE "${${__PYTHON_PREFIX}_EXECUTABLE}")
@@ -208,9 +204,6 @@ if(NOT ${found})
if(CMAKE_CROSSCOMPILING)
message(STATUS "Cannot probe for Python/Numpy support (because we are cross-compiling ViSP)")
message(STATUS "If you want to enable Python/Numpy support, set the following variables:")
- message(STATUS " PYTHON2_INCLUDE_PATH")
- message(STATUS " PYTHON2_LIBRARIES (optional on Unix-like systems)")
- message(STATUS " PYTHON2_NUMPY_INCLUDE_DIRS")
message(STATUS " PYTHON3_INCLUDE_PATH")
message(STATUS " PYTHON3_LIBRARIES (optional on Unix-like systems)")
message(STATUS " PYTHON3_NUMPY_INCLUDE_DIRS")
@@ -258,7 +251,7 @@ if(NOT ${found})
set(${include_path} "${_include_path}" CACHE INTERNAL "")
set(${include_dir} "${_include_dir}" CACHE PATH "Python include dir")
set(${include_dir2} "${_include_dir2}" CACHE PATH "Python include dir 2")
- set(${packages_path} "${_packages_path}" CACHE PATH "Where to install the python packages.")
+ set(${packages_path} "${_packages_path}" CACHE STRING "Where to install the python packages.")
set(${numpy_include_dirs} ${_numpy_include_dirs} CACHE PATH "Path to numpy headers")
set(${numpy_version} "${_numpy_version}" CACHE INTERNAL "")
endif()
@@ -268,14 +261,6 @@ if(VISP_PYTHON_SKIP_DETECTION)
return()
endif()
-find_python("" "${MIN_VER_PYTHON2}" PYTHON2_LIBRARY PYTHON2_INCLUDE_DIR
- PYTHON2INTERP_FOUND PYTHON2_EXECUTABLE PYTHON2_VERSION_STRING
- PYTHON2_VERSION_MAJOR PYTHON2_VERSION_MINOR PYTHON2LIBS_FOUND
- PYTHON2LIBS_VERSION_STRING PYTHON2_LIBRARIES PYTHON2_LIBRARY
- PYTHON2_DEBUG_LIBRARIES PYTHON2_LIBRARY_DEBUG PYTHON2_INCLUDE_PATH
- PYTHON2_INCLUDE_DIR PYTHON2_INCLUDE_DIR2 PYTHON2_PACKAGES_PATH
- PYTHON2_NUMPY_INCLUDE_DIRS PYTHON2_NUMPY_VERSION)
-
option(VISP_PYTHON3_VERSION "Python3 version" "")
find_python("${VISP_PYTHON3_VERSION}" "${MIN_VER_PYTHON3}" PYTHON3_LIBRARY PYTHON3_INCLUDE_DIR
PYTHON3INTERP_FOUND PYTHON3_EXECUTABLE PYTHON3_VERSION_STRING
@@ -285,31 +270,16 @@ find_python("${VISP_PYTHON3_VERSION}" "${MIN_VER_PYTHON3}" PYTHON3_LIBRARY PYTHO
PYTHON3_INCLUDE_DIR PYTHON3_INCLUDE_DIR2 PYTHON3_PACKAGES_PATH
PYTHON3_NUMPY_INCLUDE_DIRS PYTHON3_NUMPY_VERSION)
-mark_as_advanced(PYTHON2_LIBRARY PYTHON2_INCLUDE_DIR
- PYTHON2INTERP_FOUND PYTHON2_EXECUTABLE PYTHON2_VERSION_STRING
- PYTHON2_VERSION_MAJOR PYTHON2_VERSION_MINOR PYTHON2LIBS_FOUND
- PYTHON2LIBS_VERSION_STRING PYTHON2_LIBRARIES PYTHON2_LIBRARY
- PYTHON2_DEBUG_LIBRARIES PYTHON2_LIBRARY_DEBUG PYTHON2_INCLUDE_PATH
- PYTHON2_INCLUDE_DIR PYTHON2_INCLUDE_DIR2 PYTHON2_PACKAGES_PATH
- PYTHON2_NUMPY_INCLUDE_DIRS PYTHON2_NUMPY_VERSION)
-
-mark_as_advanced(PYTHON3_LIBRARY PYTHON3_INCLUDE_DIR
- PYTHON3INTERP_FOUND PYTHON3_EXECUTABLE PYTHON3_VERSION_STRING
- PYTHON3_VERSION_MAJOR PYTHON3_VERSION_MINOR PYTHON3LIBS_FOUND
- PYTHON3LIBS_VERSION_STRING PYTHON3_LIBRARIES PYTHON3_LIBRARY
- PYTHON3_DEBUG_LIBRARIES PYTHON3_LIBRARY_DEBUG PYTHON3_INCLUDE_PATH
- PYTHON3_INCLUDE_DIR PYTHON3_INCLUDE_DIR2 PYTHON3_PACKAGES_PATH
- PYTHON3_NUMPY_INCLUDE_DIRS PYTHON3_NUMPY_VERSION
- VISP_PYTHON3_VERSION)
-
if(PYTHON_DEFAULT_EXECUTABLE)
set(PYTHON_DEFAULT_AVAILABLE "TRUE")
-elseif(PYTHON2_EXECUTABLE AND PYTHON2INTERP_FOUND)
- # Use Python 2 as default Python interpreter
- set(PYTHON_DEFAULT_AVAILABLE "TRUE")
- set(PYTHON_DEFAULT_EXECUTABLE "${PYTHON2_EXECUTABLE}")
elseif(PYTHON3_EXECUTABLE AND PYTHON3INTERP_FOUND)
- # Use Python 3 as fallback Python interpreter (if there is no Python 2)
set(PYTHON_DEFAULT_AVAILABLE "TRUE")
set(PYTHON_DEFAULT_EXECUTABLE "${PYTHON3_EXECUTABLE}")
endif()
+
+if(PYTHON_DEFAULT_AVAILABLE)
+ execute_process(COMMAND ${PYTHON_DEFAULT_EXECUTABLE} --version
+ OUTPUT_VARIABLE PYTHON_DEFAULT_VERSION
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+  string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" PYTHON_DEFAULT_VERSION "${PYTHON_DEFAULT_VERSION}")
+endif()
diff --git a/cmake/VISPExtraTargets.cmake b/cmake/VISPExtraTargets.cmake
index 19d4ffb6c1..0b30a5267c 100644
--- a/cmake/VISPExtraTargets.cmake
+++ b/cmake/VISPExtraTargets.cmake
@@ -57,17 +57,25 @@ if(DOXYGEN_FOUND)
COMMAND "${DOXYGEN_EXECUTABLE}" "${VISP_DOC_DIR}/config-doxygen"
DEPENDS "${VISP_DOC_DIR}/config-doxygen"
)
+ add_custom_target(visp_doc_xml
+ COMMAND "${DOXYGEN_EXECUTABLE}" "${VISP_DOC_DIR}/config-doxygen-xml"
+ DEPENDS "${VISP_DOC_DIR}/config-doxygen-xml"
+ )
if(CMAKE_GENERATOR MATCHES "Xcode")
add_dependencies(visp_doc man) # developer_scripts not available when Xcode
+    add_dependencies(visp_doc_xml man)
elseif(UNIX AND NOT ANDROID) # man target available only on unix
add_dependencies(visp_doc man developer_scripts)
+ add_dependencies(visp_doc_xml man developer_scripts)
elseif(NOT(MINGW OR IOS))
add_dependencies(visp_doc developer_scripts)
+ add_dependencies(visp_doc_xml developer_scripts)
endif()
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(visp_doc PROPERTIES FOLDER "extra")
+ set_target_properties(visp_doc_xml PROPERTIES FOLDER "extra")
set_target_properties(html-doc PROPERTIES FOLDER "extra")
endif()
endif()
diff --git a/doc/config-doxygen.in b/doc/config-doxygen.in
index f55c416da7..ab7ac43a11 100644
--- a/doc/config-doxygen.in
+++ b/doc/config-doxygen.in
@@ -1,4 +1,4 @@
-# Doxyfile 1.8.17
+# Doxyfile 1.9.8
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@@ -12,6 +12,16 @@
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
+#
+# Note:
+#
+# Use doxygen to compare the used configuration file with the template
+# configuration file:
+# doxygen -x [configFile]
+# Use doxygen to compare the used configuration file with the template
+# configuration file without replacing the environment variables or CMake type
+# replacement variables:
+# doxygen -x_noenv [configFile]
#---------------------------------------------------------------------------
# Project related configuration options
@@ -51,7 +61,7 @@ PROJECT_BRIEF =
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
-PROJECT_LOGO = "@VISP_SOURCE_DIR@/doc/image/logo/img-logo-visp.png"
+PROJECT_LOGO = @VISP_SOURCE_DIR@/doc/image/logo/img-logo-visp.png
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
@@ -60,16 +70,28 @@ PROJECT_LOGO = "@VISP_SOURCE_DIR@/doc/image/logo/img-logo-visp.png"
OUTPUT_DIRECTORY = doc
-# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096
+# sub-directories (in 2 levels) under the output directory of each output format
+# and will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise causes
-# performance problems for the file system.
+# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to
+# control the number of sub-directories.
# The default value is: NO.
CREATE_SUBDIRS = NO
+# Controls the number of sub-directories that will be created when
+# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every
+# level increment doubles the number of directories, resulting in 4096
+# directories at level 8 which is the default and also the maximum value. The
+# sub-directories are organized in 2 levels, the first level always has a fixed
+# number of 16 directories.
+# Minimum value: 0, maximum value: 8, default value: 8.
+# This tag requires that the tag CREATE_SUBDIRS is set to YES.
+
+CREATE_SUBDIRS_LEVEL = 8
+
# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
@@ -81,26 +103,18 @@ ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
-# Ukrainian and Vietnamese.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian,
+# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English
+# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek,
+# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with
+# English messages), Korean, Korean-en (Korean with English messages), Latvian,
+# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese,
+# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish,
+# Swedish, Turkish, Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
-# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all generated output in the proper direction.
-# Possible values are: None, LTR, RTL and Context.
-# The default value is: None.
-
-OUTPUT_TEXT_DIRECTION = None
-
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
@@ -171,7 +185,6 @@ STRIP_FROM_PATH =
STRIP_FROM_INC_PATH = @DOXYGEN_STRIP_FROM_INC_PATH@
-
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful is your file systems doesn't
# support long names like on DOS, Mac, or CD-ROM.
@@ -218,6 +231,14 @@ QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
+# By default Python docstrings are displayed as preformatted text and doxygen's
+# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
+# doxygen's special commands can be used and the contents of the docstring
+# documentation blocks is shown as doxygen documentation.
+# The default value is: YES.
+
+PYTHON_DOCSTRING = YES
+
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
@@ -241,25 +262,19 @@ TAB_SIZE = 4
# the documentation. An alias has the form:
# name=value
# For example adding
-# "sideeffect=@par Side Effects:\n"
+# "sideeffect=@par Side Effects:^^"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines (in the resulting output). You can put ^^ in the value part of an
-# alias to insert a newline as if a physical newline was in the original file.
-# When you need a literal { or } or , in the value part of an alias you have to
-# escape them by means of a backslash (\), this can lead to conflicts with the
-# commands \{ and \} for these it is advised to use the version @{ and @} or use
-# a double escape (\\{ and \\})
+# "Side Effects:". Note that you cannot put \n's in the value part of an alias
+# to insert newlines (in the resulting output). You can put ^^ in the value part
+# of an alias to insert a newline as if a physical newline was in the original
+# file. When you need a literal { or } or , in the value part of an alias you
+# have to escape them by means of a backslash (\), this can lead to conflicts
+# with the commands \{ and \} for these it is advised to use the version @{ and
+# @} or use a double escape (\\{ and \\})
ALIASES =
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST =
-
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
@@ -301,18 +316,21 @@ OPTIMIZE_OUTPUT_SLICE = NO
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
-# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice,
-# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
+# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice,
+# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
-# default for Fortran type files), VHDL, tcl. For instance to make doxygen treat
-# .inc files as Fortran files (default is PHP), and .f files as C (default is
-# Fortran), use: inc=Fortran f=C.
+# default for Fortran type files). For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
+# the files are not read by doxygen. When specifying no_extension you should add
+# * to the FILE_PATTERNS.
+#
+# Note see also the list of default file extension mappings.
EXTENSION_MAPPING = json=JavaScript
@@ -335,6 +353,17 @@ MARKDOWN_SUPPORT = YES
TOC_INCLUDE_HEADINGS = 0
+# The MARKDOWN_ID_STYLE tag can be used to specify the algorithm used to
+# generate identifiers for the Markdown headings. Note: Every identifier is
+# unique.
+# Possible values are: DOXYGEN use a fixed 'autotoc_md' string followed by a
+# sequence number starting at 0 and GITHUB use the lower case version of title
+# with any whitespace replaced by '-' and punctuation characters removed.
+# The default value is: DOXYGEN.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+MARKDOWN_ID_STYLE = DOXYGEN
+
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
@@ -446,6 +475,27 @@ TYPEDEF_HIDES_STRUCT = NO
LOOKUP_CACHE_SIZE = 1
+# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
+# during processing. When set to 0 doxygen will based this on the number of
+# cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS = 1
+
+# If the TIMESTAMP tag is set different from NO then each generated page will
+# contain the date or date and time when the page was generated. Setting this to
+# NO can help when comparing the output of multiple runs.
+# Possible values are: YES, NO, DATETIME and DATE.
+# The default value is: NO.
+
+TIMESTAMP = NO
+
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
@@ -509,6 +559,13 @@ EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
+# If this flag is set to YES, the name of an unnamed parameter in a declaration
+# will be determined by the corresponding definition. By default unnamed
+# parameters remain unnamed in the output.
+# The default value is: YES.
+
+RESOLVE_UNNAMED_PARAMS = YES
+
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
@@ -520,7 +577,8 @@ HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO, these classes will be included in the various overviews. This option
-# has no effect if EXTRACT_ALL is enabled.
+# will also hide undocumented C++ concepts if enabled. This option has no effect
+# if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
@@ -546,12 +604,20 @@ HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES, upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# (including Cygwin) ands Mac users are advised to set this option to NO.
-# The default value is: system dependent.
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. On
+# Windows (including Cygwin) and MacOS, users should typically set this option
+# to NO, whereas on Linux or other Unix flavors it should typically be set to
+# YES.
+# Possible values are: SYSTEM, NO and YES.
+# The default value is: SYSTEM.
CASE_SENSE_NAMES = YES
@@ -569,6 +635,12 @@ HIDE_SCOPE_NAMES = NO
HIDE_COMPOUND_REFERENCE= NO
+# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class
+# will show which file needs to be included to use the class.
+# The default value is: YES.
+
+SHOW_HEADERFILE = YES
+
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
@@ -658,7 +730,7 @@ GENERATE_TODOLIST = YES
# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
-GENERATE_TESTLIST = YES
+GENERATE_TESTLIST = @DOXYGEN_GENERATE_TEST_LIST@
# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
@@ -726,7 +798,8 @@ FILE_VERSION_FILTER =
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
+# will be used as the name of the layout file. See also section "Changing the
+# layout of pages" for information.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
@@ -753,7 +826,7 @@ CITE_BIB_FILES = @DOXYGEN_CITE_BIB_FILES@
# messages are off.
# The default value is: NO.
-QUIET = NO
+QUIET = @DOXYGEN_QUIET@
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
@@ -772,24 +845,50 @@ WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
+# potential errors in the documentation, such as documenting some parameters in
+# a documented function twice, or documenting parameters that don't exist or
+# using markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
+# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete
+# function parameter documentation. If set to NO, doxygen will accept that some
+# parameters have no documentation without warning.
+# The default value is: YES.
+
+WARN_IF_INCOMPLETE_DOC = YES
+
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
-# value. If set to NO, doxygen will only warn about wrong or incomplete
-# parameter documentation, but not about the absence of documentation. If
-# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
+# value. If set to NO, doxygen will only warn about wrong parameter
+# documentation, but not about the absence of documentation. If EXTRACT_ALL is
+# set to YES then this flag will automatically be disabled. See also
+# WARN_IF_INCOMPLETE_DOC
# The default value is: NO.
WARN_NO_PARAMDOC = NO
+# If WARN_IF_UNDOC_ENUM_VAL option is set to YES, doxygen will warn about
+# undocumented enumeration values. If set to NO, doxygen will accept
+# undocumented enumeration values. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: NO.
+
+WARN_IF_UNDOC_ENUM_VAL = NO
+
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
-# a warning is encountered.
+# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
+# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
+# at the end of the doxygen process doxygen will return with a non-zero status.
+# If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS_PRINT then doxygen behaves
+# like FAIL_ON_WARNINGS but in case no WARN_LOGFILE is defined doxygen will not
+# write the warning messages in between other messages but write them at the end
+# of a run, in case a WARN_LOGFILE is defined the warning messages will be
+# besides being in the defined file also be shown at the end of a run, unless
+# the WARN_LOGFILE is defined as - i.e. standard output (stdout) in that case
+# the behavior will remain as with the setting FAIL_ON_WARNINGS.
+# Possible values are: NO, YES, FAIL_ON_WARNINGS and FAIL_ON_WARNINGS_PRINT.
# The default value is: NO.
WARN_AS_ERROR = NO
@@ -800,13 +899,27 @@ WARN_AS_ERROR = NO
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
+# See also: WARN_LINE_FORMAT
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
+# In the $text part of the WARN_FORMAT command it is possible that a reference
+# to a more specific place is given. To make it easier to jump to this place
+# (outside of doxygen) the user can define a custom "cut" / "paste" string.
+# Example:
+# WARN_LINE_FORMAT = "'vi $file +$line'"
+# See also: WARN_FORMAT
+# The default value is: at line $line of file $file.
+
+WARN_LINE_FORMAT = "at line $line of file $file"
+
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
-# error (stderr).
+# error (stderr). In case the file specified cannot be opened for writing the
+# warning and error messages are written to standard error. When as file - is
+# specified the warning and error messages are written to standard output
+# (stdout).
WARN_LOGFILE = warning.log
@@ -820,23 +933,28 @@ WARN_LOGFILE = warning.log
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
-INPUT = "@VISP_SOURCE_DIR@/modules" \
- "@VISP_SOURCE_DIR@/example" \
- "@VISP_SOURCE_DIR@/tutorial" \
- "@VISP_SOURCE_DIR@/demo" \
- "@VISP_SOURCE_DIR@/doc" \
- "@VISP_BINARY_DIR@/doc" \
- "@VISP_CONTRIB_MODULES_PATH@"
+INPUT = @DOXYGEN_INPUTS@
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
-# possible encodings.
+# documentation (see:
+# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
+# See also: INPUT_FILE_ENCODING
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses The INPUT_FILE_ENCODING tag can be used to specify
+# character encoding on a per file pattern basis. Doxygen will compare the file
+# name with each pattern and apply the encoding instead of the default
+# INPUT_ENCODING) if there is a match. The character encodings are a list of the
+# form: pattern=encoding (like *.php=ISO-8859-1). See cfg_input_encoding
+# "INPUT_ENCODING" for further information on supported encodings.
+
+INPUT_FILE_ENCODING =
+
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
@@ -845,13 +963,15 @@ INPUT_ENCODING = UTF-8
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
-# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
-# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
-# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
-# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
-# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen
-# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f, *.for, *.tcl, *.vhd,
-# *.vhdl, *.ucf, *.qsf and *.ice.
+# Note the list of default checked file patterns might differ from the list of
+# default file extension mappings.
+#
+# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cxxm,
+# *.cpp, *.cppm, *.c++, *.c++m, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl,
+# *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, *.h++, *.ixx, *.l, *.cs, *.d, *.php,
+# *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be
+# provided as doxygen C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
+# *.f18, *.f, *.for, *.vhd, *.vhdl, *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.h \
*.cpp \
@@ -901,10 +1021,7 @@ EXCLUDE_PATTERNS = *_impl.h \
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
+# ANamespace::AClass, ANamespace::*Test
EXCLUDE_SYMBOLS =
@@ -916,8 +1033,7 @@ EXAMPLE_PATH = "@VISP_SOURCE_DIR@/example" \
"@VISP_SOURCE_DIR@/tutorial" \
"@VISP_SOURCE_DIR@/demo" \
"@VISP_SOURCE_DIR@/modules" \
- "@VISP_SOURCE_DIR@/script" \
-
+ "@VISP_SOURCE_DIR@/script"
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
@@ -954,6 +1070,11 @@ IMAGE_PATH = @DOXYGEN_IMAGE_PATH@
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
+# Note that doxygen will use the data processed and written to standard output
+# for further processing, therefore nothing else, like debug statements or used
+# commands (so in case of a Windows batch file always use @echo OFF), should be
+# written to standard output.
+#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
@@ -995,6 +1116,15 @@ FILTER_SOURCE_PATTERNS =
USE_MDFILE_AS_MAINPAGE =
+# The Fortran standard specifies that for fixed formatted Fortran code all
+# characters from position 72 are to be considered as comment. A common
+# extension is to allow longer lines before the automatic comment starts. The
+# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can
+# be processed before the automatic comment starts.
+# Minimum value: 7, maximum value: 10000, default value: 72.
+
+FORTRAN_COMMENT_AFTER = 72
+
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
@@ -1092,17 +1222,11 @@ VERBATIM_HEADERS = YES
ALPHABETICAL_INDEX = YES
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX = 4
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
+# The IGNORE_PREFIX tag can be used to specify a prefix (or a list of prefixes)
+# that should be ignored while generating the index headers. The IGNORE_PREFIX
+# tag works for classes, function and member names. The entity will be placed in
+# the alphabetical list under the first letter of the entity name that remains
+# after removing the prefix.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX = vp
@@ -1114,7 +1238,7 @@ IGNORE_PREFIX = vp
# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
-GENERATE_HTML = YES
+GENERATE_HTML = @DOXYGEN_GENERATE_HTML@
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
@@ -1181,7 +1305,12 @@ HTML_STYLESHEET =
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
-# list). For an example see the documentation.
+# list).
+# Note: Since the styling of scrollbars can currently not be overruled in
+# Webkit/Chromium, the styling will be left out of the default doxygen.css if
+# one or more extra stylesheets have been specified. So if scrollbar
+# customization is desired it has to be added explicitly. For an example see the
+# documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
@@ -1196,9 +1325,22 @@ HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
+# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output
+# should be rendered with a dark or light theme.
+# Possible values are: LIGHT always generate light mode output, DARK always
+# generate dark mode output, AUTO_LIGHT automatically set the mode according to
+# the user preference, use light mode if no preference is set (the default),
+# AUTO_DARK automatically set the mode according to the user preference, use
+# dark mode if no preference is set and TOGGLE allow to user to switch between
+# light and dark mode via a button.
+# The default value is: AUTO_LIGHT.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE = AUTO_LIGHT
+
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
+# this color. Hue is specified as an angle on a color-wheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
@@ -1208,7 +1350,7 @@ HTML_EXTRA_FILES =
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
+# in the HTML output. For a value of 0 the output will use gray-scales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
@@ -1226,15 +1368,6 @@ HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to YES can help to show when doxygen was last run and thus if the
-# documentation is up to date.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP = NO
-
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
@@ -1254,6 +1387,13 @@ HTML_DYNAMIC_MENUS = YES
HTML_DYNAMIC_SECTIONS = YES
+# If the HTML_CODE_FOLDING tag is set to YES then classes and functions can be
+# dynamically folded and expanded in the generated HTML source code.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_CODE_FOLDING = YES
+
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
@@ -1269,10 +1409,11 @@ HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: https://developer.apple.com/xcode/), introduced with OSX
-# 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
+# environment (see:
+# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
+# create a documentation set, doxygen will generate a Makefile in the HTML
+# output directory. Running make will produce the docset in that directory and
+# running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
# genXcode/_index.html for more information.
@@ -1289,6 +1430,13 @@ GENERATE_DOCSET = NO
DOCSET_FEEDNAME = "Doxygen generated docs"
+# This tag determines the URL of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDURL =
+
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
@@ -1314,8 +1462,12 @@ DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
+# on Windows. In the beginning of 2021 Microsoft took the original page, with
+# a.o. the download links, offline (the HTML help workshop was already many years
+# in maintenance mode). You can download the HTML help workshop from the web
+# archives at Installation executable (see:
+# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
+# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
@@ -1345,7 +1497,7 @@ CHM_FILE =
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated
-# (YES) or that it should be included in the master .chm file (NO).
+# (YES) or that it should be included in the main .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
@@ -1372,6 +1524,16 @@ BINARY_TOC = NO
TOC_EXPAND = NO
+# The SITEMAP_URL tag is used to specify the full URL of the place where the
+# generated documentation will be placed on the server by the user during the
+# deployment of the documentation. The generated sitemap is called sitemap.xml
+# and placed on the directory specified by HTML_OUTPUT. In case no SITEMAP_URL
+# is specified no sitemap is generated. For information about the sitemap
+# protocol see https://www.sitemaps.org
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SITEMAP_URL =
+
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
@@ -1390,7 +1552,8 @@ QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
-# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
+# (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
@@ -1398,8 +1561,8 @@ QHP_NAMESPACE =
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
-# folders).
+# Folders (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
@@ -1407,16 +1570,16 @@ QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
-# filters).
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
-# filters).
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
@@ -1428,9 +1591,9 @@ QHP_CUST_FILTER_ATTRS =
QHP_SECT_FILTER_ATTRS =
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
+# The QHG_LOCATION tag can be used to specify the location (absolute path
+# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
+# run qhelpgenerator on the generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
@@ -1473,16 +1636,28 @@ DISABLE_INDEX = NO
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
+# further fine tune the look of the index (see "Fine-tuning the output"). As an
+# example, the default style sheet generated by doxygen has an example that
+# shows how to put an image at the root of the tree instead of the PROJECT_NAME.
+# Since the tree basically has the same information as the tab index, you could
+# consider setting DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = YES
+# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
+# FULL_SIDEBAR option determines if the side bar is limited to only the treeview
+# area (value NO) or if it should extend to the full height of the window (value
+# YES). Setting this to YES gives a layout similar to
+# https://docs.readthedocs.io with more room for contents, but less room for the
+# project logo, title, and description. If either GENERATE_TREEVIEW or
+# DISABLE_INDEX is set to NO, this option has no effect.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FULL_SIDEBAR = NO
+
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
@@ -1507,6 +1682,24 @@ TREEVIEW_WIDTH = 250
EXT_LINKS_IN_WINDOW = NO
+# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email
+# addresses.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+OBFUSCATE_EMAILS = YES
+
+# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
+# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
+# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
+# the HTML output. These images will generally look nicer at scaled resolutions.
+# Possible values are: png (the default) and svg (looks nicer but requires the
+# pdf2svg or inkscape tool).
+# The default value is: png.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FORMULA_FORMAT = png
+
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
@@ -1516,17 +1709,6 @@ EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 10
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT = YES
-
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
@@ -1544,11 +1726,29 @@ FORMULA_MACROFILE =
USE_MATHJAX = @DOXYGEN_USE_MATHJAX@
+# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
+# Note that the different versions of MathJax have different requirements with
+# regards to the different settings, so it is possible that also other MathJax
+# settings have to be changed when switching between the different MathJax
+# versions.
+# Possible values are: MathJax_2 and MathJax_3.
+# The default value is: MathJax_2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_VERSION = MathJax_2
+
# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
+# the MathJax output. For more details about the output format see MathJax
+# version 2 (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
+# (see:
+# http://docs.mathjax.org/en/latest/web/components/output.html).
# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
+# compatibility. This is the name for Mathjax version 2, for MathJax version 3
+# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
+# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
+# is the name for Mathjax version 3, for MathJax version 2 this will be
+# translated into HTML-CSS) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
@@ -1561,22 +1761,29 @@ MATHJAX_FORMAT = HTML-CSS
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from https://www.mathjax.org before deployment.
-# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/.
+# MathJax from https://www.mathjax.org before deployment. The default value is:
+# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2
+# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
+# for MathJax version 2 (see
+# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions):
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# For example for MathJax version 3 (see
+# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html):
+# MATHJAX_EXTENSIONS = ams
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
@@ -1623,7 +1830,8 @@ SERVER_BASED_SEARCH = NO
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: https://xapian.org/).
+# Xapian (see:
+# https://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
@@ -1636,8 +1844,9 @@ EXTERNAL_SEARCH = NO
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: https://xapian.org/). See the section "External Indexing and
-# Searching" for details.
+# Xapian (see:
+# https://xapian.org/). See the section "External Indexing and Searching" for
+# details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
@@ -1695,7 +1904,7 @@ LATEX_OUTPUT = latex
# the output language.
# This tag requires that the tag GENERATE_LATEX is set to YES.
-LATEX_CMD_NAME = "@LATEX_COMPILER@"
+LATEX_CMD_NAME = @LATEX_COMPILER@
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
@@ -1705,7 +1914,7 @@ LATEX_CMD_NAME = "@LATEX_COMPILER@"
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
-MAKEINDEX_CMD_NAME = "@MAKEINDEX_COMPILER@"
+MAKEINDEX_CMD_NAME = @MAKEINDEX_COMPILER@
# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
# generate index for LaTeX. In case there is no backslash (\) as first character
@@ -1732,7 +1941,7 @@ COMPACT_LATEX = NO
# The default value is: a4.
# This tag requires that the tag GENERATE_LATEX is set to YES.
-PAPER_TYPE = a4wide
+PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. The package can be specified just
@@ -1748,29 +1957,31 @@ EXTRA_PACKAGES = amsmath \
xr \
amsfonts
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
-# generated LaTeX document. The header should contain everything until the first
-# chapter. If it is left blank doxygen will generate a standard header. See
-# section "Doxygen usage" for information on how to let doxygen write the
-# default header to a separate file.
+# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for
+# the generated LaTeX document. The header should contain everything until the
+# first chapter. If it is left blank doxygen will generate a standard header. It
+# is highly recommended to start with a default header using
+# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty
+# and then modify the file new_header.tex. See also section "Doxygen usage" for
+# information on how to generate the default header that doxygen normally uses.
#
-# Note: Only use a user-defined header if you know what you are doing! The
-# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
-# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
-# string, for the replacement values of the other commands the user is referred
-# to HTML_HEADER.
+# Note: Only use a user-defined header if you know what you are doing!
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. The following
+# commands have a special meaning inside the header (and footer): For a
+# description of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
-# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer. See
+# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for
+# the generated LaTeX document. The footer should contain everything after the
+# last chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
-# special commands can be used inside the footer.
-#
-# Note: Only use a user-defined footer if you know what you are doing!
+# special commands can be used inside the footer. See also section "Doxygen
+# usage" for information on how to generate the default footer that doxygen
+# normally uses. Note: Only use a user-defined footer if you know what you are
+# doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
@@ -1803,18 +2014,26 @@ LATEX_EXTRA_FILES =
PDF_HYPERLINKS = NO
-# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES, to get a
-# higher quality PDF documentation.
+# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
+# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
+# files. Set this option to YES, to get a higher quality PDF documentation.
+#
+# See also section LATEX_CMD_NAME for selecting the engine.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = NO
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help. This option is also used
-# when generating formulas in HTML.
+# The LATEX_BATCHMODE tag signals the behavior of LaTeX in case of an error.
+# Possible values are: NO same as ERROR_STOP, YES same as BATCH, BATCH In batch
+# mode nothing is printed on the terminal, errors are scrolled as if <return> is
+# hit at every error; missing files that TeX tries to input or request from
+# keyboard input (\read on a not open input stream) cause the job to abort,
+# NON_STOP In nonstop mode the diagnostic message will appear on the terminal,
+# but there is no possibility of user interaction just like in batch mode,
+# SCROLL In scroll mode, TeX will stop only for missing files to input or if
+# keyboard input is necessary and ERROR_STOP In errorstop mode, TeX will stop at
+# each error, asking for user intervention.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@@ -1827,16 +2046,6 @@ LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE = NO
-
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
@@ -1845,14 +2054,6 @@ LATEX_SOURCE_CODE = NO
LATEX_BIB_STYLE = plain
-# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
-# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_TIMESTAMP = NO
-
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
@@ -1917,16 +2118,6 @@ RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
-# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
-# with syntax highlighting in the RTF output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_SOURCE_CODE = NO
-
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
@@ -1979,7 +2170,7 @@ MAN_LINKS = NO
# captures the structure of the code including all documentation.
# The default value is: NO.
-GENERATE_XML = NO
+GENERATE_XML = @DOXYGEN_GENERATE_XML@
# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
@@ -1996,7 +2187,7 @@ XML_OUTPUT = xml
# The default value is: YES.
# This tag requires that the tag GENERATE_XML is set to YES.
-XML_PROGRAMLISTING = YES
+XML_PROGRAMLISTING = NO
# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
# namespace members in file scope as well, matching the HTML output.
@@ -2023,27 +2214,44 @@ GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
-# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
-# program listings (including syntax highlighting and cross-referencing
-# information) to the DOCBOOK output. Note that enabling this will significantly
-# increase the size of the DOCBOOK output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_PROGRAMLISTING = NO
-
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
-# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
+# AutoGen Definitions (see https://autogen.sourceforge.net/) file that captures
# the structure of the code including all documentation. Note that this feature
# is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
+#---------------------------------------------------------------------------
+# Configuration options related to Sqlite3 output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_SQLITE3 tag is set to YES doxygen will generate a Sqlite3
+# database with symbols found by doxygen stored in tables.
+# The default value is: NO.
+
+GENERATE_SQLITE3 = NO
+
+# The SQLITE3_OUTPUT tag is used to specify where the Sqlite3 database will be
+# put. If a relative path is entered the value of OUTPUT_DIRECTORY will be put
+# in front of it.
+# The default directory is: sqlite3.
+# This tag requires that the tag GENERATE_SQLITE3 is set to YES.
+
+SQLITE3_OUTPUT = sqlite3
+
+# The SQLITE3_OVERWRITE_DB tag is set to YES, the existing doxygen_sqlite3.db
+# database file will be recreated with each doxygen run. If set to NO, doxygen
+# will warn if an a database file is already found and not modify it.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_SQLITE3 is set to YES.
+
+SQLITE3_RECREATE_DB = YES
+
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
@@ -2118,7 +2326,8 @@ SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by the
-# preprocessor.
+# preprocessor. Note that the INCLUDE_PATH is not recursive, so the setting of
+# RECURSIVE has no effect here.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH = "@VISP_BINARY_DIR@/modules/core"
@@ -2303,15 +2512,15 @@ TAGFILES =
GENERATE_TAGFILE =
-# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
-# the class index. If set to NO, only the inherited external classes will be
-# listed.
+# If the ALLEXTERNALS tag is set to YES, all external classes and namespaces
+# will be listed in the class and namespace index. If set to NO, only the
+# inherited external classes will be listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will be
+# in the topic index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
@@ -2325,25 +2534,9 @@ EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
+# Configuration options related to diagram generator tools
#---------------------------------------------------------------------------
-# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS = YES
-
-# You can include diagrams made with dia in doxygen documentation. Doxygen will
-# then run dia to produce the diagram and insert it in the documentation. The
-# DIA_PATH tag allows you to specify the directory where the dia binary resides.
-# If left empty dia is assumed to be found in the default search path.
-
-DIA_PATH =
-
# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
@@ -2352,7 +2545,7 @@ HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
-# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# https://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
@@ -2369,49 +2562,73 @@ HAVE_DOT = NO
DOT_NUM_THREADS = 0
-# When you want a differently looking font in the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
+# DOT_COMMON_ATTR is common attributes for nodes, edges and labels of
+# subgraphs. When you want a differently looking font in the dot files that
+# doxygen generates you can specify fontname, fontcolor and fontsize attributes.
+# For details please see Node,
+# Edge and Graph Attributes specification You need to make sure dot is able
+# to find the font, which can be done by putting it in a standard location or by
+# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font. Default graphviz fontsize is 14.
+# The default value is: fontname=Helvetica,fontsize=10.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_FONTNAME =
+DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10"
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
+# DOT_EDGE_ATTR is concatenated with DOT_COMMON_ATTR. For elegant style you can
+# add 'arrowhead=open, arrowtail=open, arrowsize=0.5'. Complete documentation about
+# arrows shapes.
+# The default value is: labelfontname=Helvetica,labelfontsize=10.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_FONTSIZE = 10
+DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10"
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
+# DOT_NODE_ATTR is concatenated with DOT_COMMON_ATTR. For view without boxes
+# around nodes set 'shape=plain' or 'shape=plaintext' Shapes specification
+# The default value is: shape=box,height=0.2,width=0.4.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4"
+
+# You can set the path where dot can find font specified with fontname in
+# DOT_COMMON_ATTR and others dot attributes.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
-# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
-# each documented class showing the direct and indirect inheritance relations.
-# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# If the CLASS_GRAPH tag is set to YES or GRAPH or BUILTIN then doxygen will
+# generate a graph for each documented class showing the direct and indirect
+# inheritance relations. In case the CLASS_GRAPH tag is set to YES or GRAPH and
+# HAVE_DOT is enabled as well, then dot will be used to draw the graph. In case
+# the CLASS_GRAPH tag is set to YES and HAVE_DOT is disabled or if the
+# CLASS_GRAPH tag is set to BUILTIN, then the built-in generator will be used.
+# If the CLASS_GRAPH tag is set to TEXT the direct and indirect inheritance
+# relations will be shown as texts / links.
+# Possible values are: NO, YES, TEXT, GRAPH and BUILTIN.
# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
-# class with other documented classes.
+# class with other documented classes. Explicit enabling a collaboration graph,
+# when COLLABORATION_GRAPH is set to NO, can be accomplished by means of the
+# command \collaborationgraph. Disabling a collaboration graph can be
+# accomplished by means of the command \hidecollaborationgraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
-# groups, showing the direct groups dependencies.
+# groups, showing the direct groups dependencies. Explicit enabling a group
+# dependency graph, when GROUP_GRAPHS is set to NO, can be accomplished by means
+# of the command \groupgraph. Disabling a directory graph can be accomplished by
+# means of the command \hidegroupgraph. See also the chapter Grouping in the
+# manual.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2434,10 +2651,32 @@ UML_LOOK = NO
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
+# This tag requires that the tag UML_LOOK is set to YES.
UML_LIMIT_NUM_FIELDS = 10
+# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and
+# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
+# tag is set to YES, doxygen will add type and arguments for attributes and
+# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen
+# will not generate fields with class member information in the UML graphs. The
+# class diagrams will look similar to the default class diagrams but using UML
+# notation for the relationships.
+# Possible values are: NO, YES and NONE.
+# The default value is: NO.
+# This tag requires that the tag UML_LOOK is set to YES.
+
+DOT_UML_DETAILS = NO
+
+# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
+# to display on a single line. If the actual line length exceeds this threshold
+# significantly it will wrapped across multiple lines. Some heuristics are apply
+# to avoid ugly line breaks.
+# Minimum value: 0, maximum value: 1000, default value: 17.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_WRAP_THRESHOLD = 17
+
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
@@ -2449,7 +2688,9 @@ TEMPLATE_RELATIONS = YES
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
-# files.
+# files. Explicit enabling an include graph, when INCLUDE_GRAPH is is set to NO,
+# can be accomplished by means of the command \includegraph. Disabling an
+# include graph can be accomplished by means of the command \hideincludegraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2458,7 +2699,10 @@ INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
-# files.
+# files. Explicit enabling an included by graph, when INCLUDED_BY_GRAPH is set
+# to NO, can be accomplished by means of the command \includedbygraph. Disabling
+# an included by graph can be accomplished by means of the command
+# \hideincludedbygraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2498,16 +2742,26 @@ GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
-# files in the directories.
+# files in the directories. Explicit enabling a directory graph, when
+# DIRECTORY_GRAPH is set to NO, can be accomplished by means of the command
+# \directorygraph. Disabling a directory graph can be accomplished by means of
+# the command \hidedirectorygraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
+# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels
+# of child directories generated in directory dependency graphs by dot.
+# Minimum value: 1, maximum value: 25, default value: 1.
+# This tag requires that the tag DIRECTORY_GRAPH is set to YES.
+
+DIR_GRAPH_MAX_DEPTH = 1
+
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
-# http://www.graphviz.org/)).
+# https://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
@@ -2535,7 +2789,7 @@ INTERACTIVE_SVG = NO
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_PATH = "@DOXYGEN_DOT_EXECUTABLE_PATH@"
+DOT_PATH = @DOXYGEN_DOT_EXECUTABLE_PATH@
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
@@ -2544,11 +2798,12 @@ DOT_PATH = "@DOXYGEN_DOT_EXECUTABLE_PATH@"
DOTFILE_DIRS =
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the \mscfile
-# command).
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
-MSCFILE_DIRS =
+DIA_PATH =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
@@ -2557,10 +2812,10 @@ MSCFILE_DIRS =
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
-# path where java can find the plantuml.jar file. If left blank, it is assumed
-# PlantUML is not used or called during a preprocessing step. Doxygen will
-# generate a warning when it encounters a \startuml command in this case and
-# will not generate output for the diagram.
+# path where java can find the plantuml.jar file or to the filename of jar file
+# to be used. If left blank, it is assumed PlantUML is not used or called during
+# a preprocessing step. Doxygen will generate a warning when it encounters a
+# \startuml command in this case and will not generate output for the diagram.
PLANTUML_JAR_PATH =
@@ -2598,18 +2853,6 @@ DOT_GRAPH_MAX_NODES = 150
MAX_DOT_GRAPH_DEPTH = 0
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT = NO
-
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
@@ -2622,14 +2865,34 @@ DOT_MULTI_TARGETS = YES
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
+# Note: This tag requires that UML_LOOK isn't set, i.e. the doxygen internal
+# graphical representation for inheritance and collaboration diagrams is used.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
-# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
# files that are used to generate the various graphs.
+#
+# Note: This setting is not only used for dot files but also for msc temporary
+# files.
# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
DOT_CLEANUP = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. If the MSCGEN_TOOL tag is left empty (the default), then doxygen will
+# use a built-in version of mscgen tool to produce the charts. Alternatively,
+# the MSCGEN_TOOL tag can also specify the name an external tool. For instance,
+# specifying prog as the value, doxygen will call the tool as prog -T
+# -o . The external tool should support
+# output file formats "png", "eps", "svg", and "ismap".
+
+MSCGEN_TOOL =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
diff --git a/doc/tutorial/misc/tutorial-synthetic-blenderproc.dox b/doc/tutorial/misc/tutorial-synthetic-blenderproc.dox
index 46e7b2ca49..91f04a807a 100644
--- a/doc/tutorial/misc/tutorial-synthetic-blenderproc.dox
+++ b/doc/tutorial/misc/tutorial-synthetic-blenderproc.dox
@@ -5,21 +5,29 @@
\section dnn_synthetic_intro Introduction
-In this tutorial, we will show how to generate synthetic data that can be used to train a neural network, thanks to blenderproc.
+In this tutorial, we will show how to generate synthetic data that can be used to train a neural network, thanks to
+blenderproc.
-Most of the (manual) work when training a neural network resides in acquiring and labelling data. This process can be slow, tedious and error prone.
-A solution to avoid this step is to use synthetic data, generated by a simulator/computer program. This approach comes with multiple advantages:
+Most of the (manual) work when training a neural network resides in acquiring and labelling data. This process can be
+slow, tedious and error prone.
+A solution to avoid this step is to use synthetic data, generated by a simulator/computer program. This approach comes
+with multiple advantages:
- Data acquisition is fast
- It is easy to acquire accurate ground truth labels
- Variations in the training data can be easily added
There are however, some drawbacks:
-- More knowledge of the scene is required: in the case of detection, we require a 3D model of the object, which is not the case for true images
-- A difference between simulated and real data can be apparent and negatively impact network performance (this is called the Sim2Real gap)
+- More knowledge of the scene is required: in the case of detection, we require a 3D model of the object, which is not
+ the case for true images
+- A difference between simulated and real data can be apparent and negatively impact network performance (this is
+ called the Sim2Real gap)
-The latter point is heavily dependent on the quality of the generated images and the more realistic the images, the better the expected results.
+The latter point is heavily dependent on the quality of the generated images and the more realistic the images, the
+better the expected results.
-Blender, using ray tracing, can generate realistic images. To perform data generation, Blenderproc has been developed and is an extremely useful and flexible tool to generate realistic scenes from Python code.
+Blender, using ray tracing, can generate realistic images. To perform data generation,
+Blenderproc has been developed and is an extremely useful and
+flexible tool to generate realistic scenes from Python code.
Along with RGB images, Blenderproc can generate different labels or inputs:
- Depth map
@@ -29,13 +37,17 @@ Along with RGB images, Blenderproc can generate different labels or inputs:
- Bounding box
- Optical flow (not provided in our generation script)
-In this tutorial, we will install blenderproc and use it to generate simple but varied scenes containing objects of interest.
+In this tutorial, we will install blenderproc and use it to generate simple but varied scenes containing objects of
+interest.
We provide a simple, object-centric generation script that should suffice in many cases.
-However, since Blenderproc is easy to use, with many examples included in the documentation, readapting this script to your needs should be easy.
+However, since Blenderproc is easy to use, with many examples included in the
+documentation, readapting this script to your needs
+should be easy.
\section dnn_synthetic_install Requirements
-First, you should start by installing blenderproc. First, start by creating a new conda environment to avoid potential conflicts with other Python packages.
+First, you should start by installing blenderproc. First, start by creating a new conda environment to avoid potential
+conflicts with other Python packages.
\code{.sh}
$ conda create --name blenderproc python=3.10 pip
$ conda activate blenderproc
@@ -47,7 +59,8 @@ You can then run the Blenderproc sample example with:
\code{.sh}
(blenderproc) $ blenderproc quickstart
\endcode
-This may take some time, as Blenderproc downloads its own version of Blender and sets up its own environment. This setup will only be performed once.
+This may take some time, as Blenderproc downloads its own version of Blender and sets up its own environment.
+This setup will only be performed once.
Once Blenderproc is done, you can check its output with:
\code{.sh}
@@ -55,24 +68,31 @@ Once Blenderproc is done, you can check its output with:
\endcode
-Blenderproc stores its output in HDF5 file format. Each HDF5 **may** contain the RGB image, along with depth, normals, and other modalities.
+Blenderproc stores its output in HDF5 file format. Each HDF5 **may** contain the RGB image, along with depth, normals,
+and other modalities.
-For the simulator to provide useful data, we should obtain a set of realistic textures (thus helping close the Sim2Real gap).
-Thankfully, Blenderproc provides a helpful script to download a dataset of materials from cc0textures.com, containing more than 1500 high resolution materials.
+For the simulator to provide useful data, we should obtain a set of realistic textures
+(thus helping close the Sim2Real gap).
+Thankfully, Blenderproc provides a helpful script to download a dataset of materials from cc0textures.com,
+containing more than 1500 high resolution materials.
To download the materials, run
\code{.sh}
(blenderproc) $ blenderproc download cc_textures path/to/folder/where/to/save/materials
\endcode
-\warning Because the materials are in high definition, downloading the full dataset may take a large amount of disk space (30+ GB). If this is too much for you, you can safely delete some of the materials or stop the script after it has acquired enough materials. While using a small number of materials can be useful when performing quick tests, using the full set should be preferred as variety helps when transferring your deep learning model to real world data.
+\warning Because the materials are in high definition, downloading the full dataset may take a large amount of disk
+space (30+ GB). If this is too much for you, you can safely delete some of the materials or stop the script after it
+has acquired enough materials. While using a small number of materials can be useful when performing quick tests,
+using the full set should be preferred as variety helps when transferring your deep learning model to real world data.
\section dnn_synthetic_script Running the object-centric generation script
We will now run the generation script.
-The script places a random set of objects in a simple cubic room, with added distractors. Materials of the walls and distractors are randomized.
-
-This script and an example configuration file can be found in the `script/dataset_generator` folder of your ViSP source directory.
+The script places a random set of objects in a simple cubic room, with added distractors. Materials of the walls and
+distractors are randomized.
+This script and an example configuration file can be found in the `script/dataset_generator` folder of your ViSP
+source directory.
The basic algorithm is:
\verbatim
@@ -114,12 +134,9 @@ For each scene:
\endverbatim
Many randomization parameters can be modified to alter the rendering, as explained in \ref dnn_input_configuration.
-
-
With this simple approach, we can obtain images such as:
\image html misc/blenderproc_rgb_example.png
-
\subsection dnn_input_objects 3D model format
To use this data generation tool, you should first provide the 3D models.
You can provide multiple models, which will be sampled randomly during generation.
@@ -136,9 +153,12 @@ The models should be contained in a folder as such:
- another_model.mtl
\endverbatim
-When setting up the configuration file in \ref dnn_input_configuration, "models_path" should point to the root folder, models.
-Each subfolder should contain a single object, in `.obj` format (with potential materials and textures). Each object will be considered as having its own class, the class name being the name of the subfolder (e.g., objectA or objectB).
-The class indices start with 1, and are sorted alphabetically depending on the name of the class (e.g., objectA = 1, objectB = 2).
+When setting up the configuration file in \ref dnn_input_configuration, "models_path" should point to the root folder,
+models.
+Each subfolder should contain a single object, in `.obj` format (with potential materials and textures). Each object
+will be considered as having its own class, the class name being the name of the subfolder (e.g., objectA or objectB).
+The class indices start with 1, and are sorted alphabetically depending on the name of the class (e.g., objectA = 1,
+objectB = 2).
\subsection dnn_input_configuration Generation configuration
Configuring the dataset generation is done through a JSON file. An example configuration file can be seen below:
@@ -169,7 +189,8 @@ The general parameters are:
-You can also control some of the rendering parameters. This will impact the rendering time and the quality of the generated RGB images.
+You can also control some of the rendering parameters. This will impact the rendering time and the quality of the
+generated RGB images.
These parameters are located in the "rendering" field.
Name | Type, possible values | Description |
@@ -181,12 +202,14 @@ These parameters are located in the "rendering" field.
denoiser |
One of [null, "INTEL", "OPTIX"] |
- Which denoiser to use after performing ray tracing. null indicates that no denoiser is used. "OPTIX" requires a compatible Nvidia GPU.
+ | Which denoiser to use after performing ray tracing. null indicates that no denoiser is used. "OPTIX" requires
+ a compatible Nvidia GPU.
Using a denoiser allows to obtain a clean image, with a low number of rays per pixels. |
-You can also modify the camera's intrinsic parameters. The camera uses an undistorted perspective projection model. For more information on camera parameters, see vpCameraParameters.
+You can also modify the camera's intrinsic parameters. The camera uses an undistorted perspective projection model.
+For more information on camera parameters, see vpCameraParameters.
These parameters are found in the "camera" field of the configuration.
Name | Type, possible values | Description |
@@ -224,8 +247,10 @@ These parameters are found in the "camera" field of the configuration.
randomize_params_percent |
Float, [0, 100) |
- Controls the randomization of the camera parameters \f$p_x, p_y, u_0, v_0\f$. If randomize_params_percent > 0, then, each time a scene is created the intrinsics are perturbed around the given values.
- For example, if this parameters is equal to 0.10 and \f$p_x = 500\f$, then the used \f$p_x\f$ when generating images will be in the range [450, 550].
+ Controls the randomization of the camera parameters \f$p_x, p_y, u_0, v_0\f$. If randomize_params_percent > 0,
+ then, each time a scene is created the intrinsics are perturbed around the given values.
+ For example, if this parameters is equal to 0.10 and \f$p_x = 500\f$, then the used \f$p_x\f$ when generating
+ images will be in the range [450, 550].
|
@@ -239,8 +264,10 @@ To customize the scene, you can change the parameters in the "scene" field:
Float > 1.0, < room_size_multiplier_max |
Minimum room size as a factor of the biggest sampled target object. The room is cubic.
- The size of the biggest object is the length of the largest diagonal of its axis-aligned bounding box. This tends to overestimate the size of the object.
- If the size of the biggest object is 0.5m, room_size_multiplier_max = 2 and room_size_multiplier_max = 4, then the room's size will be randomly sampled to be between 1m and 2m.
+ The size of the biggest object is the length of the largest diagonal of its axis-aligned bounding box.
+ This tends to overestimate the size of the object.
+ If the size of the biggest object is 0.5m, room_size_multiplier_max = 2 and room_size_multiplier_max = 4,
+ then the room's size will be randomly sampled to be between 1m and 2m.
|
@@ -248,20 +275,25 @@ To customize the scene, you can change the parameters in the "scene" field:
Float > room_size_multiplier_min |
Minimum room size as a factor of the biggest sampled target object. The room is cubic.
- The size of the biggest object is the length of the largest diagonal of its axis-aligned bounding box. This tends to overestimate the size of the object.
- If the size of the biggest object is 0.5m, room_size_multiplier_max = 2 and room_size_multiplier_max = 4, then the room's size will be randomly sampled to be between 1m and 2m.
+ The size of the biggest object is the length of the largest diagonal of its axis-aligned bounding box.
+ This tends to overestimate the size of the object.
+ If the size of the biggest object is 0.5m, room_size_multiplier_max = 2 and room_size_multiplier_max = 4,
+ then the room's size will be randomly sampled to be between 1m and 2m.
|
simulate_physics |
Boolean |
- Whether to simulate physics. If false, then objects will be floating across the room. If true, then objects will fall to the ground. |
+ Whether to simulate physics. If false, then objects will be floating across the room. If true,
+ then objects will fall to the ground. |
max_num_textures |
Int > 0 |
- Max number of textures per blenderproc run. If scenes_per_run is 1, max_num_textures = 50 and the number of distractors is more than 50, then the 50 textures will be used across all distractors (and walls). In this case, new materials will be sampled for each scene. |
+ Max number of textures per blenderproc run. If scenes_per_run is 1, max_num_textures = 50 and the number of
+ distractors is more than 50, then the 50 textures will be used across all distractors (and walls). In this case,
+ new materials will be sampled for each scene. |
distractors |
@@ -280,7 +312,8 @@ To customize the scene, you can change the parameters in the "scene" field:
-Distractors are small, simple objects that are added along with the target objects to create some variations and occlusions. You can also load custom objects as distractors.
+Distractors are small, simple objects that are added along with the target objects to create some variations and
+occlusions. You can also load custom objects as distractors.
To modify their properties, you can change the "distractors" field of the scene
Name | Type, possible values | Description |
@@ -335,7 +368,8 @@ To modify their properties, you can change the "distractors" field of the scene
Float >= 0.0 |
Amount of displacement to apply to distractors.
- Displacement subdivides the mesh and displaces each of the distractor's vertices according to a random noise pattern.
+ Displacement subdivides the mesh and displaces each of the distractor's vertices according to a random noise
+ pattern.
This option greatly slows down rendering: set it to 0 if needed.
|
@@ -344,14 +378,16 @@ To modify their properties, you can change the "distractors" field of the scene
Float >= 0.0 |
Amount of noise to add to the material properties of the distractors.
- These properties include the specularity, the "metallicness" and the roughness of the material, according to Blender's principled BSDF.
+ These properties include the specularity, the "metallicness" and the roughness of the material, according to
+ Blender's principled BSDF.
|
emissive_prob |
Float >= 0.0 , <= 1.0 |
- Probability that a distractor becomes a light source: its surface emits light. Set to more than 0 to add more light variations and shadows.
+ Probability that a distractor becomes a light source: its surface emits light. Set to more than 0 to add more
+ light variations and shadows.
|
@@ -449,23 +485,28 @@ To change the sampling behaviour of target objects, see the properties below:
Float >= 0.0 |
Amount of noise to add to the material properties of the target objects.
- These properties include the specularity, the "metallicness" and the roughness of the material, according to Blender's principled BSDF.
+ These properties include the specularity, the "metallicness" and the roughness of the material, according to
+ Blender's principled BSDF.
|
cam_min_dist_rel |
Float >= 0.0, < cam_max_dist_rel |
- Minimum distance of the camera to the point of interest of the object when sampling camera poses. This is expressed in terms of the size of the target object.
- If the target object has a size of 0.5m and cam_min_dist_rel = 1.5, then the closest possible camera will be at 0.75m away from the point of interest.
+ Minimum distance of the camera to the point of interest of the object when sampling camera poses.
+ This is expressed in terms of the size of the target object.
+ If the target object has a size of 0.5m and cam_min_dist_rel = 1.5, then the closest possible camera will be
+ at 0.75m away from the point of interest.
|
cam_max_dist_rel |
Float >= cam_min_dist_rel |
- Maximum distance of the camera to the point of interest of the object when sampling camera poses. This is expressed in terms of the size of the target object.
- If the target object has a size of 0.5m and cam_max_dist_rel = 2.0, then the farthest possible camera will be 1m away from the point of interest.
+ Maximum distance of the camera to the point of interest of the object when sampling camera poses.
+ This is expressed in terms of the size of the target object.
+ If the target object has a size of 0.5m and cam_max_dist_rel = 2.0, then the farthest possible camera will
+ be 1m away from the point of interest.
|
@@ -481,41 +522,48 @@ To customize the dataset, modify the options in the "dataset" field:
save_path |
String |
- Path to the folder that will contain the final dataset. This folder will contain one folder per scene, and each sample of a scene will be its own HDF5 file. |
+ Path to the folder that will contain the final dataset. This folder will contain one folder per scene,
+ and each sample of a scene will be its own HDF5 file. |
scenes_per_run |
Int > 0 |
- Number of scenes to generate per blenderproc run. Between blenderproc runs, Blender is restarted in order to avoid memory issues.
+ Number of scenes to generate per blenderproc run. Between blenderproc runs, Blender is restarted in order to
+ avoid memory issues.
|
num_scenes |
Int > 0 |
- Total number of scenes to generate. Generating many scenes will add more diversity to the dataset as object placement, materials and lighting are randomized once per scene.
+ Total number of scenes to generate. Generating many scenes will add more diversity to the dataset as object
+ placement, materials and lighting are randomized once per scene.
|
images_per_scene |
Int > 0 |
- Number of images to generate per scene. The total number of samples in the dataset will be num_scenes * (images_per_scene + empty_images_per_scene).
+ Number of images to generate per scene. The total number of samples in the dataset will be
+ num_scenes * (images_per_scene + empty_images_per_scene).
|
empty_images_per_scene |
Int >= 0, <= images_per_scene |
- Number of images without target objects to generate per scene. The camera poses for these images are sampled from the poses used to generate images with target objects. Thus, the only difference will be that the objects are not present, the rest of the scene is left untouched.
+ Number of images without target objects to generate per scene. The camera poses for these images are sampled
+ from the poses used to generate images with target objects. Thus, the only difference will be that the objects
+ are not present, the rest of the scene is left untouched.
|
pose |
Boolean |
- Whether to save the pose of target objects that are visible in the camera. The pose of the objects are expressed in the camera frame as an homogeneous matrix \f$^{c}\mathbf{T}_{o}\f$
+ Whether to save the pose of target objects that are visible in the camera. The pose of the objects are expressed
+ in the camera frame as an homogeneous matrix \f$^{c}\mathbf{T}_{o}\f$
|
@@ -546,7 +594,8 @@ To customize the dataset, modify the options in the "dataset" field:
detection |
Boolean |
- Whether to save the bounding box detections. In this case, bounding boxes are not computed from the segmentation map (also possible with Blenderproc), but rather in way such that occlusion does not influence the final bounding box.
+ Whether to save the bounding box detections. In this case, bounding boxes are not computed from the segmentation
+ map (also possible with Blenderproc), but rather in a way such that occlusion does not influence the final bounding box.
The detections can be filtered with the parameters in "detection_params".
|
@@ -554,15 +603,19 @@ To customize the dataset, modify the options in the "dataset" field:
detection_params:min_size_size_px |
Int >= 0 |
- Minimum side length of a detection for it to be considered as valid. Used to filter really far or small objects, for which detection would be hard.
+ Minimum side length of a detection for it to be considered as valid. Used to filter really far or small objects,
+ for which detection would be hard.
|
detection_params:min_visibility |
Float [0.0, 1.0] |
- Percentage of the object that must be visible for a detection to be considered as valid. The visibility score is computed as such:
- First, the vertices of the mesh that are behind the camera are filtered. Then, the vertices that are outside of the camera's field of view are filtered. Then, we randomly sample "detection_params:points_sampling_occlusion" points to test whether the object is occluded (test done through ray casting).
+ Percentage of the object that must be visible for a detection to be considered as valid. The visibility score is
+ computed as such:
+ First, the vertices of the mesh that are behind the camera are filtered. Then, the vertices that are outside of
+ the camera's field of view are filtered. Then, we randomly sample "detection_params:points_sampling_occlusion"
+ points to test whether the object is occluded (test done through ray casting).
If too many points are filtered, then the object is considered as not visible and detection is invalid.
|
@@ -571,7 +624,8 @@ To customize the dataset, modify the options in the "dataset" field:
\section dnn_run_script Running the script to generate data
-Once you have configured the generation to your liking, navigate to the `script/dataset_generator` located in your ViSP source directory.
+Once you have configured the generation to your liking, navigate to the `script/dataset_generator` located in your
+ViSP source directory.
You can then run the `generate_dataset.py` script as such
\code{.sh}
@@ -581,15 +635,18 @@ You can then run the `generate_dataset.py` script as such
If all is well setup, then the dataset generation should start and run.
-\warning If during generation, you encounter a message about invalid camera placement, try to make room_size_multiplier_min and room_size_multiplier_max larger, so that more space is available for object placement.
+\warning If during generation, you encounter a message about invalid camera placement, try to make
+room_size_multiplier_min and room_size_multiplier_max larger, so that more space is available for object placement.
-To give an idea of generation time, generating 1000 images (with a resolution of 640 x 480) and detections of a single object, with a few added distractors, takes around 30mins on a Quadro RTX 6000.
+To give an idea of generation time, generating 1000 images (with a resolution of 640 x 480) and detections of a
+single object, with a few added distractors, takes around 30mins on a Quadro RTX 6000.
Once generation is finished, you are ready to leverage the data to train your neural network.
\section dnn_output Using and parsing the generation output
-The dataset generated by Blender is located in the "dataset:save_path" path that you specified in your JSON configuration file.
+The dataset generated by Blender is located in the "dataset:save_path" path that you specified in your JSON
+configuration file.
The dataset has the following structure
\verbatim
@@ -627,7 +684,11 @@ This script can be run like this:
(blenderproc) $ python export_for_yolov7.py --input path/to/dataset --output path/to/yolodataset --train-split 0.8
\endcode
-here "--input" indicates the path to the location of the blenderproc dataset, while "--output" points to the folder where the dataset in the format that YoloV7 expects will be saved. "--train-split" is an argument that indicates how much of the dataset is kept for training. A value of 0.8 indicates that 80% of the dataset is used for training, while 20% is used for validation. The split is performed randomly across all scenes (a scene may be visible in both train and validation sets).
+here "--input" indicates the path to the location of the blenderproc dataset, while "--output" points to the folder
+where the dataset in the format that YoloV7 expects will be saved. "--train-split" is an argument that indicates how
+much of the dataset is kept for training. A value of 0.8 indicates that 80% of the dataset is used for training,
+while 20% is used for validation. The split is performed randomly across all scenes (a scene may be visible in both
+train and validation sets).
Once the script has run, the folder "path/to/yolodataset" should be created and contain the dataset as expected by YoloV7.
This folder contains a "dataset.yml" file, which will be used when training a YoloV7. It contains:
@@ -637,12 +698,14 @@ the following:
names:
- esa
nc: 1
-train: /local/sfelton/yolov7_esa_dataset/images/train
-val: /local/sfelton/yolov7_esa_dataset/images/val
+train: /local/user/yolov7_esa_dataset/images/train
+val: /local/user/yolov7_esa_dataset/images/val
```
where nc is the number of class, "names" are the class names, and "train" and "val" are the paths to the dataset splits.
-To start training a YoloV7, you should download the repository and install the required dependencies. Again, we will create a conda environment. You can also use a docker container, as explained in the documentation. We also download the pretrained yolo model, that we will finetune on our own dataset.
+To start training a YoloV7, you should download the repository and install the required dependencies.
+Again, we will create a conda environment. You can also use a docker container, as explained in the documentation.
+We also download the pretrained yolo model, that we will finetune on our own dataset.
```
~ $ git clone https://github.com/WongKinYiu/yolov7.git
~ $ cd yolov7
@@ -652,7 +715,8 @@ To start training a YoloV7, you should download the repository an
(yolov7) ~/yolov7 $ wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt
```
-To fine-tune a YoloV7, we should create two new files: the network configuration and the hyperparameters. We will reuse the ones provided for the tiny model.
+To fine-tune a YoloV7, we should create two new files: the network configuration and the hyperparameters.
+We will reuse the ones provided for the tiny model.
```
CFG=cfg/training/yolov7-tiny-custom.yaml
cp cfg/training/yolov7-tiny.yaml $CFG
@@ -660,7 +724,7 @@ cp cfg/training/yolov7-tiny.yaml $CFG
HYP=data/hyp.scratch.custom.yaml
cp data/hyp.scratch.tiny.yaml $HYP
```
-Next open the new cfg file, and modify the number of classes (set "nc" from 80 to the number classes you have in your dataset)
+Next open the new cfg file, and modify the number of classes (set "nc" from 80 to the number of classes you have in your dataset).
You can also modify the hyperparameters file to add more augmentation during training.
@@ -684,9 +748,6 @@ Here is an overview of the generated images and the resulting detections for a s
\endhtmlonly
-
-
-
\subsection dnn_output_custom_parsing Parsing HDF5 with a custom script
In Python, an HDF5 file can be read like a dictionary.
@@ -770,7 +831,8 @@ Reading scene 4
\endverbatim
- Both depth and normals are represented as floating points, conserving accuracy.
-- The object data is represented as a JSON document. Which you can directly save or reparse to save only the information of interest.
+- The object data is represented as a JSON document, which you can directly save or reparse to save only the
+ information of interest.
- Object poses are expressed in the camera frame and are represented as homogeneous matrix.
- Bounding boxes coordinates are in pixels, and the values are [x_min, y_min, width, height]
@@ -778,6 +840,7 @@ You can modify this script to export the dataset to another format, as it was do
\section dnn_synthetic_next Next steps
-If you use this generator to train a detection network, you can combine it with Megapose to perform 6D pose estimation and tracking. See \ref tutorial-tracking-megapose.
+If you use this generator to train a detection network, you can combine it with Megapose to perform 6D pose estimation
+and tracking. See \ref tutorial-tracking-megapose.
*/
diff --git a/modules/core/include/visp3/core/vpArray2D.h b/modules/core/include/visp3/core/vpArray2D.h
index 98192e58d3..0d15edd28d 100644
--- a/modules/core/include/visp3/core/vpArray2D.h
+++ b/modules/core/include/visp3/core/vpArray2D.h
@@ -823,7 +823,7 @@ template class vpArray2D
\code
vpArray2D M(3,4);
vpArray2D::saveYAML("matrix.yml", M, "example: a YAML-formatted header");
- vpArray2D::saveYAML("matrixIndent.yml", M, "example:\n - a YAML-formatted
+ vpArray2D::saveYAML("matrixIndent.yml", M, "example:\n - a YAML-formatted \
header\n - with inner indentation"); \endcode Content of matrix.yml:
\code
example: a YAML-formatted header
diff --git a/modules/core/include/visp3/core/vpCannyEdgeDetection.h b/modules/core/include/visp3/core/vpCannyEdgeDetection.h
index 18f4920c5b..6cc7689212 100644
--- a/modules/core/include/visp3/core/vpCannyEdgeDetection.h
+++ b/modules/core/include/visp3/core/vpCannyEdgeDetection.h
@@ -210,7 +210,7 @@ class VISP_EXPORT vpCannyEdgeDetection
* \param[in] j : The JSON object, resulting from the parsing of a JSON file.
* \param[out] detector : The detector that will be initialized from the JSON data.
*/
- inline friend void from_json(const json &j, vpCannyEdgeDetection &detector)
+ friend inline void from_json(const json &j, vpCannyEdgeDetection &detector)
{
std::string filteringAndGradientName = vpImageFilter::vpCannyFilteringAndGradientTypeToString(detector.m_filteringAndGradientType);
filteringAndGradientName = j.value("filteringAndGradientType", filteringAndGradientName);
@@ -230,7 +230,7 @@ class VISP_EXPORT vpCannyEdgeDetection
* \param[out] j : A JSON parser object.
* \param[in] detector : The vpCannyEdgeDetection object that must be parsed into JSON format.
*/
- inline friend void to_json(json &j, const vpCannyEdgeDetection &detector)
+ friend inline void to_json(json &j, const vpCannyEdgeDetection &detector)
{
std::string filteringAndGradientName = vpImageFilter::vpCannyFilteringAndGradientTypeToString(detector.m_filteringAndGradientType);
j = json {
diff --git a/modules/core/include/visp3/core/vpColVector.h b/modules/core/include/visp3/core/vpColVector.h
index 56111c54f3..518b461043 100644
--- a/modules/core/include/visp3/core/vpColVector.h
+++ b/modules/core/include/visp3/core/vpColVector.h
@@ -319,7 +319,7 @@ class VISP_EXPORT vpColVector : public vpArray2D
* ofs.close();
* }
* \endcode
- * produces `log.csvè file that contains:
+ * produces `log.csv` file that contains:
* \code
* 0
* 1
diff --git a/modules/core/include/visp3/core/vpFrameGrabber.h b/modules/core/include/visp3/core/vpFrameGrabber.h
index 91ab6fd7c6..0648906be2 100644
--- a/modules/core/include/visp3/core/vpFrameGrabber.h
+++ b/modules/core/include/visp3/core/vpFrameGrabber.h
@@ -110,7 +110,7 @@ class VISP_EXPORT vpFrameGrabber
public:
vpFrameGrabber() : init(false), height(0), width(0) { };
-
+ virtual ~vpFrameGrabber() = default;
virtual void open(vpImage &I) = 0;
virtual void open(vpImage &I) = 0;
diff --git a/modules/core/include/visp3/core/vpImage.h b/modules/core/include/visp3/core/vpImage.h
index ff73fe7eca..0d5cbf9894 100644
--- a/modules/core/include/visp3/core/vpImage.h
+++ b/modules/core/include/visp3/core/vpImage.h
@@ -482,6 +482,7 @@ inline std::ostream &operator<<(std::ostream &s, const vpImage &I)
#if defined(VISP_HAVE_PTHREAD) || (defined(_WIN32) && !defined(WINRT_8_0))
namespace
{
+
struct vpImageLut_Param_t
{
unsigned int m_start_index;
diff --git a/modules/core/include/visp3/core/vpImageFilter.h b/modules/core/include/visp3/core/vpImageFilter.h
index eb671845d6..902ee0e73f 100644
--- a/modules/core/include/visp3/core/vpImageFilter.h
+++ b/modules/core/include/visp3/core/vpImageFilter.h
@@ -602,9 +602,11 @@ class VISP_EXPORT vpImageFilter
}
static void filterX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size);
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
static void filterXR(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size);
static void filterXG(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size);
static void filterXB(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size);
+#endif
template
static inline FilterType filterX(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size)
@@ -618,7 +620,7 @@ class VISP_EXPORT vpImageFilter
}
return result + filter[0] * I[r][c];
}
-
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
static inline double filterXR(const vpImage &I, unsigned int r, unsigned int c, const double *filter, unsigned int size)
{
double result;
@@ -784,12 +786,15 @@ class VISP_EXPORT vpImageFilter
}
return result + filter[0] * I[r][c].B;
}
+#endif
+
static void filterY(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size);
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
static void filterYR(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size);
static void filterYG(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size);
static void filterYB(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size);
-
+#endif
template
static void filterY(const vpImage &I, vpImage &dIy, const FilterType *filter, unsigned int size)
{
@@ -823,7 +828,7 @@ class VISP_EXPORT vpImageFilter
}
return result + filter[0] * I[r][c];
}
-
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
static inline double filterYR(const vpImage &I, unsigned int r, unsigned int c, const double *filter, unsigned int size)
{
double result;
@@ -985,19 +990,21 @@ class VISP_EXPORT vpImageFilter
}
return result + filter[0] * I[r][c].B;
}
+#endif
+
/*!
- * Apply a Gaussian blur to an image.
- * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision.
- * \param I : Input image.
- * \param GI : Filtered image.
- * \param size : Filter size. This value should be odd.
- * \param sigma : Gaussian standard deviation. If it is equal to zero or
- * negative, it is computed from filter size as sigma = (size-1)/6.
- * \param normalize : Flag indicating whether to normalize the filter coefficients or not.
- *
- * \sa getGaussianKernel() to know which kernel is used.
- */
+ * Apply a Gaussian blur to an image.
+ * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision.
+ * \param I : Input image.
+ * \param GI : Filtered image.
+ * \param size : Filter size. This value should be odd.
+ * \param sigma : Gaussian standard deviation. If it is equal to zero or
+ * negative, it is computed from filter size as sigma = (size-1)/6.
+ * \param normalize : Flag indicating whether to normalize the filter coefficients or not.
+ *
+ * \sa getGaussianKernel() to know which kernel is used.
+ */
template
static void gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size = 7, FilterType sigma = 0., bool normalize = true)
{
diff --git a/modules/core/include/visp3/core/vpMatrix.h b/modules/core/include/visp3/core/vpMatrix.h
index 3fac0d971c..09b7145244 100644
--- a/modules/core/include/visp3/core/vpMatrix.h
+++ b/modules/core/include/visp3/core/vpMatrix.h
@@ -978,6 +978,7 @@ vpMatrix M(R);
//@}
#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
+
vp_deprecated double euclideanNorm() const;
/*!
diff --git a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h
index 92fdda86f2..964dd4a0de 100644
--- a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h
+++ b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h
@@ -288,20 +288,10 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase
void setAprilTagQuadSigma(float quadSigma);
void setAprilTagRefineEdges(bool refineEdges);
-#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
- /*!
- * @name Deprecated functions
- */
- //@{
- vp_deprecated void setAprilTagRefineDecode(bool refineDecode);
- vp_deprecated void setAprilTagRefinePose(bool refinePose);
- //@}
-#endif
-/*!
- * Allow to enable the display of overlay tag information in the windows
- * (vpDisplay) associated to the input image.
- */
+
+ /*! Allow to enable the display of overlay tag information in the windows
+ * (vpDisplay) associated to the input image. */
inline void setDisplayTag(bool display, const vpColor &color = vpColor::none, unsigned int thickness = 2)
{
m_displayTag = display;
@@ -313,6 +303,16 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase
void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame);
+#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
+ /*!
+ @name Deprecated functions
+ */
+ //@{
+ vp_deprecated void setAprilTagRefinePose(bool refinePose);
+ vp_deprecated void setAprilTagRefineDecode(bool refineDecode);
+ //@}
+#endif
+
protected:
bool m_displayTag;
vpColor m_displayTagColor;
diff --git a/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h b/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h
index 57e955e8e2..87606ba1cf 100644
--- a/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h
+++ b/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h
@@ -201,7 +201,7 @@ class VISP_EXPORT vpDetectorDNNOpenCV
* \param j The JSON object, resulting from the parsing of a JSON file.
* \param config The configuration of the network, that will be initialized from the JSON data.
*/
- inline friend void from_json(const json &j, NetConfig &config)
+ friend inline void from_json(const json &j, NetConfig &config)
{
config.m_confThreshold = j.value("confidenceThreshold", config.m_confThreshold);
if (config.m_confThreshold <= 0) {
@@ -241,7 +241,7 @@ class VISP_EXPORT vpDetectorDNNOpenCV
* \param j A JSON parser object.
* \param config The vpDetectorDNNOpenCV::NetConfig that must be parsed into JSON format.
*/
- inline friend void to_json(json &j, const NetConfig &config)
+ friend inline void to_json(json &j, const NetConfig &config)
{
std::pair resolution = { config.m_inputSize.width, config.m_inputSize.height };
std::vector v_mean = { config.m_mean[0], config.m_mean[1], config.m_mean[2] };
@@ -440,7 +440,7 @@ class VISP_EXPORT vpDetectorDNNOpenCV
return text;
}
- inline friend std::ostream &operator<<(std::ostream &os, const NetConfig &config)
+ friend inline std::ostream &operator<<(std::ostream &os, const NetConfig &config)
{
os << config.toString();
return os;
@@ -515,7 +515,7 @@ class VISP_EXPORT vpDetectorDNNOpenCV
* \param j The JSON object, resulting from the parsing of a JSON file.
* \param network The network, that will be initialized from the JSON data.
*/
- inline friend void from_json(const json &j, vpDetectorDNNOpenCV &network)
+ friend inline void from_json(const json &j, vpDetectorDNNOpenCV &network)
{
network.m_netConfig = j.value("networkSettings", network.m_netConfig);
}
@@ -526,7 +526,7 @@ class VISP_EXPORT vpDetectorDNNOpenCV
* \param j The JSON parser.
* \param network The network we want to parse the configuration.
*/
- inline friend void to_json(json &j, const vpDetectorDNNOpenCV &network)
+ friend inline void to_json(json &j, const vpDetectorDNNOpenCV &network)
{
j = json {
{"networkSettings", network.m_netConfig}
@@ -534,7 +534,7 @@ class VISP_EXPORT vpDetectorDNNOpenCV
}
#endif
- inline friend std::ostream &operator<<(std::ostream &os, const vpDetectorDNNOpenCV &network)
+ friend inline std::ostream &operator<<(std::ostream &os, const vpDetectorDNNOpenCV &network)
{
os << network.m_netConfig;
return os;
diff --git a/modules/gui/include/visp3/gui/vpColorBlindFriendlyPalette.h b/modules/gui/include/visp3/gui/vpColorBlindFriendlyPalette.h
index b95e7f5dcc..f708605cd4 100755
--- a/modules/gui/include/visp3/gui/vpColorBlindFriendlyPalette.h
+++ b/modules/gui/include/visp3/gui/vpColorBlindFriendlyPalette.h
@@ -139,13 +139,6 @@ class VISP_EXPORT vpColorBlindFriendlyPalette
*/
std::string to_string() const;
- /**
- * \brief Cast the object into an unsigned int that matches the value of its \b _colorID member.
- *
- * \return unsigned int that matches the value of its \b _colorID member.
- */
- unsigned int to_uint() const;
-
/**
* \brief Get the list of available colors names.
*
diff --git a/modules/gui/include/visp3/gui/vpPlot.h b/modules/gui/include/visp3/gui/vpPlot.h
index ffd1dbb128..2943e37869 100644
--- a/modules/gui/include/visp3/gui/vpPlot.h
+++ b/modules/gui/include/visp3/gui/vpPlot.h
@@ -101,7 +101,7 @@
* }
*
* return 0;
- $ #endif
+ * #endif
* }
* \endcode
*/
diff --git a/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h b/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h
index c711968932..04391deea2 100644
--- a/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h
+++ b/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h
@@ -295,7 +295,7 @@ class VISP_EXPORT vpCircleHoughTransform
* \param[in] j : The JSON object, resulting from the parsing of a JSON file.
* \param[out] params : The circle Hough transform parameters that will be initialized from the JSON data.
*/
- inline friend void from_json(const json &j, vpCircleHoughTransformParameters ¶ms)
+ friend inline void from_json(const json &j, vpCircleHoughTransformParameters ¶ms)
{
std::string filteringAndGradientName = vpImageFilter::vpCannyFilteringAndGradientTypeToString(params.m_filteringAndGradientType);
filteringAndGradientName = j.value("filteringAndGradientType", filteringAndGradientName);
@@ -363,7 +363,7 @@ class VISP_EXPORT vpCircleHoughTransform
* \param[out] j : A JSON parser object.
* \param[in] params : The circle Hough transform parameters that will be serialized in the json object.
*/
- inline friend void to_json(json &j, const vpCircleHoughTransformParameters ¶ms)
+ friend inline void to_json(json &j, const vpCircleHoughTransformParameters ¶ms)
{
std::pair radiusLimits = { params.m_minRadius, params.m_maxRadius };
@@ -486,7 +486,7 @@ class VISP_EXPORT vpCircleHoughTransform
* \param[in] j The JSON object, resulting from the parsing of a JSON file.
* \param[out] detector The detector, that will be initialized from the JSON data.
*/
- inline friend void from_json(const json &j, vpCircleHoughTransform &detector)
+ friend inline void from_json(const json &j, vpCircleHoughTransform &detector)
{
detector.m_algoParams = j;
}
@@ -497,7 +497,7 @@ class VISP_EXPORT vpCircleHoughTransform
* \param[out] j A JSON parser object.
* \param[in] detector The vpCircleHoughTransform that must be parsed into JSON format.
*/
- inline friend void to_json(json &j, const vpCircleHoughTransform &detector)
+ friend inline void to_json(json &j, const vpCircleHoughTransform &detector)
{
j = detector.m_algoParams;
}
diff --git a/modules/python/.gitignore b/modules/python/.gitignore
new file mode 100644
index 0000000000..f015fe73c8
--- /dev/null
+++ b/modules/python/.gitignore
@@ -0,0 +1,10 @@
+*.egg-info
+bindings/src
+build
+stubs/visp
+stubs/build
+*.eggs
+doc/_build
+doc/_autosummary/*
+doc/generated
+doc/api.rst
diff --git a/modules/python/CMakeLists.txt b/modules/python/CMakeLists.txt
new file mode 100644
index 0000000000..b5aa701ece
--- /dev/null
+++ b/modules/python/CMakeLists.txt
@@ -0,0 +1,139 @@
+#############################################################################
+#
+# ViSP, open source Visual Servoing Platform software.
+# Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+#
+# This software is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# See the file LICENSE.txt at the root directory of this source
+# distribution for additional information about the GNU GPL.
+#
+# For using ViSP with software that can not be combined with the GNU
+# GPL, please contact Inria about acquiring a ViSP Professional
+# Edition License.
+#
+# See https://visp.inria.fr for more information.
+#
+# This software was developed at:
+# Inria Rennes - Bretagne Atlantique
+# Campus Universitaire de Beaulieu
+# 35042 Rennes Cedex
+# France
+#
+# If you have questions regarding the use of this file, please contact
+# Inria at visp@inria.fr
+#
+# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Description:
+# ViSP Python bindings module
+#
+#############################################################################
+
+# Prevent CMake from interpreting this directory as a standard module.
+if(NOT VISP_DIR)
+ return()
+endif()
+
+# As we need all the others modules to already be configured,
+# we should configure the python directory by add_subdirectory("modules/python") in the main cmake.
+find_package(VISP REQUIRED)
+
+# TODO: check for pip
+
+# Set pip args
+if(DEFINED ENV{VIRTUAL_ENV} OR DEFINED ENV{CONDA_PREFIX})
+ set(_pip_args)
+else()
+ set(_pip_args "--user")
+endif()
+
+# Step 1: Generate configuration file
+# Define modules for which to generate python bindings
+set(python_ignored_modules "visp_python" "visp_java_bindings_generator" "visp_java" )
+set(python_bound_modules ${VISP_MODULES_BUILD})
+list(REMOVE_ITEM python_bound_modules ${python_ignored_modules})
+
+# Configure the different directories
+set(bindgen_package_location "${CMAKE_CURRENT_SOURCE_DIR}/generator")
+set(bindings_package_location "${CMAKE_CURRENT_SOURCE_DIR}/bindings")
+set(bindings_gen_location "${CMAKE_CURRENT_BINARY_DIR}/bindings")
+file(MAKE_DIRECTORY "${bindings_gen_location}/src")
+#file(TOUCH "${bindings_gen_location}/src/main.cpp")
+set(python_bindings_cpp_src "${bindings_gen_location}/src/main.cpp")
+
+foreach(module ${python_bound_modules})
+ get_target_property(dirs "${module}" INCLUDE_DIRECTORIES)
+ string(REPLACE "visp_" "" clean_module_name ${module})
+ set(cpp_src "${bindings_gen_location}/src/${clean_module_name}.cpp")
+ list(APPEND python_bindings_cpp_src "${cpp_src}")
+endforeach()
+
+include("${CMAKE_CURRENT_SOURCE_DIR}/GenerateConfig.cmake")
+
+# Step 2: Generate bindings
+# First, we install the bindings generator as an editable pip package
+# Then, we call it with the configuration files as argument. The .cpp files are generated in the cmake build directory
+
+# Get dependencies of the bindings generator
+# We should only run the generator when the config files, the sources or the C++ modules have changed
+file(GLOB config_files "${CMAKE_CURRENT_SOURCE_DIR}/config/*.json")
+file(GLOB_RECURSE python_sources "${CMAKE_CURRENT_SOURCE_DIR}/generator/visp_python_bindgen/*.py")
+set(pip_files "${CMAKE_CURRENT_SOURCE_DIR}/generator/pyproject.toml")
+
+set(bindings_dependencies
+ ${python_bound_modules}
+ ${json_config_file_path} ${config_files}
+ ${python_sources} ${pip_files}
+)
+
+# If we have doxygen, we should first generate the XML documentation
+# so that the binding stubs and doc are as complete as possible
+if(DOXYGEN_FOUND)
+ list(APPEND bindings_dependencies visp_doc_xml)
+endif()
+
+add_custom_command(
+ OUTPUT ${python_bindings_cpp_src}
+ COMMAND ${PYTHON3_EXECUTABLE} -m pip install ${_pip_args} ${bindgen_package_location}
+ COMMAND ${PYTHON3_EXECUTABLE} -m visp_python_bindgen.generator --config "${CMAKE_CURRENT_SOURCE_DIR}/config" --build-folder ${bindings_gen_location} --main-config "${json_config_file_path}"
+ DEPENDS ${bindings_dependencies}
+ COMMENT "Installing the bindings generator and running it..."
+)
+add_custom_target(
+ visp_python_bindings_generator_run
+ DEPENDS ${python_bindings_cpp_src}
+)
+
+set(VISP_PYTHON_VERSION "${VISP_VERSION}")
+# Step 3: Compile and install bindings as a python package
+add_subdirectory(bindings)
+
+# Step 4: Copy stubs dir and install stubs for autocompletion
+add_subdirectory(stubs)
+
+# Global target: compile and install the Python bindings
+add_custom_target(
+ visp_python_bindings
+ DEPENDS visp_python_bindings_stubs
+)
+
+# Step 5: Build documentation
+if(BUILD_PYTHON_BINDINGS_DOC)
+ add_subdirectory(doc)
+endif()
+
+
+# Export Variables to parent cmake
+set(VISP_PYTHON_BOUND_MODULES "")
+foreach(module ${python_bound_modules})
+ string(REPLACE "visp_" "" clean_module_name ${module})
+ list(APPEND VISP_PYTHON_BOUND_MODULES "${clean_module_name}")
+endforeach()
+set(VISP_PYTHON_BOUND_MODULES "${VISP_PYTHON_BOUND_MODULES}" PARENT_SCOPE)
+set(VISP_PYTHON_GENERATED_CONFIG_FILE "${json_config_file_path}" PARENT_SCOPE)
+
+set(VISP_PYTHON_PACKAGE_VERSION "${VISP_PYTHON_VERSION}" PARENT_SCOPE)
diff --git a/modules/python/GenerateConfig.cmake b/modules/python/GenerateConfig.cmake
new file mode 100644
index 0000000000..9281405e0b
--- /dev/null
+++ b/modules/python/GenerateConfig.cmake
@@ -0,0 +1,131 @@
+#############################################################################
+#
+# ViSP, open source Visual Servoing Platform software.
+# Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+#
+# This software is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# See the file LICENSE.txt at the root directory of this source
+# distribution for additional information about the GNU GPL.
+#
+# For using ViSP with software that can not be combined with the GNU
+# GPL, please contact Inria about acquiring a ViSP Professional
+# Edition License.
+#
+# See https://visp.inria.fr for more information.
+#
+# This software was developed at:
+# Inria Rennes - Bretagne Atlantique
+# Campus Universitaire de Beaulieu
+# 35042 Rennes Cedex
+# France
+#
+# If you have questions regarding the use of this file, please contact
+# Inria at visp@inria.fr
+#
+# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Description:
+# ViSP Python bindings module
+#
+#############################################################################
+
+set(json_config_file "{}")
+set(json_config_file_path "${CMAKE_CURRENT_BINARY_DIR}/cmake_config.json")
+
+# Paths to important directories
+string(JSON json_config_file SET ${json_config_file} "xml_doc_path" "\"${VISP_DOC_DIR}/xml\"")
+string(JSON json_config_file SET ${json_config_file} "build_dir" "\"${CMAKE_BINARY_DIR}\"")
+string(JSON json_config_file SET ${json_config_file} "source_dir" "\"${CMAKE_SOURCE_DIR}\"")
+
+# Add include directories to config file
+set(json_include_dirs "[]")
+set(include_dirs_count 0)
+foreach(include_dir ${VISP_INCLUDE_DIRS})
+ string(JSON json_include_dirs SET ${json_include_dirs} "${include_dirs_count}" "\"${include_dir}\"")
+ MATH(EXPR include_dirs_count "${include_dirs_count}+1")
+endforeach()
+string(JSON json_config_file SET ${json_config_file} "include_dirs" "${json_include_dirs}")
+
+# For each bound module, add its headers and dependencies to config file
+set(json_modules "{}")
+foreach(module ${python_bound_modules})
+ string(REPLACE "visp_" "" clean_module_name ${module})
+ string(JSON json_modules SET ${json_modules} ${clean_module_name} "{}")
+ # Get module headers
+ set(json_header_list "[]")
+ set(header_count 0)
+ foreach(module_header ${VISP_MODULE_${module}_HEADERS})
+ string(JSON json_header_list SET ${json_header_list} "${header_count}" "\"${module_header}\"")
+ MATH(EXPR header_count "${header_count}+1")
+ endforeach()
+ string(JSON json_modules SET ${json_modules} ${clean_module_name} "headers" "${json_header_list}")
+ # Get module dependencies
+ set(json_deps_list "[]")
+ set(dep_count 0)
+ foreach(dep ${VISP_MODULE_${module}_DEPS})
+ string(REPLACE "visp_" "" clean_dep ${dep})
+ string(JSON json_deps_list SET ${json_deps_list} "${dep_count}" "\"${clean_dep}\"")
+ MATH(EXPR dep_count "${dep_count}+1")
+ endforeach()
+ string(JSON json_modules SET ${json_modules} ${clean_module_name} "dependencies" "${json_deps_list}")
+endforeach()
+string(JSON json_config_file SET ${json_config_file} "modules" ${json_modules})
+
+# Define platform specific macros
+# These should be the same as those defined when compiling the visp libraries
+# The impact will only be visible if the macros defined (or not) below appear in ViSP's headers
+# See https://github.com/cpredef/predef/tree/master for compiler/OS specific #defines
+set(json_defines "{}")
+string(JSON json_defines SET ${json_defines} "__cplusplus" "${VISP_CXX_STANDARD}")
+# Compiler
+if(CMAKE_COMPILER_IS_GNUCXX)
+ string(REPLACE "." ";" GCC_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION})
+ list(GET GCC_VERSION_LIST 0 GCC_MAJOR)
+ list(GET GCC_VERSION_LIST 1 GCC_MINOR)
+ list(GET GCC_VERSION_LIST 2 GCC_PATCH)
+
+ string(JSON json_defines SET ${json_defines} "__GNUC__" "${GCC_MAJOR}")
+ string(JSON json_defines SET ${json_defines} "__GNUC_MINOR__" "${GCC_MINOR}")
+ string(JSON json_defines SET ${json_defines} "__GNUC_PATCHLEVEL__" "${GCC_PATCH}")
+endif()
+
+if(CMAKE_COMPILER_IS_CLANGCXX)
+ string(REPLACE "." ";" CLANG_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION})
+ list(GET CLANG_VERSION_LIST 0 CLANG_MAJOR)
+ list(GET CLANG_VERSION_LIST 1 CLANG_MINOR)
+ list(GET CLANG_VERSION_LIST 2 CLANG_PATCH)
+
+ string(JSON json_defines SET ${json_defines} "__clang__" "${CLANG_MAJOR}")
+ string(JSON json_defines SET ${json_defines} "__clang_minor__" "${CLANG_MINOR}")
+ string(JSON json_defines SET ${json_defines} "__clang_patchlevel__" "${CLANG_PATCH}")
+ string(JSON json_defines SET ${json_defines} "__clang_version__" "${CMAKE_CXX_COMPILER_VERSION}")
+endif()
+
+if(MSVC)
+ string(JSON json_defines SET ${json_defines} "_MSC_VER" "${MSVC_VERSION}")
+endif()
+
+if(MINGW)
+ string(JSON json_defines SET ${json_defines} "__MINGW32__" "null")
+endif()
+# OS
+if(WIN32)
+ string(JSON json_defines SET ${json_defines} "_WIN32" "null")
+endif()
+if(UNIX)
+ string(JSON json_defines SET ${json_defines} "__linux__" "null")
+ string(JSON json_defines SET ${json_defines} "__unix__" "null")
+ string(JSON json_defines SET ${json_defines} "_unix" "null")
+endif()
+if(APPLE)
+ string(JSON json_defines SET ${json_defines} "__APPLE__" "null")
+ string(JSON json_defines SET ${json_defines} "__MACH__" "null")
+endif()
+
+string(JSON json_config_file SET ${json_config_file} "defines" ${json_defines})
+
+file(WRITE ${json_config_file_path} "${json_config_file}")
diff --git a/modules/python/bindings/CMakeLists.txt b/modules/python/bindings/CMakeLists.txt
new file mode 100644
index 0000000000..644d148970
--- /dev/null
+++ b/modules/python/bindings/CMakeLists.txt
@@ -0,0 +1,63 @@
+#############################################################################
+#
+# ViSP, open source Visual Servoing Platform software.
+# Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+#
+# This software is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# See the file LICENSE.txt at the root directory of this source
+# distribution for additional information about the GNU GPL.
+#
+# For using ViSP with software that can not be combined with the GNU
+# GPL, please contact Inria about acquiring a ViSP Professional
+# Edition License.
+#
+# See https://visp.inria.fr for more information.
+#
+# This software was developed at:
+# Inria Rennes - Bretagne Atlantique
+# Campus Universitaire de Beaulieu
+# 35042 Rennes Cedex
+# France
+#
+# If you have questions regarding the use of this file, please contact
+# Inria at visp@inria.fr
+#
+# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Description:
+# ViSP Python bindings module
+#
+#############################################################################
+
+# Declare the cpp source files as explicitly generated so that pybind11_add_module does not look for them when they are not yet created
+set_source_files_properties(${python_bindings_cpp_src} PROPERTIES GENERATED TRUE)
+
+pybind11_add_module(_visp ${python_bindings_cpp_src})
+
+# Place library in binary/visp dir so that it doesn't pollute lib dir
+# This .so file is not treated the same as the others and we shouldn't link against it when compiling in C++
+# when installing the python module, pip will look into the "visp" subfolder for .so files to copy into the site-packages
+
+file(MAKE_DIRECTORY "${bindings_gen_location}/src")
+set_target_properties(_visp PROPERTIES
+ LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
+)
+
+target_include_directories(_visp PRIVATE include) # Include directory containing custom bindings
+target_include_directories(_visp PRIVATE ${VISP_INCLUDE_DIRS})
+target_link_libraries(_visp PRIVATE ${VISP_LIBRARIES})
+add_dependencies(_visp visp_python_bindings_generator_run)
+
+# Setup pip install
+if(PYTHON3INTERP_FOUND)
+ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in" "${CMAKE_CURRENT_BINARY_DIR}/setup.py" @ONLY)
+ add_custom_target( visp_python_bindings_install
+ COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_CURRENT_SOURCE_DIR}/visp" "${CMAKE_CURRENT_BINARY_DIR}/visp"
+ COMMAND ${PYTHON3_EXECUTABLE} -m pip install ${_pip_args} "${CMAKE_CURRENT_BINARY_DIR}"
+ DEPENDS _visp
+ )
+endif()
diff --git a/modules/python/bindings/include/blob.hpp b/modules/python/bindings/include/blob.hpp
new file mode 100644
index 0000000000..5dd634a473
--- /dev/null
+++ b/modules/python/bindings/include/blob.hpp
@@ -0,0 +1,69 @@
+/*
+ * ViSP, open source Visual Servoing Platform software.
+ * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+ *
+ * This software is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * See the file LICENSE.txt at the root directory of this source
+ * distribution for additional information about the GNU GPL.
+ *
+ * For using ViSP with software that can not be combined with the GNU
+ * GPL, please contact Inria about acquiring a ViSP Professional
+ * Edition License.
+ *
+ * See https://visp.inria.fr for more information.
+ *
+ * This software was developed at:
+ * Inria Rennes - Bretagne Atlantique
+ * Campus Universitaire de Beaulieu
+ * 35042 Rennes Cedex
+ * France
+ *
+ * If you have questions regarding the use of this file, please contact
+ * Inria at visp@inria.fr
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Description:
+ * Python bindings.
+ */
+
+#ifndef VISP_PYTHON_BLOB_HPP
+#define VISP_PYTHON_BLOB_HPP
+
+#include
+#include
+#include
+
+#include
+#include
+
+namespace py = pybind11;
+
+void bindings_vpDot2(py::class_ &pyDot2)
+{
+ pyDot2.def_static("defineDots", [](std::vector &dots,
+ const std::string &dotFile,
+ vpImage &I,
+ vpColor col = vpColor::blue,
+ bool trackDot = true) {
+ return vpDot2::defineDots(&dots[0], dots.size(), dotFile, I, col, trackDot);
+ }, R"doc(
+Wrapper for the defineDots method, see the C++ ViSP documentation.
+)doc", py::arg("dots"), py::arg("dotFile"), py::arg("I"), py::arg("color"), py::arg("trackDot") = true);
+
+ pyDot2.def_static("trackAndDisplay", [](std::vector &dots,
+ vpImage &I,
+ std::vector &cogs,
+ std::optional> cogStar) {
+ vpImagePoint *desireds = cogStar ? &((*cogStar)[0]) : nullptr;
+ vpDot2::trackAndDisplay(&dots[0], dots.size(), I, cogs, desireds);
+ }, R"doc(
+Wrapper for the trackAndDisplay method, see the C++ ViSP documentation.
+)doc", py::arg("dots"), py::arg("I"), py::arg("cogs"), py::arg("desiredCogs"));
+}
+
+#endif
diff --git a/modules/python/bindings/include/core.hpp b/modules/python/bindings/include/core.hpp
new file mode 100644
index 0000000000..99c8b7e8ee
--- /dev/null
+++ b/modules/python/bindings/include/core.hpp
@@ -0,0 +1,44 @@
+/*
+ * ViSP, open source Visual Servoing Platform software.
+ * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+ *
+ * This software is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * See the file LICENSE.txt at the root directory of this source
+ * distribution for additional information about the GNU GPL.
+ *
+ * For using ViSP with software that can not be combined with the GNU
+ * GPL, please contact Inria about acquiring a ViSP Professional
+ * Edition License.
+ *
+ * See https://visp.inria.fr for more information.
+ *
+ * This software was developed at:
+ * Inria Rennes - Bretagne Atlantique
+ * Campus Universitaire de Beaulieu
+ * 35042 Rennes Cedex
+ * France
+ *
+ * If you have questions regarding the use of this file, please contact
+ * Inria at visp@inria.fr
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Description:
+ * Python bindings.
+ */
+
+#ifndef VISP_PYTHON_CORE_HPP
+#define VISP_PYTHON_CORE_HPP
+
+#include "core/utils.hpp"
+#include "core/arrays.hpp"
+#include "core/images.hpp"
+#include "core/pixel_meter.hpp"
+#include "core/image_conversions.hpp"
+
+
+#endif
diff --git a/modules/python/bindings/include/core/arrays.hpp b/modules/python/bindings/include/core/arrays.hpp
new file mode 100644
index 0000000000..7c862728c2
--- /dev/null
+++ b/modules/python/bindings/include/core/arrays.hpp
@@ -0,0 +1,352 @@
+/*
+ * ViSP, open source Visual Servoing Platform software.
+ * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+ *
+ * This software is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * See the file LICENSE.txt at the root directory of this source
+ * distribution for additional information about the GNU GPL.
+ *
+ * For using ViSP with software that can not be combined with the GNU
+ * GPL, please contact Inria about acquiring a ViSP Professional
+ * Edition License.
+ *
+ * See https://visp.inria.fr for more information.
+ *
+ * This software was developed at:
+ * Inria Rennes - Bretagne Atlantique
+ * Campus Universitaire de Beaulieu
+ * 35042 Rennes Cedex
+ * France
+ *
+ * If you have questions regarding the use of this file, please contact
+ * Inria at visp@inria.fr
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Description:
+ * Python bindings.
+ */
+
+#ifndef VISP_PYTHON_CORE_ARRAYS_HPP
+#define VISP_PYTHON_CORE_ARRAYS_HPP
+
+#include "core/utils.hpp"
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/*
+ * Array2D and its children.
+ */
+
+/*
+ * Get buffer infos : used in def_buffer and the .numpy() function.
+ */
+template py::buffer_info get_buffer_info(T &) = delete;
+template class Array,
+ typename std::enable_if, Array>::value, bool>::type = true>
+py::buffer_info get_buffer_info(Array &array)
+{
+ return make_array_buffer(array.data, { array.getRows(), array.getCols() }, false);
+}
+
+template<>
+py::buffer_info get_buffer_info(vpMatrix &array)
+{
+ return make_array_buffer(array.data, { array.getRows(), array.getCols() }, false);
+}
+
+template<>
+py::buffer_info get_buffer_info(vpColVector &array)
+{
+ return make_array_buffer(array.data, { array.getRows() }, false);
+}
+template<>
+py::buffer_info get_buffer_info(vpRowVector &array)
+{
+ return make_array_buffer(array.data, { array.getCols() }, false);
+}
+template<>
+py::buffer_info get_buffer_info(vpTranslationVector &array)
+{
+ return make_array_buffer(array.data, { 3 }, false);
+}
+template<>
+py::buffer_info get_buffer_info(vpRotationMatrix &array)
+{
+ return make_array_buffer(array.data, { array.getRows(), array.getCols() }, true);
+}
+template<>
+py::buffer_info get_buffer_info(vpHomogeneousMatrix &array)
+{
+ return make_array_buffer(array.data, { array.getRows(), array.getCols() }, true);
+}
+
+/*
+ * Array 2D indexing
+ */
+template
+void define_get_item_2d_array(PyClass &pyClass)
+{
+ pyClass.def("__getitem__", [](const Class &self, std::pair pair) -> Item {
+ int i = pair.first, j = pair.second;
+ const int rows = (int)self.getRows(), cols = (int)self.getCols();
+ if (i >= rows || j >= cols || i < -rows || j < -cols) {
+ std::stringstream ss;
+ ss << "Invalid indexing into a 2D array: got indices " << shape_to_string({ i, j })
+ << " but array has dimensions " << shape_to_string({ rows, cols });
+ throw std::runtime_error(ss.str());
+ }
+ if (i < 0) {
+ i = rows + i;
+ }
+ if (j < 0) {
+ j = cols + j;
+ }
+ return self[i][j];
+ });
+ pyClass.def("__getitem__", [](const Class &self, int i) -> np_array_cf- {
+ const int rows = (int)self.getRows();
+ if (i >= rows || i < -rows) {
+ std::stringstream ss;
+ ss << "Invalid indexing into a 2D array: got row index " << shape_to_string({ i })
+ << " but array has " << rows << " rows";
+ throw std::runtime_error(ss.str());
+ }
+ if (i < 0) {
+ i = rows + i;
+ }
+ return (py::cast(self).template cast >())[py::cast(i)].template cast>();
+ }, py::keep_alive<0, 1>());
+ pyClass.def("__getitem__", [](const Class &self, py::slice slice) -> py::array_t
+ {
+ return (py::cast(self).template cast >())[slice].template cast>();
+ }, py::keep_alive<0, 1>());
+ pyClass.def("__getitem__", [](const Class &self, py::tuple tuple) {
+ return (py::cast(self).template cast >())[tuple].template cast>();
+ }, py::keep_alive<0, 1>());
+}
+
+/*
+ * Array 2D indexing
+ */
+template
+void define_get_item_1d_array(PyClass &pyClass)
+{
+ pyClass.def("__getitem__", [](const Class &self, int i) -> Item {
+
+ const int elems = (int)self.getRows() * (int)self.getCols();
+ if (i >= elems || i < -elems) {
+ std::stringstream ss;
+ ss << "Invalid indexing into a 1D array: got indices " << shape_to_string({ i })
+ << " but array has dimensions " << shape_to_string({ elems });
+ throw std::runtime_error(ss.str());
+ }
+ if (i < 0) {
+ i = elems + i;
+ }
+ return self[i];
+ });
+ pyClass.def("__getitem__", [](const Class &self, py::slice slice) -> py::array_t
+ {
+ return (py::cast(self).template cast >())[slice].template cast>();
+ }, py::keep_alive<0, 1>());
+}
+
+const char *numpy_fn_doc_writable = R"doc(
+ Numpy view of the underlying array data.
+ This numpy view can be used to directly modify the array.
+)doc";
+
+const char *numpy_fn_doc_nonwritable = R"doc(
+ Numpy view of the underlying array data.
+ This numpy view cannot be modified.
+ If you try to modify the array, an exception will be raised.
+)doc";
+
+template
+void bindings_vpArray2D(py::class_> &pyArray2D)
+{
+ pyArray2D.def_buffer(&get_buffer_info);
+
+ pyArray2D.def("numpy", [](vpArray2D &self) -> np_array_cf {
+ return py::cast(self).template cast >();
+ }, numpy_fn_doc_writable, py::keep_alive<0, 1>());
+
+ pyArray2D.def(py::init([](np_array_cf &np_array) {
+ verify_array_shape_and_dims(np_array, 2, "ViSP 2D array");
+ const std::vector shape = np_array.request().shape;
+ vpArray2D result(shape[0], shape[1]);
+ copy_data_from_np(np_array, result.data);
+ return result;
+ }), R"doc(
+Construct a 2D ViSP array by **copying** a 2D numpy array.
+
+:param np_array: The numpy array to copy.
+
+)doc", py::arg("np_array"));
+
+ define_get_item_2d_array>, vpArray2D, T>(pyArray2D);
+}
+
+void bindings_vpMatrix(py::class_> &pyMatrix)
+{
+ pyMatrix.def_buffer(&get_buffer_info);
+
+ pyMatrix.def("numpy", [](vpMatrix &self) -> np_array_cf {
+ return py::cast(self).cast>();
+ }, numpy_fn_doc_writable, py::keep_alive<0, 1>());
+
+ pyMatrix.def(py::init([](np_array_cf np_array) {
+ verify_array_shape_and_dims(np_array, 2, "ViSP Matrix");
+ const std::vector shape = np_array.request().shape;
+ vpMatrix result(shape[0], shape[1]);
+ copy_data_from_np(np_array, result.data);
+ return result;
+ }), R"doc(
+Construct a matrix by **copying** a 2D numpy array.
+
+:param np_array: The numpy array to copy.
+
+)doc", py::arg("np_array"));
+
+ define_get_item_2d_array>, vpMatrix, double>(pyMatrix);
+}
+
+
+void bindings_vpRotationMatrix(py::class_> &pyRotationMatrix)
+{
+
+ pyRotationMatrix.def_buffer(&get_buffer_info);
+ pyRotationMatrix.def("numpy", [](vpRotationMatrix &self) -> np_array_cf {
+ return py::cast(self).cast>();
+ }, numpy_fn_doc_nonwritable, py::keep_alive<0, 1>());
+ pyRotationMatrix.def(py::init([](np_array_cf np_array) {
+ verify_array_shape_and_dims(np_array, { 3, 3 }, "ViSP rotation matrix");
+ const std::vector shape = np_array.request().shape;
+ vpRotationMatrix result;
+ copy_data_from_np(np_array, result.data);
+ if (!result.isARotationMatrix()) {
+ throw std::runtime_error("Input numpy array is not a valid rotation matrix");
+ }
+ return result;
+ }), R"doc(
+Construct a rotation matrix by **copying** a 2D numpy array.
+This numpy array should be of dimensions :math:`3 \times 3` and be a valid rotation matrix.
+If it is not a rotation matrix, an exception will be raised.
+
+:param np_array: The numpy 1D array to copy.
+
+)doc", py::arg("np_array"));
+ define_get_item_2d_array>, vpRotationMatrix, double>(pyRotationMatrix);
+}
+
+void bindings_vpHomogeneousMatrix(py::class_> &pyHomogeneousMatrix)
+{
+ pyHomogeneousMatrix.def_buffer(get_buffer_info);
+ pyHomogeneousMatrix.def("numpy", [](vpHomogeneousMatrix &self) -> np_array_cf {
+ return py::cast(self).cast>();
+ }, numpy_fn_doc_nonwritable, py::keep_alive<0, 1>());
+
+ pyHomogeneousMatrix.def(py::init([](np_array_cf np_array) {
+ verify_array_shape_and_dims(np_array, { 4, 4 }, "ViSP homogeneous matrix");
+ const std::vector shape = np_array.request().shape;
+ vpHomogeneousMatrix result;
+ copy_data_from_np(np_array, result.data);
+ if (!result.isAnHomogeneousMatrix()) {
+ throw std::runtime_error("Input numpy array is not a valid homogeneous matrix");
+ }
+ return result;
+ }), R"doc(
+Construct a homogeneous matrix by **copying** a 2D numpy array.
+This numpy array should be of dimensions :math:`4 \times 4` and be a valid homogeneous matrix.
+If it is not a homogeneous matrix, an exception will be raised.
+
+:param np_array: The numpy 1D array to copy.
+
+)doc", py::arg("np_array"));
+ define_get_item_2d_array>, vpHomogeneousMatrix, double>(pyHomogeneousMatrix);
+}
+
+
+
+void bindings_vpTranslationVector(py::class_> &pyTranslationVector)
+{
+ pyTranslationVector.def_buffer(&get_buffer_info);
+
+ pyTranslationVector.def("numpy", [](vpTranslationVector &self) -> np_array_cf {
+ return py::cast(self).cast>();
+ }, numpy_fn_doc_writable, py::keep_alive<0, 1>());
+
+ pyTranslationVector.def(py::init([](np_array_cf np_array) {
+ const std::vector required_shape = { 3 };
+ verify_array_shape_and_dims(np_array, required_shape, "ViSP translation vector");
+ const std::vector shape = np_array.request().shape;
+ vpTranslationVector result;
+ copy_data_from_np(np_array, result.data);
+ return result;
+ }), R"doc(
+Construct a Translation vector by **copying** a 1D numpy array of size 3.
+
+:param np_array: The numpy 1D array to copy.
+
+)doc", py::arg("np_array"));
+ define_get_item_1d_array>, vpTranslationVector, double>(pyTranslationVector);
+}
+
+
+void bindings_vpColVector(py::class_> &pyColVector)
+{
+ pyColVector.def_buffer(&get_buffer_info);
+
+ pyColVector.def("numpy", [](vpColVector &self) -> np_array_cf {
+ return py::cast(self).cast>();
+ }, numpy_fn_doc_writable, py::keep_alive<0, 1>());
+
+ pyColVector.def(py::init([](np_array_cf np_array) {
+ verify_array_shape_and_dims(np_array, 1, "ViSP column vector");
+ const std::vector shape = np_array.request().shape;
+ vpColVector result(shape[0]);
+ copy_data_from_np(np_array, result.data);
+ return result;
+ }), R"doc(
+Construct a column vector by **copying** a 1D numpy array.
+
+:param np_array: The numpy 1D array to copy.
+
+)doc", py::arg("np_array"));
+ define_get_item_1d_array>, vpColVector, double>(pyColVector);
+
+}
+
+void bindings_vpRowVector(py::class_> &pyRowVector)
+{
+ pyRowVector.def_buffer(&get_buffer_info);
+ pyRowVector.def("numpy", [](vpRowVector &self) -> np_array_cf {
+ return np_array_cf(get_buffer_info(self), py::cast(self));
+ }, numpy_fn_doc_writable, py::keep_alive<0, 1>());
+ pyRowVector.def(py::init([](np_array_cf np_array) {
+ verify_array_shape_and_dims(np_array, 1, "ViSP row vector");
+ const std::vector shape = np_array.request().shape;
+ vpRowVector result(shape[0]);
+ copy_data_from_np(np_array, result.data);
+ return result;
+ }), R"doc(
+Construct a row vector by **copying** a 1D numpy array.
+
+:param np_array: The numpy 1D array to copy.
+
+)doc", py::arg("np_array"));
+ define_get_item_1d_array>, vpRowVector, double>(pyRowVector);
+}
+
+
+#endif
diff --git a/modules/python/bindings/include/core/image_conversions.hpp b/modules/python/bindings/include/core/image_conversions.hpp
new file mode 100644
index 0000000000..68099d9d15
--- /dev/null
+++ b/modules/python/bindings/include/core/image_conversions.hpp
@@ -0,0 +1,242 @@
+/*
+ * ViSP, open source Visual Servoing Platform software.
+ * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+ *
+ * This software is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * See the file LICENSE.txt at the root directory of this source
+ * distribution for additional information about the GNU GPL.
+ *
+ * For using ViSP with software that can not be combined with the GNU
+ * GPL, please contact Inria about acquiring a ViSP Professional
+ * Edition License.
+ *
+ * See https://visp.inria.fr for more information.
+ *
+ * This software was developed at:
+ * Inria Rennes - Bretagne Atlantique
+ * Campus Universitaire de Beaulieu
+ * 35042 Rennes Cedex
+ * France
+ *
+ * If you have questions regarding the use of this file, please contact
+ * Inria at visp@inria.fr
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Description:
+ * Python bindings.
+ */
+
+#ifndef VISP_PYTHON_CORE_IMAGE_CONVERT_HPP
+#define VISP_PYTHON_CORE_IMAGE_CONVERT_HPP
+
+#include
+#include
+#include
+
+#include
+
+namespace
+{
+using ConversionFunction1D = void(*)(unsigned char *, unsigned char *, unsigned int);
+using ConversionFunction2D = void(*)(unsigned char *, unsigned char *, unsigned int, unsigned int);
+using ComputeBytesFunction = unsigned(*)(unsigned int, unsigned int);
+
+void call_conversion_fn(ConversionFunction2D fn, unsigned char *src, unsigned char *dest, unsigned int h, unsigned int w)
+{
+ fn(src, dest, h, w);
+}
+void call_conversion_fn(ConversionFunction1D fn, unsigned char *src, unsigned char *dest, unsigned int h, unsigned int w)
+{
+ fn(src, dest, h * w);
+}
+
+template
+struct SimpleConversionStruct
+{
+ SimpleConversionStruct(const std::string &name, ConversionFn fn, unsigned int srcBytesPerPixel, unsigned int destBytesPerPixel) :
+ name(name), fn(fn), srcBytesPerPixel(srcBytesPerPixel), destBytesPerPixel(destBytesPerPixel)
+ { }
+ std::string name;
+ ConversionFn fn;
+ unsigned int srcBytesPerPixel;
+ unsigned int destBytesPerPixel;
+
+ void add_conversion_binding(py::class_ &pyImageConvert)
+ {
+ pyImageConvert.def_static(name.c_str(), [this](py::array_t &src,
+ py::array_t &dest) {
+ py::buffer_info bufsrc = src.request(), bufdest = dest.request();
+ if (bufsrc.ndim < 2 || bufdest.ndim < 2) {
+ throw std::runtime_error("Expected to have src and dest arrays with at least two dimensions.");
+ }
+ if (bufsrc.shape[0] != bufdest.shape[0] || bufsrc.shape[1] != bufdest.shape[1]) {
+ std::stringstream ss;
+ ss << "src and dest must have the same number of pixels, but got src = " << shape_to_string(bufsrc.shape);
+ ss << " and dest = " << shape_to_string(bufdest.shape);
+ throw std::runtime_error(ss.str());
+ }
+ if (srcBytesPerPixel > 1 && (bufsrc.ndim != 3 || bufsrc.shape[2] != srcBytesPerPixel)) {
+ std::stringstream ss;
+ ss << "Source array should be a 3D array of shape (H, W, " << srcBytesPerPixel << ")";
+ throw std::runtime_error(ss.str());
+ }
+ else if (srcBytesPerPixel == 1 && bufsrc.ndim == 3 && bufsrc.shape[2] > 1) {
+ throw std::runtime_error("Source array should be either a 2D array of shape H x W or a 3D array of shape (H, W, 1)");
+ }
+ if (destBytesPerPixel > 1 && (bufdest.ndim != 3 || bufdest.shape[2] != destBytesPerPixel)) {
+ std::stringstream ss;
+ ss << "Destination array should be a 3D array of shape (H, W, " << destBytesPerPixel << ")";
+ throw std::runtime_error(ss.str());
+ }
+ else if (destBytesPerPixel == 1 && bufdest.ndim == 3 && bufdest.shape[2] > 1) {
+ throw std::runtime_error("Destination should be either a 2D array of shape H x W or a 3D array of shape (H, W, 1)");
+ }
+
+
+ unsigned char *src_ptr = static_cast<unsigned char *>(bufsrc.ptr);
+ unsigned char *dest_ptr = static_cast<unsigned char *>(bufdest.ptr);
+ call_conversion_fn(fn, src_ptr, dest_ptr, bufsrc.shape[0], bufsrc.shape[1]);
+ }, py::arg("src"), py::arg("dest"));
+ }
+
+};
+
+template
+struct ConversionFromYUVLike
+{
+ ConversionFromYUVLike(const std::string &name, ConversionFn fn, ComputeBytesFunction sourceBytesFn, unsigned int destBytesPerPixel) :
+ name(name), fn(fn), sourceBytesFn(sourceBytesFn), destBytesPerPixel(destBytesPerPixel)
+ { }
+ std::string name;
+ ConversionFn fn;
+ ComputeBytesFunction sourceBytesFn;
+
+ unsigned int destBytesPerPixel;
+
+ void add_conversion_binding(py::class_ &pyImageConvert)
+ {
+ pyImageConvert.def_static(name.c_str(), [this](py::array_t &src,
+ py::array_t &dest) {
+ py::buffer_info bufsrc = src.request(), bufdest = dest.request();
+ if (bufdest.ndim < 2) {
+ throw std::runtime_error("Expected to have dest array with at least two dimensions.");
+ }
+
+ unsigned int height = bufdest.shape[0], width = bufdest.shape[1];
+
+ unsigned expectedSourceBytes = sourceBytesFn(height, width);
+
+ unsigned actualBytes = 1;
+ for (unsigned int i = 0; i < bufsrc.ndim; ++i) {
+ actualBytes *= bufsrc.shape[i];
+ }
+
+ if (actualBytes != expectedSourceBytes) {
+ std::stringstream ss;
+ ss << "Expected to have " << expectedSourceBytes << " bytes in the input array, but got " << actualBytes << " elements.";
+ throw std::runtime_error(ss.str());
+ }
+
+ if (destBytesPerPixel > 1 && (bufdest.ndim != 3 || bufdest.shape[2] != destBytesPerPixel)) {
+ std::stringstream ss;
+ ss << "Destination array should be a 3D array of shape (H, W, " << destBytesPerPixel << ")";
+ throw std::runtime_error(ss.str());
+ }
+ else if (destBytesPerPixel == 1 && bufdest.ndim == 3 && bufdest.shape[2] > 1) {
+ throw std::runtime_error("Destination should be either a 2D array of shape H x W or a 3D array of shape (H, W, 1)");
+ }
+
+
+ unsigned char *src_ptr = static_cast<unsigned char *>(bufsrc.ptr);
+ unsigned char *dest_ptr = static_cast<unsigned char *>(bufdest.ptr);
+ call_conversion_fn(fn, src_ptr, dest_ptr, bufdest.shape[0], bufdest.shape[1]);
+ }, py::arg("src"), py::arg("dest"));
+ }
+
+};
+
+unsigned size422(unsigned h, unsigned w)
+{
+ return h * w + (h * (w / 2)) * 2;
+}
+unsigned size420(unsigned h, unsigned w)
+{
+ return h * w + ((h / 2) * (w / 2)) * 2;
+}
+unsigned size411(unsigned h, unsigned w)
+{
+ return h * w + ((h / 4) * (w / 4)) * 2;
+}
+
+}
+
+
+
+void bindings_vpImageConvert(py::class_ &pyImageConvert)
+{
+ // Simple conversions where the size input is a single argument
+ {
+ std::vector> conversions = {
+ SimpleConversionStruct("YUV444ToGrey", &vpImageConvert::YUV444ToGrey, 3, 1),
+ SimpleConversionStruct("YUV444ToRGB", &vpImageConvert::YUV444ToRGB, 3, 3),
+ SimpleConversionStruct("YUV444ToRGBa", &vpImageConvert::YUV444ToRGBa, 3, 4),
+ SimpleConversionStruct("RGBToRGBa", static_cast(&vpImageConvert::RGBToRGBa), 3, 4),
+ SimpleConversionStruct("RGBaToRGB", &vpImageConvert::RGBaToRGB, 4, 3),
+ SimpleConversionStruct("GreyToRGB", &vpImageConvert::GreyToRGB, 1, 3),
+ SimpleConversionStruct("GreyToRGBa", static_cast(&vpImageConvert::GreyToRGBa), 1, 4),
+ SimpleConversionStruct("RGBToGrey", static_cast(&vpImageConvert::RGBToGrey), 3, 1),
+ };
+ for (auto &conversion: conversions) {
+ conversion.add_conversion_binding(pyImageConvert);
+ }
+ }
+
+ //YUV conversions
+ {
+ using Conv = ConversionFromYUVLike;
+ std::vector conversions = {
+ Conv("YUYVToRGBa", &vpImageConvert::YUYVToRGBa, &size422, 4),
+ Conv("YUYVToRGB", &vpImageConvert::YUYVToRGB, &size422, 3),
+
+ Conv("YV12ToRGBa", &vpImageConvert::YV12ToRGBa, &size420, 4),
+ Conv("YV12ToRGB", &vpImageConvert::YV12ToRGB, &size420, 3),
+ Conv("YUV420ToRGBa", &vpImageConvert::YUV420ToRGBa, &size420, 4),
+ Conv("YUV420ToRGB", &vpImageConvert::YUV420ToRGB, &size420, 3),
+
+ Conv("YVU9ToRGBa", &vpImageConvert::YVU9ToRGBa, &size411, 4),
+ Conv("YVU9ToRGB", &vpImageConvert::YVU9ToRGB, &size411, 3),
+ };
+ for (auto &conversion: conversions) {
+ conversion.add_conversion_binding(pyImageConvert);
+ }
+ }
+ {
+ using Conv = ConversionFromYUVLike;
+ std::vector conversions = {
+
+ Conv("YUYVToGrey", &vpImageConvert::YUYVToGrey, &size422, 1),
+ Conv("YUV422ToRGBa", &vpImageConvert::YUV422ToRGBa, &size422, 4),
+ Conv("YUV422ToRGB", &vpImageConvert::YUV422ToRGB, &size422, 3),
+ Conv("YUV422ToGrey", &vpImageConvert::YUV422ToGrey, &size422, 1),
+ Conv("YCbCrToRGBa", &vpImageConvert::YCbCrToRGBa, &size422, 4),
+ Conv("YCbCrToRGB", &vpImageConvert::YCbCrToRGB, &size422, 3),
+ Conv("YCbCrToGrey", &vpImageConvert::YCbCrToGrey, &size422, 1),
+ Conv("YCrCbToRGBa", &vpImageConvert::YCrCbToRGBa, &size422, 4),
+ Conv("YCrCbToRGB", &vpImageConvert::YCrCbToRGB, &size422, 3),
+ Conv("YUV420ToGrey", &vpImageConvert::YUV420ToGrey, &size420, 1),
+ Conv("YUV411ToRGBa", &vpImageConvert::YUV411ToRGBa, &size411, 4),
+ Conv("YUV411ToRGB", &vpImageConvert::YUV411ToRGB, &size411, 3),
+ Conv("YUV411ToGrey", &vpImageConvert::YUV411ToGrey, &size411, 1),
+ };
+ for (auto &conversion: conversions) {
+ conversion.add_conversion_binding(pyImageConvert);
+ }
+ }
+}
+
+#endif
diff --git a/modules/python/bindings/include/core/images.hpp b/modules/python/bindings/include/core/images.hpp
new file mode 100644
index 0000000000..53e0a08e19
--- /dev/null
+++ b/modules/python/bindings/include/core/images.hpp
@@ -0,0 +1,228 @@
+/*
+ * ViSP, open source Visual Servoing Platform software.
+ * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+ *
+ * This software is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * See the file LICENSE.txt at the root directory of this source
+ * distribution for additional information about the GNU GPL.
+ *
+ * For using ViSP with software that can not be combined with the GNU
+ * GPL, please contact Inria about acquiring a ViSP Professional
+ * Edition License.
+ *
+ * See https://visp.inria.fr for more information.
+ *
+ * This software was developed at:
+ * Inria Rennes - Bretagne Atlantique
+ * Campus Universitaire de Beaulieu
+ * 35042 Rennes Cedex
+ * France
+ *
+ * If you have questions regarding the use of this file, please contact
+ * Inria at visp@inria.fr
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Description:
+ * Python bindings.
+ */
+
+#ifndef VISP_PYTHON_CORE_IMAGES_HPP
+#define VISP_PYTHON_CORE_IMAGES_HPP
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace
+{
+const char *numpy_fn_doc_image = R"doc(
+ Numpy view of the underlying image data.
+ This numpy view can be used to directly modify the array.
+)doc";
+}
+
+/*
+ * Image 2D indexing
+ */
+template
+void define_get_item_2d_image(py::class_> &pyClass)
+{
+ pyClass.def("__getitem__", [](const vpImage &self, std::pair pair) -> T {
+ int i = pair.first, j = pair.second;
+ const int rows = (int)self.getHeight(), cols = (int)self.getWidth();
+ if (i >= rows || j >= cols || i < -rows || j < -cols) {
+ std::stringstream ss;
+ ss << "Invalid indexing into a 2D image: got indices " << shape_to_string({ i, j })
+ << " but image has dimensions " << shape_to_string({ rows, cols });
+ throw std::runtime_error(ss.str());
+ }
+ if (i < 0) {
+ i = rows + i;
+ }
+ if (j < 0) {
+ j = cols + j;
+ }
+ return self[i][j];
+ });
+ pyClass.def("__getitem__", [](const vpImage &self, int i) -> np_array_cf {
+ const int rows = (int)self.getRows();
+ if (i >= rows || i < -rows) {
+ std::stringstream ss;
+ ss << "Invalid indexing into a 2D image: got row index " << shape_to_string({ i })
+ << " but array has " << rows << " rows";
+ throw std::runtime_error(ss.str());
+ }
+ if (i < 0) {
+ i = rows + i;
+ }
+ return (py::cast(self).template cast >())[py::cast(i)].template cast>();
+ });
+ pyClass.def("__getitem__", [](const vpImage &self, py::slice slice) -> py::array_t {
+ return (py::cast(self).template cast >())[slice].template cast>();
+ }, py::keep_alive<0, 1>());
+ pyClass.def("__getitem__", [](const vpImage &self, py::tuple tuple) {
+ return (py::cast(self).template cast >())[tuple].template cast>();
+ }, py::keep_alive<0, 1>());
+}
+
+/*
+ * vpImage
+ */
+template
+typename std::enable_if::value, void>::type
+bindings_vpImage(py::class_> &pyImage)
+{
+ pyImage.def_buffer([](vpImage &image) -> py::buffer_info {
+ return make_array_buffer(image.bitmap, { image.getHeight(), image.getWidth() }, false);
+ });
+ pyImage.def("numpy", [](vpImage &self) -> np_array_cf {
+ return py::cast(self).template cast>();
+ }, numpy_fn_doc_image, py::keep_alive<0, 1>());
+
+ pyImage.def(py::init([](np_array_cf &np_array) {
+ verify_array_shape_and_dims(np_array, 2, "ViSP Image");
+ const std::vector shape = np_array.request().shape;
+ vpImage result(shape[0], shape[1]);
+ copy_data_from_np(np_array, result.bitmap);
+ return result;
+ }), R"doc(
+Construct an image by **copying** a 2D numpy array.
+
+:param np_array: The numpy array to copy.
+
+)doc", py::arg("np_array"));
+
+ define_get_item_2d_image(pyImage);
+
+ pyImage.def("__repr__", [](const vpImage &self) -> std::string {
+ std::stringstream ss;
+ ss << "";
+ return ss.str();
+ });
+
+ pyImage.def("_visp_repr", [](const vpImage &self) -> std::string {
+ std::stringstream ss;
+ ss << self;
+ return ss.str();
+ }, R"doc(Get the full ViSP image string representation.)doc");
+
+}
+
+template
+typename std::enable_if::value, void>::type
+bindings_vpImage(py::class_> &pyImage)
+{
+ using NpRep = unsigned char;
+ static_assert(sizeof(T) == 4 * sizeof(NpRep));
+ pyImage.def_buffer([](vpImage &image) -> py::buffer_info {
+ return make_array_buffer(reinterpret_cast(image.bitmap), { image.getHeight(), image.getWidth(), 4 }, false);
+ });
+ pyImage.def("numpy", [](vpImage &self) -> np_array_cf {
+ return py::cast(self).template cast>();
+ }, numpy_fn_doc_image, py::keep_alive<0, 1>());
+
+ pyImage.def(py::init([](np_array_cf &np_array) {
+ verify_array_shape_and_dims(np_array, 3, "ViSP RGBa image");
+ const std::vector shape = np_array.request().shape;
+ if (shape[2] != 4) {
+ throw std::runtime_error("Tried to copy a 3D numpy array that does not have 4 elements per pixel into a ViSP RGBA image");
+ }
+ vpImage result(shape[0], shape[1]);
+ copy_data_from_np(np_array, (NpRep *)result.bitmap);
+ return result;
+ }), R"doc(
+Construct an image by **copying** a 3D numpy array. this numpy array should be of the form :math:`H \times W \times 4`
+where the 4 denotes the red, green, blue and alpha components of the image.
+
+:param np_array: The numpy array to copy.
+
+)doc", py::arg("np_array"));
+ define_get_item_2d_image(pyImage);
+
+ pyImage.def("__repr__", [](const vpImage &self) -> std::string {
+ std::stringstream ss;
+ ss << "";
+ return ss.str();
+ });
+
+ pyImage.def("_visp_repr", [](const vpImage &self) -> std::string {
+ std::stringstream ss;
+ ss << self;
+ return ss.str();
+ }, R"doc(Get the full ViSP image string representation.)doc");
+
+}
+template
+typename std::enable_if::value, void>::type
+bindings_vpImage(py::class_> &pyImage)
+{
+ using NpRep = float;
+ static_assert(sizeof(T) == 3 * sizeof(NpRep));
+ pyImage.def_buffer([](vpImage &image) -> py::buffer_info {
+ return make_array_buffer(reinterpret_cast(image.bitmap), { image.getHeight(), image.getWidth(), 3 }, false);
+ });
+
+ pyImage.def("numpy", [](vpImage &self) -> np_array_cf {
+ return py::cast(self).template cast>();
+ }, numpy_fn_doc_image, py::keep_alive<0, 1>());
+
+ pyImage.def(py::init([](np_array_cf &np_array) {
+ verify_array_shape_and_dims(np_array, 3, "ViSP RGBf image");
+ const std::vector shape = np_array.request().shape;
+ if (shape[2] != 3) {
+ throw std::runtime_error("Tried to copy a 3D numpy array that does not have 3 elements per pixel into a ViSP RGBf image");
+ }
+ vpImage result(shape[0], shape[1]);
+ copy_data_from_np(np_array, (NpRep *)result.bitmap);
+ return result;
+ }), R"doc(
+Construct an image by **copying** a 3D numpy array. this numpy array should be of the form :math:`H \times W \times 3`
+where the 3 denotes the red, green and blue components of the image.
+
+:param np_array: The numpy array to copy.
+
+)doc", py::arg("np_array"));
+ define_get_item_2d_image(pyImage);
+
+ pyImage.def("__repr__", [](const vpImage &self) -> std::string {
+ std::stringstream ss;
+ ss << "";
+ return ss.str();
+ });
+
+ pyImage.def("_visp_repr", [](const vpImage &self) -> std::string {
+ std::stringstream ss;
+ ss << self;
+ return ss.str();
+ }, R"doc(Get the full ViSP image string representation.)doc");
+}
+
+#endif
diff --git a/modules/python/bindings/include/core/pixel_meter.hpp b/modules/python/bindings/include/core/pixel_meter.hpp
new file mode 100644
index 0000000000..c55b272e2d
--- /dev/null
+++ b/modules/python/bindings/include/core/pixel_meter.hpp
@@ -0,0 +1,172 @@
+/*
+ * ViSP, open source Visual Servoing Platform software.
+ * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+ *
+ * This software is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * See the file LICENSE.txt at the root directory of this source
+ * distribution for additional information about the GNU GPL.
+ *
+ * For using ViSP with software that can not be combined with the GNU
+ * GPL, please contact Inria about acquiring a ViSP Professional
+ * Edition License.
+ *
+ * See https://visp.inria.fr for more information.
+ *
+ * This software was developed at:
+ * Inria Rennes - Bretagne Atlantique
+ * Campus Universitaire de Beaulieu
+ * 35042 Rennes Cedex
+ * France
+ *
+ * If you have questions regarding the use of this file, please contact
+ * Inria at visp@inria.fr
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Description:
+ * Python bindings.
+ */
+
+#ifndef VISP_PYTHON_CORE_PIXEL_METER_HPP
+#define VISP_PYTHON_CORE_PIXEL_METER_HPP
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include "core/utils.hpp"
+
+void bindings_vpPixelMeterConversion(py::class_ &pyPM)
+{
+ pyPM.def_static("convertPoints", [](const vpCameraParameters &cam, const py::array_t &us, const py::array_t &vs) {
+ py::buffer_info bufu = us.request(), bufv = vs.request();
+ if (bufu.ndim != bufv.ndim || bufu.shape != bufv.shape) {
+ std::stringstream ss;
+ ss << "us and vs must have the same number of dimensions and same number of elements, but got us = " << shape_to_string(bufu.shape);
+ ss << " and vs = " << shape_to_string(bufv.shape);
+ throw std::runtime_error(ss.str());
+ }
+ py::array_t xs(bufu.shape);
+ py::array_t ys(bufv.shape);
+
+ const double *u_ptr = static_cast<const double *>(bufu.ptr);
+ const double *v_ptr = static_cast<const double *>(bufv.ptr);
+ double *x_ptr = static_cast<double *>(xs.request().ptr);
+ double *y_ptr = static_cast<double *>(ys.request().ptr);
+
+ for (ssize_t i = 0; i < bufu.size; ++i) {
+ vpPixelMeterConversion::convertPoint(cam, u_ptr[i], v_ptr[i], x_ptr[i], y_ptr[i]);
+ }
+
+ return std::make_tuple(std::move(xs), std::move(ys));
+
+ }, R"doc(
+Convert a set of 2D pixel coordinates to normalized coordinates.
+
+:param cam: The camera intrinsics with which to convert pixels to normalized coordinates.
+
+:param us: The pixel coordinates along the horizontal axis.
+
+:param vs: The pixel coordinates along the vertical axis.
+
+:raises RuntimeError: If us and vs do not have the same dimensions and shape.
+
+:return: A tuple containing the x and y normalized coordinates of the input pixels.
+Both arrays have the same shape as us and vs.
+
+Example usage:
+
+.. testcode::
+
+ from visp.core import PixelMeterConversion, CameraParameters
+ import numpy as np
+
+ h, w = 240, 320
+ cam = CameraParameters(px=600, py=600, u0=320, v0=240)
+
+ vs, us = np.meshgrid(range(h), range(w), indexing='ij') # vs and us are 2D arrays
+ vs.shape == (h, w) and us.shape == (h, w)
+
+ xs, ys = PixelMeterConversion.convertPoints(cam, us, vs)
+ # xs and ys have the same shape as us and vs
+ assert xs.shape == (h, w) and ys.shape == (h, w)
+
+ # Converting a numpy array to normalized coords has the same effect as calling on a single image point
+ u, v = 120, 120
+ x, y = PixelMeterConversion.convertPoint(cam, u, v)
+ assert x == xs[v, u] and y == ys[v, u]
+
+)doc", py::arg("cam"), py::arg("us"), py::arg("vs"));
+}
+
+void bindings_vpMeterPixelConversion(py::class_ &pyMP)
+{
+ pyMP.def_static("convertPoints", [](const vpCameraParameters &cam, const py::array_t &xs, const py::array_t &ys) {
+ py::buffer_info bufx = xs.request(), bufy = ys.request();
+ if (bufx.ndim != bufy.ndim || bufx.shape != bufy.shape) {
+ std::stringstream ss;
+ ss << "xs and ys must have the same number of dimensions and same number of elements, but got xs = " << shape_to_string(bufx.shape);
+ ss << " and ys = " << shape_to_string(bufy.shape);
+ throw std::runtime_error(ss.str());
+ }
+ py::array_t us(bufx.shape);
+ py::array_t vs(bufy.shape);
+
+ const double *x_ptr = static_cast<const double *>(bufx.ptr);
+ const double *y_ptr = static_cast<const double *>(bufy.ptr);
+ double *u_ptr = static_cast<double *>(us.request().ptr);
+ double *v_ptr = static_cast<double *>(vs.request().ptr);
+
+ for (ssize_t i = 0; i < bufx.size; ++i) {
+ vpMeterPixelConversion::convertPoint(cam, x_ptr[i], y_ptr[i], u_ptr[i], v_ptr[i]);
+ }
+
+ return std::make_tuple(std::move(us), std::move(vs));
+
+ }, R"doc(
+Convert a set of 2D normalized coordinates to pixel coordinates.
+
+:param cam: The camera intrinsics with which to convert normalized coordinates to pixels.
+
+:param xs: The normalized coordinates along the horizontal axis.
+
+:param ys: The normalized coordinates along the vertical axis.
+
+:raises RuntimeError: If xs and ys do not have the same dimensions and shape.
+
+:return: A tuple containing the u,v pixel coordinate arrays of the input normalized coordinates.
+Both arrays have the same shape as xs and ys.
+
+Example usage:
+
+.. testcode::
+
+ from visp.core import MeterPixelConversion, CameraParameters
+ import numpy as np
+
+ cam = CameraParameters(px=600, py=600, u0=320, v0=240)
+ n = 20
+ xs, ys = np.random.rand(n), np.random.rand(n)
+
+
+ us, vs = MeterPixelConversion.convertPoints(cam, xs, ys)
+
+ # xs and ys have the same shape as us and vs
+ assert us.shape == (n,) and vs.shape == (n,)
+
+ # Converting a numpy array to pixel coords has the same effect as calling on a single image point
+ x, y = xs[0], ys[0]
+ u, v = MeterPixelConversion.convertPoint(cam, x, y)
+ assert u == us[0] and v == vs[0]
+
+)doc", py::arg("cam"), py::arg("xs"), py::arg("ys"));
+}
+
+#endif
diff --git a/modules/python/bindings/include/core/utils.hpp b/modules/python/bindings/include/core/utils.hpp
new file mode 100644
index 0000000000..3bb413bdf0
--- /dev/null
+++ b/modules/python/bindings/include/core/utils.hpp
@@ -0,0 +1,138 @@
+/*
+ * ViSP, open source Visual Servoing Platform software.
+ * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
+ *
+ * This software is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * See the file LICENSE.txt at the root directory of this source
+ * distribution for additional information about the GNU GPL.
+ *
+ * For using ViSP with software that can not be combined with the GNU
+ * GPL, please contact Inria about acquiring a ViSP Professional
+ * Edition License.
+ *
+ * See https://visp.inria.fr for more information.
+ *
+ * This software was developed at:
+ * Inria Rennes - Bretagne Atlantique
+ * Campus Universitaire de Beaulieu
+ * 35042 Rennes Cedex
+ * France
+ *
+ * If you have questions regarding the use of this file, please contact
+ * Inria at visp@inria.fr
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Description:
+ * Python bindings.
+ */
+
+#ifndef VISP_PYTHON_CORE_UTILS_HPP
+#define VISP_PYTHON_CORE_UTILS_HPP
+
+#include
+#include
+
+#include
+#include