From 29555689ebb116bedbcaa33b277b4fda06adf6bc Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Thu, 7 Mar 2024 10:45:57 +0100 Subject: [PATCH 1/3] Fix build on ubuntu 12.04 - on ubuntu 12.04 __cplusplus is equal to 1 instead of 199711L - the fix consists in checking if __cplusplus <= 199711L to detect c++98 --- cmake/templates/vpConfig.h.in | 4 +++- modules/core/include/visp3/core/vpImageFilter.h | 4 +++- modules/core/include/visp3/core/vpNullptrEmulated.h | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/cmake/templates/vpConfig.h.in b/cmake/templates/vpConfig.h.in index 807d5db374..7ff33610b8 100644 --- a/cmake/templates/vpConfig.h.in +++ b/cmake/templates/vpConfig.h.in @@ -565,7 +565,9 @@ #cmakedefine VISP_HAVE_NULLPTR // Emulate nullptr when not available when cxx98 is enabled -#if (!defined(VISP_HAVE_NULLPTR)) && (__cplusplus == 199711L) +// Note that on ubuntu 12.04 __cplusplus is equal to 1 that's why in the next line we consider __cplusplus <= 199711L +// and not __cplusplus == 199711L +#if (!defined(VISP_HAVE_NULLPTR)) && (__cplusplus <= 199711L) #include #endif diff --git a/modules/core/include/visp3/core/vpImageFilter.h b/modules/core/include/visp3/core/vpImageFilter.h index 9e38d76dfa..ee989955fa 100644 --- a/modules/core/include/visp3/core/vpImageFilter.h +++ b/modules/core/include/visp3/core/vpImageFilter.h @@ -115,7 +115,9 @@ class VISP_EXPORT vpImageFilter return computeVal; } -#if ((__cplusplus == 199711L) || (defined(_MSVC_LANG) && (_MSVC_LANG == 199711L))) // Check if cxx98 +// Note that on ubuntu 12.04 __cplusplus is equal to 1 that's why in the next line we consider __cplusplus <= 199711L +// and not __cplusplus == 199711L +#if ((__cplusplus <= 199711L) || (defined(_MSVC_LANG) && (_MSVC_LANG == 199711L))) // Check if cxx98 // Helper to apply the scale to the raw values of the filters template static void scaleFilter(vpArray2D &filter, const float &scale) diff --git a/modules/core/include/visp3/core/vpNullptrEmulated.h 
b/modules/core/include/visp3/core/vpNullptrEmulated.h index 144cec8e1f..687eb5f3f6 100644 --- a/modules/core/include/visp3/core/vpNullptrEmulated.h +++ b/modules/core/include/visp3/core/vpNullptrEmulated.h @@ -33,7 +33,9 @@ #include -#if (!defined(VISP_HAVE_NULLPTR)) && (__cplusplus == 199711L) +// Note that on ubuntu 12.04 __cplusplus is equal to 1 that's why in the next line we consider __cplusplus <= 199711L +// and not __cplusplus == 199711L +#if (!defined(VISP_HAVE_NULLPTR)) && (__cplusplus <= 199711L) // Inspired from this thread https://stackoverflow.com/questions/24433436/compile-error-nullptr-undeclared-identifier // Does the emulation of nullptr when not available with cxx98 From a1f5095422e26b0f78baff16ce779b5504387d7d Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Thu, 7 Mar 2024 11:04:54 +0100 Subject: [PATCH 2/3] Fix doxygen warnings --- doc/mainpage.dox.in | 5 + modules/core/include/visp3/core/vpMutex.h | 42 ++-- modules/core/src/tools/geometry/vpPlane.cpp | 13 +- modules/core/src/tools/geometry/vpRect.cpp | 8 +- .../include/visp3/robot/vpSimulatorCamera.h | 2 +- .../include/visp3/sensor/vp1394TwoGrabber.h | 183 +++++++++--------- .../visp3/sensor/vpDirectShowGrabberImpl.h | 4 +- .../visp3/sensor/vpFlyCaptureGrabber.h | 5 +- .../include/visp3/sensor/vpV4l2Grabber.h | 63 +++--- .../framegrabber/1394/vp1394TwoGrabber.cpp | 100 ++++++---- .../src/framegrabber/v4l2/vpV4l2Grabber.cpp | 1 - .../tracker/me/include/visp3/me/vpMeEllipse.h | 4 +- .../visp3/visual_features/vpBasicFeature.h | 2 +- .../visp3/visual_features/vpGenericFeature.h | 2 +- 14 files changed, 229 insertions(+), 205 deletions(-) diff --git a/doc/mainpage.dox.in b/doc/mainpage.dox.in index 802a620462..5f45bd2f95 100644 --- a/doc/mainpage.dox.in +++ b/doc/mainpage.dox.in @@ -271,6 +271,11 @@ in different ways. This will motivate us to continue the efforts. \defgroup group_core_munkres Munkres Assignment Algorithm Munkres Assignment Algorithm. */ +/*! 
+ \ingroup group_core_tools + \defgroup group_core_cpu_features CPU features + CPU features. +*/ /******************************************* * Module io diff --git a/modules/core/include/visp3/core/vpMutex.h b/modules/core/include/visp3/core/vpMutex.h index f61ab41063..172ded0ad2 100644 --- a/modules/core/include/visp3/core/vpMutex.h +++ b/modules/core/include/visp3/core/vpMutex.h @@ -121,7 +121,7 @@ class vp_deprecated vpMutex \class vpScopedLock - \ingroup group_core_mutex + \ingroup group_core_threading \brief Class that allows protection by mutex. @@ -129,34 +129,34 @@ class vp_deprecated vpMutex code from concurrent access. The scope of the mutex lock/unlock is determined by the constructor/destructor. -\code - #include + \code + #include -int main() -{ - vpMutex mutex; + int main() + { + vpMutex mutex; - { - vpMutex::vpScopedLock lock(mutex); - // shared var to protect - } -} + { + vpMutex::vpScopedLock lock(mutex); + // shared var to protect + } + } \endcode Without using vpScopedLock, the previous example would become: \code -#include + #include -int main() -{ - vpMutex mutex; + int main() + { + vpMutex mutex; - { - mutex.lock(); - // shared var to protect - mutex.unlock() - } -} + { + mutex.lock(); + // shared var to protect + mutex.unlock() + } + } \endcode More examples are provided in \ref tutorial-multi-threading. diff --git a/modules/core/src/tools/geometry/vpPlane.cpp b/modules/core/src/tools/geometry/vpPlane.cpp index ce1ac24fdd..482c0647d8 100644 --- a/modules/core/src/tools/geometry/vpPlane.cpp +++ b/modules/core/src/tools/geometry/vpPlane.cpp @@ -36,7 +36,6 @@ /*! \file vpPlane.cpp \brief definition of the vpPlane class member functions - \ingroup libtools */ #include @@ -60,7 +59,7 @@ vpPlane &vpPlane::operator=(const vpPlane &p) /*! Basic constructor that set the plane parameters A, B, C, D to zero. */ -vpPlane::vpPlane() : A(0), B(0), C(0), D(0) {} +vpPlane::vpPlane() : A(0), B(0), C(0), D(0) { } /*! 
Plane constructor from A, B, C, D parameters. @@ -72,7 +71,7 @@ vpPlane::vpPlane() : A(0), B(0), C(0), D(0) {} \param a, b, c, d : Parameters of the plane. */ -vpPlane::vpPlane(double a, double b, double c, double d) : A(a), B(b), C(c), D(d) {} +vpPlane::vpPlane(double a, double b, double c, double d) : A(a), B(b), C(c), D(d) { } /*! Copy constructor. @@ -177,8 +176,9 @@ void vpPlane::init(const vpPoint &P, const vpPoint &Q, const vpPoint &R, vpPlane b[0] = P.get_X() - R.get_X(); b[1] = P.get_Y() - R.get_Y(); b[2] = P.get_Z() - R.get_Z(); - } else { - // Calculate vector corresponding to PQ + } + else { + // Calculate vector corresponding to PQ a[0] = P.get_oX() - Q.get_oX(); a[1] = P.get_oY() - Q.get_oY(); a[2] = P.get_oZ() - Q.get_oZ(); @@ -317,7 +317,8 @@ double vpPlane::rayIntersection(const vpPoint &M0, const vpPoint &M1, vpColVecto H[0] = M0.get_X() + k * R[0]; H[1] = M0.get_Y() + k * R[1]; H[2] = M0.get_Z() + k * R[2]; - } else { + } + else { scal = getA() * M1.get_X() + getB() * M1.get_Y() + getC() * M1.get_Z(); // if (scal != 0) if (std::fabs(scal) > std::numeric_limits::epsilon()) diff --git a/modules/core/src/tools/geometry/vpRect.cpp b/modules/core/src/tools/geometry/vpRect.cpp index 4f858cdef7..be24cc6406 100644 --- a/modules/core/src/tools/geometry/vpRect.cpp +++ b/modules/core/src/tools/geometry/vpRect.cpp @@ -37,7 +37,6 @@ /*! \file vpRect.cpp \brief Defines a rectangle in the plane. - \ingroup libtools */ #include @@ -48,7 +47,7 @@ and \e width and \e height set to 1. */ -vpRect::vpRect() : left(0), top(0), width(0), height(0) {} +vpRect::vpRect() : left(0), top(0), width(0), height(0) { } /*! Constructs a rectangle with the \e top, \e left corner and \e width @@ -59,7 +58,7 @@ vpRect::vpRect() : left(0), top(0), width(0), height(0) {} \param w : rectangle width. \param h : rectangle height. 
*/ -vpRect::vpRect(double l, double t, double w, double h) : left(l), top(t), width(w), height(h) {} +vpRect::vpRect(double l, double t, double w, double h) : left(l), top(t), width(w), height(h) { } /*! Constructs a rectangle with \e topLeft the top-left corner location @@ -71,8 +70,7 @@ vpRect::vpRect(double l, double t, double w, double h) : left(l), top(t), width( */ vpRect::vpRect(const vpImagePoint &topLeft, double w, double h) : left(topLeft.get_u()), top(topLeft.get_v()), width(w), height(h) -{ -} +{ } /*! Constructs a rectangle with \e topLeft the top-left corner location diff --git a/modules/robot/include/visp3/robot/vpSimulatorCamera.h b/modules/robot/include/visp3/robot/vpSimulatorCamera.h index e5d33592a4..fa3c77c01b 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorCamera.h +++ b/modules/robot/include/visp3/robot/vpSimulatorCamera.h @@ -48,7 +48,7 @@ /*! * \class vpSimulatorCamera * - * \ingroup group_robot_simu_Camera + * \ingroup group_robot_simu_camera * * \brief Class that defines the simplest robot: a free flying camera. * diff --git a/modules/sensor/include/visp3/sensor/vp1394TwoGrabber.h b/modules/sensor/include/visp3/sensor/vp1394TwoGrabber.h index 3e6282bf94..7344ac2f6f 100644 --- a/modules/sensor/include/visp3/sensor/vp1394TwoGrabber.h +++ b/modules/sensor/include/visp3/sensor/vp1394TwoGrabber.h @@ -72,98 +72,93 @@ This class was tested with Marlin F033C and F131B cameras and with Point Grey Dragonfly 2, Flea 2 and Flea 3 cameras. - \ingroup libdevice - This grabber allows single or multi camera acquisition. - Here you will find an example of single capture from the first camera -found on the bus. This example is available in tutorial-grabber-1394.cpp: + found on the bus. This example is available in tutorial-grabber-1394.cpp: \include tutorial-grabber-1394.cpp A line by line explanation of this example is provided in \ref -tutorial-grabber. 
An other example that shows how to use format 7 and the -auto-shutter is provided in vp1394TwoGrabber() constructor: + tutorial-grabber. An other example that shows how to use format 7 and the + auto-shutter is provided in vp1394TwoGrabber() constructor: - If more than one camera is connected, it is also possible to select a -specific camera by its GUID: -\code -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_DC1394) - vpImage I; // Create a gray level image container - bool reset = false; // Disable bus reset during construction - vp1394TwoGrabber g(reset); // Create a grabber based on libdc1394-2.x third party lib - - unsigned int ncameras; // Number of cameras on the bus - ncameras = g.getNumCameras(); - std::cout << ncameras << " cameras found:" << std::endl; - - for(unsigned int i=0; i< ncameras; i++) - { - g.setCamera(i); - uint64_t guid = g.getGuid(); - printf("camera %d with guid 0x%lx\n", i, (long unsigned int)guid); - } - - // produce: - // 2 cameras found: - // camera 0 with guid 0xb09d01009b329c - // camera 1 with guid 0xb09d01007e0ee7 - g.setCamera( (uint64_t)0xb09d01009b329cULL ); - - printf("Use camera with GUID: 0x%lx\n", (long unsigned int)g.getGuid()); - g.acquire(I); // Acquire an image from the camera with GUID 0xb09d01009b329c - - vpImageIo::write(I, "image.pgm"); // Write image on the disk -#endif -} - \endcode - - - Here an example of multi camera capture. An other example is available in -setCamera(): -\code -#include -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_DC1394) - bool reset = false; // Disable bus reset during construction - vp1394TwoGrabber g(reset); // Creation of a grabber instance based on libdc1394-2.x third party lib. 
- unsigned int ncameras; // Number of cameras on the bus - ncameras = g.getNumCameras(); - - // Create an image container for each camera - vpImage *I = new vpImage [ncameras]; - char filename[FILENAME_MAX]; - - // If the first camera supports vpVIDEO_MODE_640x480_YUV422 video mode - g.setCamera(0); - g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_YUV422); - - // If the second camera support 30 fps acquisition - g.setCamera(1); - g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_30); - - // Acquire an image from each camera - for (unsigned int camera=0; camera < ncameras; camera ++) { - g.setCamera(camera); - g.acquire(I[camera]); - std::stringstream ss; - ss << image-cam << camera << ".pgm"; - vpImageIo::write(I[camera], ss.str()); - } - delete [] I; -#endif -} - \endcode - - \author Fabien Spindler (Fabien.Spindler@irisa.fr), Irisa / Inria Rennes + specific camera by its GUID: + \code + #include + #include + #include + + int main() + { + #if defined(VISP_HAVE_DC1394) + vpImage I; // Create a gray level image container + bool reset = false; // Disable bus reset during construction + vp1394TwoGrabber g(reset); // Create a grabber based on libdc1394-2.x third party lib + + unsigned int ncameras; // Number of cameras on the bus + ncameras = g.getNumCameras(); + std::cout << ncameras << " cameras found:" << std::endl; + + for(unsigned int i=0; i< ncameras; i++) + { + g.setCamera(i); + uint64_t guid = g.getGuid(); + printf("camera %d with guid 0x%lx\n", i, (long unsigned int)guid); + } + + // produce: + // 2 cameras found: + // camera 0 with guid 0xb09d01009b329c + // camera 1 with guid 0xb09d01007e0ee7 + g.setCamera( (uint64_t)0xb09d01009b329cULL ); + + printf("Use camera with GUID: 0x%lx\n", (long unsigned int)g.getGuid()); + g.acquire(I); // Acquire an image from the camera with GUID 0xb09d01009b329c + + vpImageIo::write(I, "image.pgm"); // Write image on the disk + #endif + } + \endcode + + - Here an example of multi camera capture. 
An other example is available in setCamera(): + \code + #include + #include + #include + #include + + int main() + { + #if defined(VISP_HAVE_DC1394) + bool reset = false; // Disable bus reset during construction + vp1394TwoGrabber g(reset); // Creation of a grabber instance based on libdc1394-2.x third party lib. + unsigned int ncameras; // Number of cameras on the bus + ncameras = g.getNumCameras(); + + // Create an image container for each camera + vpImage *I = new vpImage [ncameras]; + char filename[FILENAME_MAX]; + + // If the first camera supports vpVIDEO_MODE_640x480_YUV422 video mode + g.setCamera(0); + g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_YUV422); + + // If the second camera support 30 fps acquisition + g.setCamera(1); + g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_30); + + // Acquire an image from each camera + for (unsigned int camera=0; camera < ncameras; camera ++) { + g.setCamera(camera); + g.acquire(I[camera]); + std::stringstream ss; + ss << image-cam << camera << ".pgm"; + vpImageIo::write(I[camera], ss.str()); + } + delete [] I; + #endif + } + \endcode */ @@ -192,7 +187,8 @@ class VISP_EXPORT vp1394TwoGrabber : public vpFrameGrabber Enumeration of iso speed. See libdc1394 2.x header file dc1394/control.h */ - typedef enum { + typedef enum + { vpISO_SPEED_100 = DC1394_ISO_SPEED_100, vpISO_SPEED_200 = DC1394_ISO_SPEED_200, vpISO_SPEED_400 = DC1394_ISO_SPEED_400, @@ -205,7 +201,8 @@ class VISP_EXPORT vp1394TwoGrabber : public vpFrameGrabber Enumeration of video modes. See libdc1394 2.x header file dc1394/control.h */ - typedef enum { + typedef enum + { vpVIDEO_MODE_160x120_YUV444 = DC1394_VIDEO_MODE_160x120_YUV444, vpVIDEO_MODE_320x240_YUV422 = DC1394_VIDEO_MODE_320x240_YUV422, vpVIDEO_MODE_640x480_YUV411 = DC1394_VIDEO_MODE_640x480_YUV411, @@ -244,7 +241,8 @@ class VISP_EXPORT vp1394TwoGrabber : public vpFrameGrabber Enumeration of framerates. 
See libdc1394 2.x header file dc1394/control.h */ - typedef enum { + typedef enum + { vpFRAMERATE_1_875 = DC1394_FRAMERATE_1_875, vpFRAMERATE_3_75 = DC1394_FRAMERATE_3_75, vpFRAMERATE_7_5 = DC1394_FRAMERATE_7_5, @@ -259,7 +257,8 @@ class VISP_EXPORT vp1394TwoGrabber : public vpFrameGrabber Enumeration of color codings. See libdc1394 2.x header file dc1394/control.h */ - typedef enum { + typedef enum + { vpCOLOR_CODING_MONO8 = DC1394_COLOR_CODING_MONO8, vpCOLOR_CODING_YUV411 = DC1394_COLOR_CODING_YUV411, vpCOLOR_CODING_YUV422 = DC1394_COLOR_CODING_YUV422, @@ -277,7 +276,8 @@ class VISP_EXPORT vp1394TwoGrabber : public vpFrameGrabber Enumeration of the parameters that can be modified. See libdc1394 2.x header file dc1394/control.h */ - typedef enum { + typedef enum + { vpFEATURE_BRIGHTNESS = DC1394_FEATURE_BRIGHTNESS, vpFEATURE_EXPOSURE = DC1394_FEATURE_EXPOSURE, vpFEATURE_SHARPNESS = DC1394_FEATURE_SHARPNESS, @@ -306,7 +306,8 @@ class VISP_EXPORT vp1394TwoGrabber : public vpFrameGrabber /*! Control structure of the values that can be modified during the execution. */ - typedef struct { + typedef struct + { uint32_t brightness; uint32_t exposure; uint32_t sharpness; diff --git a/modules/sensor/include/visp3/sensor/vpDirectShowGrabberImpl.h b/modules/sensor/include/visp3/sensor/vpDirectShowGrabberImpl.h index b14f36c162..b525ed0f65 100644 --- a/modules/sensor/include/visp3/sensor/vpDirectShowGrabberImpl.h +++ b/modules/sensor/include/visp3/sensor/vpDirectShowGrabberImpl.h @@ -66,8 +66,8 @@ class VISP_EXPORT vpDirectShowGrabberImpl : public vpFrameGrabber public: /*! -Enumeration of video subtypes. -*/ + Enumeration of video subtypes. 
+ */ /* typedef enum { //Known RGB formats diff --git a/modules/sensor/include/visp3/sensor/vpFlyCaptureGrabber.h b/modules/sensor/include/visp3/sensor/vpFlyCaptureGrabber.h index 71a8912728..415902323a 100644 --- a/modules/sensor/include/visp3/sensor/vpFlyCaptureGrabber.h +++ b/modules/sensor/include/visp3/sensor/vpFlyCaptureGrabber.h @@ -66,7 +66,7 @@ \code Grab loop had an error: There is an image consistency issue with this image. \endcode - follow instruction provide in + follow instruction provide [here](https://www.flir.fr/support-center/iis/machine-vision/knowledge-base/lost-ethernet-data-packets-on-linux-systems) to increase receive buffer size. @@ -210,7 +210,8 @@ class VISP_EXPORT vpFlyCaptureGrabber : public vpFrameGrabber void stopCapture(); protected: - typedef enum { + typedef enum + { ABS_VALUE, //!< Consider FlyCapture2::Property::absValue VALUE_A, //!< Consider FlyCapture2::Property::valueA } PropertyValue; diff --git a/modules/sensor/include/visp3/sensor/vpV4l2Grabber.h b/modules/sensor/include/visp3/sensor/vpV4l2Grabber.h index 9e02200464..d335e16f68 100644 --- a/modules/sensor/include/visp3/sensor/vpV4l2Grabber.h +++ b/modules/sensor/include/visp3/sensor/vpV4l2Grabber.h @@ -36,7 +36,6 @@ /*! \file vpV4l2Grabber.h \brief class for the Video For Linux 2 video device framegrabbing. - \ingroup libdevice */ #ifndef vpV4l2Grabber_hh @@ -63,7 +62,7 @@ \brief Class that is a wrapper over the Video4Linux2 (V4L2) driver. Thus to be enabled, this class needs the optional V4L2 3rd party. -Installation instruction are provided here https://visp.inria.fr/3rd_v4l2. + Installation instruction are provided here https://visp.inria.fr/3rd_v4l2. Information about Video4Linux can be found on http://linuxtv.org/v4lwiki/index.php/Main_Page @@ -71,7 +70,7 @@ Installation instruction are provided here https://visp.inria.fr/3rd_v4l2. 
This class was tested with a Pinnacle PCTV Studio/Rave board but also with the following webcams (Logitech QuickCam Vision Pro 9000, Logitech QuickCam Orbit AF, Logitech QuickCam IM (V-USB39), Dell latitude -E6400 internal webcam). + E6400 internal webcam). If the grabbing fail with a webcam, it means probably that you don't have the read/write permission on the /dev/video%%d device. You can @@ -83,8 +82,7 @@ E6400 internal webcam). For that, depending on your linux distribution check the card id in - /usr/share/doc/kernel-doc-2.4.20/video4linux/bttv/CARDLIST - - or -/usr/share/doc/kernel-doc-2.6.20/Documentation/video4linux/CARDLIST.bttv + - or /usr/share/doc/kernel-doc-2.6.20/Documentation/video4linux/CARDLIST.bttv For example, the card id of a Pinnacle PCTV Studio/Rave board is 39. Once this id is determined, you have to set the bttv driver with, by adding @@ -102,31 +100,27 @@ E6400 internal webcam). This other example shows how to use this grabber with an analogic camera attached to a bttv PCI card. 
\code -#include -#include + #include + #include -int main() -{ -#if defined(VISP_HAVE_V4L2) - vpImage I; - vpV4l2Grabber g; - g.setInput(2); // Input 2 on the board - g.setFramerate(vpV4l2Grabber::framerate_25fps); // 25 fps - g.setWidth(768); // Acquired images are 768 width - g.setHeight(576); // Acquired images are 576 height - g.setNBuffers(3); // 3 ring buffers to ensure real-time acquisition - g.open(I); // Open the grabber - - g.acquire(I); // Acquire a 768x576 grey image - vpImageIo::write(I, "image.pgm"); // Save the image on the disk -#endif -} + int main() + { + #if defined(VISP_HAVE_V4L2) + vpImage I; + vpV4l2Grabber g; + g.setInput(2); // Input 2 on the board + g.setFramerate(vpV4l2Grabber::framerate_25fps); // 25 fps + g.setWidth(768); // Acquired images are 768 width + g.setHeight(576); // Acquired images are 576 height + g.setNBuffers(3); // 3 ring buffers to ensure real-time acquisition + g.open(I); // Open the grabber + + g.acquire(I); // Acquire a 768x576 grey image + vpImageIo::write(I, "image.pgm"); // Save the image on the disk + #endif + } \endcode - - \author Fabien Spindler (Fabien.Spindler@irisa.fr), Irisa / Inria Rennes - - \sa vpFrameGrabber */ class VISP_EXPORT vpV4l2Grabber : public vpFrameGrabber @@ -144,7 +138,8 @@ class VISP_EXPORT vpV4l2Grabber : public vpFrameGrabber /*! \enum vpV4l2FramerateType Frame rate type for capture. */ - typedef enum { + typedef enum + { framerate_50fps, //!< 50 frames per second framerate_25fps //!< 25 frames per second } vpV4l2FramerateType; @@ -152,7 +147,8 @@ class VISP_EXPORT vpV4l2Grabber : public vpFrameGrabber /*! \enum vpV4l2FrameFormatType Frame format type for capture. */ - typedef enum { + typedef enum + { V4L2_FRAME_FORMAT, /*!< a field only */ V4L2_IMAGE_FORMAT /*!< an interlaced image */ } vpV4l2FrameFormatType; @@ -160,7 +156,8 @@ class VISP_EXPORT vpV4l2Grabber : public vpFrameGrabber /*! \enum vpV4l2PixelFormatType Pixel format type for capture. 
*/ - typedef enum { + typedef enum + { V4L2_GREY_FORMAT, /*!< 8 Greyscale */ V4L2_RGB24_FORMAT, /*!< 24 RGB-8-8-8 */ V4L2_RGB32_FORMAT, /*!< 32 RGB-8-8-8-8 */ @@ -170,14 +167,16 @@ class VISP_EXPORT vpV4l2Grabber : public vpFrameGrabber } vpV4l2PixelFormatType; #ifndef DOXYGEN_SHOULD_SKIP_THIS - struct ng_video_fmt { + struct ng_video_fmt + { unsigned int pixelformat; /* VIDEO_* */ unsigned int width; unsigned int height; unsigned int bytesperline; /* zero for compressed formats */ }; - struct ng_video_buf { + struct ng_video_buf + { struct ng_video_fmt fmt; size_t size; unsigned char *data; diff --git a/modules/sensor/src/framegrabber/1394/vp1394TwoGrabber.cpp b/modules/sensor/src/framegrabber/1394/vp1394TwoGrabber.cpp index 9f1cab44b0..732c012c28 100644 --- a/modules/sensor/src/framegrabber/1394/vp1394TwoGrabber.cpp +++ b/modules/sensor/src/framegrabber/1394/vp1394TwoGrabber.cpp @@ -36,7 +36,6 @@ /*! \file vp1394TwoGrabber.cpp \brief member functions for firewire cameras - \ingroup libdevice */ #include @@ -61,11 +60,11 @@ const char *vp1394TwoGrabber::strVideoMode[DC1394_VIDEO_MODE_NUM] = { "MODE_1280x960_RGB8", "MODE_1280x960_MONO8", "MODE_1600x1200_YUV422", "MODE_1600x1200_RGB8", "MODE_1600x1200_MONO8", "MODE_1280x960_MONO16", "MODE_1600x1200_MONO16", "MODE_EXIF", "MODE_FORMAT7_0", "MODE_FORMAT7_1", "MODE_FORMAT7_2", "MODE_FORMAT7_3", - "MODE_FORMAT7_4", "MODE_FORMAT7_5", "MODE_FORMAT7_6", "MODE_FORMAT7_7"}; + "MODE_FORMAT7_4", "MODE_FORMAT7_5", "MODE_FORMAT7_6", "MODE_FORMAT7_7" }; const char *vp1394TwoGrabber::strFramerate[DC1394_FRAMERATE_NUM] = { "FRAMERATE_1_875", "FRAMERATE_3_75", "FRAMERATE_7_5", "FRAMERATE_15", - "FRAMERATE_30", "FRAMERATE_60", "FRAMERATE_120", "FRAMERATE_240"}; + "FRAMERATE_30", "FRAMERATE_60", "FRAMERATE_120", "FRAMERATE_240" }; const char *vp1394TwoGrabber::strColorCoding[DC1394_COLOR_CODING_NUM] = { "COLOR_CODING_MONO8", "COLOR_CODING_YUV411", "COLOR_CODING_YUV422", "COLOR_CODING_YUV444", @@ -119,11 +118,11 @@ int main() */ 
vp1394TwoGrabber::vp1394TwoGrabber(bool reset) : camera(nullptr), cameras(nullptr), num_cameras(0), camera_id(0), verbose(false), camIsOpen(nullptr), - num_buffers(4), // ring buffer size - isDataModified(nullptr), initialShutterMode(nullptr), dataCam(nullptr) + num_buffers(4), // ring buffer size + isDataModified(nullptr), initialShutterMode(nullptr), dataCam(nullptr) #ifdef VISP_HAVE_DC1394_CAMERA_ENUMERATE // new API > libdc1394-2.0.0-rc7 - , - d(nullptr), list(nullptr) + , + d(nullptr), list(nullptr) #endif { // protected members @@ -299,7 +298,8 @@ void vp1394TwoGrabber::setCamera(uint64_t cam_id) close(); throw(vpFrameGrabberException(vpFrameGrabberException::settingError, "The required camera is not present")); } - } else { + } + else { this->camera_id = (unsigned int)cam_id; // The input cam_id is not a // uint64_t guid, but the index of // the camera @@ -327,7 +327,8 @@ void vp1394TwoGrabber::getCamera(uint64_t &cam_id) { if (num_cameras) { cam_id = this->camera_id; - } else { + } + else { close(); vpERROR_TRACE("No cameras found"); throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "No cameras found")); @@ -352,7 +353,8 @@ uint64_t vp1394TwoGrabber::getCamera() { if (num_cameras) { return this->camera_id; - } else { + } + else { close(); vpERROR_TRACE("No cameras found"); throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "No cameras found")); @@ -1012,10 +1014,12 @@ void vp1394TwoGrabber::getColorCoding(vp1394TwoColorCodingType &coding) vpERROR_TRACE("Can't get current color coding"); throw(vpFrameGrabberException(vpFrameGrabberException::settingError, "Can't query current color coding")); } - } else if (dc1394_is_video_mode_still_image((dc1394video_mode_t)_videomode)) { + } + else if (dc1394_is_video_mode_still_image((dc1394video_mode_t)_videomode)) { throw(vpFrameGrabberException(vpFrameGrabberException::settingError, "No color coding for format 6 video mode")); - } else { - // Not Format 7 and not 
Format 6 video modes + } + else { + // Not Format 7 and not Format 6 video modes if (dc1394_get_color_coding_from_video_mode(camera, (dc1394video_mode_t)_videomode, &_coding) != DC1394_SUCCESS) { close(); vpERROR_TRACE("Could not query supported color coding for mode %d\n", _videomode); @@ -1073,11 +1077,13 @@ uint32_t vp1394TwoGrabber::getColorCodingSupported(vp1394TwoVideoModeType mode, codings.push_back((vp1394TwoColorCodingType)_codings.codings[i]); return _codings.num; - } else if (dc1394_is_video_mode_still_image((dc1394video_mode_t)mode)) { - // Format 6 video mode + } + else if (dc1394_is_video_mode_still_image((dc1394video_mode_t)mode)) { + // Format 6 video mode return 0; - } else { - // Not Format 7 and not Format 6 video modes + } + else { + // Not Format 7 and not Format 6 video modes dc1394color_coding_t _coding; if (dc1394_get_color_coding_from_video_mode(camera, (dc1394video_mode_t)mode, &_coding) != DC1394_SUCCESS) { close(); @@ -1133,11 +1139,13 @@ bool vp1394TwoGrabber::isColorCodingSupported(vp1394TwoVideoModeType mode, vp139 return true; } return false; - } else if (dc1394_is_video_mode_still_image((dc1394video_mode_t)mode)) { - // Format 6 video mode + } + else if (dc1394_is_video_mode_still_image((dc1394video_mode_t)mode)) { + // Format 6 video mode return false; - } else { - // Not Format 7 and not Format 6 video modes + } + else { + // Not Format 7 and not Format 6 video modes dc1394color_coding_t _coding; if (dc1394_get_color_coding_from_video_mode(camera, (dc1394video_mode_t)mode, &_coding) != DC1394_SUCCESS) { close(); @@ -1213,7 +1221,7 @@ void vp1394TwoGrabber::setFormat7ROI(unsigned int left, unsigned int top, unsign } #if 0 vpTRACE("left: %d top: %d width: %d height: %d", left, top, - width == 0 ? DC1394_USE_MAX_AVAIL: w, + width == 0 ? DC1394_USE_MAX_AVAIL : w, height == 0 ? 
DC1394_USE_MAX_AVAIL : h); vpTRACE("max_width: %d max_height: %d", max_width, max_height); #endif @@ -1235,7 +1243,8 @@ void vp1394TwoGrabber::setFormat7ROI(unsigned int left, unsigned int top, unsign if (w > (max_width - left)) w = (max_width - left); roi_width = (int32_t)w; - } else { + } + else { roi_width = DC1394_USE_MAX_AVAIL; } @@ -1244,7 +1253,8 @@ void vp1394TwoGrabber::setFormat7ROI(unsigned int left, unsigned int top, unsign if (h > (max_height - top)) h = (max_height - top); roi_height = (int32_t)h; - } else { + } + else { roi_height = DC1394_USE_MAX_AVAIL; } @@ -1459,7 +1469,8 @@ void vp1394TwoGrabber::close() // reset values try { updateDataStructToCam(); - } catch (...) { + } + catch (...) { } // reset mode (manual, auto, ...) if (dc1394_feature_set_mode(camera, DC1394_FEATURE_BRIGHTNESS, initialShutterMode[i]) != DC1394_SUCCESS || @@ -1618,7 +1629,8 @@ void vp1394TwoGrabber::setAutoShutter(bool enable) dc1394feature_mode_t mode; if (enable) { mode = DC1394_FEATURE_MODE_AUTO; - } else { + } + else { mode = DC1394_FEATURE_MODE_MANUAL; } @@ -1762,7 +1774,8 @@ void vp1394TwoGrabber::setAutoGain(bool enable) dc1394feature_mode_t mode; if (enable) { mode = DC1394_FEATURE_MODE_AUTO; - } else { + } + else { mode = DC1394_FEATURE_MODE_MANUAL; } @@ -1892,7 +1905,8 @@ void vp1394TwoGrabber::setCapture(dc1394switch_t _switch) close(); throw(vpFrameGrabberException(vpFrameGrabberException::settingError, "Could not setup dma capture")); } - } else { // _switch == DC1394_OFF + } + else { // _switch == DC1394_OFF dc1394error_t code = dc1394_capture_stop(camera); if (code != DC1394_SUCCESS && code != DC1394_CAPTURE_IS_NOT_SET) { @@ -2019,7 +2033,8 @@ void vp1394TwoGrabber::setIsoTransmissionSpeed(vp1394TwoIsoSpeedType isospeed) close(); throw(vpFrameGrabberException(vpFrameGrabberException::settingError, "Failed to set 1394B mode")); } - } else { + } + else { if (dc1394_video_set_operation_mode(camera, DC1394_OPERATION_MODE_LEGACY) != DC1394_SUCCESS) { close(); 
throw(vpFrameGrabberException(vpFrameGrabberException::settingError, "Cannot set camera to 1394A mode")); @@ -2279,7 +2294,7 @@ dc1394video_frame_t *vp1394TwoGrabber::dequeue(vpImage &I, uint64 close(); vpERROR_TRACE("Format conversion not implemented. Acquisition failed."); throw(vpFrameGrabberException(vpFrameGrabberException::otherError, "Format conversion not implemented. " - "Acquisition failed.")); + "Acquisition failed.")); }; return frame; @@ -2435,7 +2450,7 @@ dc1394video_frame_t *vp1394TwoGrabber::dequeue(vpImage &I, uint64_t &tim close(); vpERROR_TRACE("Format conversion not implemented. Acquisition failed."); throw(vpFrameGrabberException(vpFrameGrabberException::otherError, "Format conversion not implemented. " - "Acquisition failed.")); + "Acquisition failed.")); }; return frame; @@ -2601,7 +2616,7 @@ void vp1394TwoGrabber::acquire(vpImage &I, uint64_t ×tamp, uint32_t close(); vpERROR_TRACE("Format conversion not implemented. Acquisition failed."); throw(vpFrameGrabberException(vpFrameGrabberException::otherError, "Format conversion not implemented. 
" - "Acquisition failed.")); + "Acquisition failed.")); }; enqueue(frame); @@ -2725,8 +2740,8 @@ unsigned int vp1394TwoGrabber::getHeight() void vp1394TwoGrabber::printCameraInfo() { std::cout << "----------------------------------------------------------" << std::endl - << "----- Information for camera " << camera_id << " -----" << std::endl - << "----------------------------------------------------------" << std::endl; + << "----- Information for camera " << camera_id << " -----" << std::endl + << "----------------------------------------------------------" << std::endl; #ifdef VISP_HAVE_DC1394_CAMERA_ENUMERATE // new API > libdc1394-2.0.0-rc7 dc1394_camera_print_info(camera, stdout); @@ -2745,7 +2760,8 @@ void vp1394TwoGrabber::printCameraInfo() vpERROR_TRACE("unable to get feature set for camera %d\n", camera_id); throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "Cannot get camera features")); - } else { + } + else { #ifdef VISP_HAVE_DC1394_CAMERA_ENUMERATE // new API > libdc1394-2.0.0-rc7 dc1394_feature_print_all(&features, stdout); #elif defined VISP_HAVE_DC1394_FIND_CAMERAS // old API <= libdc1394-2.0.0-rc7 @@ -2774,7 +2790,8 @@ std::string vp1394TwoGrabber::videoMode2string(vp1394TwoVideoModeType videomode) if ((_videomode >= DC1394_VIDEO_MODE_MIN) && (_videomode <= DC1394_VIDEO_MODE_MAX)) { _str = strVideoMode[_videomode - DC1394_VIDEO_MODE_MIN]; - } else { + } + else { vpCERROR << "The video mode " << (int)videomode << " is not supported by the camera" << std::endl; } @@ -2800,7 +2817,8 @@ std::string vp1394TwoGrabber::framerate2string(vp1394TwoFramerateType fps) if ((_fps >= DC1394_FRAMERATE_MIN) && (_fps <= DC1394_FRAMERATE_MAX)) { _str = strFramerate[_fps - DC1394_FRAMERATE_MIN]; - } else { + } + else { vpCERROR << "The framerate " << (int)fps << " is not supported by the camera" << std::endl; } @@ -2827,7 +2845,8 @@ std::string vp1394TwoGrabber::colorCoding2string(vp1394TwoColorCodingType colorc if ((_coding >= 
DC1394_COLOR_CODING_MIN) && (_coding <= DC1394_COLOR_CODING_MAX)) { _str = strColorCoding[_coding - DC1394_COLOR_CODING_MIN]; - } else { + } + else { vpCERROR << "The color coding " << (int)colorcoding << " is not supported by the camera" << std::endl; } @@ -3261,7 +3280,8 @@ void vp1394TwoGrabber::setParameterValue(vp1394TwoParametersType param, unsigned close(); throw(vpFrameGrabberException(vpFrameGrabberException::settingError, "Unable to set the shutter information")); } - } else { + } + else { vpERROR_TRACE("The camera does not have a manual mode.\nCannot change the value"); throw(vpFrameGrabberException(vpFrameGrabberException::settingError, "The camera does not have a manual mode")); } @@ -3383,5 +3403,5 @@ vp1394TwoGrabber &vp1394TwoGrabber::operator>>(vpImage &I) #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_sensor.a(vp1394TwoGrabber.cpp.o) has // no symbols -void dummy_vp1394TwoGrabber(){}; +void dummy_vp1394TwoGrabber() { }; #endif diff --git a/modules/sensor/src/framegrabber/v4l2/vpV4l2Grabber.cpp b/modules/sensor/src/framegrabber/v4l2/vpV4l2Grabber.cpp index d4b8009686..c2016a70d4 100644 --- a/modules/sensor/src/framegrabber/v4l2/vpV4l2Grabber.cpp +++ b/modules/sensor/src/framegrabber/v4l2/vpV4l2Grabber.cpp @@ -36,7 +36,6 @@ /*! \file vpV4l2Grabber.cpp \brief class for the Video For Linux 2 video device framegrabbing. - \ingroup libdevice */ #include diff --git a/modules/tracker/me/include/visp3/me/vpMeEllipse.h b/modules/tracker/me/include/visp3/me/vpMeEllipse.h index a70017936a..c0cdab267f 100644 --- a/modules/tracker/me/include/visp3/me/vpMeEllipse.h +++ b/modules/tracker/me/include/visp3/me/vpMeEllipse.h @@ -236,9 +236,9 @@ class VISP_EXPORT vpMeEllipse : public vpMeTracker * Initialize the tracking of an ellipse or an arc of an ellipse when \e trackArc is set to true. 
* If \b ips is set, use the contained points to initialize the ME if there are some, or initialize * by clicks the ME and \b ips will contained the clicked points. - * If \b ips is not set, call the method vpMeEllispe::initTracking(const vpImage&, bool, bool). + * If \b ips is not set, call the method vpMeEllipse::initTracking(const vpImage&, bool, bool). * - * \sa \ref vpMeEllispe::initTracking() + * \sa vpMeEllipse::initTracking() * * \warning The points should be selected as far as possible from each other. * When an arc of an ellipse is tracked, it is recommended to select the 5 points clockwise. diff --git a/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h b/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h index 7073877c3a..4be273154e 100644 --- a/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h +++ b/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h @@ -70,7 +70,7 @@ /*! * \class vpBasicFeature - * \ingroup group_core_features + * \ingroup group_visual_features * \brief class that defines what is a visual feature */ class VISP_EXPORT vpBasicFeature diff --git a/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h b/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h index f8a811eda4..b21adc2191 100644 --- a/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h +++ b/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h @@ -48,7 +48,7 @@ /*! * \class vpGenericFeature - * \ingroup group_core_features + * \ingroup group_visual_features * * \brief Class that enables to define a feature or a set of features which are * not implemented in ViSP as a specific class. 
It is indeed possible to create From 1adf02f01129b081fd4b407bf7a2837efda45998 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Thu, 7 Mar 2024 11:09:50 +0100 Subject: [PATCH 3/3] Remove multi-threading related doc since vpThread and vpMutex are deprecated --- .../detection/tutorial-detection-face.dox | 12 +- doc/tutorial/image/tutorial-grabber.dox | 31 ++- .../misc/tutorial-multi-threading.dox | 208 ------------------ doc/tutorial/tutorial-users.dox | 2 - modules/core/include/visp3/core/vpMutex.h | 6 - modules/core/include/visp3/core/vpThread.h | 7 - 6 files changed, 21 insertions(+), 245 deletions(-) delete mode 100644 doc/tutorial/misc/tutorial-multi-threading.dox diff --git a/doc/tutorial/detection/tutorial-detection-face.dox b/doc/tutorial/detection/tutorial-detection-face.dox index 53672be077..051ef2d563 100644 --- a/doc/tutorial/detection/tutorial-detection-face.dox +++ b/doc/tutorial/detection/tutorial-detection-face.dox @@ -37,7 +37,7 @@ Now we explain the main lines of the source. First we have to include the header of the class that allows to detect a face. \snippet tutorial-face-detector.cpp Include -Then in the main() function before going further we need to check if OpenCV 2.2.0 is available. +Then in the main() function before going further we need to check if OpenCV 2.2.0 is available. \snippet tutorial-face-detector.cpp Macro defined @@ -55,11 +55,11 @@ Usage: ./tutorial-face-detector [--haar ] [--video ] [--haar -\endcode - -With vpThread the prototype of the function vpThread::Fn that could be executed in a separate thread is the following: -\code -vpThread::Return myFooFunction(vpThread::Args args) -\endcode -where arguments passed to the function are of type vpThread::Args. This function should return a vpThread::Return type. - -Then to create the thread that executes this function, you have just to construct a vpThread object indicating which is the function to execute.
-- If you don't want to pass arguments to the function, just do like: -\code -vpThread foo((vpThread::Fn)myFooFunction); -\endcode - -- If you want to pass some arguments to the function, do rather like: -\code -int foo_arg = 3; -vpThread foo((vpThread::Fn)myFooFunction, (vpThread::Args)&foo_arg); -\endcode -This argument could then be exploited in myFooFunction() -\code -vpThread::Return myFooFunction(vpThread::Args args) -{ - int foo_arg = *((int *) args); -} -\endcode - -To illustrate this behavior, see testThread.cpp. - -\subsection multi-threading-into-mutex Mutexes overview - -To use vpMutex class you have first to include the corresponding header. -\code -#include -\endcode - -Then protecting a shared var from concurrent access could be done like: -\code -vpMutex mutex; -int var = 0; - -mutex.lock(); -// var to protect from concurrent access -var = 2; -mutex.unlock(); -\endcode -To illustrate this usage, see testMutex.cpp. - -There is also a more elegant way using vpMutex::vpScopedLock. The previous example becomes: -\code -vpMutex mutex; -int var = 0; - -{ - vpMutex::vpScopedLock lock(mutex); - // var to protect from concurrent access - var = 2; -} -\endcode - -Here, the vpMutex::vpScopedLock constructor locks the mutex, while the destructor unlocks. Using vpMutex::vpScopedLock, the scope of the portion of code that is protected is defined inside the brackets. To illustrate this usage, see tutorial-grabber-opencv-threaded.cpp. - -\section pass-multiple-arguments-return-values Pass multiple arguments and / or retrieve multiple return values - -This section will show you one convenient way to pass multiple arguments to a vpThread and retrieve multiple return values at the end of the computation. This example (testThread2.cpp) uses a functor class to do that. - -Basically, you declare a class that will act like a function by defining the \p operator() that will do the computation in a dedicated thread. 
In the following toy example, we want to compute the element-wise addition (\f$ v_{add}\left [ i \right ] = v_1 \left [ i \right ] + v_2 \left [ i \right ] \f$) and the element-wise multiplication (\f$ v_{mul}\left [ i \right ] = v_1 \left [ i \right ] \times v_2 \left [ i \right ] \f$) of two vectors. - -Each thread will process a subset of the input vectors and the partial results will be stored in two vectors (one for the addition and the other one for the multiplication). - -\snippet testThread2.cpp functor-thread-example declaration - -The required arguments needed by the constructor are the two input vectors, the start index and the end index that will define the portion of the vector to be processed by the current thread. Two getters are used to retrieve the results at the end of the computation. - -Let's see now how to create and initialize the threads: - -\snippet testThread2.cpp functor-thread-example threadCreation - -The pointer to the routine \p arithmThread() called by the thread is defined as the following: - -\snippet testThread2.cpp functor-thread-example threadFunction - -This routine is called by the threading library. We cast the argument passed to the \p thread routine and we call the function that needs to be executed by the thread. - -To get the results: - -\snippet testThread2.cpp functor-thread-example getResults - -After joining the threads, the partial results from one thread can be obtained by a call to the appropriate getter function. - -\warning You cannot create directly the thread as the following: - -\code -threads[i] = vpThread((vpThread::Fn) arithmThread, (vpThread::Args) &functors[i]); -\endcode - -nor as the following: - -\code -threads.push_back(vpThread((vpThread::Fn) arithmThread, (vpThread::Args) &functors[i])); -\endcode - -as theses lines of code create a temporary vpThread object that will be copied to the vector and after destructed. 
The destructor of the \p vpThread calls automatically the \p join() function and thus it will result that the threads will be created, started and joined sequentially as soon as the temporary \p vpThread object will be destructed. - -\section multi-threading-capture Multi-threaded capture and display - -Note that all the material (source code) described in this section is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/grabber -\endcode - -The following example implemented in tutorial-grabber-opencv-threaded.cpp shows how to implement a multi-threaded application, where image capture is executed in one thread and image display in an other one. The capture is here performed thanks to OpenCV cv::VideoCapture class. It could be easily adapted to deal with other framegrabbers available in ViSP. In tutorial-grabber-v4l2-threaded.cpp you will find the same example using vpV4l2Grabber. To adapt the code to other framegrabbers see \ref tutorial-grabber. - -Hereafter we explain how tutorial-grabber-opencv-threaded.cpp works. - -\subsection multi-threading-capture-declaration Includes and declarations - -First we include all ViSP headers corresponding to the classes we will use; vpImageConvert to convert OpenCV images in ViSP images, vpMutex to protect shared data between the threads, vpThread to create the threads, vpTime to handle the time, vpDisplayX to display images under unix-like OS, and vpDisplayGDI to display the images under Windows. - -Then if OpenCV 2.1.0 or higher is found we include OpenCV highgui.hpp header that brings cv::VideoCapture class that will be used in this example for image capture. 
- -We declare then the shared data with variable names prefixed by "s_" (\e s_capture_state, indicating if capture is in progress or is stopped, \e s_frame the image that is currently captured and \e s_mutex_capture, the mutex that will be used to protect from concurrent access to these shared variables). -\snippet tutorial-grabber-opencv-threaded.cpp capture-multi-threaded declaration - -\subsection multi-threading-capture-function Capture thread - -Then we implement captureFunction(), the capture function that we want to run in a separate thread. As argument this function receives a reference over cv::VideoCapture object that was created in the \ref multi-threading-capture-main. - -\note We notice that cv::VideoCapture is unable to create an instance outside the \ref multi-threading-capture-main. That's why cv::VideoCapture object is passed throw the arguments of the function captureFunction(). With ViSP vp1394TwoGrabber, vp1394CMUGrabber, vpFlyCaptureGrabber, vpV4l2Grabber capture classes it would be possible to instantiate the object in the capture function. - -We check if the capture is able to found a camera thanks to \e cap.isOpened(), and start a 30 seconds capture loop that will fill \e frame_ with the image from the camera. The capture could be stopped before 30 seconds if \e stop_capture_ boolean is turned to true. Once an image is captured, with the mutex we update the shared data. After the while loop, we also update the capture state to capture_stopped to finish the display thread. -\snippet tutorial-grabber-opencv-threaded.cpp capture-multi-threaded captureFunction - -\subsection multi-threading-capture-display-function Display thread - -We implement then displayFunction() used to display the captured images. This function doesn't exploit any argument. Depending on the OS we create a display pointer over the class that we want to use (vpDisplayX or vpDisplayGDI). 
We enter then in a while loop that will end when the capture is stopped, meaning that the \ref multi-threading-capture-function is finished. - -In the display loop, with the mutex we create a copy of the shared variables \e s_capture_state in order to use if just after. When capture is started we convert the OpenCV cv::mat image into a local ViSP image \e I. Since we access to the shared \e s_frame data, the conversion is protected by the mutex. Then with the first available ViSP image \e I we initialize the display and turn \e display_initialized_ boolean to false indicating that the display is already initialized. Next we update the display with the content of the image. -When we capture is not started, we just sleep for 2 milli-seconds. -\snippet tutorial-grabber-opencv-threaded.cpp capture-multi-threaded displayFunction - -\subsection multi-threading-capture-main Main thread - -The main thread is the one that is implemented in the main() function. -We manage first the command line option "--device " to allow the user to select a specific camera when more then one camera are connected. Then as explained in \ref multi-threading-capture-function we need the create cv::VideoCapture object in the main(). Finally, captureFunction() and displayFunction() are started as two separate threads, one for the capture, an other one for the display using vpThread constructor. - -The call to join() is here to wait until capture and display thread ends to return from the main(). -\snippet tutorial-grabber-opencv-threaded.cpp capture-multi-threaded mainFunction - -Once build, to run this tutorial just run in a terminal: -\code -cd /tutorial/grabber -./tutorial-grabber-opencv-threaded --help -./tutorial-grabber-opencv-threaded --device 0 -\endcode - -where "--device 0" could be avoided since it is the default option. 
- -\section multi-threading-face-detection Extension to face detection - -Note that all the material (source code) described in this section is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/face -\endcode - -The example given in the previous section \ref multi-threading-capture could be extended to introduce an image processing. In this section, we illustrate the case of the face detection described in \ref tutorial-detection-face and implemented in tutorial-face-detector-live.cpp as a single main thread. Now we propose to extend this example using multi-threading where face detection is achieved in a separate thread. The complete source code is given in tutorial-face-detector-live-threaded.cpp. - -Here after we give the changes that we introduce in tutorial-face-detector-live-threaded.cpp to add a new thread dedicated to the face detection. - -\subsection multi-threading-face-detection-function Face detection thread - -The function that does the face detection is implemented in detectionFunction(). -We first instantiate an object of type vpDetectorFace. Then in the while loop, we call the face detection function using face_detector_.detect() when a new image is available. When faces are found, we retrieve the bounding box of the first face that is the largest in the image. We update the shared \e s_face_bbox var with the bounding box. This var is then exploited in the display thread and displayed as a rectangle. -\snippet tutorial-face-detector-live-threaded.cpp face-detection-threaded detectionFunction - -\subsection multi-threading-face-detection-main Main thread - -The main() is modified to call the detectionFunction() in a third thread. 
-\note Compared to the \ref multi-threading-capture-main used in tutorial-grabber-opencv-threaded.cpp, we modify here the main() to be able to capture images either from a webcam when Video For Linux 2 (V4L2) is available (only on Linux-like OS), or using OpenCV cv::VideoCapture when V4L2 is not available. - -\snippet tutorial-face-detector-live-threaded.cpp face-detection-threaded mainFunction - -To run the binary just open a terminal and run: -\code -cd /tutorial/detection/face -./tutorial-face-detector-live-threaded --help -./tutorial-face-detector-live-threaded -\endcode - -*/ diff --git a/doc/tutorial/tutorial-users.dox b/doc/tutorial/tutorial-users.dox index 8440e9a3f2..43e6e80996 100644 --- a/doc/tutorial/tutorial-users.dox +++ b/doc/tutorial/tutorial-users.dox @@ -166,8 +166,6 @@ This page introduces the user to other tools that may be useful. - \subpage tutorial-plotter
This tutorial explains how to plot curves in real-time during a visual servo. - \subpage tutorial-trace
This tutorial explains how to introduce trace in the code that could be enabled for debugging or disabled. -- \subpage tutorial-multi-threading
This tutorial explains how to implement multi-threaded applications to capture images from a camera -in one thread and display these images in an other thread. - \subpage tutorial-pcl-viewer
This tutorial explains how to use a threaded PCL-based point cloud visualizer. - \subpage tutorial-json
This tutorial explains how to read and save data in the portable JSON format. It focuses on saving the data generated by a visual servoing experiment and exporting it to Python in order to generate plots. - \subpage tutorial-synthetic-blenderproc
This tutorial shows you how to easily generate synthetic data from the 3D model of an object and obtain various modalities. This data can then be used to train a neural network for your own task. diff --git a/modules/core/include/visp3/core/vpMutex.h b/modules/core/include/visp3/core/vpMutex.h index 172ded0ad2..9c05b9d067 100644 --- a/modules/core/include/visp3/core/vpMutex.h +++ b/modules/core/include/visp3/core/vpMutex.h @@ -61,10 +61,6 @@ native Windows threading capabilities if pthread is not available under Windows. - An example of vpMutex usage is given in testMutex.cpp. - - More examples are provided in \ref tutorial-multi-threading. - \sa vpScopedLock */ class vp_deprecated vpMutex @@ -159,8 +155,6 @@ class vp_deprecated vpMutex } \endcode - More examples are provided in \ref tutorial-multi-threading. - \sa vpMutex */ class vpScopedLock diff --git a/modules/core/include/visp3/core/vpThread.h b/modules/core/include/visp3/core/vpThread.h index 672ba84ffb..be654680a1 100644 --- a/modules/core/include/visp3/core/vpThread.h +++ b/modules/core/include/visp3/core/vpThread.h @@ -59,13 +59,6 @@ This class implements native pthread functionalities if available, or native Windows threading capabilities if pthread is not available under Windows. - - There are two examples implemented in testMutex.cpp and testThread.cpp to - show how to use this class. The content of test-thread.cpp that highlights - the main functionalities of this class is given hereafter: \snippet - testThread.cpp Code - - More examples are provided in \ref tutorial-multi-threading. */ class vp_deprecated vpThread {