From 73be3ddbae88099081d45819bfec0872271a7145 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Fri, 6 Oct 2023 14:16:20 +0200 Subject: [PATCH 01/43] [CORPS] Moved the vp::equalizeHistogram implementation in the vpHistogram class --- modules/core/include/visp3/core/vpHistogram.h | 1 + .../core/src/tools/histogram/vpHistogram.cpp | 46 +++++++++++++++ modules/imgproc/src/vpImgproc.cpp | 57 +++---------------- 3 files changed, 55 insertions(+), 49 deletions(-) diff --git a/modules/core/include/visp3/core/vpHistogram.h b/modules/core/include/visp3/core/vpHistogram.h index 36afaeaec2..20dabd6a1d 100644 --- a/modules/core/include/visp3/core/vpHistogram.h +++ b/modules/core/include/visp3/core/vpHistogram.h @@ -234,6 +234,7 @@ class VISP_EXPORT vpHistogram }; void calculate(const vpImage &I, unsigned int nbins = 256, unsigned int nbThreads = 1); + void equalize(const vpImage &I, vpImage &Iout); void display(const vpImage &I, const vpColor &color = vpColor::white, unsigned int thickness = 2, unsigned int maxValue_ = 0); diff --git a/modules/core/src/tools/histogram/vpHistogram.cpp b/modules/core/src/tools/histogram/vpHistogram.cpp index 9d34be9dd7..de74e105c6 100644 --- a/modules/core/src/tools/histogram/vpHistogram.cpp +++ b/modules/core/src/tools/histogram/vpHistogram.cpp @@ -330,6 +330,52 @@ void vpHistogram::calculate(const vpImage &I, unsigned int nbins, } } +void vpHistogram::equalize(const vpImage &I, vpImage &Iout) +{ + // Compute the histogram + calculate(I); + + // Calculate the cumulative distribution function + unsigned int cdf[256]; + unsigned int cdfMin = std::numeric_limits::max(), cdfMax = 0; + unsigned int minValue = std::numeric_limits::max(), maxValue = 0; + cdf[0] = histogram[0]; + + if (cdf[0] < cdfMin && cdf[0] > 0) { + cdfMin = cdf[0]; + minValue = 0; + } + + for (unsigned int i = 1; i < 256; i++) { + cdf[i] = cdf[i - 1] + histogram[i]; + + if (cdf[i] < cdfMin && cdf[i] > 0) { + cdfMin = cdf[i]; + minValue = i; + } + + if (cdf[i] > cdfMax) { + cdfMax = cdf[i]; + maxValue = i; + } + } + + unsigned int nbPixels = I.getWidth() * I.getHeight(); + if (nbPixels == cdfMin) { + // Only one brightness value in the image + return; + } + + // Construct the look-up table + unsigned char lut[256]; + for (unsigned int x = minValue; x <= maxValue; x++) { + lut[x] = vpMath::round((cdf[x] - cdfMin) / (double)(nbPixels - cdfMin) * 255.0); + } + + Iout = I; + Iout.performLut(lut); +} + /*! Display the histogram distribution in an image, the minimal image size is 36x36 px. 
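A minimal usage sketch of the relocated equalization (the image names below are illustrative, not part of the patch): the new vpHistogram::equalize() computes the histogram, its cumulative distribution function and the resulting look-up table in one call, and the two-image overload of vp::equalizeHistogram() now simply forwards to it.

\code
#include <visp3/core/vpHistogram.h>

vpImage<unsigned char> I;    // grabbed grayscale image
vpImage<unsigned char> Iout; // equalized output
vpHistogram hist;
hist.equalize(I, Iout); // computes the histogram, its CDF and applies the resulting LUT
\endcode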
diff --git a/modules/imgproc/src/vpImgproc.cpp b/modules/imgproc/src/vpImgproc.cpp index d607689620..3e7fa52d62 100644 --- a/modules/imgproc/src/vpImgproc.cpp +++ b/modules/imgproc/src/vpImgproc.cpp @@ -110,60 +110,19 @@ void adjust(const vpImage &I1, vpImage &I2, double alpha, double void equalizeHistogram(vpImage &I) { - if (I.getWidth() * I.getHeight() == 0) { + vpImage Icpy = I; + vp::equalizeHistogram(Icpy, I); +} + +void equalizeHistogram(const vpImage &I1, vpImage &I2) +{ + if (I1.getWidth() * I1.getHeight() == 0) { return; } // Calculate the histogram vpHistogram hist; - hist.calculate(I); - - // Calculate the cumulative distribution function - unsigned int cdf[256]; - unsigned int cdfMin = /*std::numeric_limits::max()*/ UINT_MAX, cdfMax = 0; - unsigned int minValue = - /*std::numeric_limits::max()*/ UINT_MAX, - maxValue = 0; - cdf[0] = hist[0]; - - if (cdf[0] < cdfMin && cdf[0] > 0) { - cdfMin = cdf[0]; - minValue = 0; - } - - for (unsigned int i = 1; i < 256; i++) { - cdf[i] = cdf[i - 1] + hist[i]; - - if (cdf[i] < cdfMin && cdf[i] > 0) { - cdfMin = cdf[i]; - minValue = i; - } - - if (cdf[i] > cdfMax) { - cdfMax = cdf[i]; - maxValue = i; - } - } - - unsigned int nbPixels = I.getWidth() * I.getHeight(); - if (nbPixels == cdfMin) { - // Only one brightness value in the image - return; - } - - // Construct the look-up table - unsigned char lut[256]; - for (unsigned int x = minValue; x <= maxValue; x++) { - lut[x] = vpMath::round((cdf[x] - cdfMin) / (double)(nbPixels - cdfMin) * 255.0); - } - - I.performLut(lut); -} - -void equalizeHistogram(const vpImage &I1, vpImage &I2) -{ - I2 = I1; - vp::equalizeHistogram(I2); + hist.equalize(I1, I2); } void equalizeHistogram(vpImage &I, bool useHSV) From 3fe2f212d74421efa63c55674920b7ea35a8ad52 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Fri, 6 Oct 2023 14:20:32 +0200 Subject: [PATCH 02/43] [CORPS] The vpImageFilter::filter functions are now templated both in terms of image type and filter type. Adapted calls to these methods in some classes. [WIP] Added a first draft of Canny auto-threshold [CORPS] Changed the way the gradients are computed in vpCannyEdgeDetection and vpCircleHoughTransform classes --- .../include/visp3/core/vpCannyEdgeDetection.h | 33 +- .../core/include/visp3/core/vpImageFilter.h | 271 ++----- .../core/src/image/vpCannyEdgeDetection.cpp | 49 +- modules/core/src/image/vpImageFilter.cpp | 753 ++++++++++-------- .../visp3/imgproc/vpCircleHoughTransform.h | 30 +- .../imgproc/src/vpCircleHoughTransform.cpp | 39 +- 6 files changed, 592 insertions(+), 583 deletions(-) diff --git a/modules/core/include/visp3/core/vpCannyEdgeDetection.h b/modules/core/include/visp3/core/vpCannyEdgeDetection.h index 0ca547ddac..d6ee6a6f46 100644 --- a/modules/core/include/visp3/core/vpCannyEdgeDetection.h +++ b/modules/core/include/visp3/core/vpCannyEdgeDetection.h @@ -59,11 +59,13 @@ class VISP_EXPORT vpCannyEdgeDetection // // Gaussian smoothing attributes int m_gaussianKernelSize; /*!< Size of the Gaussian filter kernel used to smooth the input image. Must be an odd number.*/ float m_gaussianStdev; /*!< Standard deviation of the Gaussian filter.*/ + vpArray2D m_fg; /*!< Array that contains the Gaussian kernel.*/ // // Gradient computation attributes bool m_areGradientAvailable; /*!< Set to true if the user provides the gradient images, false otherwise. 
In the latter case, the class will compute the gradients.*/ - vpArray2D m_fg; /*!< Array that contains the Gaussian kernel.*/ - vpArray2D m_fgDg; /*!< Array that contains the derivative of the Gaussian kernel.*/ + unsigned int m_sobelAperture; /*!< The size of the Sobel kernels used to compute the gradients of the image.*/ + vpArray2D m_sobelX; /*!< Array that contains the Sobel kernel along the X-axis.*/ + vpArray2D m_sobelY; /*!< Array that contains the Sobel kernel along the Y-axis.*/ vpImage m_dIx; /*!< X-axis gradient.*/ vpImage m_dIy; /*!< Y-axis gradient.*/ @@ -82,10 +84,14 @@ class VISP_EXPORT vpCannyEdgeDetection /** @name Constructors and initialization */ //@{ /** - * \brief Initialize the Gaussian filters used to filter the input image and - * to compute its gradients. + * \brief Initialize the Gaussian filters used to filter the input image. */ void initGaussianFilters(); + + /** + * \brief Initialize the Sobel filters used to compute the input image gradients. + */ + void initSobelFilters(); //@} /** @name Different steps methods */ @@ -151,10 +157,11 @@ class VISP_EXPORT vpCannyEdgeDetection * * \param[in] gaussianKernelSize : The size of the Gaussian filter kernel. Must be odd. * \param[in] gaussianStdev : The standard deviation of the Gaussian filter. + * \param[in] sobelAperture : The size of the Sobel filters kernel. Must be odd. * \param[in] lowerThreshold : The lower threshold of the hysteresis thresholding step. If negative, will be computed from the upper threshold. * \param[in] upperThreshold : The upper threshold of the hysteresis thresholding step. If negative, will be computed from the median of the gray values of the image. */ - vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev, + vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev, const unsigned int &sobelAperture, const float &lowerThreshold = -1., const float &upperThreshold = -1.); // // Configuration from files @@ -187,6 +194,7 @@ class VISP_EXPORT vpCannyEdgeDetection detector.m_gaussianKernelSize = j.value("gaussianSize", detector.m_gaussianKernelSize); detector.m_gaussianStdev = j.value("gaussianStdev", detector.m_gaussianStdev); detector.m_lowerThreshold = j.value("lowerThreshold", detector.m_lowerThreshold); + detector.m_sobelAperture = j.value("sobelAperture", detector.m_sobelAperture); detector.m_upperThreshold = j.value("upperThreshold", detector.m_upperThreshold); } @@ -202,6 +210,7 @@ class VISP_EXPORT vpCannyEdgeDetection {"gaussianSize", detector.m_gaussianKernelSize}, {"gaussianStdev", detector.m_gaussianStdev}, {"lowerThreshold", detector.m_lowerThreshold}, + {"sobelAperture", detector.m_sobelAperture}, {"upperThreshold", detector.m_upperThreshold} }; } #endif @@ -284,6 +293,20 @@ class VISP_EXPORT vpCannyEdgeDetection m_gaussianStdev = stdev; initGaussianFilters(); } + + /** + * @brief Set the Gaussian Filters kernel size and standard deviation + * and initialize the aforementioned filters. + * + * \param[in] kernelSize : The size of the Gaussian filters kernel. + * \param[in] stdev : The standard deviation of the Gaussian filters used to blur and + * compute the gradient of the image. 
+ */ + inline void setSobelAperture(const unsigned int &sobelAperture) + { + m_sobelAperture = sobelAperture; + initGaussianFilters(); + } //@} }; #endif diff --git a/modules/core/include/visp3/core/vpImageFilter.h b/modules/core/include/visp3/core/vpImageFilter.h index 054fcba05b..4f0e572be7 100644 --- a/modules/core/include/visp3/core/vpImageFilter.h +++ b/modules/core/include/visp3/core/vpImageFilter.h @@ -80,7 +80,7 @@ class VISP_EXPORT vpImageFilter \param r : coordinates (row) of the pixel \param c : coordinates (column) of the pixel */ - template static double derivativeFilterX(const vpImage &I, unsigned int r, unsigned int c) + template static double derivativeFilterX(const vpImage &I, unsigned int r, unsigned int c) { return (2047.0 * (I[r][c + 1] - I[r][c - 1]) + 913.0 * (I[r][c + 2] - I[r][c - 2]) + 112.0 * (I[r][c + 3] - I[r][c - 3])) / 8418.0; @@ -93,7 +93,7 @@ class VISP_EXPORT vpImageFilter \param r : coordinates (row) of the pixel \param c : coordinates (column) of the pixel */ - template static double derivativeFilterY(const vpImage &I, unsigned int r, unsigned int c) + template static double derivativeFilterY(const vpImage &I, unsigned int r, unsigned int c) { return (2047.0 * (I[r + 1][c] - I[r - 1][c]) + 913.0 * (I[r + 2][c] - I[r - 2][c]) + 112.0 * (I[r + 3][c] - I[r - 3][c])) / 8418.0; @@ -112,8 +112,8 @@ class VISP_EXPORT vpImageFilter \sa vpImageFilter::getGaussianDerivativeKernel() */ - template - static FilterType derivativeFilterX(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) + template + static FilterType derivativeFilterX(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { unsigned int i; FilterType result; @@ -139,8 +139,8 @@ class VISP_EXPORT vpImageFilter \sa vpImageFilter::getGaussianDerivativeKernel() */ - template - static FilterType derivativeFilterY(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) + template + static FilterType derivativeFilterY(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { unsigned int i; FilterType result; @@ -181,8 +181,8 @@ class VISP_EXPORT vpImageFilter \f] Only pixels in the input image fully covered by the kernel are considered. */ - template - static void filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve = false) + template + static void filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve = false) { unsigned int size_y = M.getRows(), size_x = M.getCols(); unsigned int half_size_y = size_y / 2, half_size_x = size_x / 2; @@ -221,6 +221,9 @@ class VISP_EXPORT vpImageFilter } } + template + static void filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve = false) = delete; + /*! Apply a filter to an image: \f[ @@ -233,8 +236,8 @@ class VISP_EXPORT vpImageFilter \param M : Filter kernel. \param convolve : If true, perform a convolution otherwise a correlation. 
*/ - template - static void filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, + template + static void filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve = false) { unsigned int size = M.getRows(); @@ -281,24 +284,13 @@ class VISP_EXPORT vpImageFilter } } - static void sepFilter(const vpImage &I, vpImage &If, const vpColVector &kernelH, const vpColVector &kernelV); + template + static void filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve) = delete; - /*! - Apply a separable filter. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : The original image. - \param GI : The filtered image. - \param filter : The separable filter. - \param size : The size of the filter. - */ - template - static void filter(const vpImage &I, vpImage &GI, const FilterType *filter, unsigned int size) - { - vpImage GIx; - filterX(I, GIx, filter, size); - filterY(GIx, GI, filter, size); - GIx.destroy(); - } + template + static void filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve) = delete; + + static void sepFilter(const vpImage &I, vpImage &If, const vpColVector &kernelH, const vpColVector &kernelV); /*! Apply a separable filter. @@ -308,12 +300,12 @@ class VISP_EXPORT vpImageFilter \param filter: The separable filter. \param size: The size of the filter. */ - template - static void filter(const vpImage &I, vpImage &GI, const FilterType *filter, unsigned int size) + template + static void filter(const vpImage &I, vpImage &GI, const FilterType *filter, unsigned int size) { vpImage GIx; - filterX(I, GIx, filter, size); - filterY(GIx, GI, filter, size); + filterX(I, GIx, filter, size); + filterY(GIx, GI, filter, size); GIx.destroy(); } @@ -326,36 +318,19 @@ class VISP_EXPORT vpImageFilter return (unsigned char)((1. * I[i - 2][j] + 4. * I[i - 1][j] + 6. * I[i][j] + 4. * I[i + 1][j] + 1. 
* I[i + 2][j]) / 16.); } - template - static void filterX(const vpImage &I, vpImage &dIx, const FilterType *filter, unsigned int size) - { - dIx.resize(I.getHeight(), I.getWidth()); - for (unsigned int i = 0; i < I.getHeight(); i++) { - for (unsigned int j = 0; j < (size - 1) / 2; j++) { - dIx[i][j] = vpImageFilter::filterXLeftBorder(I, i, j, filter, size); - } - for (unsigned int j = (size - 1) / 2; j < I.getWidth() - (size - 1) / 2; j++) { - dIx[i][j] = vpImageFilter::filterX(I, i, j, filter, size); - } - for (unsigned int j = I.getWidth() - (size - 1) / 2; j < I.getWidth(); j++) { - dIx[i][j] = vpImageFilter::filterXRightBorder(I, i, j, filter, size); - } - } - } - - template - static void filterX(const vpImage &I, vpImage &dIx, const FilterType *filter, unsigned int size) + template + static void filterX(const vpImage &I, vpImage &dIx, const FilterType *filter, unsigned int size) { dIx.resize(I.getHeight(), I.getWidth()); for (unsigned int i = 0; i < I.getHeight(); i++) { for (unsigned int j = 0; j < (size - 1) / 2; j++) { - dIx[i][j] = vpImageFilter::filterXLeftBorder(I, i, j, filter, size); + dIx[i][j] = vpImageFilter::filterXLeftBorder(I, i, j, filter, size); } for (unsigned int j = (size - 1) / 2; j < I.getWidth() - (size - 1) / 2; j++) { - dIx[i][j] = vpImageFilter::filterX(I, i, j, filter, size); + dIx[i][j] = vpImageFilter::filterX(I, i, j, filter, size); } for (unsigned int j = I.getWidth() - (size - 1) / 2; j < I.getWidth(); j++) { - dIx[i][j] = vpImageFilter::filterXRightBorder(I, i, j, filter, size); + dIx[i][j] = vpImageFilter::filterXRightBorder(I, i, j, filter, size); } } } @@ -365,8 +340,8 @@ class VISP_EXPORT vpImageFilter static void filterXG(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); static void filterXB(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); - template - static inline FilterType filterX(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) + template + static inline FilterType filterX(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { FilterType result; @@ -414,8 +389,8 @@ class VISP_EXPORT vpImageFilter return result + filter[0] * I[r][c].B; } - template - static inline FilterType filterXLeftBorder(const vpImage &I, unsigned int r, unsigned int c, + template + static inline FilterType filterXLeftBorder(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { FilterType result; @@ -479,8 +454,8 @@ class VISP_EXPORT vpImageFilter return result + filter[0] * I[r][c].B; } - template - static inline FilterType filterXRightBorder(const vpImage &I, unsigned int r, unsigned int c, + template + static inline FilterType filterXRightBorder(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { FilterType result; @@ -544,103 +519,34 @@ class VISP_EXPORT vpImageFilter return result + filter[0] * I[r][c].B; } - template - static inline FilterType filterX(const vpImage &I, unsigned int r, unsigned int c, - const FilterType *filter, unsigned int size) - { - FilterType result; - - result = 0; - - for (unsigned int i = 1; i <= (size - 1) / 2; i++) { - result += filter[i] * (I[r][c + i] + I[r][c - i]); - } - return result + filter[0] * I[r][c]; - } - - template - static inline FilterType filterXLeftBorder(const vpImage &I, unsigned int r, unsigned int c, - const FilterType *filter, unsigned int size) - { - FilterType result; - - result = 0; - - for 
(unsigned int i = 1; i <= (size - 1) / 2; i++) { - if (c > i) - result += filter[i] * (I[r][c + i] + I[r][c - i]); - else - result += filter[i] * (I[r][c + i] + I[r][i - c]); - } - return result + filter[0] * I[r][c]; - } - - template - static inline FilterType filterXRightBorder(const vpImage &I, unsigned int r, unsigned int c, - const FilterType *filter, unsigned int size) - { - FilterType result; - - result = 0; - - for (unsigned int i = 1; i <= (size - 1) / 2; i++) { - if (c + i < I.getWidth()) - result += filter[i] * (I[r][c + i] + I[r][c - i]); - else - result += filter[i] * (I[r][2 * I.getWidth() - c - i - 1] + I[r][c - i]); - } - return result + filter[0] * I[r][c]; - } - - template - static void filterY(const vpImage &I, vpImage &dIy, const FilterType *filter, unsigned int size) - { - dIy.resize(I.getHeight(), I.getWidth()); - for (unsigned int i = 0; i < (size - 1) / 2; i++) { - for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterYTopBorder(I, i, j, filter, size); - } - } - for (unsigned int i = (size - 1) / 2; i < I.getHeight() - (size - 1) / 2; i++) { - for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterY(I, i, j, filter, size); - } - } - for (unsigned int i = I.getHeight() - (size - 1) / 2; i < I.getHeight(); i++) { - for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterYBottomBorder(I, i, j, filter, size); - } - } - } - static void filterY(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); static void filterYR(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); static void filterYG(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); static void filterYB(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); - template - static void filterY(const vpImage &I, vpImage &dIy, const FilterType *filter, unsigned int size) + template + static void filterY(const vpImage &I, vpImage &dIy, const FilterType *filter, unsigned int size) { dIy.resize(I.getHeight(), I.getWidth()); for (unsigned int i = 0; i < (size - 1) / 2; i++) { for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterYTopBorder(I, i, j, filter, size); + dIy[i][j] = vpImageFilter::filterYTopBorder(I, i, j, filter, size); } } for (unsigned int i = (size - 1) / 2; i < I.getHeight() - (size - 1) / 2; i++) { for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterY(I, i, j, filter, size); + dIy[i][j] = vpImageFilter::filterY(I, i, j, filter, size); } } for (unsigned int i = I.getHeight() - (size - 1) / 2; i < I.getHeight(); i++) { for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterYBottomBorder(I, i, j, filter, size); + dIy[i][j] = vpImageFilter::filterYBottomBorder(I, i, j, filter, size); } } } - template - static inline FilterType filterY(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) + template + static inline FilterType filterY(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { FilterType result; @@ -687,8 +593,8 @@ class VISP_EXPORT vpImageFilter return result + filter[0] * I[r][c].B; } - template - static inline FilterType filterYTopBorder(const vpImage &I, unsigned int r, unsigned int c, + template + static inline FilterType filterYTopBorder(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { FilterType result; 
@@ -749,8 +655,8 @@ class VISP_EXPORT vpImageFilter return result + filter[0] * I[r][c].B; } - template - static inline FilterType filterYBottomBorder(const vpImage &I, unsigned int r, unsigned int c, + template + static inline FilterType filterYBottomBorder(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { FilterType result; @@ -814,54 +720,6 @@ class VISP_EXPORT vpImageFilter return result + filter[0] * I[r][c].B; } - template - static inline FilterType filterYTopBorder(const vpImage &I, unsigned int r, unsigned int c, - const FilterType *filter, unsigned int size) - { - FilterType result; - - result = 0; - - for (unsigned int i = 1; i <= (size - 1) / 2; i++) { - if (r > i) - result += filter[i] * (I[r + i][c] + I[r - i][c]); - else - result += filter[i] * (I[r + i][c] + I[i - r][c]); - } - return result + filter[0] * I[r][c]; - } - - template - static inline FilterType filterYBottomBorder(const vpImage &I, unsigned int r, unsigned int c, - const FilterType *filter, unsigned int size) - { - FilterType result; - - result = 0; - - for (unsigned int i = 1; i <= (size - 1) / 2; i++) { - if (r + i < I.getHeight()) - result += filter[i] * (I[r + i][c] + I[r - i][c]); - else - result += filter[i] * (I[2 * I.getHeight() - r - i - 1][c] + I[r - i][c]); - } - return result + filter[0] * I[r][c]; - } - - template - static inline FilterType filterY(const vpImage &I, unsigned int r, unsigned int c, - const FilterType *filter, unsigned int size) - { - FilterType result; - - result = 0; - - for (unsigned int i = 1; i <= (size - 1) / 2; i++) { - result += filter[i] * (I[r + i][c] + I[r - i][c]); - } - return result + filter[0] * I[r][c]; - } - /*! Apply a Gaussian blur to an image. \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. @@ -874,44 +732,20 @@ class VISP_EXPORT vpImageFilter \sa getGaussianKernel() to know which kernel is used. */ - template - static void gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size = 7, FilterType sigma = 0., bool normalize = true) + template + static void gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size = 7, FilterType sigma = 0., bool normalize = true) { FilterType *fg = new FilterType[(size + 1) / 2]; vpImageFilter::getGaussianKernel(fg, size, sigma, normalize); vpImage GIx; - vpImageFilter::filterX(I, GIx, fg, size); - vpImageFilter::filterY(GIx, GI, fg, size); + vpImageFilter::filterX(I, GIx, fg, size); + vpImageFilter::filterY(GIx, GI, fg, size); GIx.destroy(); delete[] fg; } static void gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size = 7, double sigma = 0., bool normalize = true); - /*! - Apply a Gaussian blur to a double image. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : Input double image. - \param GI : Filtered image. - \param size : Filter size. This value should be odd. - \param sigma : Gaussian standard deviation. If it is equal to zero or - negative, it is computed from filter size as sigma = (size-1)/6. - \param normalize : Flag indicating whether to normalize the filter coefficients or not. - - \sa getGaussianKernel() to know which kernel is used. 
- */ - template - static void gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size = 7, FilterType sigma = 0., bool normalize = true) - { - FilterType *fg = new FilterType[(size + 1) / 2]; - vpImageFilter::getGaussianKernel(fg, size, sigma, normalize); - vpImage GIx; - vpImageFilter::filterX(I, GIx, fg, size); - vpImageFilter::filterY(GIx, GI, fg, size); - GIx.destroy(); - delete[] fg; - } - /*! Apply a 5x5 Gaussian filter to an image pixel. @@ -1077,7 +911,7 @@ class VISP_EXPORT vpImageFilter const FilterType *gaussianDerivativeKernel, unsigned int size) { vpImage GIy; - vpImageFilter::filterY(I, GIy, gaussianKernel, size); + vpImageFilter::filterY(I, GIy, gaussianKernel, size); vpImageFilter::getGradX(GIy, dIx, gaussianDerivativeKernel, size); } @@ -1139,7 +973,7 @@ class VISP_EXPORT vpImageFilter const FilterType *gaussianDerivativeKernel, unsigned int size) { vpImage GIx; - vpImageFilter::filterX(I, GIx, gaussianKernel, size); + vpImageFilter::filterX(I, GIx, gaussianKernel, size); vpImageFilter::getGradY(GIx, dIy, gaussianDerivativeKernel, size); } @@ -1222,9 +1056,10 @@ class VISP_EXPORT vpImageFilter return 1 / 16.0; } +static float computeCannyThreshold(const vpImage &I, float &lowerThresh); + #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) static float computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_blur, float &lowerThresh); - static float computeCannyThreshold(const vpImage &I, float &lowerThresh); static float median(const cv::Mat &cv_I); static float median(const vpImage &Isrc); static std::vector median(const vpImage &Isrc); diff --git a/modules/core/src/image/vpCannyEdgeDetection.cpp b/modules/core/src/image/vpCannyEdgeDetection.cpp index a9a050cd36..716f164a8c 100644 --- a/modules/core/src/image/vpCannyEdgeDetection.cpp +++ b/modules/core/src/image/vpCannyEdgeDetection.cpp @@ -40,21 +40,25 @@ vpCannyEdgeDetection::vpCannyEdgeDetection() : m_gaussianKernelSize(3) , m_gaussianStdev(1.) , m_areGradientAvailable(false) + , m_sobelAperture(3) , m_lowerThreshold(-1.) , m_upperThreshold(-1.) 
{ initGaussianFilters(); + initSobelFilters(); } -vpCannyEdgeDetection::vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev +vpCannyEdgeDetection::vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev, const unsigned int &sobelAperture , const float &lowerThreshold, const float &upperThreshold) : m_gaussianKernelSize(gaussianKernelSize) , m_gaussianStdev(gaussianStdev) , m_areGradientAvailable(false) + , m_sobelAperture(sobelAperture) , m_lowerThreshold(lowerThreshold) , m_upperThreshold(upperThreshold) { initGaussianFilters(); + initSobelFilters(); } #ifdef VISP_HAVE_NLOHMANN_JSON @@ -86,6 +90,7 @@ vpCannyEdgeDetection::initFromJSON(const std::string &jsonPath) *this = j; // Call from_json(const json& j, vpDetectionCircle2D& *this) to read json file.close(); initGaussianFilters(); + initSobelFilters(); } #endif @@ -97,8 +102,18 @@ vpCannyEdgeDetection::initGaussianFilters() } m_fg.resize(1, (m_gaussianKernelSize + 1)/2); vpImageFilter::getGaussianKernel(m_fg.data, m_gaussianKernelSize, m_gaussianStdev, false); - m_fgDg.resize(1, (m_gaussianKernelSize + 1)/2); - vpImageFilter::getGaussianDerivativeKernel(m_fgDg.data, m_gaussianKernelSize, m_gaussianStdev, false); +} + +void +vpCannyEdgeDetection::initSobelFilters() +{ + if ((m_sobelAperture % 2) == 0) { + throw(vpException(vpException::badValue, "The Sobel kernel size should be odd")); + } + m_sobelX.resize(m_sobelAperture, m_sobelAperture); + vpImageFilter::getSobelKernelX(m_sobelX.data, (m_sobelAperture - 1)/2); + m_sobelY.resize(m_sobelAperture, m_sobelAperture); + vpImageFilter::getSobelKernelY(m_sobelY.data, (m_sobelAperture - 1)/2); } // // Detection methods @@ -139,12 +154,15 @@ vpCannyEdgeDetection::detect(const vpImage &I) // // Step 4: hysteresis thresholding float upperThreshold = m_upperThreshold; - float lowerThreshold = m_lowerThreshold; - if (m_lowerThreshold < 0) { + if(upperThreshold < 0) { + upperThreshold = vpImageFilter::computeCannyThreshold(I, lowerThreshold); + } + else if (m_lowerThreshold < 0) { // Applying Canny recommendation to have the upper threshold 3 times greater than the lower threshold. 
lowerThreshold = m_upperThreshold / 3.f; } + std::cout << "lt = " << lowerThreshold << " ; ut = " << upperThreshold << std::endl; performHysteresisThresholding(lowerThreshold, upperThreshold); @@ -156,18 +174,15 @@ vpCannyEdgeDetection::detect(const vpImage &I) void vpCannyEdgeDetection::performFilteringAndGradientComputation(const vpImage &I) { - vpImageFilter::getGradXGauss2D(I, - m_dIx, - m_fg.data, - m_fgDg.data, - m_gaussianKernelSize - ); - vpImageFilter::getGradYGauss2D(I, - m_dIy, - m_fg.data, - m_fgDg.data, - m_gaussianKernelSize - ); + // Computing the Gaussian blurr + vpImage Iblur; + vpImage GIx; + vpImageFilter::filterX(I, GIx, m_fg.data, m_gaussianKernelSize); + vpImageFilter::filterY(GIx, Iblur, m_fg.data, m_gaussianKernelSize); + + // Computing the gradients + vpImageFilter::filter(Iblur,m_dIx,m_sobelX); + vpImageFilter::filter(Iblur,m_dIy,m_sobelY); } /** diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 63da6cc2e2..239f31d2b0 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -35,6 +35,7 @@ #include #include +#include #include #include #include @@ -42,19 +43,33 @@ /** * \cond DO_NOT_DOCUMENT */ -template<> -void vpImageFilter::filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve); +/*template<>*/template +void vpImageFilter::filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve); -template<> -void vpImageFilter::filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve); +/*template<>*/template +void vpImageFilter::filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve); template <> -void vpImageFilter::filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, +void vpImageFilter::filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve); template <> -void vpImageFilter::filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, +void vpImageFilter::filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve); + +/*template<>*/template +void vpImageFilter::filter(const vpImage &I, vpImage &GI, const float *filter, + unsigned int size); + +/*template<>*/template +void vpImageFilter::filter(const vpImage &I, vpImage &GI, const double *filter, + unsigned int size); + +/*template<>*/template +void vpImageFilter::filter(const vpImage &I, vpImage &GI, const float *filter, unsigned int size); + +/*template<>*/template +void vpImageFilter::filter(const vpImage &I, vpImage &GI, const double *filter, unsigned int size); /** * \endcond */ @@ -142,266 +157,22 @@ void vpImageFilter::sepFilter(const vpImage &I, vpImage & } } -#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) -/** - * \brief Calculates the median value of a single channel. - * The algorithm is based on based on https://github.com/arnaudgelas/OpenCVExamples/blob/master/cvMat/Statistics/Median/Median.cpp - * \param[in] channel : Single channel image in OpenCV format. 
- */ -float vpImageFilter::median(const cv::Mat &channel) -{ - float m = (channel.rows * channel.cols) / 2.f; - int bin = 0; - float med = -1.0f; - - int histSize = 256; - float range[] = { 0, 256 }; - const float *histRange = { range }; - bool uniform = true; - bool accumulate = false; - cv::Mat hist; - cv::calcHist(&channel, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, uniform, accumulate); - - for (int i = 0; i < histSize && med < 0.0; ++i) { - bin += cvRound(hist.at(i)); - if (bin > m && med < 0.0) - med = static_cast(i); - } - - return med; -} - -/** - * \brief Calculates the median value of a single channel. - * The algorithm is based on based on https://github.com/arnaudgelas/OpenCVExamples/blob/master/cvMat/Statistics/Median/Median.cpp - * \param[in] Isrc : Gray-level image in ViSP format. - * \return Gray level image median value. - * \sa \ref vpImageFilter::median() "vpImageFilter::median(const cv::Mat)" - */ -float vpImageFilter::median(const vpImage &Isrc) -{ - cv::Mat cv_I; - vpImageConvert::convert(Isrc, cv_I); - return median(cv_I); -} - -/** - * \brief Calculates the median value of a vpRGBa image. - * The result is ordered in RGB format. - * \param[in] Isrc : RGB image in ViSP format. Alpha channel is ignored. - * \return std::vector meds such as meds[0] = red-channel-median, meds[1] = green-channel-median - * and meds[2] = blue-channel-median. - * \sa \ref vpImageFilter::median() "vpImageFilter::median(const cv::Mat)" - */ -std::vector vpImageFilter::median(const vpImage &Isrc) -{ - cv::Mat cv_I_bgr; - vpImageConvert::convert(Isrc, cv_I_bgr); - std::vector channels; - cv::split(cv_I_bgr, channels); - std::vector meds(3); - const int orderMeds[] = { 2, 1, 0 }; // To keep the order R, G, B - const int orderCvChannels[] = { 0, 1, 2 }; // Because the order of the cv::Mat is B, G, R - for (unsigned int i = 0; i < 3; i++) { - meds[orderMeds[i]] = median(channels[orderCvChannels[i]]); - } - return meds; -} - -/** - * \brief Compute the upper Canny edge filter threshold. - * - * \param[in] cv_I : The image, in cv format. - * \param[in] p_cv_blur : If different from nullptr, must contain a blurred version of cv_I. - * \param[out] lowerThresh : The lower threshold for the Canny edge filter. - * \return The upper Canny edge filter threshold. - */ -float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_blur, float &lowerThresh) -{ - cv::Mat cv_I_blur; - if (p_cv_blur != nullptr) { - cv_I_blur = *p_cv_blur; - } - else { - cv::GaussianBlur(cv_I, cv_I_blur, cv::Size(9, 9), 2, 2); - } - - // Subsample image to reach a 256 x 256 size - int req_size = 256; - int orig_size = std::min(static_cast(cv_I.rows), static_cast(cv_I.cols)); - int scale_down = std::max(1, static_cast(orig_size / req_size)); - cv::Mat cv_I_scaled_down; - resize(cv_I_blur, cv_I_scaled_down, cv::Size(), scale_down, scale_down, cv::INTER_NEAREST); - - float median_pix = vpImageFilter::median(cv_I_scaled_down); - float lower = std::max(0.f, 0.7f * median_pix); - float upper = std::min(255.f, 1.3f * median_pix); - upper = std::max(1.f, upper); - lowerThresh = lower; - return upper; -} - -/** - * \brief Compute the upper Canny edge filter threshold. - * - * \param[in] I : The gray-scale image, in ViSP format. - * \param[in] lowerThresh : Canny lower threshold. - * \return The upper Canny edge filter threshold. 
- */ -float vpImageFilter::computeCannyThreshold(const vpImage &I, float &lowerThresh) -{ - cv::Mat cv_I; - vpImageConvert::convert(I, cv_I); - return computeCannyThreshold(cv_I, nullptr, lowerThresh); -} -#endif - -/*! - Apply the Canny edge operator on the image \e Isrc and return the resulting - image \e Ires. - - The following example shows how to use the method: - - \code -#include -#include - -int main() -{ - // Constants for the Canny operator. - const unsigned int gaussianFilterSize = 5; - const double thresholdCanny = 15; - const unsigned int apertureSobel = 3; - - // Image for the Canny edge operator - vpImage Isrc; - vpImage Icanny; - - // First grab the source image Isrc. - - // Apply the Canny edge operator and set the Icanny image. - vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, thresholdCanny, apertureSobel); - return (0); -} - \endcode - - \param Isrc : Image to apply the Canny edge detector to. - \param Ires : Filtered image (255 means an edge, 0 otherwise). - \param gaussianFilterSize : The size of the mask of the Gaussian filter to - apply (an odd number). - \param thresholdCanny : The upper threshold for the Canny operator. Only value - greater than this value are marked as an edge. If negative, it will be automatically - computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third - of the thresholdCanny . - \param apertureSobel : Size of the mask for the Sobel operator (odd number). -*/ -void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, - unsigned int gaussianFilterSize, float thresholdCanny, unsigned int apertureSobel) -{ - vpImageFilter::canny(Isrc, Ires, gaussianFilterSize, thresholdCanny / 3.f, thresholdCanny, apertureSobel); -} - -/*! - Apply the Canny edge operator on the image \e Isrc and return the resulting - image \e Ires. - - The following example shows how to use the method: - - \code -#include -#include - -int main() -{ - // Constants for the Canny operator. - const unsigned int gaussianFilterSize = 5; - const float upperThresholdCanny = 15; - const float lowerThresholdCanny = 5; - const unsigned int apertureSobel = 3; - - // Image for the Canny edge operator - vpImage Isrc; - vpImage Icanny; - - // First grab the source image Isrc. - - // Apply the Canny edge operator and set the Icanny image. - vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, lowerThresholdCanny, upperThresholdCanny, apertureSobel); - return (0); -} - \endcode - - \param Isrc : Image to apply the Canny edge detector to. - \param Ires : Filtered image (255 means an edge, 0 otherwise). - \param gaussianFilterSize : The size of the mask of the Gaussian filter to - apply (an odd number). - \param lowerThreshold : The lower threshold for the Canny operator. Values lower - than this value are rejected. If negative, it will be set to one third - of the thresholdCanny . - \param upperThreshold : The upper threshold for the Canny operator. Only value - greater than this value are marked as an edge. If negative, it will be automatically - computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third - of the thresholdCanny . - \param apertureSobel : Size of the mask for the Sobel operator (odd number). 
-*/ -void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, - unsigned int gaussianFilterSize, float lowerThreshold, float upperThreshold, unsigned int apertureSobel) -{ -#if defined(HAVE_OPENCV_IMGPROC) - cv::Mat img_cvmat, cv_I_blur, cv_dx, cv_dy, edges_cvmat; - vpImageConvert::convert(Isrc, img_cvmat); - cv::GaussianBlur(img_cvmat, cv_I_blur, cv::Size((int)gaussianFilterSize, (int)gaussianFilterSize), 0, 0); - cv::Sobel(cv_I_blur, cv_dx, CV_16S, 1, 0, apertureSobel); - cv::Sobel(cv_I_blur, cv_dy, CV_16S, 0, 1, apertureSobel); - float upperCannyThresh = upperThreshold; - float lowerCannyThresh = lowerThreshold; - if (upperCannyThresh < 0) { - upperCannyThresh = computeCannyThreshold(img_cvmat, &cv_I_blur, lowerCannyThresh); - } - else if (lowerCannyThresh < 0) { - lowerCannyThresh = upperCannyThresh / 3.f; - } - cv::Canny(cv_dx, cv_dy, edges_cvmat, lowerCannyThresh, upperCannyThresh, false); - vpImageConvert::convert(edges_cvmat, Ires); -#else - (void)apertureSobel; - float upperCannyThresh = upperThreshold; - float lowerCannyThresh = lowerThreshold; - if (upperCannyThresh < 0) { - throw(vpException(vpException::badValue, "OpenCV imgproc module missing to be able to compute automatically the Canny thresholds")); - } - else if (lowerCannyThresh < 0) { - lowerCannyThresh = upperCannyThresh / 3.; - } - vpCannyEdgeDetection edgeDetector(gaussianFilterSize, 0.1, lowerCannyThresh, upperCannyThresh); - Ires = edgeDetector.detect(Isrc); -#endif -} - /** * \cond DO_NOT_DOCUMENT */ -template<> -void vpImageFilter::filter(const vpImage &I, vpImage &GI, const float *filter, +/*template<>*/template +void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const float *filter, unsigned int size); -template<> -void vpImageFilter::filter(const vpImage &I, vpImage &GI, const double *filter, +/*template<>*/template +void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); -template<> -void vpImageFilter::filter(const vpImage &I, vpImage &GI, const float *filter, unsigned int size); - -template<> -void vpImageFilter::filter(const vpImage &I, vpImage &GI, const double *filter, unsigned int size); - -template<> -void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const float *filter, - unsigned int size); +/*template<>*/template +void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const float *filter, unsigned int size); -template<> -void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const double *filter, - unsigned int size); +/*template<>*/template +void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); /** * \endcond */ @@ -431,19 +202,19 @@ void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, cons /** * \cond DO_NOT_DOCUMENT */ -template<> -void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const float *filter, unsigned int size); - -template<> -void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); - -template<> -void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const float *filter, +/*template<>*/template +void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const float *filter, unsigned int size); -template<> -void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const double *filter, +/*template<>*/template +void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); + +/*template<>*/template +void vpImageFilter::filterY(const vpImage &I, vpImage 
&dIy, const float *filter, unsigned int size); + +/*template<>*/template +void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); /** * \endcond */ @@ -477,35 +248,17 @@ void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, cons /** * \cond DO_NOT_DOCUMENT */ -template<> -void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const float *filter, unsigned int size); +/*template<>*/template +void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, float sigma, bool normalize); -template<> -void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size) -{ - dIy.resize(I.getHeight(), I.getWidth()); - for (unsigned int i = 0; i < (size - 1) / 2; i++) { - for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterYTopBorder(I, i, j, filter, size); - } - } - for (unsigned int i = (size - 1) / 2; i < I.getHeight() - (size - 1) / 2; i++) { - for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterY(I, i, j, filter, size); - } - } - for (unsigned int i = I.getHeight() - (size - 1) / 2; i < I.getHeight(); i++) { - for (unsigned int j = 0; j < I.getWidth(); j++) { - dIy[i][j] = vpImageFilter::filterYBottomBorder(I, i, j, filter, size); - } - } -} +/*template<>*/template +void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, double sigma, bool normalize); -template<> -void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, float sigma, bool normalize); +/*template<>*/template +void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, float sigma, bool normalize); -template<> -void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, double sigma, bool normalize); +/*template<>*/template +void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, double sigma, bool normalize); /** * \endcond */ @@ -535,13 +288,7 @@ void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, /** * \cond DO_NOT_DOCUMENT */ -template<> -void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, float sigma, bool normalize); - -template<> -void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, double sigma, bool normalize); - -template<> +/*template<>*/template void vpImageFilter::getGaussianKernel(float *filter, unsigned int size, float sigma, bool normalize); template <> @@ -550,71 +297,71 @@ void vpImageFilter::getGaussianDerivativeKernel(float *filter, unsigned i template <> void vpImageFilter::getGaussianDerivativeKernel(double *filter, unsigned int size, double sigma, bool normalize); -template<> +/*template<>*/template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx); -template<> +/*template<>*/template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx); -template<> +/*template<>*/template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy); -template<> +/*template<>*/template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy); -template<> +/*template<>*/template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx, const float *filter, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx, const float *filter, 
unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy, const float *filter, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy, const float *filter, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradXGauss2D(const vpImage &I, vpImage &dIx, const float *gaussianKernel, const float *gaussianDerivativeKernel, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradXGauss2D(const vpImage &I, vpImage &dIx, const double *gaussianKernel, const double *gaussianDerivativeKernel, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradXGauss2D(const vpImage &I, vpImage &dIx, const float *gaussianKernel, const float *gaussianDerivativeKernel, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradXGauss2D(const vpImage &I, vpImage &dIx, const double *gaussianKernel, const double *gaussianDerivativeKernel, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradYGauss2D(const vpImage &I, vpImage &dIy, const float *gaussianKernel, const float *gaussianDerivativeKernel, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradYGauss2D(const vpImage &I, vpImage &dIy, const double *gaussianKernel, const double *gaussianDerivativeKernel, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradYGauss2D(const vpImage &I, vpImage &dIy, const float *gaussianKernel, const float *gaussianDerivativeKernel, unsigned int size); -template<> +/*template<>*/template void vpImageFilter::getGradYGauss2D(const vpImage &I, vpImage &dIy, const double *gaussianKernel, const double *gaussianDerivativeKernel, unsigned int size); /** @@ -674,17 +421,373 @@ void vpImageFilter::getGaussYPyramidal(const vpImage &I, vpImage< /** * \cond DO_NOT_DOCUMENT */ -template<> +/*template<>*/template double vpImageFilter::getSobelKernelX(double *filter, unsigned int size); -template<> +/*template<>*/template float vpImageFilter::getSobelKernelX(float *filter, unsigned int size); -template<> +/*template<>*/template double vpImageFilter::getSobelKernelY(double *filter, unsigned int size); -template<> +/*template<>*/template float vpImageFilter::getSobelKernelY(float *filter, unsigned int size); /** * \endcond */ + + +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) +/** + * \brief Calculates the median value of a single channel. + * The algorithm is based on based on https://github.com/arnaudgelas/OpenCVExamples/blob/master/cvMat/Statistics/Median/Median.cpp + * \param[in] channel : Single channel image in OpenCV format. 
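+ * A minimal usage sketch (assuming OpenCV is available and `I` is a grabbed vpImage<unsigned char>; the names are illustrative):
+ * \code
+ * cv::Mat cv_I;
+ * vpImageConvert::convert(I, cv_I); // convert the ViSP grayscale image to a single-channel cv::Mat
+ * float med = vpImageFilter::median(cv_I);
+ * \endcode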
+ */ +float vpImageFilter::median(const cv::Mat &channel) +{ + float m = (channel.rows * channel.cols) / 2.f; + int bin = 0; + float med = -1.0f; + + int histSize = 256; + float range[] = { 0, 256 }; + const float *histRange = { range }; + bool uniform = true; + bool accumulate = false; + cv::Mat hist; + cv::calcHist(&channel, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, uniform, accumulate); + + for (int i = 0; i < histSize && med < 0.0; ++i) { + bin += cvRound(hist.at(i)); + if (bin > m && med < 0.0) + med = static_cast(i); + } + + return med; +} + +/** + * \brief Calculates the median value of a single channel. + * The algorithm is based on based on https://github.com/arnaudgelas/OpenCVExamples/blob/master/cvMat/Statistics/Median/Median.cpp + * \param[in] Isrc : Gray-level image in ViSP format. + * \return Gray level image median value. + * \sa \ref vpImageFilter::median() "vpImageFilter::median(const cv::Mat)" + */ +float vpImageFilter::median(const vpImage &Isrc) +{ + cv::Mat cv_I; + vpImageConvert::convert(Isrc, cv_I); + return median(cv_I); +} + +/** + * \brief Calculates the median value of a vpRGBa image. + * The result is ordered in RGB format. + * \param[in] Isrc : RGB image in ViSP format. Alpha channel is ignored. + * \return std::vector meds such as meds[0] = red-channel-median, meds[1] = green-channel-median + * and meds[2] = blue-channel-median. + * \sa \ref vpImageFilter::median() "vpImageFilter::median(const cv::Mat)" + */ +std::vector vpImageFilter::median(const vpImage &Isrc) +{ + cv::Mat cv_I_bgr; + vpImageConvert::convert(Isrc, cv_I_bgr); + std::vector channels; + cv::split(cv_I_bgr, channels); + std::vector meds(3); + const int orderMeds[] = { 2, 1, 0 }; // To keep the order R, G, B + const int orderCvChannels[] = { 0, 1, 2 }; // Because the order of the cv::Mat is B, G, R + for (unsigned int i = 0; i < 3; i++) { + meds[orderMeds[i]] = median(channels[orderCvChannels[i]]); + } + return meds; +} + +/** + * \brief Compute the upper Canny edge filter threshold. + * + * \param[in] cv_I : The image, in cv format. + * \param[in] p_cv_blur : If different from nullptr, must contain a blurred version of cv_I. + * \param[out] lowerThresh : The lower threshold for the Canny edge filter. + * \return The upper Canny edge filter threshold. 
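+ * A minimal usage sketch (assuming OpenCV is available; `I`, `lowerThresh` and `upperThresh` are illustrative names):
+ * \code
+ * cv::Mat cv_I;
+ * vpImageConvert::convert(I, cv_I); // I is a vpImage<unsigned char>
+ * float lowerThresh = -1.f;
+ * float upperThresh = vpImageFilter::computeCannyThreshold(cv_I, nullptr, lowerThresh); // nullptr => the blurred image is computed internally
+ * \endcode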
+ */ +float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_blur, float &lowerThresh) +{ + const unsigned int gaussianKernelSize = 5; + const float gaussianStdev = 2.f; + const unsigned int sobelAperture = 3; + const float nonEdgeRate = 0.6; + const float thresholdRate = 0.6; + double w = cv_I.cols; + double h = cv_I.rows; + int bins = 256; + cv::Mat sobel, sobelx, sobely, sobelxabs, sobelyabs, img_blur; + + if(p_cv_blur == nullptr) { + // Apply Gaussian blur to the image + cv::Size gsz(gaussianKernelSize, gaussianKernelSize); + cv::GaussianBlur(cv_I, img_blur,gsz, gaussianStdev); + } + else { + img_blur = *p_cv_blur; + } + + // Compute the gradient of the blurred image + cv::Sobel(img_blur, sobelx, CV_16S, 1, 0, sobelAperture, 1, 0); + cv::convertScaleAbs(sobelx, sobelxabs); + cv::Sobel(img_blur, sobely, CV_16S, 0, 1, sobelAperture, 1, 0); + cv::convertScaleAbs(sobely, sobelyabs); + cv::addWeighted(sobelxabs, 1, sobelyabs, 1, 0, sobel); + sobel.convertTo(sobel, CV_8U); + + // Equalize the histogram + cv::Mat equalized; + cv::equalizeHist(sobel, equalized); + + // Compute the upper threshold from the equalized histogram + cv::Mat hist; + const float range[] = {0.f, 256.f}; // The upper boundary is exclusive + const float* ranges[] = { range }; + int channels[] = {0}; + bool dims = 1; // The number of dimensions of the histogram + int histSize[] = {bins}; + bool uniform = true; + bool accumulate = false; // Clear the histogram at the beginning of calcHist if false, does not clear it otherwise + cv::calcHist(&equalized, 1, channels, cv::Mat(), hist, dims, histSize, ranges, uniform, accumulate); + float accu = 0; + float t = (float)(nonEdgeRate * w * h); + float bon = 0; + for (int i = 0; i < bins; i++) { + float tf = hist.at(i); + accu = accu + tf; + if (accu > t) { + bon = (float)i; + break; + } + } + float upperThresh = std::max(bon, 1.f); + lowerThresh = thresholdRate * bon; + std::cout << "[computeCannyThreshold::cv] ut = " << upperThresh << " ; lt = " << lowerThresh << std::endl; + return upperThresh; +} +#endif + +template +void computeMeanMaxStdev(const vpImage &I, float &mean, float &max, float &stdev) +{ + max = std::numeric_limits::epsilon(); + mean = 0.; + stdev = 0.; + unsigned int nbRows = I.getRows(); + unsigned int nbCols = I.getCols(); + float scale = 1. / ((float)nbRows * (float)nbCols); + for (unsigned int r = 0; r < nbRows; r++) { + for (unsigned int c = 0; c < nbCols; c++) { + mean += I[r][c]; + max = std::max(max, static_cast(I[r][c])); + } + } + mean *= scale; + for (unsigned int r = 0; r < nbRows; r++) { + for (unsigned int c = 0; c < nbCols; c++) { + stdev += (I[r][c] - mean) * (I[r][c] - mean); + } + } + stdev *= scale; + stdev = std::sqrt(stdev); +} + + +/** + * \brief Compute the upper Canny edge filter threshold. + * + * \param[in] I : The gray-scale image, in ViSP format. + * \param[in] lowerThresh : Canny lower threshold. + * \return The upper Canny edge filter threshold. 
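+ * A minimal usage sketch (assuming `I` is a grabbed vpImage<unsigned char>; the variable names and kernel sizes are illustrative):
+ * \code
+ * float lowerThresh = -1.f;
+ * float upperThresh = vpImageFilter::computeCannyThreshold(I, lowerThresh);
+ * vpImage<unsigned char> Iedges;
+ * vpImageFilter::canny(I, Iedges, 5, lowerThresh, upperThresh, 3);
+ * \endcode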
+ */ +float vpImageFilter::computeCannyThreshold(const vpImage &I, float &lowerThresh) +{ + const unsigned int gaussianKernelSize = 5; + const float gaussianStdev = 2.; + const unsigned int apertureSize = 3; + const float nonEdgeRate = 0.6; + const float thresholdRate = 0.6; + + double w = I.getWidth(); + double h = I.getHeight(); + + // Computing the Gaussian blurr + gradients of the image + vpImage Iblur; + vpImageFilter::gaussianBlur(I, Iblur, gaussianKernelSize, gaussianStdev); + + vpArray2D sobelX(apertureSize, apertureSize); // Sobel kernel along x + vpImageFilter::getSobelKernelX(sobelX.data, (apertureSize - 1)/2); + vpArray2D sobelY(apertureSize, apertureSize); // Sobel kernel along x + vpImageFilter::getSobelKernelY(sobelY.data, (apertureSize - 1)/2); + vpImage dIx, dIy; + vpImageFilter::filter(Iblur,dIx,sobelX); + vpImageFilter::filter(Iblur,dIy,sobelY); + + // Computing the gradient of the image + vpImage dI(h, w); + for (unsigned int r = 0; r < h; r++) { + for (unsigned int c = 0; c < w; c++) { + float dx = (float)dIx[r][c]; + float dy = (float)dIy[r][c]; + float gradient = std::sqrt(dx * dx + dy *dy); + float gradientClamped = std::min(gradient, (float)std::numeric_limits::max()); + dI[r][c] = gradientClamped; + } + } + float mean = 0.f, max = 0.f, stdev = 0.f; + computeMeanMaxStdev(dI, mean, max, stdev); + std::stringstream title; + title << "[computeCannyThreshold::ViSP] Gradient, mean = " << mean << " +/- " << stdev << "; max = " << max; + std::cout << title.str() << std::endl; + + // Equalize the histogram + vpImage dIabs_equalized; + vpHistogram hist; + hist.equalize(dI, dIabs_equalized); + + // Compute the histogram + const unsigned int nbBins = 256; + hist.calculate(dIabs_equalized, nbBins); + float accu = 0; + float t = (float)(nonEdgeRate * w * h); + float bon = 0; + for (unsigned int i = 0; i < nbBins; i++) { + float tf = hist[i]; + accu = accu + tf; + if (accu > t) { + bon = (float)i; + break; + } + } + float upperThresh = std::max(bon, 1.f); + lowerThresh = thresholdRate * bon; + std::cout << "[computeCannyThreshold::ViSP] ut = " << upperThresh << " ; lt = " << lowerThresh << std::endl; + return upperThresh; +} + +/*! + Apply the Canny edge operator on the image \e Isrc and return the resulting + image \e Ires. + + The following example shows how to use the method: + + \code +#include +#include + +int main() +{ + // Constants for the Canny operator. + const unsigned int gaussianFilterSize = 5; + const double thresholdCanny = 15; + const unsigned int apertureSobel = 3; + + // Image for the Canny edge operator + vpImage Isrc; + vpImage Icanny; + + // First grab the source image Isrc. + + // Apply the Canny edge operator and set the Icanny image. + vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, thresholdCanny, apertureSobel); + return (0); +} + \endcode + + \param Isrc : Image to apply the Canny edge detector to. + \param Ires : Filtered image (255 means an edge, 0 otherwise). + \param gaussianFilterSize : The size of the mask of the Gaussian filter to + apply (an odd number). + \param thresholdCanny : The upper threshold for the Canny operator. Only value + greater than this value are marked as an edge. If negative, it will be automatically + computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third + of the thresholdCanny . + \param apertureSobel : Size of the mask for the Sobel operator (odd number). 
+*/ +void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, + unsigned int gaussianFilterSize, float thresholdCanny, unsigned int apertureSobel) +{ + vpImageFilter::canny(Isrc, Ires, gaussianFilterSize, thresholdCanny / 3.f, thresholdCanny, apertureSobel); +} + +/*! + Apply the Canny edge operator on the image \e Isrc and return the resulting + image \e Ires. + + The following example shows how to use the method: + + \code +#include +#include + +int main() +{ + // Constants for the Canny operator. + const unsigned int gaussianFilterSize = 5; + const float upperThresholdCanny = 15; + const float lowerThresholdCanny = 5; + const unsigned int apertureSobel = 3; + + // Image for the Canny edge operator + vpImage Isrc; + vpImage Icanny; + + // First grab the source image Isrc. + + // Apply the Canny edge operator and set the Icanny image. + vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, lowerThresholdCanny, upperThresholdCanny, apertureSobel); + return (0); +} + \endcode + + \param Isrc : Image to apply the Canny edge detector to. + \param Ires : Filtered image (255 means an edge, 0 otherwise). + \param gaussianFilterSize : The size of the mask of the Gaussian filter to + apply (an odd number). + \param lowerThreshold : The lower threshold for the Canny operator. Values lower + than this value are rejected. If negative, it will be set to one third + of the thresholdCanny . + \param upperThreshold : The upper threshold for the Canny operator. Only value + greater than this value are marked as an edge. If negative, it will be automatically + computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third + of the thresholdCanny . + \param apertureSobel : Size of the mask for the Sobel operator (odd number). 
+*/ +void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, + unsigned int gaussianFilterSize, float lowerThreshold, float upperThreshold, unsigned int apertureSobel) +{ +#if defined(HAVE_OPENCV_IMGPROC) + cv::Mat img_cvmat, cv_I_blur, cv_dx, cv_dy, edges_cvmat; + vpImageConvert::convert(Isrc, img_cvmat); + cv::GaussianBlur(img_cvmat, cv_I_blur, cv::Size((int)gaussianFilterSize, (int)gaussianFilterSize), 0, 0); + cv::Sobel(cv_I_blur, cv_dx, CV_16S, 1, 0, apertureSobel); + cv::Sobel(cv_I_blur, cv_dy, CV_16S, 0, 1, apertureSobel); + float upperCannyThresh = upperThreshold; + float lowerCannyThresh = lowerThreshold; + if (upperCannyThresh < 0) { + upperCannyThresh = computeCannyThreshold(img_cvmat, &cv_I_blur, lowerCannyThresh); + } + else if (lowerCannyThresh < 0) { + lowerCannyThresh = upperCannyThresh / 3.f; + } + cv::Canny(cv_dx, cv_dy, edges_cvmat, lowerCannyThresh, upperCannyThresh, false); + vpImageConvert::convert(edges_cvmat, Ires); +#else + (void)apertureSobel; + float upperCannyThresh = upperThreshold; + float lowerCannyThresh = lowerThreshold; + if (upperCannyThresh < 0) { + upperCannyThresh = computeCannyThreshold(Isrc, lowerCannyThresh); + } + else if (lowerCannyThresh < 0) { + lowerCannyThresh = upperCannyThresh / 3.; + } + vpCannyEdgeDetection edgeDetector(gaussianFilterSize, 0.1, lowerCannyThresh, upperCannyThresh); + Ires = edgeDetector.detect(Isrc); +#endif +} \ No newline at end of file diff --git a/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h b/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h index 9c503700c4..071fe036b4 100644 --- a/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h +++ b/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h @@ -468,7 +468,7 @@ class VISP_EXPORT vpCircleHoughTransform void init(const vpCircleHoughTransformParameters &algoParams); /** - * \brief Set the parameters of the Gaussian filter, that computes the + * \brief Set the parameters of the Gaussian filter, that permits to blur the * gradients of the image. * * \param[in] kernelSize The size of the Gaussian kernel. Must be an odd value. @@ -490,6 +490,23 @@ class VISP_EXPORT vpCircleHoughTransform initGaussianFilters(); } + /** + * \brief Set the parameters of the Sobel filters, that computes the + * gradients of the image. + * + * \param[in] apertureSize The size of the Sobel filters kernel. Must be an odd value. + */ + inline void setSobelAperture(const unsigned int &apertureSize) + { + m_algoParams.m_sobelKernelSize = apertureSize; + + if ((m_algoParams.m_sobelKernelSize % 2) != 1) { + throw vpException(vpException::badValue, "Sobel Kernel size should be odd."); + } + + initSobelFilters(); + } + /*! * Set the threshold for the Canny operator. * Only value greater than this value are marked as an edge. @@ -750,11 +767,15 @@ class VISP_EXPORT vpCircleHoughTransform private: /** - * \brief Initialize the Gaussian filters used to blur the image and - * compute the gradient images. + * \brief Initialize the Gaussian filters used to blur the image. */ void initGaussianFilters(); + /** + * \brief Initialize the Gaussian filters used to blur the image compute the gradient images. + */ + void initSobelFilters(); + /** * \brief Perform Gaussian smoothing on the input image to reduce the noise * that would perturbate the edge detection. 
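As a usage note for the vpImageFilter::canny() overload implemented above: passing negative thresholds triggers the automatic threshold computation, on both the OpenCV and the ViSP code paths. A short sketch, where the image file name is only an example:

\code
#include <visp3/core/vpImageFilter.h>
#include <visp3/io/vpImageIo.h>

int main()
{
  vpImage<unsigned char> Isrc, Icanny;
  vpImageIo::read(Isrc, "image.png"); // any gray-scale image

  const unsigned int gaussianFilterSize = 5;
  const unsigned int apertureSobel = 3;
  // Negative lower and upper thresholds: both are deduced from the gradient histogram.
  vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, -1.f, -1.f, apertureSobel);
  return 0;
}
\endcode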
@@ -819,9 +840,10 @@ class VISP_EXPORT vpCircleHoughTransform vpCircleHoughTransformParameters m_algoParams; /*!< Attributes containing all the algorithm parameters.*/ // // Gaussian smoothing attributes vpArray2D m_fg; - vpArray2D m_fgDg; // // Gradient computation attributes + vpArray2D m_sobelX; + vpArray2D m_sobelY; vpImage m_dIx; /*!< Gradient along the x-axis of the input image.*/ vpImage m_dIy; /*!< Gradient along the y-axis of the input image.*/ diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index a5e3d5b198..21c9a05fd9 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -39,12 +39,14 @@ vpCircleHoughTransform::vpCircleHoughTransform() : m_algoParams() { initGaussianFilters(); + initSobelFilters(); } vpCircleHoughTransform::vpCircleHoughTransform(const vpCircleHoughTransformParameters &algoParams) : m_algoParams(algoParams) { initGaussianFilters(); + initSobelFilters(); } void @@ -52,6 +54,7 @@ vpCircleHoughTransform::init(const vpCircleHoughTransformParameters &algoParams) { m_algoParams = algoParams; initGaussianFilters(); + initSobelFilters(); } vpCircleHoughTransform::~vpCircleHoughTransform() @@ -87,6 +90,7 @@ vpCircleHoughTransform::initFromJSON(const std::string &jsonPath) m_algoParams = j; // Call from_json(const json& j, vpDetectorDNN& *this) to read json file.close(); initGaussianFilters(); + initSobelFilters(); } void @@ -101,11 +105,22 @@ vpCircleHoughTransform::initGaussianFilters() { m_fg.resize(1, (m_algoParams.m_gaussianKernelSize + 1)/2); vpImageFilter::getGaussianKernel(m_fg.data, m_algoParams.m_gaussianKernelSize, m_algoParams.m_gaussianStdev, false); - m_fgDg.resize(1, (m_algoParams.m_gaussianKernelSize + 1)/2); - vpImageFilter::getGaussianDerivativeKernel(m_fgDg.data, m_algoParams.m_gaussianKernelSize, m_algoParams.m_gaussianStdev, false); m_cannyVisp.setGaussianFilterParameters(m_algoParams.m_gaussianKernelSize, m_algoParams.m_gaussianStdev); } +void +vpCircleHoughTransform::initSobelFilters() +{ + if ((m_algoParams.m_sobelKernelSize % 2) != 1) { + throw vpException(vpException::badValue, "Sobel Kernel size should be odd."); + } + m_sobelX.resize(m_algoParams.m_sobelKernelSize , m_algoParams.m_sobelKernelSize ); + vpImageFilter::getSobelKernelX(m_sobelX.data, (m_algoParams.m_sobelKernelSize - 1)/2); + m_sobelY.resize(m_algoParams.m_sobelKernelSize , m_algoParams.m_sobelKernelSize ); + vpImageFilter::getSobelKernelY(m_sobelY.data, (m_algoParams.m_sobelKernelSize - 1)/2); + m_cannyVisp.setSobelAperture(m_algoParams.m_sobelKernelSize); +} + std::vector vpCircleHoughTransform::detect(const vpImage &I) { @@ -210,18 +225,14 @@ vpCircleHoughTransform::detect(const vpImage &I) void vpCircleHoughTransform::computeGradientsAfterGaussianSmoothing(const vpImage &I) { - vpImageFilter::getGradXGauss2D(I, - m_dIx, - m_fg.data, - m_fgDg.data, - m_algoParams.m_gaussianKernelSize - ); - vpImageFilter::getGradYGauss2D(I, - m_dIy, - m_fg.data, - m_fgDg.data, - m_algoParams.m_gaussianKernelSize - ); + // Computing the Gaussian blurr + vpImage Iblur, GIx; + vpImageFilter::filterX(I, GIx, m_fg.data, m_algoParams.m_gaussianKernelSize); + vpImageFilter::filterY(GIx, Iblur, m_fg.data, m_algoParams.m_gaussianKernelSize); + + // Computing the gradients + vpImageFilter::filter(Iblur,m_dIx,m_sobelX); + vpImageFilter::filter(Iblur,m_dIy,m_sobelY); } void From 5fd94451c3ef511e9bdb4c30db406497c6b5177a Mon Sep 17 00:00:00 2001 From: rlagneau Date: Fri, 6 Oct 2023 
16:08:17 +0200 Subject: [PATCH 03/43] [WIP] Working on the non-equalized gradient image works better in most cases --- modules/core/src/image/vpImageFilter.cpp | 44 +++--------------------- 1 file changed, 4 insertions(+), 40 deletions(-) diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 239f31d2b0..172dbea53e 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -572,32 +572,6 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p } #endif -template -void computeMeanMaxStdev(const vpImage &I, float &mean, float &max, float &stdev) -{ - max = std::numeric_limits::epsilon(); - mean = 0.; - stdev = 0.; - unsigned int nbRows = I.getRows(); - unsigned int nbCols = I.getCols(); - float scale = 1. / ((float)nbRows * (float)nbCols); - for (unsigned int r = 0; r < nbRows; r++) { - for (unsigned int c = 0; c < nbCols; c++) { - mean += I[r][c]; - max = std::max(max, static_cast(I[r][c])); - } - } - mean *= scale; - for (unsigned int r = 0; r < nbRows; r++) { - for (unsigned int c = 0; c < nbCols; c++) { - stdev += (I[r][c] - mean) * (I[r][c] - mean); - } - } - stdev *= scale; - stdev = std::sqrt(stdev); -} - - /** * \brief Compute the upper Canny edge filter threshold. * @@ -610,7 +584,7 @@ float vpImageFilter::computeCannyThreshold(const vpImage &I, floa const unsigned int gaussianKernelSize = 5; const float gaussianStdev = 2.; const unsigned int apertureSize = 3; - const float nonEdgeRate = 0.6; + const float nonEdgeRate = 0.8; const float thresholdRate = 0.6; double w = I.getWidth(); @@ -634,25 +608,16 @@ float vpImageFilter::computeCannyThreshold(const vpImage &I, floa for (unsigned int c = 0; c < w; c++) { float dx = (float)dIx[r][c]; float dy = (float)dIy[r][c]; - float gradient = std::sqrt(dx * dx + dy *dy); + float gradient = std::abs(dx) + std::abs(dy); float gradientClamped = std::min(gradient, (float)std::numeric_limits::max()); dI[r][c] = gradientClamped; } } - float mean = 0.f, max = 0.f, stdev = 0.f; - computeMeanMaxStdev(dI, mean, max, stdev); - std::stringstream title; - title << "[computeCannyThreshold::ViSP] Gradient, mean = " << mean << " +/- " << stdev << "; max = " << max; - std::cout << title.str() << std::endl; - - // Equalize the histogram - vpImage dIabs_equalized; - vpHistogram hist; - hist.equalize(dI, dIabs_equalized); // Compute the histogram + vpHistogram hist; const unsigned int nbBins = 256; - hist.calculate(dIabs_equalized, nbBins); + hist.calculate(dI, nbBins); float accu = 0; float t = (float)(nonEdgeRate * w * h); float bon = 0; @@ -666,7 +631,6 @@ float vpImageFilter::computeCannyThreshold(const vpImage &I, floa } float upperThresh = std::max(bon, 1.f); lowerThresh = thresholdRate * bon; - std::cout << "[computeCannyThreshold::ViSP] ut = " << upperThresh << " ; lt = " << lowerThresh << std::endl; return upperThresh; } From eee29b9e99057d46f343847fa7f787567ddfe71e Mon Sep 17 00:00:00 2001 From: rlagneau Date: Mon, 9 Oct 2023 16:58:36 +0200 Subject: [PATCH 04/43] [CORPS] Added choice of the backend (ViSP/OpenCV) and of the filtering + gradient operator (only Gaussian blur + Sobel at the moment) and upper threshold ratio + lower threshold ratio are now parameters --- .../include/visp3/core/vpCannyEdgeDetection.h | 88 +++- .../core/include/visp3/core/vpImageFilter.h | 51 ++- .../core/src/image/vpCannyEdgeDetection.cpp | 56 ++- modules/core/src/image/vpImageFilter.cpp | 415 ++++++++++++++---- .../visp3/imgproc/vpCircleHoughTransform.h | 
113 ++++- .../imgproc/src/vpCircleHoughTransform.cpp | 60 +-- .../hough-transform/config/detector_img.json | 4 + 7 files changed, 613 insertions(+), 174 deletions(-) diff --git a/modules/core/include/visp3/core/vpCannyEdgeDetection.h b/modules/core/include/visp3/core/vpCannyEdgeDetection.h index d6ee6a6f46..cc22dd3e02 100644 --- a/modules/core/include/visp3/core/vpCannyEdgeDetection.h +++ b/modules/core/include/visp3/core/vpCannyEdgeDetection.h @@ -56,6 +56,10 @@ class VISP_EXPORT vpCannyEdgeDetection ON_CHECK /*!< This pixel is currently tested to know if it is linked to a strong edge point.*/ } EdgeType; + // Filtering + gradient methods choice + vpImageFilter::vpCannyFilteringAndGradientType m_filteringAndGradientType; /*!< Choice of the filter and + gradient operator to apply before the edge detection step*/ + // // Gaussian smoothing attributes int m_gaussianKernelSize; /*!< Size of the Gaussian filter kernel used to smooth the input image. Must be an odd number.*/ float m_gaussianStdev; /*!< Standard deviation of the Gaussian filter.*/ @@ -73,8 +77,13 @@ class VISP_EXPORT vpCannyEdgeDetection std::map, float> m_edgeCandidateAndGradient; /*!< Map that contains point image coordinates and corresponding gradient value.*/ // // Hysteresis thresholding attributes - float m_lowerThreshold; /*!< Lower threshold for the hysteresis step. If negative, it will be deduced as from m_upperThreshold. */ + float m_lowerThreshold; /*!< Lower threshold for the hysteresis step. If negative, it will be deduced + as from m_upperThreshold. */ + float m_lowerThresholdRatio; /*!< If the thresholds must be computed, the ratio of the upper threshold the lower + threshold is equal: m_lowerThreshold = m_lowerThresholdRatio * m_upperThreshold. */ float m_upperThreshold; /*!< Upper threshold for the hysteresis step.*/ + float m_upperThresholdRatio; /*!< If the thresholds must be computed, the ratio of pixels of the gradient image that + must be lower than the upper threshold \b m_upperThreshold.*/ // // Edge tracking attributes std::map, EdgeType> m_edgePointsCandidates; /*!< Map that contains the strong edge points, i.e. the points for which we know for sure they are edge points, @@ -118,15 +127,15 @@ class VISP_EXPORT vpCannyEdgeDetection * \b m_weakEdgePoints and will be kept in the final edge map only if they are connected * to a strong edge point. * Edge candidates that are below \b m_lowerThreshold are discarded. - * \param lowerThreshold Edge candidates that are below this threshold are definitely not + * \param[in] lowerThreshold Edge candidates that are below this threshold are definitely not * edges. - * \param upperThreshold Edge candidates that are greater than this threshold are classified + * \param[in] upperThreshold Edge candidates that are greater than this threshold are classified * as strong edges. */ void performHysteresisThresholding(const float &lowerThreshold, const float &upperThreshold); /** - * @brief Search recursively for a strong edge in the neighborhood of a weak edge. + * \brief Search recursively for a strong edge in the neighborhood of a weak edge. * * \param[in] coordinates : The coordinates we are checking. * \return true We found a strong edge point in its 8-connected neighborhood. @@ -153,16 +162,25 @@ class VISP_EXPORT vpCannyEdgeDetection vpCannyEdgeDetection(); /** - * \brief Construct a new vpCannyEdgeDetection object. + * \brief Construct a new vpCannyEdgeDetection object that uses Gaussian blur + Sobel operators to compute + * the edge map. 
* * \param[in] gaussianKernelSize : The size of the Gaussian filter kernel. Must be odd. * \param[in] gaussianStdev : The standard deviation of the Gaussian filter. * \param[in] sobelAperture : The size of the Sobel filters kernel. Must be odd. - * \param[in] lowerThreshold : The lower threshold of the hysteresis thresholding step. If negative, will be computed from the upper threshold. - * \param[in] upperThreshold : The upper threshold of the hysteresis thresholding step. If negative, will be computed from the median of the gray values of the image. + * \param[in] lowerThreshold : The lower threshold of the hysteresis thresholding step. If negative, will be computed + * from the upper threshold. + * \param[in] upperThreshold : The upper threshold of the hysteresis thresholding step. If negative, will be computed + * from the histogram of the absolute gradient. + * \param[in] lowerThresholdRatio : If the thresholds must be computed,the lower threshold will be equal to the upper + * threshold times \b lowerThresholdRatio . + * \param[in] upperThresholdRatio : If the thresholds must be computed,the upper threshold will be equal to the value + * such as the number of pixels of the image times \b upperThresholdRatio have an absolute gradient lower than the + * upper threshold. */ vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev, const unsigned int &sobelAperture, - const float &lowerThreshold = -1., const float &upperThreshold = -1.); + const float &lowerThreshold = -1.f, const float &upperThreshold = -1.f, + const float &lowerThresholdRatio = 0.6f, const float &upperThresholdRatio = 0.8f); // // Configuration from files #ifdef VISP_HAVE_NLOHMANN_JSON @@ -186,32 +204,42 @@ class VISP_EXPORT vpCannyEdgeDetection * \brief Read the detector configuration from JSON. All values are optional and if an argument is not present, * the default value defined in the constructor is kept * - * \param j : The JSON object, resulting from the parsing of a JSON file. - * \param detector : The detector that will be initialized from the JSON data. + * \param[in] j : The JSON object, resulting from the parsing of a JSON file. + * \param[out] detector : The detector that will be initialized from the JSON data. */ inline friend void from_json(const json &j, vpCannyEdgeDetection &detector) { + std::string filteringAndGradientName = vpImageFilter::vpCannyFilteringAndGradientTypeToString(detector.m_filteringAndGradientType); + filteringAndGradientName = j.value("filteringAndGradientType", filteringAndGradientName); + detector.m_filteringAndGradientType = vpImageFilter::vpCannyFilteringAndGradientTypeFromString(filteringAndGradientName); detector.m_gaussianKernelSize = j.value("gaussianSize", detector.m_gaussianKernelSize); detector.m_gaussianStdev = j.value("gaussianStdev", detector.m_gaussianStdev); detector.m_lowerThreshold = j.value("lowerThreshold", detector.m_lowerThreshold); + detector.m_lowerThresholdRatio = j.value("lowerThresholdRatio", detector.m_lowerThresholdRatio); detector.m_sobelAperture = j.value("sobelAperture", detector.m_sobelAperture); detector.m_upperThreshold = j.value("upperThreshold", detector.m_upperThreshold); + detector.m_upperThresholdRatio = j.value("upperThresholdRatio", detector.m_upperThresholdRatio); } /** * \brief Parse a vpCannyEdgeDetection object into JSON format. * - * \param j : A JSON parser object. - * \param detector : The vpCannyEdgeDetection object that must be parsed into JSON format. + * \param[out] j : A JSON parser object. 
+ * \param[in] detector : The vpCannyEdgeDetection object that must be parsed into JSON format. */ inline friend void to_json(json &j, const vpCannyEdgeDetection &detector) { + std::string filteringAndGradientName = vpImageFilter::vpCannyFilteringAndGradientTypeToString(detector.m_filteringAndGradientType); j = json { + {"filteringAndGradientType", filteringAndGradientName}, {"gaussianSize", detector.m_gaussianKernelSize}, {"gaussianStdev", detector.m_gaussianStdev}, {"lowerThreshold", detector.m_lowerThreshold}, + {"lowerThresholdRatio", detector.m_lowerThresholdRatio}, {"sobelAperture", detector.m_sobelAperture}, - {"upperThreshold", detector.m_upperThreshold} }; + {"upperThreshold", detector.m_upperThreshold}, + {"upperThresholdRatio", detector.m_upperThresholdRatio} + }; } #endif //@} @@ -249,6 +277,16 @@ class VISP_EXPORT vpCannyEdgeDetection /** @name Setters */ //@{ + /** + * \brief Set the Filtering And Gradient operators to apply to the image before the edge detection operation. + * + * \param[in] type The operators to apply. + */ + inline void setFilteringAndGradientType(const vpImageFilter::vpCannyFilteringAndGradientType &type) + { + m_filteringAndGradientType = type; + } + /** * \brief Set the Gradients of the image that will be processed. * @@ -280,7 +318,25 @@ class VISP_EXPORT vpCannyEdgeDetection } /** - * @brief Set the Gaussian Filters kernel size and standard deviation + * \brief Set the lower and upper Canny Thresholds ratio that are used to compute them automatically. To ask to + * compute automatically the thresholds, you must set the lower and upper thresholds with negative values using the + * appropriate setter. + * + * \sa \ref vpCannyEdgeDetection::setCannyThresholds() "vpCannyEdgeDetection::setCannyThresholds(const float&, const float&)" + * \param[in] lowerThreshRatio : The lower threshold ratio: if the thresholds are computed automatically, the lower + * threshold will be equal to the upper threshold multiplied by \b lowerThreshRatio. + * \param[in] upperThreshRatio : The upper threshold ratio: if the thresholds are computed automatically, the upper + * threshold will be set such as \b upperThreshRatio times the number of pixels of the image have their absolute + * gradient lower then the upper threshold. + */ + inline void setCannyThresholdsRatio(const float &lowerThreshRatio, const float &upperThreshRatio) + { + m_lowerThresholdRatio = lowerThreshRatio; + m_upperThresholdRatio = upperThreshRatio; + } + + /** + * \brief Set the Gaussian Filters kernel size and standard deviation * and initialize the aforementioned filters. * * \param[in] kernelSize : The size of the Gaussian filters kernel. @@ -295,7 +351,7 @@ class VISP_EXPORT vpCannyEdgeDetection } /** - * @brief Set the Gaussian Filters kernel size and standard deviation + * \brief Set the Gaussian Filters kernel size and standard deviation * and initialize the aforementioned filters. * * \param[in] kernelSize : The size of the Gaussian filters kernel. 
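For the JSON support above, a configuration sketch using the keys read by from_json(), assuming ViSP is built with nlohmann json support (VISP_HAVE_NLOHMANN_JSON); the values shown simply repeat the defaults of the constructor:

\code
#include <visp3/core/vpCannyEdgeDetection.h>

#ifdef VISP_HAVE_NLOHMANN_JSON
int main()
{
  // Keys matching from_json() above; every key is optional.
  nlohmann::json j = {
    {"filteringAndGradientType", "gaussianblur+sobel-filtering"},
    {"gaussianSize", 3},
    {"gaussianStdev", 1.0},
    {"sobelAperture", 3},
    {"lowerThreshold", -1.0},
    {"lowerThresholdRatio", 0.6},
    {"upperThreshold", -1.0},
    {"upperThresholdRatio", 0.8}
  };
  vpCannyEdgeDetection detector;
  from_json(j, detector); // friend function above, found by argument-dependent lookup
  return 0;
}
#else
int main() { return 0; }
#endif
\endcode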
@@ -305,7 +361,7 @@ class VISP_EXPORT vpCannyEdgeDetection inline void setSobelAperture(const unsigned int &sobelAperture) { m_sobelAperture = sobelAperture; - initGaussianFilters(); + initSobelFilters(); } //@} }; diff --git a/modules/core/include/visp3/core/vpImageFilter.h b/modules/core/include/visp3/core/vpImageFilter.h index 4f0e572be7..0b14f7b4af 100644 --- a/modules/core/include/visp3/core/vpImageFilter.h +++ b/modules/core/include/visp3/core/vpImageFilter.h @@ -67,11 +67,41 @@ class VISP_EXPORT vpImageFilter { public: - static void canny(const vpImage &I, vpImage &Ic, unsigned int gaussianFilterSize, - float thresholdCanny, unsigned int apertureSobel); + //! Canny filter backends for the edge detection operations + typedef enum vpCannyBackendType + { + CANNY_OPENCV_BACKEND = 0, //!< Use OpenCV + CANNY_VISP_BACKEND = 1, //!< Use ViSP + CANNY_COUNT_BACKEND = 2 //! Number of supported backends + } vpCannyBackendType; + + static std::string vpCannyBackendTypeToString(const vpCannyBackendType &type); + + static vpCannyBackendType vpCannyBackendTypeFromString(const std::string &name); + + //! Canny filter and gradient operators to apply on the image before the edge detection stage + typedef enum vpCannyFilteringAndGradientType + { + CANNY_GBLUR_SOBEL_FILTERING = 0, //!< Apply Gaussian blur + Sobel operator on the input image + CANNY_COUNT_FILTERING = 1 //! Number of supported backends + } vpCannyFilteringAndGradientType; - static void canny(const vpImage &I, vpImage &Ic, unsigned int gaussianFilterSize, - float lowerThresholdCanny, float higherThresholdCanny, unsigned int apertureSobel); + static std::string vpCannyFilteringAndGradientTypeToString(const vpCannyFilteringAndGradientType &type); + + static vpCannyFilteringAndGradientType vpCannyFilteringAndGradientTypeFromString(const std::string &name); + + static void canny(const vpImage &I, vpImage &Ic, const unsigned int &gaussianFilterSize, + const float &thresholdCanny, const unsigned int &apertureSobel); + + static void canny(const vpImage &I, vpImage &Ic, const unsigned int &gaussianFilterSize, + const float &lowerThresholdCanny, const float &higherThresholdCanny, + const unsigned int &apertureSobel); + + static void canny(const vpImage &I, vpImage &Ic, const unsigned int &gaussianFilterSize, + const float &lowerThresholdCanny, const float &higherThresholdCanny, + const unsigned int &apertureSobel, const float &gaussianStdev, const float &lowerThresholdRatio, + const float &upperThresholdRatio, const vpCannyBackendType &cannyBackend, + const vpCannyFilteringAndGradientType &cannyFilteringSteps); /*! Apply a 1x3 derivative filter to an image pixel. @@ -289,7 +319,7 @@ class VISP_EXPORT vpImageFilter template static void filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve) = delete; - + static void sepFilter(const vpImage &I, vpImage &If, const vpColVector &kernelH, const vpColVector &kernelV); /*! 
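A small sketch of the string converters declared above, useful when the backend choice comes from a configuration file or the command line; an unknown name maps to the CANNY_COUNT_BACKEND sentinel:

\code
#include <iostream>
#include <visp3/core/vpImageFilter.h>

int main()
{
  // Names produced by vpCannyBackendTypeToString(): "opencv-backend", "visp-backend".
  vpImageFilter::vpCannyBackendType backend =
      vpImageFilter::vpCannyBackendTypeFromString("visp-backend");
  if (backend == vpImageFilter::CANNY_COUNT_BACKEND) {
    std::cerr << "Unknown Canny backend name" << std::endl;
    return 1;
  }
  std::cout << "Backend: " << vpImageFilter::vpCannyBackendTypeToString(backend) << std::endl;
  return 0;
}
\endcode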
@@ -1056,10 +1086,17 @@ class VISP_EXPORT vpImageFilter return 1 / 16.0; } -static float computeCannyThreshold(const vpImage &I, float &lowerThresh); + static float computeCannyThreshold(const vpImage &I, float &lowerThresh, + const vpImage *p_dIx = nullptr, const vpImage *p_dIy = nullptr, + const unsigned int gaussianKernelSize = 5, + const float gaussianStdev = 2.f, const unsigned int apertureSobel = 3, + const float lowerThresholdRatio = 0.6, const float upperThresholdRatio = 0.8); #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) - static float computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_blur, float &lowerThresh); + static float computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, + float &lowerThresh, const unsigned int gaussianKernelSize = 5, + const float gaussianStdev = 2.f, const unsigned int apertureSobel = 3, + const float lowerThresholdRatio = 0.6, const float upperThresholdRatio = 0.8); static float median(const cv::Mat &cv_I); static float median(const vpImage &Isrc); static std::vector median(const vpImage &Isrc); diff --git a/modules/core/src/image/vpCannyEdgeDetection.cpp b/modules/core/src/image/vpCannyEdgeDetection.cpp index 716f164a8c..a76bde2a29 100644 --- a/modules/core/src/image/vpCannyEdgeDetection.cpp +++ b/modules/core/src/image/vpCannyEdgeDetection.cpp @@ -37,25 +37,33 @@ // // Initialization methods vpCannyEdgeDetection::vpCannyEdgeDetection() - : m_gaussianKernelSize(3) - , m_gaussianStdev(1.) + : m_filteringAndGradientType(vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) + , m_gaussianKernelSize(3) + , m_gaussianStdev(1.f) , m_areGradientAvailable(false) , m_sobelAperture(3) - , m_lowerThreshold(-1.) - , m_upperThreshold(-1.) + , m_lowerThreshold(-1.f) + , m_lowerThresholdRatio(0.6f) + , m_upperThreshold(-1.f) + , m_upperThresholdRatio(0.8f) { initGaussianFilters(); initSobelFilters(); } -vpCannyEdgeDetection::vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev, const unsigned int &sobelAperture - , const float &lowerThreshold, const float &upperThreshold) - : m_gaussianKernelSize(gaussianKernelSize) +vpCannyEdgeDetection::vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev + , const unsigned int &sobelAperture, const float &lowerThreshold, const float &upperThreshold + , const float &lowerThresholdRatio, const float &upperThresholdRatio +) + : m_filteringAndGradientType(vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) + , m_gaussianKernelSize(gaussianKernelSize) , m_gaussianStdev(gaussianStdev) , m_areGradientAvailable(false) , m_sobelAperture(sobelAperture) , m_lowerThreshold(lowerThreshold) + , m_lowerThresholdRatio(lowerThresholdRatio) , m_upperThreshold(upperThreshold) + , m_upperThresholdRatio(upperThresholdRatio) { initGaussianFilters(); initSobelFilters(); @@ -108,7 +116,8 @@ void vpCannyEdgeDetection::initSobelFilters() { if ((m_sobelAperture % 2) == 0) { - throw(vpException(vpException::badValue, "The Sobel kernel size should be odd")); + std::string errMsg("The Sobel kernel (" + std::to_string(m_sobelAperture) + ") should be odd"); + throw(vpException(vpException::badValue, errMsg)); } m_sobelX.resize(m_sobelAperture, m_sobelAperture); vpImageFilter::getSobelKernelX(m_sobelX.data, (m_sobelAperture - 1)/2); @@ -155,14 +164,15 @@ vpCannyEdgeDetection::detect(const vpImage &I) // // Step 4: hysteresis thresholding float upperThreshold = m_upperThreshold; float lowerThreshold = m_lowerThreshold; - if(upperThreshold < 0) { - upperThreshold = 
vpImageFilter::computeCannyThreshold(I, lowerThreshold); + if (upperThreshold < 0) { + upperThreshold = vpImageFilter::computeCannyThreshold(I, lowerThreshold, &m_dIx, &m_dIy, m_gaussianKernelSize, + m_gaussianStdev, m_sobelAperture, m_lowerThresholdRatio, + m_upperThresholdRatio); } else if (m_lowerThreshold < 0) { // Applying Canny recommendation to have the upper threshold 3 times greater than the lower threshold. lowerThreshold = m_upperThreshold / 3.f; } - std::cout << "lt = " << lowerThreshold << " ; ut = " << upperThreshold << std::endl; performHysteresisThresholding(lowerThreshold, upperThreshold); @@ -174,15 +184,21 @@ vpCannyEdgeDetection::detect(const vpImage &I) void vpCannyEdgeDetection::performFilteringAndGradientComputation(const vpImage &I) { - // Computing the Gaussian blurr - vpImage Iblur; - vpImage GIx; - vpImageFilter::filterX(I, GIx, m_fg.data, m_gaussianKernelSize); - vpImageFilter::filterY(GIx, Iblur, m_fg.data, m_gaussianKernelSize); - - // Computing the gradients - vpImageFilter::filter(Iblur,m_dIx,m_sobelX); - vpImageFilter::filter(Iblur,m_dIy,m_sobelY); + if (m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + // Computing the Gaussian blur + vpImage Iblur; + vpImage GIx; + vpImageFilter::filterX(I, GIx, m_fg.data, m_gaussianKernelSize); + vpImageFilter::filterY(GIx, Iblur, m_fg.data, m_gaussianKernelSize); + + // Computing the gradients + vpImageFilter::filter(Iblur, m_dIx, m_sobelX); + vpImageFilter::filter(Iblur, m_dIy, m_sobelY); + } + else { + std::string errmsg("Currently, the only filtering and gradient operators are Gaussian blur + Sobel"); + throw(vpException(vpException::notImplementedError, errmsg)); + } } /** diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 172dbea53e..98dae6772e 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -38,8 +38,96 @@ #include #include #include +#include #include +/** + * \brief Cast a \b vpImageFilter::vpCannyBackendTypeToString into a string, to know its name. + * + * \param[in] type The type that must be casted into a string. + * \return std::string The corresponding name. + */ +std::string vpImageFilter::vpCannyBackendTypeToString(const vpImageFilter::vpCannyBackendType &type) +{ + std::string name; + switch (type) { + case vpCannyBackendType::CANNY_OPENCV_BACKEND: + name = "opencv-backend"; + break; + case vpCannyBackendType::CANNY_VISP_BACKEND: + name = "visp-backend"; + break; + case vpCannyBackendType::CANNY_COUNT_BACKEND: + default: + return "unknown-backend"; + } + return name; +} + +/** + * \brief Cast a string into a \b vpImageFilter::vpCannyBackendTypeToString. + * + * \param[in] name The name of the backend. + * \return vpImageFilter::vpCannyBackendTypeToString The corresponding enumeration value. + */ +vpImageFilter::vpCannyBackendType vpImageFilter::vpCannyBackendTypeFromString(const std::string &name) +{ + vpCannyBackendType type(vpCannyBackendType::CANNY_COUNT_BACKEND); + std::string nameLowerCase = vpIoTools::toLowerCase(name); + unsigned int count = (unsigned int)vpCannyBackendType::CANNY_COUNT_BACKEND; + bool found = false; + for (unsigned int i = 0; i < count && !found; i++) { + vpCannyBackendType temp = (vpCannyBackendType)i; + if (nameLowerCase == vpCannyBackendTypeToString(temp)) { + type = temp; + found = true; + } + } + return type; +} + +/** + * \brief Cast a \b vpImageFilter::vpCannyFilteringAndGradientType into a string, to know its name. 
+ * + * \param[in] type The type that must be casted into a string. + * \return std::string The corresponding name. + */ +std::string vpImageFilter::vpCannyFilteringAndGradientTypeToString(const vpImageFilter::vpCannyFilteringAndGradientType &type) +{ + std::string name; + switch (type) { + case vpCannyFilteringAndGradientType::CANNY_GBLUR_SOBEL_FILTERING: + name = "gaussianblur+sobel-filtering"; + break; + case vpCannyFilteringAndGradientType::CANNY_COUNT_FILTERING: + default: + return "unknown-filtering"; + } + return name; +} + +/** + * \brief Cast a string into a \b vpImageFilter::vpCannyFilteringAndGradientType. + * + * \param[in] name The name of the backend. + * \return vpImageFilter::vpCannyFilteringAndGradientType The corresponding enumeration value. + */ +vpImageFilter::vpCannyFilteringAndGradientType vpImageFilter::vpCannyFilteringAndGradientTypeFromString(const std::string &name) +{ + vpCannyFilteringAndGradientType type(vpCannyFilteringAndGradientType::CANNY_COUNT_FILTERING); + std::string nameLowerCase = vpIoTools::toLowerCase(name); + unsigned int count = (unsigned int)vpCannyFilteringAndGradientType::CANNY_COUNT_FILTERING; + bool found = false; + for (unsigned int i = 0; i < count && !found; i++) { + vpCannyFilteringAndGradientType temp = (vpCannyFilteringAndGradientType)i; + if (nameLowerCase == vpCannyFilteringAndGradientTypeToString(temp)) { + type = temp; + found = true; + } + } + return type; +} + /** * \cond DO_NOT_DOCUMENT */ @@ -119,10 +207,10 @@ void vpImageFilter::filter(const vpImage &I, vpImage &I, vpImage &If, const vpColVector &kernelH, @@ -207,7 +295,7 @@ void vpImageFilter::filterY(const vpImage & unsigned int size); /*template<>*/template -void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const double *filter, +void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); /*template<>*/template @@ -265,12 +353,12 @@ void vpImageFilter::gaussianBlur(const vpImage &I, vpIma /*! Apply a Gaussian blur to RGB color image. - \param I : Input image. - \param GI : Filtered image. - \param size : Filter size. This value should be odd. - \param sigma : Gaussian standard deviation. If it is equal to zero or + \param[in] I : Input image. + \param[out] GI : Filtered image. + \param[in] size : Filter size. This value should be odd. + \param[in] sigma : Gaussian standard deviation. If it is equal to zero or negative, it is computed from filter size as sigma = (size-1)/6. - \param normalize : Flag indicating whether to normalize the filter coefficients or not. + \param[in] normalize : Flag indicating whether to normalize the filter coefficients or not. \sa getGaussianKernel() to know which kernel is used. */ @@ -504,38 +592,48 @@ std::vector vpImageFilter::median(const vpImage &Isrc) } /** - * \brief Compute the upper Canny edge filter threshold. + * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel operators to compute + * the gradient of the image. * * \param[in] cv_I : The image, in cv format. - * \param[in] p_cv_blur : If different from nullptr, must contain a blurred version of cv_I. + * \param[in] p_cv_dIx : If different from nullptr, the gradient of cv_I with regard to the horizontal axis. + * \param[in] p_cv_dIy : If different from nullptr, the gradient of cv_I with regard to the vertical axis. * \param[out] lowerThresh : The lower threshold for the Canny edge filter. + * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). 
+ * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. + * \param[in] apertureSobel : Size of the mask for the Sobel operator (odd number). + * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. + * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to to define + * the upper threshold. * \return The upper Canny edge filter threshold. */ -float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_blur, float &lowerThresh) +float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, + float &lowerThresh, const unsigned int gaussianKernelSize, + const float gaussianStdev, const unsigned int apertureSobel, + const float lowerThresholdRatio, const float upperThresholdRatio) { - const unsigned int gaussianKernelSize = 5; - const float gaussianStdev = 2.f; - const unsigned int sobelAperture = 3; - const float nonEdgeRate = 0.6; - const float thresholdRate = 0.6; double w = cv_I.cols; double h = cv_I.rows; int bins = 256; - cv::Mat sobel, sobelx, sobely, sobelxabs, sobelyabs, img_blur; + cv::Mat sobel, sobelx, sobely, sobelxabs, sobelyabs; - if(p_cv_blur == nullptr) { + if (p_cv_dIx == nullptr || p_cv_dIy == nullptr) { + cv::Mat img_blur; // Apply Gaussian blur to the image cv::Size gsz(gaussianKernelSize, gaussianKernelSize); - cv::GaussianBlur(cv_I, img_blur,gsz, gaussianStdev); + cv::GaussianBlur(cv_I, img_blur, gsz, gaussianStdev); + + // Compute the gradient of the blurred image + cv::Sobel(img_blur, sobelx, CV_16S, 1, 0, apertureSobel, 1, 0); + cv::Sobel(img_blur, sobely, CV_16S, 0, 1, apertureSobel, 1, 0); } else { - img_blur = *p_cv_blur; + sobelx = *p_cv_dIx; + sobely = *p_cv_dIy; } - // Compute the gradient of the blurred image - cv::Sobel(img_blur, sobelx, CV_16S, 1, 0, sobelAperture, 1, 0); + // Compute the absolute gradient of the blurred image G = |dIx| + |dIy| cv::convertScaleAbs(sobelx, sobelxabs); - cv::Sobel(img_blur, sobely, CV_16S, 0, 1, sobelAperture, 1, 0); cv::convertScaleAbs(sobely, sobelyabs); cv::addWeighted(sobelxabs, 1, sobelyabs, 1, 0, sobel); sobel.convertTo(sobel, CV_8U); @@ -546,16 +644,16 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p // Compute the upper threshold from the equalized histogram cv::Mat hist; - const float range[] = {0.f, 256.f}; // The upper boundary is exclusive - const float* ranges[] = { range }; - int channels[] = {0}; + const float range[] = { 0.f, 256.f }; // The upper boundary is exclusive + const float *ranges[] = { range }; + int channels[] = { 0 }; bool dims = 1; // The number of dimensions of the histogram - int histSize[] = {bins}; + int histSize[] = { bins }; bool uniform = true; bool accumulate = false; // Clear the histogram at the beginning of calcHist if false, does not clear it otherwise cv::calcHist(&equalized, 1, channels, cv::Mat(), hist, dims, histSize, ranges, uniform, accumulate); float accu = 0; - float t = (float)(nonEdgeRate * w * h); + float t = (float)(upperThresholdRatio * w * h); float bon = 0; for (int i = 0; i < bins; i++) { float tf = hist.at(i); @@ -566,44 +664,57 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p } } float upperThresh = std::max(bon, 1.f); - lowerThresh = thresholdRate * bon; - std::cout << "[computeCannyThreshold::cv] ut = " << upperThresh << " ; lt = " << lowerThresh << std::endl; + lowerThresh = lowerThresholdRatio 
* bon; return upperThresh; } #endif /** - * \brief Compute the upper Canny edge filter threshold. + * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel operators to compute + * the gradient of the image. * * \param[in] I : The gray-scale image, in ViSP format. + * \param[in] p_dIx : If different from nullptr, must contain the gradient of the image with regard to the horizontal axis. + * \param[in] p_dIy : If different from nullptr, must contain the gradient of the image with regard to the vertical axis. * \param[in] lowerThresh : Canny lower threshold. + * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). + * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. + * \param[in] apertureSobel : Size of the mask for the Sobel operator (odd number). + * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. + * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to define + * the upper threshold. * \return The upper Canny edge filter threshold. */ -float vpImageFilter::computeCannyThreshold(const vpImage &I, float &lowerThresh) +float vpImageFilter::computeCannyThreshold(const vpImage &I, float &lowerThresh, + const vpImage *p_dIx, const vpImage *p_dIy, + const unsigned int gaussianKernelSize, + const float gaussianStdev, const unsigned int apertureSobel, + const float lowerThresholdRatio, const float upperThresholdRatio) { - const unsigned int gaussianKernelSize = 5; - const float gaussianStdev = 2.; - const unsigned int apertureSize = 3; - const float nonEdgeRate = 0.8; - const float thresholdRate = 0.6; - double w = I.getWidth(); double h = I.getHeight(); - // Computing the Gaussian blurr + gradients of the image - vpImage Iblur; - vpImageFilter::gaussianBlur(I, Iblur, gaussianKernelSize, gaussianStdev); + vpImage dI(h, w); + vpImage dIx(h, w), dIy(h, w); + if (p_dIx != nullptr && p_dIy != nullptr) { + dIx = *p_dIx; + dIy = *p_dIy; + } + else { + // Computing the Gaussian blur + gradients of the image + vpImage Iblur; + vpImageFilter::gaussianBlur(I, Iblur, gaussianKernelSize, gaussianStdev); - vpArray2D sobelX(apertureSize, apertureSize); // Sobel kernel along x - vpImageFilter::getSobelKernelX(sobelX.data, (apertureSize - 1)/2); - vpArray2D sobelY(apertureSize, apertureSize); // Sobel kernel along x - vpImageFilter::getSobelKernelY(sobelY.data, (apertureSize - 1)/2); - vpImage dIx, dIy; - vpImageFilter::filter(Iblur,dIx,sobelX); - vpImageFilter::filter(Iblur,dIy,sobelY); + vpArray2D sobelX(apertureSobel, apertureSobel); // Sobel kernel along x + vpImageFilter::getSobelKernelX(sobelX.data, (apertureSobel - 1)/2); + vpArray2D sobelY(apertureSobel, apertureSobel); // Sobel kernel along y + vpImageFilter::getSobelKernelY(sobelY.data, (apertureSobel - 1)/2); - // Computing the gradient of the image - vpImage dI(h, w); + vpImageFilter::filter(Iblur, dIx, sobelX); + vpImageFilter::filter(Iblur, dIy, sobelY); + } + + // Computing the absolute gradient of the image G = |dIx| + |dIy| for (unsigned int r = 0; r < h; r++) { for (unsigned int c = 0; c < w; c++) { float dx = (float)dIx[r][c]; @@ -619,7 +730,7 @@ float vpImageFilter::computeCannyThreshold(const vpImage &I, floa const unsigned int nbBins = 256; hist.calculate(dI, nbBins); float accu = 0; - float t = (float)(nonEdgeRate * w * h); + float t = (float)(upperThresholdRatio * w * h); float bon = 0; for (unsigned int i = 0; i < nbBins; 
i++) { float tf = hist[i]; @@ -630,7 +741,7 @@ float vpImageFilter::computeCannyThreshold(const vpImage &I, floa } } float upperThresh = std::max(bon, 1.f); - lowerThresh = thresholdRate * bon; + lowerThresh = lowerThresholdRatio * bon; return upperThresh; } @@ -663,18 +774,19 @@ int main() } \endcode - \param Isrc : Image to apply the Canny edge detector to. - \param Ires : Filtered image (255 means an edge, 0 otherwise). - \param gaussianFilterSize : The size of the mask of the Gaussian filter to + \param[in] Isrc : Image to apply the Canny edge detector to. + \param[out] Ires : Filtered image (255 means an edge, 0 otherwise). + \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). - \param thresholdCanny : The upper threshold for the Canny operator. Only value + \param[in] thresholdCanny : The upper threshold for the Canny operator. Only value greater than this value are marked as an edge. If negative, it will be automatically computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third of the thresholdCanny . - \param apertureSobel : Size of the mask for the Sobel operator (odd number). + \param[in] apertureSobel : Size of the mask for the Sobel operator (odd number). */ void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, - unsigned int gaussianFilterSize, float thresholdCanny, unsigned int apertureSobel) + const unsigned int &gaussianFilterSize, const float &thresholdCanny, + const unsigned int &apertureSobel) { vpImageFilter::canny(Isrc, Ires, gaussianFilterSize, thresholdCanny / 3.f, thresholdCanny, apertureSobel); } @@ -709,49 +821,166 @@ int main() } \endcode - \param Isrc : Image to apply the Canny edge detector to. - \param Ires : Filtered image (255 means an edge, 0 otherwise). - \param gaussianFilterSize : The size of the mask of the Gaussian filter to + \param[in] Isrc : Image to apply the Canny edge detector to. + \param[out] Ires : Filtered image (255 means an edge, 0 otherwise). + \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). - \param lowerThreshold : The lower threshold for the Canny operator. Values lower + \param[in] lowerThreshold : The lower threshold for the Canny operator. Values lower than this value are rejected. If negative, it will be set to one third of the thresholdCanny . - \param upperThreshold : The upper threshold for the Canny operator. Only value + \param[in] upperThreshold : The upper threshold for the Canny operator. Only value greater than this value are marked as an edge. If negative, it will be automatically computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third of the thresholdCanny . - \param apertureSobel : Size of the mask for the Sobel operator (odd number). + \param[in] apertureSobel : Size of the mask for the Sobel operator (odd number). 
*/ void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, - unsigned int gaussianFilterSize, float lowerThreshold, float upperThreshold, unsigned int apertureSobel) + const unsigned int &gaussianFilterSize, + const float &lowerThreshold, const float &upperThreshold, + const unsigned int &apertureSobel) { + const float gaussianStdev = 2.f; + const float upperThresholdRatio = 0.8f; + const float lowerThresholdRatio = 0.6f; #if defined(HAVE_OPENCV_IMGPROC) - cv::Mat img_cvmat, cv_I_blur, cv_dx, cv_dy, edges_cvmat; - vpImageConvert::convert(Isrc, img_cvmat); - cv::GaussianBlur(img_cvmat, cv_I_blur, cv::Size((int)gaussianFilterSize, (int)gaussianFilterSize), 0, 0); - cv::Sobel(cv_I_blur, cv_dx, CV_16S, 1, 0, apertureSobel); - cv::Sobel(cv_I_blur, cv_dy, CV_16S, 0, 1, apertureSobel); - float upperCannyThresh = upperThreshold; - float lowerCannyThresh = lowerThreshold; - if (upperCannyThresh < 0) { - upperCannyThresh = computeCannyThreshold(img_cvmat, &cv_I_blur, lowerCannyThresh); - } - else if (lowerCannyThresh < 0) { - lowerCannyThresh = upperCannyThresh / 3.f; - } - cv::Canny(cv_dx, cv_dy, edges_cvmat, lowerCannyThresh, upperCannyThresh, false); - vpImageConvert::convert(edges_cvmat, Ires); + const vpCannyBackendType cannyBackend = CANNY_OPENCV_BACKEND; #else - (void)apertureSobel; - float upperCannyThresh = upperThreshold; - float lowerCannyThresh = lowerThreshold; - if (upperCannyThresh < 0) { - upperCannyThresh = computeCannyThreshold(Isrc, lowerCannyThresh); + const vpCannyBackendType cannyBackend = CANNY_VISP_BACKEND; +#endif + const vpCannyFilteringAndGradientType cannyFilteringSteps = CANNY_GBLUR_SOBEL_FILTERING; + canny(Isrc, Ires, gaussianFilterSize, lowerThreshold, upperThreshold, apertureSobel, + gaussianStdev, lowerThresholdRatio, upperThresholdRatio, cannyBackend, cannyFilteringSteps); +} + +/*! + Apply the Canny edge operator on the image \e Isrc and return the resulting + image \e Ires. + + The following example shows how to use the method: + + \code +#include +#include + +int main() +{ + // Constants for the Canny operator. + const unsigned int gaussianFilterSize = 5; + const float gaussianStdev = 2.0f; + const float upperThresholdCanny = 15.f; + const float lowerThresholdCanny = 5.f; + const float upperThresholdRatio = 0.8f; + const float lowerThresholdRatio = 0.6f; + const unsigned int apertureSobel = 3; + const vpCannyBackendType cannyBackend = CANNY_OPENCV_BACKEND; // or CANNY_VISP_BACKEND; + const vpCannyFilteringAndGradientType cannyFilteringSteps = CANNY_GBLUR_SOBEL_FILTERING; + + // Image for the Canny edge operator + vpImage Isrc; + vpImage Icanny; + + // First grab the source image Isrc. + + // Apply the Canny edge operator and set the Icanny image. + vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, lowerThresholdCanny, upperThresholdCanny, apertureSobel, + gaussianStdev, lowerThresholdRatio, upperThresholdRatio, cannyBackend, cannyFilteringSteps); + return (0); +} + \endcode + + \param[in] Isrc : Image to apply the Canny edge detector to. + \param[out] Ires : Filtered image (255 means an edge, 0 otherwise). + \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to + apply (an odd number). + \param[in] lowerThreshold : The lower threshold for the Canny operator. Values lower + than this value are rejected. If negative, it will be set to one third + of the thresholdCanny . + \param[in] upperThreshold : The upper threshold for the Canny operator. Only value + greater than this value are marked as an edge. 
If negative, it will be automatically + computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third + of the thresholdCanny . + \param[in] apertureSobel : Size of the mask for the Sobel operator (odd number). + \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. + \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. + It is used only if the user asks to compute the Canny thresholds. + \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to define + the upper threshold. It is used only if the user asks to compute the Canny thresholds. + \param[in] cannyBackend : The backend to use to perform the Canny edge filtering. + \param[in] cannyFilteringSteps : The filtering + gradient operators to apply to compute the gradient in the early + stage of the Canny algoritgm. + +*/ +void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, + const unsigned int &gaussianFilterSize, + const float &lowerThreshold, const float &upperThreshold, const unsigned int &apertureSobel, + const float &gaussianStdev, const float &lowerThresholdRatio, const float &upperThresholdRatio, + const vpCannyBackendType &cannyBackend, const vpCannyFilteringAndGradientType &cannyFilteringSteps) +{ + if (cannyBackend == CANNY_OPENCV_BACKEND) { +#if defined(HAVE_OPENCV_IMGPROC) + cv::Mat img_cvmat, cv_dx, cv_dy, edges_cvmat; + vpImageConvert::convert(Isrc, img_cvmat); + if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING) { + cv::Mat cv_I_blur; + cv::GaussianBlur(img_cvmat, cv_I_blur, cv::Size((int)gaussianFilterSize, (int)gaussianFilterSize), 0, 0); + cv::Sobel(cv_I_blur, cv_dx, CV_16S, 1, 0, apertureSobel); + cv::Sobel(cv_I_blur, cv_dy, CV_16S, 0, 1, apertureSobel); + } + else { + std::string errMsg("[vpImageFilter::canny]Other types of Canny filtering steps have not been implemented"); + throw(vpException(vpException::functionNotImplementedError, errMsg)); + } + float upperCannyThresh = upperThreshold; + float lowerCannyThresh = lowerThreshold; + if (upperCannyThresh < 0) { + upperCannyThresh = computeCannyThreshold(img_cvmat, &cv_dx, &cv_dy, lowerCannyThresh, gaussianFilterSize, + gaussianStdev, apertureSobel, lowerThresholdRatio, upperThresholdRatio); + } + else if (lowerCannyThresh < 0) { + lowerCannyThresh = upperCannyThresh / 3.f; + } + cv::Canny(cv_dx, cv_dy, edges_cvmat, lowerCannyThresh, upperCannyThresh, false); + vpImageConvert::convert(edges_cvmat, Ires); +#else + std::string errMsg("[vpImageFilter::canny]You asked for CANNY_OPENCV_BACKEND but ViSP has not been compiled with OpenCV"); + throw(vpException(vpException::badValue, errMsg)); +#endif } - else if (lowerCannyThresh < 0) { - lowerCannyThresh = upperCannyThresh / 3.; + else if (cannyBackend == CANNY_VISP_BACKEND) { + float upperCannyThresh = upperThreshold; + float lowerCannyThresh = lowerThreshold; + + vpImage dIx, dIy; + if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING) { + // Computing the Gaussian blur + gradients of the image + vpImage Iblur; + vpImageFilter::gaussianBlur(Isrc, Iblur, gaussianFilterSize, gaussianStdev); + + // Compute the Sobel filters + vpArray2D sobelX(apertureSobel, apertureSobel); // Sobel kernel along x + vpImageFilter::getSobelKernelX(sobelX.data, (apertureSobel - 1)/2); + vpArray2D sobelY(apertureSobel, apertureSobel); // Sobel kernel along y + vpImageFilter::getSobelKernelY(sobelY.data, (apertureSobel - 1)/2); + + // Apply the Sobel filters to get the 
gradients + vpImageFilter::filter(Iblur, dIx, sobelX); + vpImageFilter::filter(Iblur, dIy, sobelY); + } + else { + std::string errMsg("[vpImageFilter::canny]Other types of Canny filtering steps have not been implemented"); + throw(vpException(vpException::functionNotImplementedError, errMsg)); + } + + if (upperCannyThresh < 0) { + upperCannyThresh = computeCannyThreshold(Isrc, lowerCannyThresh, &dIx, &dIy, gaussianFilterSize, gaussianStdev, + apertureSobel, lowerThresholdRatio, upperThresholdRatio); + } + else if (lowerCannyThresh < 0) { + lowerCannyThresh = upperCannyThresh / 3.; + } + vpCannyEdgeDetection edgeDetector(gaussianFilterSize, gaussianStdev, apertureSobel, lowerCannyThresh, upperCannyThresh, + lowerThresholdRatio, upperThresholdRatio); + edgeDetector.setGradients(dIx, dIy); + Ires = edgeDetector.detect(Isrc); } - vpCannyEdgeDetection edgeDetector(gaussianFilterSize, 0.1, lowerCannyThresh, upperCannyThresh); - Ires = edgeDetector.detect(Isrc); -#endif -} \ No newline at end of file +} diff --git a/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h b/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h index 071fe036b4..fb90e688d8 100644 --- a/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h +++ b/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h @@ -72,8 +72,13 @@ class VISP_EXPORT vpCircleHoughTransform class vpCircleHoughTransformParameters { private: + // // Filtering + gradient operators to use + vpImageFilter::vpCannyFilteringAndGradientType m_filteringAndGradientType; /*!< Permits to choose the filtering + + gradient operators to use.*/ + // // Gaussian smoothing attributes - int m_gaussianKernelSize; /*!< Size of the Gaussian filter kernel used to smooth the input image. Must be an odd number.*/ + int m_gaussianKernelSize; /*!< Size of the Gaussian filter kernel used to smooth the input image. + Must be an odd number.*/ float m_gaussianStdev; /*!< Standard deviation of the Gaussian filter.*/ // // Gradient computation attributes @@ -85,6 +90,11 @@ class VISP_EXPORT vpCircleHoughTransform float m_upperCannyThresh; /*!< The upper threshold for the Canny operator. Only values greater than this value are marked as an edge. A negative value makes the algorithm compute the upper and lower thresholds automatically.*/ int m_edgeMapFilteringNbIter; /*!< Number of iterations of 8-neighbor connectivity filtering to apply to the edge map*/ + vpImageFilter::vpCannyBackendType m_cannyBackendType; /*!< Permits to choose the backend used to compute the edge map.*/ + float m_lowerCannyThreshRatio; /*!< The ratio of the upper threshold the lower threshold must be equal to. + It is used only if the user asks to compute the Canny thresholds.*/ + float m_upperCannyThreshRatio; /*!< The ratio of pixels whose absolute gradient Gabs is lower or equal to define + the upper threshold. It is used only if the user asks to compute the Canny thresholds.*/ // // Center candidates computation attributes std::pair m_centerXlimits; /*!< Minimum and maximum position on the horizontal axis of the center of the circle we want to detect.*/ @@ -108,12 +118,16 @@ class VISP_EXPORT vpCircleHoughTransform * \brief Construct a new vpCircleHoughTransformParameters object with default parameters. 
*/ vpCircleHoughTransformParameters() - : m_gaussianKernelSize(5) + : m_filteringAndGradientType(vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) + , m_gaussianKernelSize(5) , m_gaussianStdev(1.f) , m_sobelKernelSize(3) , m_lowerCannyThresh(-1.f) , m_upperCannyThresh(-1.f) , m_edgeMapFilteringNbIter(1) + , m_cannyBackendType(vpImageFilter::CANNY_OPENCV_BACKEND) + , m_lowerCannyThreshRatio(0.6f) + , m_upperCannyThreshRatio(0.8f) , m_centerXlimits(std::pair(std::numeric_limits::min(), std::numeric_limits::max())) , m_centerYlimits(std::pair(std::numeric_limits::min(), std::numeric_limits::max())) , m_minRadius(0) @@ -149,6 +163,14 @@ class VISP_EXPORT vpCircleHoughTransform * \param[in] circlePerfectness The scalar product radius RC_ij . gradient(Ep_j) >= m_circlePerfectness * || RC_ij || * || gradient(Ep_j) || to add a vote for the radius RC_ij. * \param[in] centerMinDistThresh Two circle candidates whose centers are closer than this threshold are considered for merging. * \param[in] mergingRadiusDiffThresh Maximum radius difference between two circle candidates to consider merging them. + * \param[in] filteringAndGradientMethod The choice of the filter and gradient operator to apply before the edge + * detection step. + * \param[in] backendType Permits to choose the backend used to compute the edge map. + * \param[in] lowerCannyThreshRatio If the thresholds must be computed,the lower threshold will be equal to the upper + * threshold times \b lowerThresholdRatio . + * \param[in] upperCannyThreshRatio If the thresholds must be computed,the upper threshold will be equal to the value + * such as the number of pixels of the image times \b upperThresholdRatio have an absolute gradient lower than the + * upper threshold. */ vpCircleHoughTransformParameters( const int &gaussianKernelSize @@ -167,13 +189,21 @@ class VISP_EXPORT vpCircleHoughTransform , const float &circlePerfectness , const float ¢erMinDistThresh , const float &mergingRadiusDiffThresh + , const vpImageFilter::vpCannyFilteringAndGradientType &filteringAndGradientMethod = vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING + , const vpImageFilter::vpCannyBackendType &backendType = vpImageFilter::CANNY_OPENCV_BACKEND + , const float &lowerCannyThreshRatio = 0.6f + , const float &upperCannyThreshRatio = 0.8f ) - : m_gaussianKernelSize(gaussianKernelSize) + : m_filteringAndGradientType(filteringAndGradientMethod) + , m_gaussianKernelSize(gaussianKernelSize) , m_gaussianStdev(gaussianStdev) , m_sobelKernelSize(sobelKernelSize) , m_lowerCannyThresh(lowerCannyThresh) , m_upperCannyThresh(upperCannyThresh) , m_edgeMapFilteringNbIter(edgeMapFilterNbIter) + , m_cannyBackendType(backendType) + , m_lowerCannyThreshRatio(lowerCannyThreshRatio) + , m_upperCannyThreshRatio(upperCannyThreshRatio) , m_centerXlimits(centerXlimits) , m_centerYlimits(centerYlimits) , m_minRadius(std::min(minRadius, maxRadius)) @@ -192,10 +222,13 @@ class VISP_EXPORT vpCircleHoughTransform std::string toString() const { std::string txt("Hough Circle Transform Configuration:\n"); + txt += "\tFiltering + gradient operators = " + vpImageFilter::vpCannyFilteringAndGradientTypeToString(m_filteringAndGradientType) + "\n"; txt += "\tGaussian filter kernel size = " + std::to_string(m_gaussianKernelSize) + "\n"; txt += "\tGaussian filter standard deviation = " + std::to_string(m_gaussianStdev) + "\n"; txt += "\tSobel filter kernel size = " + std::to_string(m_sobelKernelSize) + "\n"; + txt += "\tCanny backend = " + vpImageFilter::vpCannyBackendTypeToString(m_cannyBackendType) + "\n"; txt += 
"\tCanny edge filter thresholds = [" + std::to_string(m_lowerCannyThresh) + " ; " + std::to_string(m_upperCannyThresh) + "]\n"; + txt += "\tCanny edge filter thresholds ratio (for auto-thresholding) = [" + std::to_string(m_lowerCannyThreshRatio) + " ; " + std::to_string(m_upperCannyThreshRatio) + "]\n"; txt += "\tEdge map 8-neighbor connectivity filtering number of iterations = " + std::to_string(m_edgeMapFilteringNbIter) + "\n"; txt += "\tCenter horizontal position limits: min = " + std::to_string(m_centerXlimits.first) + "\tmax = " + std::to_string(m_centerXlimits.second) +"\n"; txt += "\tCenter vertical position limits: min = " + std::to_string(m_centerYlimits.first) + "\tmax = " + std::to_string(m_centerYlimits.second) +"\n"; @@ -266,6 +299,10 @@ class VISP_EXPORT vpCircleHoughTransform */ inline friend void from_json(const json &j, vpCircleHoughTransformParameters ¶ms) { + std::string filteringAndGradientName = vpImageFilter::vpCannyFilteringAndGradientTypeToString(params.m_filteringAndGradientType); + filteringAndGradientName = j.value("filteringAndGradientType", filteringAndGradientName); + params.m_filteringAndGradientType = vpImageFilter::vpCannyFilteringAndGradientTypeFromString(filteringAndGradientName); + params.m_gaussianKernelSize = j.value("gaussianKernelSize", params.m_gaussianKernelSize); if ((params.m_gaussianKernelSize % 2) != 1) { throw vpException(vpException::badValue, "Gaussian Kernel size should be odd."); @@ -281,8 +318,13 @@ class VISP_EXPORT vpCircleHoughTransform throw vpException(vpException::badValue, "Sobel Kernel size should be odd."); } + std::string cannyBackendName = vpImageFilter::vpCannyBackendTypeToString(params.m_cannyBackendType); + cannyBackendName = j.value("cannyBackendType", cannyBackendName); + params.m_cannyBackendType = vpImageFilter::vpCannyBackendTypeFromString(cannyBackendName); params.m_lowerCannyThresh = j.value("lowerCannyThresh", params.m_lowerCannyThresh); + params.m_lowerCannyThreshRatio = j.value("lowerThresholdRatio", params.m_lowerCannyThreshRatio); params.m_upperCannyThresh = j.value("upperCannyThresh", params.m_upperCannyThresh); + params.m_upperCannyThreshRatio = j.value("upperThresholdRatio", params.m_upperCannyThreshRatio); params.m_edgeMapFilteringNbIter = j.value("edgeMapFilteringNbIter", params.m_edgeMapFilteringNbIter); params.m_centerXlimits = j.value("centerXlimits", params.m_centerXlimits); @@ -328,11 +370,15 @@ class VISP_EXPORT vpCircleHoughTransform std::pair radiusLimits = { params.m_minRadius, params.m_maxRadius }; j = json { + {"filteringAndGradientType", vpImageFilter::vpCannyFilteringAndGradientTypeToString(params.m_filteringAndGradientType)}, {"gaussianKernelSize", params.m_gaussianKernelSize}, {"gaussianStdev", params.m_gaussianStdev}, {"sobelKernelSize", params.m_sobelKernelSize}, + {"cannyBackendType", vpImageFilter::vpCannyBackendTypeToString(params.m_cannyBackendType)}, {"lowerCannyThresh", params.m_lowerCannyThresh}, + {"lowerThresholdRatio", params.m_lowerCannyThreshRatio}, {"upperCannyThresh", params.m_upperCannyThresh}, + {"upperThresholdRatio", params.m_upperCannyThreshRatio}, {"edgeMapFilteringNbIter", params.m_edgeMapFilteringNbIter}, {"centerXlimits", params.m_centerXlimits}, {"centerYlimits", params.m_centerYlimits}, @@ -364,8 +410,8 @@ class VISP_EXPORT vpCircleHoughTransform */ virtual ~vpCircleHoughTransform(); - // // Detection methods - + /** @name Detection methods */ + //@{ #ifdef HAVE_OPENCV_CORE /** * \brief Perform Circle Hough Transform to detect the circles in an OpenCV image. 
@@ -406,8 +452,10 @@ class VISP_EXPORT vpCircleHoughTransform */ std::vector detect(const vpImage &I, const int &nbCircles); #endif + //@} - // // Configuration from files + /** @name Configuration from files */ + //@{ #ifdef VISP_HAVE_NLOHMANN_JSON /** * \brief Construct a new vpCircleHoughTransform object configured according to @@ -458,8 +506,10 @@ class VISP_EXPORT vpCircleHoughTransform j = detector.m_algoParams; } #endif + //@} - // // Setters + /** @name Setters */ + //@{ /** * \brief Initialize all the algorithm parameters. * @@ -467,6 +517,17 @@ class VISP_EXPORT vpCircleHoughTransform */ void init(const vpCircleHoughTransformParameters &algoParams); + /** + * \brief Permits to choose the filtering + gradient operators to use. + * + * \param[in] type The type of filtering + gradient operators to use. + */ + inline void setFilteringAndGradientType(const vpImageFilter::vpCannyFilteringAndGradientType &type) + { + m_algoParams.m_filteringAndGradientType = type; + m_cannyVisp.setFilteringAndGradientType(type); + } + /** * \brief Set the parameters of the Gaussian filter, that permits to blur the * gradients of the image. @@ -507,6 +568,16 @@ class VISP_EXPORT vpCircleHoughTransform initSobelFilters(); } + /** + * \brief Set the backend to use to perform the Canny edge detection. + * + * \param[in] type The backend that must be used. + */ + inline void setCannyBackend(const vpImageFilter::vpCannyBackendType &type) + { + m_algoParams.m_cannyBackendType = type; + } + /*! * Set the threshold for the Canny operator. * Only value greater than this value are marked as an edge. @@ -522,6 +593,22 @@ class VISP_EXPORT vpCircleHoughTransform m_algoParams.m_upperCannyThresh = upperCannyThreshold; } + /** + * \brief Set the Canny thresholds ratio that are used to automatically compute the Canny thresholds + * in case the user asks to. + * + * \sa \ref vpCircleHoughTransform::setCannyThreshold "vpCircleHoughTransform::setCannyThreshold(const float&, const float&)" + * + * \param[in] lowerThreshRatio The ratio of the upper threshold the lower threshold will be equal to. + * \param[in] upperThreshRatio The ratio of pixels that must have a gradient lower than the upper threshold. + */ + inline void setCannyThresholdRatio(const float &lowerThreshRatio, const float &upperThreshRatio) + { + m_algoParams.m_lowerCannyThreshRatio = lowerThreshRatio; + m_algoParams.m_upperCannyThreshRatio = upperThreshRatio; + m_cannyVisp.setCannyThresholdsRatio(lowerThreshRatio, upperThreshRatio); + } + /*! * Set circles center min distance. * Change this value to detect circles with different distances to each other. @@ -632,9 +719,10 @@ class VISP_EXPORT vpCircleHoughTransform throw vpException(vpException::badValue, "Radius difference merging threshold must be positive."); } } + //@} - // // Getters - + /** @name Getters */ + //@{ /** * \brief Get the list of Center Candidates, stored as pair * @@ -754,6 +842,7 @@ class VISP_EXPORT vpCircleHoughTransform { return m_finalCircleVotes; } + //@} /*! * Create a string with all Hough transform parameters. @@ -811,9 +900,9 @@ class VISP_EXPORT vpCircleHoughTransform * The probability is defined as the ratio of \b nbVotes by the theoretical number of * pixel that should be visible in the image. * - * @param circle The circle for which we want to evaluate the probability. - * @param nbVotes The number of visible pixels of the given circle. - * @return float The probability of the circle. + * \param[in] circle The circle for which we want to evaluate the probability. 
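A minimal configuration sketch based on the setters declared above; the return type of detect() and the header paths are assumptions consistent with the rest of the series.

    #include <vector>
    #include <visp3/core/vpImage.h>
    #include <visp3/core/vpImageFilter.h>
    #include <visp3/imgproc/vpCircleHoughTransform.h>

    std::vector<vpImageCircle> detectCirclesSketch(const vpImage<unsigned char> &I)
    {
      vpCircleHoughTransform detector;
      detector.setFilteringAndGradientType(vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING);
      detector.setCannyBackend(vpImageFilter::CANNY_OPENCV_BACKEND);
      detector.setCannyThreshold(-1.f, -1.f);      // negative values: thresholds computed automatically
      detector.setCannyThresholdRatio(0.6f, 0.8f); // ratios used for the automatic computation
      return detector.detect(I);
    }
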
+ * \param[in] nbVotes The number of visible pixels of the given circle. + * \return float The probability of the circle. */ float computeCircleProbability(const vpImageCircle &circle, const unsigned int &nbVotes); diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index 21c9a05fd9..4df497cbbd 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -112,11 +112,11 @@ void vpCircleHoughTransform::initSobelFilters() { if ((m_algoParams.m_sobelKernelSize % 2) != 1) { - throw vpException(vpException::badValue, "Sobel Kernel size should be odd."); + throw vpException(vpException::badValue, "Sobel Kernel size should be odd."); } - m_sobelX.resize(m_algoParams.m_sobelKernelSize , m_algoParams.m_sobelKernelSize ); + m_sobelX.resize(m_algoParams.m_sobelKernelSize, m_algoParams.m_sobelKernelSize); vpImageFilter::getSobelKernelX(m_sobelX.data, (m_algoParams.m_sobelKernelSize - 1)/2); - m_sobelY.resize(m_algoParams.m_sobelKernelSize , m_algoParams.m_sobelKernelSize ); + m_sobelY.resize(m_algoParams.m_sobelKernelSize, m_algoParams.m_sobelKernelSize); vpImageFilter::getSobelKernelY(m_sobelY.data, (m_algoParams.m_sobelKernelSize - 1)/2); m_cannyVisp.setSobelAperture(m_algoParams.m_sobelKernelSize); } @@ -225,36 +225,44 @@ vpCircleHoughTransform::detect(const vpImage &I) void vpCircleHoughTransform::computeGradientsAfterGaussianSmoothing(const vpImage &I) { - // Computing the Gaussian blurr - vpImage Iblur, GIx; - vpImageFilter::filterX(I, GIx, m_fg.data, m_algoParams.m_gaussianKernelSize); - vpImageFilter::filterY(GIx, Iblur, m_fg.data, m_algoParams.m_gaussianKernelSize); - - // Computing the gradients - vpImageFilter::filter(Iblur,m_dIx,m_sobelX); - vpImageFilter::filter(Iblur,m_dIy,m_sobelY); + if (m_algoParams.m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + // Computing the Gaussian blurr + vpImage Iblur, GIx; + vpImageFilter::filterX(I, GIx, m_fg.data, m_algoParams.m_gaussianKernelSize); + vpImageFilter::filterY(GIx, Iblur, m_fg.data, m_algoParams.m_gaussianKernelSize); + + // Computing the gradients + vpImageFilter::filter(Iblur, m_dIx, m_sobelX); + vpImageFilter::filter(Iblur, m_dIy, m_sobelY); + } + else { + std::string errMsg("[computeGradientsAfterGaussianSmoothing] The filtering + gradient operators \""); + errMsg += vpImageFilter::vpCannyFilteringAndGradientTypeToString(m_algoParams.m_filteringAndGradientType); + errMsg += "\" is not implemented (yet)."; + throw(vpException(vpException::notImplementedError, errMsg)); + } } void vpCircleHoughTransform::edgeDetection(const vpImage &I) { -#if defined(HAVE_OPENCV_IMGPROC) - float upperCannyThresh = m_algoParams.m_upperCannyThresh; - float lowerCannyThresh = m_algoParams.m_lowerCannyThresh; - // Apply the Canny edge operator to compute the edge map - // The canny method performs Gaussian blur and gradient computation - if (m_algoParams.m_upperCannyThresh < 0.) 
{ - upperCannyThresh = vpImageFilter::computeCannyThreshold(I, lowerCannyThresh); + if (m_algoParams.m_cannyBackendType == vpImageFilter::CANNY_VISP_BACKEND) { + // This is done to increase the time performances, because it avoids to + // recompute the gradient in the vpImageFilter::canny method + m_cannyVisp.setFilteringAndGradientType(m_algoParams.m_filteringAndGradientType); + m_cannyVisp.setCannyThresholds(m_algoParams.m_lowerCannyThresh, m_algoParams.m_upperCannyThresh); + m_cannyVisp.setCannyThresholdsRatio(m_algoParams.m_lowerCannyThreshRatio, m_algoParams.m_upperCannyThreshRatio); + m_cannyVisp.setGradients(m_dIx, m_dIy); + m_edgeMap = m_cannyVisp.detect(I); } - else if (m_algoParams.m_lowerCannyThresh < 0) { - lowerCannyThresh = upperCannyThresh / 3.f; + else { + // We will have to recompute the gradient in the desired backend format anyway so we let + // the vpImageFilter::canny method take care of it + vpImageFilter::canny(I, m_edgeMap, m_algoParams.m_gaussianKernelSize, m_algoParams.m_lowerCannyThresh, + m_algoParams.m_upperCannyThresh, m_algoParams.m_sobelKernelSize, m_algoParams.m_gaussianStdev, + m_algoParams.m_lowerCannyThreshRatio, m_algoParams.m_upperCannyThreshRatio, + m_algoParams.m_cannyBackendType, m_algoParams.m_filteringAndGradientType); } - vpImageFilter::canny(I, m_edgeMap, m_algoParams.m_gaussianKernelSize, lowerCannyThresh, upperCannyThresh, m_algoParams.m_sobelKernelSize); -#else - m_cannyVisp.setCannyThresholds(m_algoParams.m_lowerCannyThresh, m_algoParams.m_upperCannyThresh); - m_cannyVisp.setGradients(m_dIx, m_dIy); - m_edgeMap = m_cannyVisp.detect(I); -#endif for (int i = 0; i < m_algoParams.m_edgeMapFilteringNbIter; i++) { filterEdgeMap(); diff --git a/tutorial/imgproc/hough-transform/config/detector_img.json b/tutorial/imgproc/hough-transform/config/detector_img.json index febadcdcbe..3b0c490eb7 100644 --- a/tutorial/imgproc/hough-transform/config/detector_img.json +++ b/tutorial/imgproc/hough-transform/config/detector_img.json @@ -1,6 +1,10 @@ { + "cannyBackendType": "opencv-backend", + "filteringAndGradientType": "gaussianblur+sobel-filtering", "lowerCannyThresh": 100.0, + "lowerThresholdRatio": 0.6, "upperCannyThresh": 200.0, + "upperThresholdRatio": 0.8, "centerMinDistance": 5.0, "centerThresh": 100.0, "centerXlimits": [ From d9160532b1ff990f25ec589430e00c3bc2b91c37 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Wed, 11 Oct 2023 10:37:23 +0200 Subject: [PATCH 05/43] Compute the threshold on the gradient image instead of the equalized histogram version of the gradient with OpenCV --- modules/core/src/image/vpImageFilter.cpp | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 98dae6772e..3771003959 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -638,10 +638,6 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p cv::addWeighted(sobelxabs, 1, sobelyabs, 1, 0, sobel); sobel.convertTo(sobel, CV_8U); - // Equalize the histogram - cv::Mat equalized; - cv::equalizeHist(sobel, equalized); - // Compute the upper threshold from the equalized histogram cv::Mat hist; const float range[] = { 0.f, 256.f }; // The upper boundary is exclusive @@ -651,7 +647,7 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p int histSize[] = { bins }; bool uniform = true; bool accumulate = false; // Clear the histogram at the beginning of calcHist if false, 
does not clear it otherwise - cv::calcHist(&equalized, 1, channels, cv::Mat(), hist, dims, histSize, ranges, uniform, accumulate); + cv::calcHist(&sobel, 1, channels, cv::Mat(), hist, dims, histSize, ranges, uniform, accumulate); float accu = 0; float t = (float)(upperThresholdRatio * w * h); float bon = 0; @@ -922,7 +918,7 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage Date: Wed, 18 Oct 2023 09:58:56 +0200 Subject: [PATCH 06/43] [CORPS] Added Scharr 3x3 filter and CANNY_GBLUR_SCHARR_FILTERING option --- .../include/visp3/core/vpCannyEdgeDetection.h | 28 ++--- .../core/include/visp3/core/vpImageFilter.h | 59 ++++++++- .../core/src/image/vpCannyEdgeDetection.cpp | 46 ++++--- modules/core/src/image/vpImageFilter.cpp | 116 ++++++++++++------ .../visp3/imgproc/vpCircleHoughTransform.h | 44 +++---- .../imgproc/src/vpCircleHoughTransform.cpp | 43 ++++--- 6 files changed, 220 insertions(+), 116 deletions(-) diff --git a/modules/core/include/visp3/core/vpCannyEdgeDetection.h b/modules/core/include/visp3/core/vpCannyEdgeDetection.h index cc22dd3e02..3cab0823ed 100644 --- a/modules/core/include/visp3/core/vpCannyEdgeDetection.h +++ b/modules/core/include/visp3/core/vpCannyEdgeDetection.h @@ -67,9 +67,9 @@ class VISP_EXPORT vpCannyEdgeDetection // // Gradient computation attributes bool m_areGradientAvailable; /*!< Set to true if the user provides the gradient images, false otherwise. In the latter case, the class will compute the gradients.*/ - unsigned int m_sobelAperture; /*!< The size of the Sobel kernels used to compute the gradients of the image.*/ - vpArray2D m_sobelX; /*!< Array that contains the Sobel kernel along the X-axis.*/ - vpArray2D m_sobelY; /*!< Array that contains the Sobel kernel along the Y-axis.*/ + unsigned int m_gradientFilterKernelSize; /*!< The size of the Sobel kernels used to compute the gradients of the image.*/ + vpArray2D m_gradientFilterX; /*!< Array that contains the gradient filter kernel (Sobel or Scharr) along the X-axis.*/ + vpArray2D m_gradientFilterY; /*!< Array that contains the gradient filter kernel (Sobel or Scharr) along the Y-axis.*/ vpImage m_dIx; /*!< X-axis gradient.*/ vpImage m_dIy; /*!< Y-axis gradient.*/ @@ -98,9 +98,9 @@ class VISP_EXPORT vpCannyEdgeDetection void initGaussianFilters(); /** - * \brief Initialize the Sobel filters used to compute the input image gradients. + * \brief Initialize the gradient filters (Sobel or Scharr) used to compute the input image gradients. 
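As a usage illustration of the new option, the sketch below switches a vpCannyEdgeDetection object to the Gaussian blur + Scharr pipeline; the default-constructed detector and the returned edge-map type are assumptions based on the surrounding hunks, and 3 is the only Scharr aperture accepted by this series.

    #include <visp3/core/vpCannyEdgeDetection.h>
    #include <visp3/core/vpImage.h>
    #include <visp3/core/vpImageFilter.h>

    vpImage<unsigned char> detectEdgesWithScharr(const vpImage<unsigned char> &I)
    {
      vpCannyEdgeDetection detector;  // default construction: thresholds computed automatically
      detector.setFilteringAndGradientType(vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING);
      detector.setGradientFilterAperture(3); // Scharr kernels are only defined for a 3x3 aperture
      return detector.detect(I);
    }
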
*/ - void initSobelFilters(); + void initGradientFilters(); //@} /** @name Different steps methods */ //@{ @@ -216,7 +216,7 @@ class VISP_EXPORT vpCannyEdgeDetection detector.m_gaussianStdev = j.value("gaussianStdev", detector.m_gaussianStdev); detector.m_lowerThreshold = j.value("lowerThreshold", detector.m_lowerThreshold); detector.m_lowerThresholdRatio = j.value("lowerThresholdRatio", detector.m_lowerThresholdRatio); - detector.m_sobelAperture = j.value("sobelAperture", detector.m_sobelAperture); + detector.m_gradientFilterKernelSize = j.value("gradientFilterKernelSize", detector.m_gradientFilterKernelSize); detector.m_upperThreshold = j.value("upperThreshold", detector.m_upperThreshold); detector.m_upperThresholdRatio = j.value("upperThresholdRatio", detector.m_upperThresholdRatio); } @@ -236,7 +236,7 @@ class VISP_EXPORT vpCannyEdgeDetection {"gaussianStdev", detector.m_gaussianStdev}, {"lowerThreshold", detector.m_lowerThreshold}, {"lowerThresholdRatio", detector.m_lowerThresholdRatio}, - {"sobelAperture", detector.m_sobelAperture}, + {"gradientFilterKernelSize", detector.m_gradientFilterKernelSize}, {"upperThreshold", detector.m_upperThreshold}, {"upperThresholdRatio", detector.m_upperThresholdRatio} }; @@ -285,6 +285,7 @@ class VISP_EXPORT vpCannyEdgeDetection inline void setFilteringAndGradientType(const vpImageFilter::vpCannyFilteringAndGradientType &type) { m_filteringAndGradientType = type; + initGradientFilters(); } /** @@ -351,17 +352,14 @@ class VISP_EXPORT vpCannyEdgeDetection } /** - * \brief Set the Gaussian Filters kernel size and standard deviation - * and initialize the aforementioned filters. + * \brief Set the kernel size of the gradient filters (Sobel or Scharr). * - * \param[in] kernelSize : The size of the Gaussian filters kernel. - * \param[in] stdev : The standard deviation of the Gaussian filters used to blur and - * compute the gradient of the image. + * \param[in] apertureSize The size of the gradient filters kernel. Must be an odd value. */ - inline void setSobelAperture(const unsigned int &sobelAperture) + inline void setGradientFilterAperture(const unsigned int &apertureSize) { - m_sobelAperture = sobelAperture; - initSobelFilters(); + m_gradientFilterKernelSize = apertureSize; + initGradientFilters(); } //@} }; diff --git a/modules/core/include/visp3/core/vpImageFilter.h index 0b14f7b4af..3309f2c30d 100644 --- a/modules/core/include/visp3/core/vpImageFilter.h +++ b/modules/core/include/visp3/core/vpImageFilter.h @@ -83,7 +83,8 @@ class VISP_EXPORT vpImageFilter typedef enum vpCannyFilteringAndGradientType { CANNY_GBLUR_SOBEL_FILTERING = 0, //!< Apply Gaussian blur + Sobel operator on the input image - CANNY_COUNT_FILTERING = 1 //! Number of supported backends + CANNY_GBLUR_SCHARR_FILTERING = 1, //!< Apply Gaussian blur + Scharr operator on the input image + CANNY_COUNT_FILTERING = 2 //! Number of supported filtering + gradient operators } vpCannyFilteringAndGradientType; static std::string vpCannyFilteringAndGradientTypeToString(const vpCannyFilteringAndGradientType &type); @@ -1007,6 +1008,56 @@ class VISP_EXPORT vpImageFilter vpImageFilter::getGradY(GIx, dIy, gaussianDerivativeKernel, size); } + /*! + Get Scharr kernel for X-direction. + \tparam FilterType: Either float, to accelerate the computation time, or double, to have greater precision. + \param filter : Pointer to a double array already allocated. + \param size : Kernel size computed as: kernel_size = size*2 + 1 (only size = 1, i.e. a 3x3 kernel, is supported). + \return Scaling factor.
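For illustration, the returned scaling factor is meant to be multiplied into the raw coefficients to obtain a normalized kernel, which is what the gradient-filter initializers do later in this series; the helper below is only a sketch.

    #include <visp3/core/vpArray2D.h>
    #include <visp3/core/vpImageFilter.h>

    void buildNormalizedScharrKernels(vpArray2D<float> &Kx, vpArray2D<float> &Ky)
    {
      const unsigned int size = 1; // kernel_size = 2 * size + 1 = 3
      Kx.resize(3, 3);
      Ky.resize(3, 3);
      float scaleX = vpImageFilter::getScharrKernelX<float>(Kx.data, size);
      float scaleY = vpImageFilter::getScharrKernelY<float>(Ky.data, size);
      for (unsigned int r = 0; r < 3; r++) {
        for (unsigned int c = 0; c < 3; c++) {
          Kx[r][c] *= scaleX; // scaleX = 1/32 for the 3x3 Scharr kernel
          Ky[r][c] *= scaleY;
        }
      }
    }
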
+ */ + template + inline static FilterType getScharrKernelX(FilterType *filter, unsigned int size) + { + if (size != 1) { + // Size = 1 => kernel_size = 2*1 + 1 = 3 + std::string errMsg = "Cannot get Scharr kernel of size " + std::to_string(size * 2 + 1) + " != 3"; + throw vpException(vpException::dimensionError, errMsg); + } + + vpArray2D ScharrY(size * 2 + 1, size * 2 + 1); + FilterType norm = getScharrKernelY(ScharrY.data, size); + memcpy(filter, ScharrY.t().data, ScharrY.getRows() * ScharrY.getCols() * sizeof(FilterType)); + return norm; + } + + /*! + Get Scharr kernel for Y-direction. + \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + \param filter : Pointer to a double array already allocated. + \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). + \return Scaling factor. + */ + template + inline static FilterType getScharrKernelY(FilterType *filter, unsigned int size) + { + // Scharr kernel pre-computed for the usual size + static const FilterType ScharrY3x3[9] = { -3.0, -10.0, -3.0, 0.0, 0.0, 0.0, 3.0, 10.0, 3.0 }; + + if (size != 1) { + // Size = 1 => kernel_size = 2*1 + 1 = 3 + std::string errMsg = "Cannot get Scharr kernel of size " + std::to_string(size * 2 + 1) + " != 3"; + throw vpException(vpException::dimensionError, errMsg); + } + + const unsigned int kernel_size = size * 2 + 1; + if (kernel_size == 3) { + memcpy(filter, ScharrY3x3, kernel_size * kernel_size * sizeof(FilterType)); + return 1 / 32.0; + } + + return 0.; + } + /*! Get Sobel kernel for X-direction. \tparam FilterType: Either float, to accelerate the computation time, or double, to have greater precision. @@ -1090,13 +1141,15 @@ class VISP_EXPORT vpImageFilter const vpImage *p_dIx = nullptr, const vpImage *p_dIy = nullptr, const unsigned int gaussianKernelSize = 5, const float gaussianStdev = 2.f, const unsigned int apertureSobel = 3, - const float lowerThresholdRatio = 0.6, const float upperThresholdRatio = 0.8); + const float lowerThresholdRatio = 0.6, const float upperThresholdRatio = 0.8, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) static float computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, float &lowerThresh, const unsigned int gaussianKernelSize = 5, const float gaussianStdev = 2.f, const unsigned int apertureSobel = 3, - const float lowerThresholdRatio = 0.6, const float upperThresholdRatio = 0.8); + const float lowerThresholdRatio = 0.6, const float upperThresholdRatio = 0.8, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); static float median(const cv::Mat &cv_I); static float median(const vpImage &Isrc); static std::vector median(const vpImage &Isrc); diff --git a/modules/core/src/image/vpCannyEdgeDetection.cpp b/modules/core/src/image/vpCannyEdgeDetection.cpp index a76bde2a29..5f0779d5ec 100644 --- a/modules/core/src/image/vpCannyEdgeDetection.cpp +++ b/modules/core/src/image/vpCannyEdgeDetection.cpp @@ -41,14 +41,14 @@ vpCannyEdgeDetection::vpCannyEdgeDetection() , m_gaussianKernelSize(3) , m_gaussianStdev(1.f) , m_areGradientAvailable(false) - , m_sobelAperture(3) + , m_gradientFilterKernelSize(3) , m_lowerThreshold(-1.f) , m_lowerThresholdRatio(0.6f) , m_upperThreshold(-1.f) , m_upperThresholdRatio(0.8f) { initGaussianFilters(); - initSobelFilters(); + initGradientFilters(); } 
vpCannyEdgeDetection::vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev @@ -59,14 +59,14 @@ vpCannyEdgeDetection::vpCannyEdgeDetection(const int &gaussianKernelSize, const , m_gaussianKernelSize(gaussianKernelSize) , m_gaussianStdev(gaussianStdev) , m_areGradientAvailable(false) - , m_sobelAperture(sobelAperture) + , m_gradientFilterKernelSize(sobelAperture) , m_lowerThreshold(lowerThreshold) , m_lowerThresholdRatio(lowerThresholdRatio) , m_upperThreshold(upperThreshold) , m_upperThresholdRatio(upperThresholdRatio) { initGaussianFilters(); - initSobelFilters(); + initGradientFilters(); } #ifdef VISP_HAVE_NLOHMANN_JSON @@ -95,10 +95,10 @@ vpCannyEdgeDetection::initFromJSON(const std::string &jsonPath) msg << "Byte position of error: " << e.byte; throw vpException(vpException::ioError, msg.str()); } - *this = j; // Call from_json(const json& j, vpDetectionCircle2D& *this) to read json + from_json(j, *this); file.close(); initGaussianFilters(); - initSobelFilters(); + initGradientFilters(); } #endif @@ -113,16 +113,23 @@ vpCannyEdgeDetection::initGaussianFilters() } void -vpCannyEdgeDetection::initSobelFilters() +vpCannyEdgeDetection::initGradientFilters() { - if ((m_sobelAperture % 2) == 0) { - std::string errMsg("The Sobel kernel (" + std::to_string(m_sobelAperture) + ") should be odd"); - throw(vpException(vpException::badValue, errMsg)); + if ((m_gradientFilterKernelSize % 2) != 1) { + throw vpException(vpException::badValue, "Gradient filters kernel size should be odd."); + } + m_gradientFilterX.resize(m_gradientFilterKernelSize, m_gradientFilterKernelSize); + m_gradientFilterY.resize(m_gradientFilterKernelSize, m_gradientFilterKernelSize); + + if (m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + vpImageFilter::getSobelKernelX(m_gradientFilterX.data, (m_gradientFilterKernelSize - 1)/2); + vpImageFilter::getSobelKernelY(m_gradientFilterY.data, (m_gradientFilterKernelSize - 1)/2); + } + else if (m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { + // Compute the Scharr filters + vpImageFilter::getScharrKernelX(m_gradientFilterX.data, (m_gradientFilterKernelSize - 1)/2); + vpImageFilter::getScharrKernelY(m_gradientFilterY.data, (m_gradientFilterKernelSize - 1)/2); } - m_sobelX.resize(m_sobelAperture, m_sobelAperture); - vpImageFilter::getSobelKernelX(m_sobelX.data, (m_sobelAperture - 1)/2); - m_sobelY.resize(m_sobelAperture, m_sobelAperture); - vpImageFilter::getSobelKernelY(m_sobelY.data, (m_sobelAperture - 1)/2); } // // Detection methods @@ -166,8 +173,8 @@ vpCannyEdgeDetection::detect(const vpImage &I) float lowerThreshold = m_lowerThreshold; if (upperThreshold < 0) { upperThreshold = vpImageFilter::computeCannyThreshold(I, lowerThreshold, &m_dIx, &m_dIy, m_gaussianKernelSize, - m_gaussianStdev, m_sobelAperture, m_lowerThresholdRatio, - m_upperThresholdRatio); + m_gaussianStdev, m_gradientFilterKernelSize, m_lowerThresholdRatio, + m_upperThresholdRatio, m_filteringAndGradientType); } else if (m_lowerThreshold < 0) { // Applying Canny recommendation to have the upper threshold 3 times greater than the lower threshold. 
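When the gradients are already available (as in the circle-Hough pipeline), they can be handed to the detector so that detect() skips the filtering step; a minimal sketch, assuming float gradient images and the setGradients()/detect() interface used elsewhere in this series:

    #include <visp3/core/vpCannyEdgeDetection.h>
    #include <visp3/core/vpImage.h>

    vpImage<unsigned char> detectFromPrecomputedGradients(const vpImage<unsigned char> &I,
                                                          const vpImage<float> &dIx,
                                                          const vpImage<float> &dIy)
    {
      vpCannyEdgeDetection detector;
      detector.setGradients(dIx, dIy); // marks the gradients as available for the next detect() call
      return detector.detect(I);
    }
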
@@ -184,7 +191,8 @@ vpCannyEdgeDetection::detect(const vpImage &I) void vpCannyEdgeDetection::performFilteringAndGradientComputation(const vpImage &I) { - if (m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + if (m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING + || m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { // Computing the Gaussian blur vpImage Iblur; vpImage GIx; @@ -192,8 +200,8 @@ vpCannyEdgeDetection::performFilteringAndGradientComputation(const vpImage(GIx, Iblur, m_fg.data, m_gaussianKernelSize); // Computing the gradients - vpImageFilter::filter(Iblur, m_dIx, m_sobelX); - vpImageFilter::filter(Iblur, m_dIy, m_sobelY); + vpImageFilter::filter(Iblur, m_dIx, m_gradientFilterX); + vpImageFilter::filter(Iblur, m_dIy, m_gradientFilterY); } else { std::string errmsg("Currently, the only filtering and gradient operators are Gaussian blur + Sobel"); diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 3771003959..69aa9642b8 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -99,6 +99,9 @@ std::string vpImageFilter::vpCannyFilteringAndGradientTypeToString(const vpImage case vpCannyFilteringAndGradientType::CANNY_GBLUR_SOBEL_FILTERING: name = "gaussianblur+sobel-filtering"; break; + case vpCannyFilteringAndGradientType::CANNY_GBLUR_SCHARR_FILTERING: + name = "gaussianblur+scharr-filtering"; + break; case vpCannyFilteringAndGradientType::CANNY_COUNT_FILTERING: default: return "unknown-filtering"; @@ -592,7 +595,7 @@ std::vector vpImageFilter::median(const vpImage &Isrc) } /** - * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel operators to compute + * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel or + Scharr operators to compute * the gradient of the image. * * \param[in] cv_I : The image, in cv format. @@ -601,21 +604,24 @@ std::vector vpImageFilter::median(const vpImage &Isrc) * \param[out] lowerThresh : The lower threshold for the Canny edge filter. * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. - * \param[in] apertureSobel : Size of the mask for the Sobel operator (odd number). + * \param[in] apertureGradient : Size of the mask for the Sobel operator (odd number). * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to to define + * \param[in] filteringType : The gradient filter to apply to compute the gradient, if \b p_cv_dIx and \b p_cv_dIy are + * nullptr. * the upper threshold. * \return The upper Canny edge filter threshold. 
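Independently of the OpenCV backend used below, the threshold selection can be sketched in plain C++: the upper threshold is the smallest gradient value reached once upperThresholdRatio of the pixels have been accumulated in the gradient histogram, and the lower threshold is a fixed ratio of it. The function below is only an illustration of that rule, not code from the patch.

    #include <array>

    float upperThresholdFromHistogram(const std::array<unsigned int, 256> &gradientHistogram,
                                      unsigned int nbPixels, float lowerThresholdRatio,
                                      float upperThresholdRatio, float &lowerThresh)
    {
      const float target = upperThresholdRatio * static_cast<float>(nbPixels);
      float accu = 0.f;
      float upperThresh = 255.f;
      for (unsigned int g = 0; g < 256; g++) {
        accu += static_cast<float>(gradientHistogram[g]);
        if (accu > target) {
          upperThresh = static_cast<float>(g); // first bin where the cumulated count exceeds the target
          break;
        }
      }
      lowerThresh = lowerThresholdRatio * upperThresh;
      return upperThresh;
    }
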
*/ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, float &lowerThresh, const unsigned int gaussianKernelSize, - const float gaussianStdev, const unsigned int apertureSobel, - const float lowerThresholdRatio, const float upperThresholdRatio) + const float gaussianStdev, const unsigned int apertureGradient, + const float lowerThresholdRatio, const float upperThresholdRatio, + const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) { double w = cv_I.cols; double h = cv_I.rows; int bins = 256; - cv::Mat sobel, sobelx, sobely, sobelxabs, sobelyabs; + cv::Mat dI, dIx, dIy, dIx_abs, dIy_abs; if (p_cv_dIx == nullptr || p_cv_dIy == nullptr) { cv::Mat img_blur; @@ -624,19 +630,25 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p cv::GaussianBlur(cv_I, img_blur, gsz, gaussianStdev); // Compute the gradient of the blurred image - cv::Sobel(img_blur, sobelx, CV_16S, 1, 0, apertureSobel, 1, 0); - cv::Sobel(img_blur, sobely, CV_16S, 0, 1, apertureSobel, 1, 0); + if (filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + cv::Sobel(img_blur, dIx, CV_16S, 1, 0, apertureGradient, 1, 0); + cv::Sobel(img_blur, dIy, CV_16S, 0, 1, apertureGradient, 1, 0); + } + else if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { + cv::Scharr(img_blur, dIx, CV_16S, 1, 0); + cv::Scharr(img_blur, dIy, CV_16S, 0, 1); + } } else { - sobelx = *p_cv_dIx; - sobely = *p_cv_dIy; + dIx = *p_cv_dIx; + dIy = *p_cv_dIy; } // Compute the absolute gradient of the blurred image G = |dIx| + |dIy| - cv::convertScaleAbs(sobelx, sobelxabs); - cv::convertScaleAbs(sobely, sobelyabs); - cv::addWeighted(sobelxabs, 1, sobelyabs, 1, 0, sobel); - sobel.convertTo(sobel, CV_8U); + cv::convertScaleAbs(dIx, dIx_abs); + cv::convertScaleAbs(dIy, dIy_abs); + cv::addWeighted(dIx_abs, 1, dIy_abs, 1, 0, dI); + dI.convertTo(dI, CV_8U); // Compute the upper threshold from the equalized histogram cv::Mat hist; @@ -647,7 +659,7 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p int histSize[] = { bins }; bool uniform = true; bool accumulate = false; // Clear the histogram at the beginning of calcHist if false, does not clear it otherwise - cv::calcHist(&sobel, 1, channels, cv::Mat(), hist, dims, histSize, ranges, uniform, accumulate); + cv::calcHist(&dI, 1, channels, cv::Mat(), hist, dims, histSize, ranges, uniform, accumulate); float accu = 0; float t = (float)(upperThresholdRatio * w * h); float bon = 0; @@ -666,7 +678,7 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p #endif /** - * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel operators to compute + * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel or + Scharr operators to compute * the gradient of the image. * * \param[in] I : The gray-scale image, in ViSP format. @@ -675,17 +687,20 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p * \param[in] lowerThresh : Canny lower threshold. * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. - * \param[in] apertureSobel : Size of the mask for the Sobel operator (odd number). + * \param[in] apertureGradient : Size of the mask for the Sobel operator (odd number). 
* \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower than or equal to the value used to define * the upper threshold. + * \param[in] filteringType : The gradient filter to apply to compute the gradient, if \b p_dIx and \b p_dIy are + * nullptr. * \return The upper Canny edge filter threshold. */ float vpImageFilter::computeCannyThreshold(const vpImage &I, float &lowerThresh, const vpImage *p_dIx, const vpImage *p_dIy, const unsigned int gaussianKernelSize, - const float gaussianStdev, const unsigned int apertureSobel, - const float lowerThresholdRatio, const float upperThresholdRatio) + const float gaussianStdev, const unsigned int apertureGradient, + const float lowerThresholdRatio, const float upperThresholdRatio, + const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) { double w = I.getWidth(); double h = I.getHeight(); @@ -701,13 +716,20 @@ float vpImageFilter::computeCannyThreshold(const vpImage &I, floa vpImage Iblur; vpImageFilter::gaussianBlur(I, Iblur, gaussianKernelSize, gaussianStdev); - vpArray2D sobelX(apertureSobel, apertureSobel); // Sobel kernel along x - vpImageFilter::getSobelKernelX(sobelX.data, (apertureSobel - 1)/2); - vpArray2D sobelY(apertureSobel, apertureSobel); // Sobel kernel along y - vpImageFilter::getSobelKernelY(sobelY.data, (apertureSobel - 1)/2); + vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis + vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis - vpImageFilter::filter(Iblur, dIx, sobelX); - vpImageFilter::filter(Iblur, dIy, sobelY); + if (filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Sobel kernel along x + vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Sobel kernel along y + } + else if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { + vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Scharr kernel along x + vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Scharr kernel along y + } + + vpImageFilter::filter(Iblur, dIx, gradientFilterX); + vpImageFilter::filter(Iblur, dIy, gradientFilterY); } // Computing the absolute gradient of the image G = |dIx| + |dIy| @@ -895,7 +917,7 @@ int main() greater than this value are marked as an edge. If negative, it will be automatically computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third of the thresholdCanny . - \param[in] apertureSobel : Size of the mask for the Sobel operator (odd number). + \param[in] apertureGradient : Size of the mask for the gradient (Sobel or Scharr) operator (odd number). \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. It is used only if the user asks to compute the Canny thresholds.
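A call sketch for the extended interface, with the argument order taken from the definition in the next hunk; the grey-level image type, the example values and the choice of the ViSP backend with the Scharr pipeline are assumptions made for the sake of the illustration.

    #include <visp3/core/vpImage.h>
    #include <visp3/core/vpImageFilter.h>

    void cannyWithScharrAndVispBackend(const vpImage<unsigned char> &I, vpImage<unsigned char> &edges)
    {
      const unsigned int gaussianKernelSize = 5; // odd size of the Gaussian blur kernel
      const float lowerThresh = -1.f;            // negative: computed automatically
      const float upperThresh = -1.f;            // negative: computed automatically
      const unsigned int apertureGradient = 3;   // 3x3 gradient kernels (required for Scharr)
      const float gaussianStdev = 2.f;
      const float lowerThresholdRatio = 0.6f;
      const float upperThresholdRatio = 0.8f;
      vpImageFilter::canny(I, edges, gaussianKernelSize, lowerThresh, upperThresh, apertureGradient,
                           gaussianStdev, lowerThresholdRatio, upperThresholdRatio,
                           vpImageFilter::CANNY_VISP_BACKEND,
                           vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING);
    }
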
@@ -908,7 +930,7 @@ int main() */ void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, const unsigned int &gaussianFilterSize, - const float &lowerThreshold, const float &upperThreshold, const unsigned int &apertureSobel, + const float &lowerThreshold, const float &upperThreshold, const unsigned int &apertureGradient, const float &gaussianStdev, const float &lowerThresholdRatio, const float &upperThresholdRatio, const vpCannyBackendType &cannyBackend, const vpCannyFilteringAndGradientType &cannyFilteringSteps) { @@ -919,8 +941,14 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage &Isrc, vpImage &Isrc, vpImage dIx, dIy; - if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING) { + if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING + || cannyFilteringSteps == CANNY_GBLUR_SCHARR_FILTERING) { // Computing the Gaussian blur + gradients of the image vpImage Iblur; vpImageFilter::gaussianBlur(Isrc, Iblur, gaussianFilterSize, gaussianStdev); - // Compute the Sobel filters - vpArray2D sobelX(apertureSobel, apertureSobel); // Sobel kernel along x - vpImageFilter::getSobelKernelX(sobelX.data, (apertureSobel - 1)/2); - vpArray2D sobelY(apertureSobel, apertureSobel); // Sobel kernel along y - vpImageFilter::getSobelKernelY(sobelY.data, (apertureSobel - 1)/2); + // Compute the gradient filters + vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis + vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis + + if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING) { + vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Sobel kernel along X + vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Sobel kernel along Y + } + else if (cannyFilteringSteps == CANNY_GBLUR_SCHARR_FILTERING) { + vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); + vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); + } - // Apply the Sobel filters to get the gradients - vpImageFilter::filter(Iblur, dIx, sobelX); - vpImageFilter::filter(Iblur, dIy, sobelY); + // Apply the gradient filters to get the gradients + vpImageFilter::filter(Iblur, dIx, gradientFilterX); + vpImageFilter::filter(Iblur, dIy, gradientFilterY); } else { std::string errMsg("[vpImageFilter::canny]Other types of Canny filtering steps have not been implemented"); @@ -969,12 +1005,12 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage 0"); } - params.m_sobelKernelSize = j.value("sobelKernelSize", params.m_sobelKernelSize); - if ((params.m_sobelKernelSize % 2) != 1) { - throw vpException(vpException::badValue, "Sobel Kernel size should be odd."); + params.m_gradientFilterKernelSize = j.value("gradientFilterKernelSize", params.m_gradientFilterKernelSize); + if ((params.m_gradientFilterKernelSize % 2) != 1) { + throw vpException(vpException::badValue, "Gradient filter kernel (Sobel or Scharr) size should be odd."); } std::string cannyBackendName = vpImageFilter::vpCannyBackendTypeToString(params.m_cannyBackendType); @@ -373,7 +373,7 @@ class VISP_EXPORT vpCircleHoughTransform {"filteringAndGradientType", vpImageFilter::vpCannyFilteringAndGradientTypeToString(params.m_filteringAndGradientType)}, {"gaussianKernelSize", params.m_gaussianKernelSize}, {"gaussianStdev", params.m_gaussianStdev}, - {"sobelKernelSize", params.m_sobelKernelSize}, + {"gradientFilterKernelSize", params.m_gradientFilterKernelSize}, {"cannyBackendType", 
vpImageFilter::vpCannyBackendTypeToString(params.m_cannyBackendType)}, {"lowerCannyThresh", params.m_lowerCannyThresh}, {"lowerThresholdRatio", params.m_lowerCannyThreshRatio}, @@ -526,6 +526,7 @@ class VISP_EXPORT vpCircleHoughTransform { m_algoParams.m_filteringAndGradientType = type; m_cannyVisp.setFilteringAndGradientType(type); + initGradientFilters(); } /** @@ -552,20 +553,19 @@ class VISP_EXPORT vpCircleHoughTransform } /** - * \brief Set the parameters of the Sobel filters, that computes the - * gradients of the image. + * \brief Set the parameters of the gradient filter (Sobel or Scharr) kernel size filters. * - * \param[in] apertureSize The size of the Sobel filters kernel. Must be an odd value. + * \param[in] apertureSize The size of the gradient filters kernel. Must be an odd value. */ - inline void setSobelAperture(const unsigned int &apertureSize) + inline void setGradientFilterAperture(const unsigned int &apertureSize) { - m_algoParams.m_sobelKernelSize = apertureSize; + m_algoParams.m_gradientFilterKernelSize = apertureSize; - if ((m_algoParams.m_sobelKernelSize % 2) != 1) { - throw vpException(vpException::badValue, "Sobel Kernel size should be odd."); + if ((m_algoParams.m_gradientFilterKernelSize % 2) != 1) { + throw vpException(vpException::badValue, "Gradient filter (Sobel or Scharr) Kernel size should be odd."); } - initSobelFilters(); + initGradientFilters(); } /** @@ -861,9 +861,9 @@ class VISP_EXPORT vpCircleHoughTransform void initGaussianFilters(); /** - * \brief Initialize the Gaussian filters used to blur the image compute the gradient images. + * \brief Initialize the gradient filters used to compute the gradient images. */ - void initSobelFilters(); + void initGradientFilters(); /** * \brief Perform Gaussian smoothing on the input image to reduce the noise @@ -931,8 +931,8 @@ class VISP_EXPORT vpCircleHoughTransform vpArray2D m_fg; // // Gradient computation attributes - vpArray2D m_sobelX; - vpArray2D m_sobelY; + vpArray2D m_gradientFilterX; /*!< Contains the coefficients of the gradient kernel along the X-axis*/ + vpArray2D m_gradientFilterY; /*!< Contains the coefficients of the gradient kernel along the Y-axis*/ vpImage m_dIx; /*!< Gradient along the x-axis of the input image.*/ vpImage m_dIy; /*!< Gradient along the y-axis of the input image.*/ diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index 4df497cbbd..e28769f67f 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -39,14 +39,14 @@ vpCircleHoughTransform::vpCircleHoughTransform() : m_algoParams() { initGaussianFilters(); - initSobelFilters(); + initGradientFilters(); } vpCircleHoughTransform::vpCircleHoughTransform(const vpCircleHoughTransformParameters &algoParams) : m_algoParams(algoParams) { initGaussianFilters(); - initSobelFilters(); + initGradientFilters(); } void @@ -54,7 +54,7 @@ vpCircleHoughTransform::init(const vpCircleHoughTransformParameters &algoParams) { m_algoParams = algoParams; initGaussianFilters(); - initSobelFilters(); + initGradientFilters(); } vpCircleHoughTransform::~vpCircleHoughTransform() @@ -87,10 +87,10 @@ vpCircleHoughTransform::initFromJSON(const std::string &jsonPath) msg << "Byte position of error: " << e.byte; throw vpException(vpException::ioError, msg.str()); } - m_algoParams = j; // Call from_json(const json& j, vpDetectorDNN& *this) to read json + m_algoParams = j; // Call from_json(const json& j, vpCircleHoughTransformParameters&) to 
read json file.close(); initGaussianFilters(); - initSobelFilters(); + initGradientFilters(); } void @@ -109,16 +109,24 @@ vpCircleHoughTransform::initGaussianFilters() } void -vpCircleHoughTransform::initSobelFilters() +vpCircleHoughTransform::initGradientFilters() { - if ((m_algoParams.m_sobelKernelSize % 2) != 1) { - throw vpException(vpException::badValue, "Sobel Kernel size should be odd."); + if ((m_algoParams.m_gradientFilterKernelSize % 2) != 1) { + throw vpException(vpException::badValue, "Gradient filters Kernel size should be odd."); + } + m_gradientFilterX.resize(m_algoParams.m_gradientFilterKernelSize, m_algoParams.m_gradientFilterKernelSize); + m_gradientFilterY.resize(m_algoParams.m_gradientFilterKernelSize, m_algoParams.m_gradientFilterKernelSize); + m_cannyVisp.setGradientFilterAperture(m_algoParams.m_gradientFilterKernelSize); + + if (m_algoParams.m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + vpImageFilter::getSobelKernelX(m_gradientFilterX.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); + vpImageFilter::getSobelKernelY(m_gradientFilterY.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); + } + else if (m_algoParams.m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { + // Compute the Scharr filters + vpImageFilter::getScharrKernelX(m_gradientFilterX.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); + vpImageFilter::getScharrKernelY(m_gradientFilterY.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); } - m_sobelX.resize(m_algoParams.m_sobelKernelSize, m_algoParams.m_sobelKernelSize); - vpImageFilter::getSobelKernelX(m_sobelX.data, (m_algoParams.m_sobelKernelSize - 1)/2); - m_sobelY.resize(m_algoParams.m_sobelKernelSize, m_algoParams.m_sobelKernelSize); - vpImageFilter::getSobelKernelY(m_sobelY.data, (m_algoParams.m_sobelKernelSize - 1)/2); - m_cannyVisp.setSobelAperture(m_algoParams.m_sobelKernelSize); } std::vector @@ -225,15 +233,16 @@ vpCircleHoughTransform::detect(const vpImage &I) void vpCircleHoughTransform::computeGradientsAfterGaussianSmoothing(const vpImage &I) { - if (m_algoParams.m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + if (m_algoParams.m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING + || m_algoParams.m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { // Computing the Gaussian blurr vpImage Iblur, GIx; vpImageFilter::filterX(I, GIx, m_fg.data, m_algoParams.m_gaussianKernelSize); vpImageFilter::filterY(GIx, Iblur, m_fg.data, m_algoParams.m_gaussianKernelSize); // Computing the gradients - vpImageFilter::filter(Iblur, m_dIx, m_sobelX); - vpImageFilter::filter(Iblur, m_dIy, m_sobelY); + vpImageFilter::filter(Iblur, m_dIx, m_gradientFilterX); + vpImageFilter::filter(Iblur, m_dIy, m_gradientFilterY); } else { std::string errMsg("[computeGradientsAfterGaussianSmoothing] The filtering + gradient operators \""); @@ -259,7 +268,7 @@ vpCircleHoughTransform::edgeDetection(const vpImage &I) // We will have to recompute the gradient in the desired backend format anyway so we let // the vpImageFilter::canny method take care of it vpImageFilter::canny(I, m_edgeMap, m_algoParams.m_gaussianKernelSize, m_algoParams.m_lowerCannyThresh, - m_algoParams.m_upperCannyThresh, m_algoParams.m_sobelKernelSize, m_algoParams.m_gaussianStdev, + m_algoParams.m_upperCannyThresh, m_algoParams.m_gradientFilterKernelSize, m_algoParams.m_gaussianStdev, m_algoParams.m_lowerCannyThreshRatio, m_algoParams.m_upperCannyThreshRatio, 
m_algoParams.m_cannyBackendType, m_algoParams.m_filteringAndGradientType); } From eeeb9925ea2cf997c84fcd4d27126a3852690c2c Mon Sep 17 00:00:00 2001 From: rlagneau Date: Wed, 18 Oct 2023 14:13:07 +0200 Subject: [PATCH 07/43] [CORPS] When using visp-backend, work with normalized gradient filters, with which the computeCannyThreshold method works better. --- .../core/src/image/vpCannyEdgeDetection.cpp | 27 +++++++-- modules/core/src/image/vpImageFilter.cpp | 55 +++++++++++++++---- .../imgproc/src/vpCircleHoughTransform.cpp | 27 +++++++-- 3 files changed, 91 insertions(+), 18 deletions(-) diff --git a/modules/core/src/image/vpCannyEdgeDetection.cpp b/modules/core/src/image/vpCannyEdgeDetection.cpp index 5f0779d5ec..4329dfcdd0 100644 --- a/modules/core/src/image/vpCannyEdgeDetection.cpp +++ b/modules/core/src/image/vpCannyEdgeDetection.cpp @@ -121,15 +121,34 @@ vpCannyEdgeDetection::initGradientFilters() m_gradientFilterX.resize(m_gradientFilterKernelSize, m_gradientFilterKernelSize); m_gradientFilterY.resize(m_gradientFilterKernelSize, m_gradientFilterKernelSize); + auto scaleFilter = [](vpArray2D &filter, const float &scale) { + for (unsigned int r = 0; r < filter.getRows(); r++) { + for (unsigned int c = 0; c < filter.getCols(); c++) { + filter[r][c] = filter[r][c] * scale; + } + }}; + + float scaleX = 1.f; + float scaleY = 1.f; + if (m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { - vpImageFilter::getSobelKernelX(m_gradientFilterX.data, (m_gradientFilterKernelSize - 1)/2); - vpImageFilter::getSobelKernelY(m_gradientFilterY.data, (m_gradientFilterKernelSize - 1)/2); + scaleX = vpImageFilter::getSobelKernelX(m_gradientFilterX.data, (m_gradientFilterKernelSize - 1)/2); + scaleY = vpImageFilter::getSobelKernelY(m_gradientFilterY.data, (m_gradientFilterKernelSize - 1)/2); } else if (m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { // Compute the Scharr filters - vpImageFilter::getScharrKernelX(m_gradientFilterX.data, (m_gradientFilterKernelSize - 1)/2); - vpImageFilter::getScharrKernelY(m_gradientFilterY.data, (m_gradientFilterKernelSize - 1)/2); + scaleX = vpImageFilter::getScharrKernelX(m_gradientFilterX.data, (m_gradientFilterKernelSize - 1)/2); + scaleY = vpImageFilter::getScharrKernelY(m_gradientFilterY.data, (m_gradientFilterKernelSize - 1)/2); } + else { + std::string errMsg = "[vpCannyEdgeDetection::initGradientFilters] Error: gradient filtering method \""; + errMsg += vpImageFilter::vpCannyFilteringAndGradientTypeToString(m_filteringAndGradientType); + errMsg += "\" has not been implemented yet\n"; + throw vpException(vpException::notImplementedError, errMsg); + } + + scaleFilter(m_gradientFilterX, scaleX); + scaleFilter(m_gradientFilterY, scaleY); } // // Detection methods diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 69aa9642b8..554db5e55a 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -719,15 +719,33 @@ float vpImageFilter::computeCannyThreshold(const vpImage &I, floa vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis - if (filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { - vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Sobel kernel along x - vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // 
Sobel kernel along y + // Helper to apply the scale to the raw values of the filters + auto scaleFilter = [](vpArray2D &filter, const float &scale) { + for (unsigned int r = 0; r < filter.getRows(); r++) { + for (unsigned int c = 0; c < filter.getCols(); c++) { + filter[r][c] = filter[r][c] * scale; + } + }}; + + // Scales to apply to the filters to get a normalized gradient filter that gives a gradient + // between 0 and 255 for an vpImage + float scaleX = 1.f; + float scaleY = 1.f; + + if (filteringType == CANNY_GBLUR_SOBEL_FILTERING) { + scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Sobel kernel along X + scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Sobel kernel along Y } - else if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { - vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Scharr kernel along x - vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Scharr kernel along y + else if (filteringType == CANNY_GBLUR_SCHARR_FILTERING) { + scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Scharr kernel along X + scaleY = vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Scharr kernel along Y } + // Scale the gradient filters to have a normalized gradient filter + scaleFilter(gradientFilterX, scaleX); + scaleFilter(gradientFilterY, scaleY); + + // Apply the gradient filters to get the gradients vpImageFilter::filter(Iblur, dIx, gradientFilterX); vpImageFilter::filter(Iblur, dIy, gradientFilterY); } @@ -985,15 +1003,32 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis + // Helper to apply the scale to the raw values of the filters + auto scaleFilter = [](vpArray2D &filter, const float &scale) { + for (unsigned int r = 0; r < filter.getRows(); r++) { + for (unsigned int c = 0; c < filter.getCols(); c++) { + filter[r][c] = filter[r][c] * scale; + } + }}; + + // Scales to apply to the filters to get a normalized gradient filter that gives a gradient + // between 0 and 255 for an vpImage + float scaleX = 1.f; + float scaleY = 1.f; + if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING) { - vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Sobel kernel along X - vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Sobel kernel along Y + scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Sobel kernel along X + scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Sobel kernel along Y } else if (cannyFilteringSteps == CANNY_GBLUR_SCHARR_FILTERING) { - vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); - vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); + scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); + scaleY = vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); } + // Scale the gradient filters to have a normalized gradient filter + scaleFilter(gradientFilterX, scaleX); + scaleFilter(gradientFilterY, scaleY); + // Apply the gradient filters to get the gradients 
vpImageFilter::filter(Iblur, dIx, gradientFilterX); vpImageFilter::filter(Iblur, dIy, gradientFilterY); diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index 2e0ec58ca6..5d01152493 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -118,15 +118,34 @@ vpCircleHoughTransform::initGradientFilters() m_gradientFilterY.resize(m_algoParams.m_gradientFilterKernelSize, m_algoParams.m_gradientFilterKernelSize); m_cannyVisp.setGradientFilterAperture(m_algoParams.m_gradientFilterKernelSize); + auto scaleFilter = [](vpArray2D &filter, const float &scale) { + for (unsigned int r = 0; r < filter.getRows(); r++) { + for (unsigned int c = 0; c < filter.getCols(); c++) { + filter[r][c] = filter[r][c] * scale; + } + }}; + + float scaleX = 1.f; + float scaleY = 1.f; + if (m_algoParams.m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { - vpImageFilter::getSobelKernelX(m_gradientFilterX.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); - vpImageFilter::getSobelKernelY(m_gradientFilterY.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); + // Compute the Sobel filters + scaleX = vpImageFilter::getSobelKernelX(m_gradientFilterX.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); + scaleY = vpImageFilter::getSobelKernelY(m_gradientFilterY.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); } else if (m_algoParams.m_filteringAndGradientType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { // Compute the Scharr filters - vpImageFilter::getScharrKernelX(m_gradientFilterX.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); - vpImageFilter::getScharrKernelY(m_gradientFilterY.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); + scaleX = vpImageFilter::getScharrKernelX(m_gradientFilterX.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); + scaleY = vpImageFilter::getScharrKernelY(m_gradientFilterY.data, (m_algoParams.m_gradientFilterKernelSize - 1)/2); + } + else { + std::string errMsg = "[vpCircleHoughTransform::initGradientFilters] Error: gradient filtering method \""; + errMsg += vpImageFilter::vpCannyFilteringAndGradientTypeToString(m_algoParams.m_filteringAndGradientType); + errMsg += "\" has not been implemented yet\n"; + throw vpException(vpException::notImplementedError, errMsg); } + scaleFilter(m_gradientFilterX, scaleX); + scaleFilter(m_gradientFilterY, scaleY); } std::vector From 40de54c78efb79b736f40e4808faec4aab4b4094 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Wed, 18 Oct 2023 16:34:51 +0200 Subject: [PATCH 08/43] [CORPS] Working with normalized Gaussian kernel + corrected Sobel scale + normalize OpenCV gradients to make the computeCannyThresh works --- .../include/visp3/core/vpCannyEdgeDetection.h | 5 ++- .../core/include/visp3/core/vpImageFilter.h | 21 ++++++---- .../core/src/image/vpCannyEdgeDetection.cpp | 5 ++- modules/core/src/image/vpImageFilter.cpp | 40 +++++++++++++------ .../imgproc/src/vpCircleHoughTransform.cpp | 2 +- 5 files changed, 48 insertions(+), 25 deletions(-) diff --git a/modules/core/include/visp3/core/vpCannyEdgeDetection.h b/modules/core/include/visp3/core/vpCannyEdgeDetection.h index 3cab0823ed..18f4920c5b 100644 --- a/modules/core/include/visp3/core/vpCannyEdgeDetection.h +++ b/modules/core/include/visp3/core/vpCannyEdgeDetection.h @@ -177,10 +177,13 @@ class VISP_EXPORT vpCannyEdgeDetection * \param[in] upperThresholdRatio : If the thresholds must be computed,the upper threshold will be equal to the 
value * such as the number of pixels of the image times \b upperThresholdRatio have an absolute gradient lower than the * upper threshold. + * \param[in] filteringType : The filtering and gradient operators to apply to the image before the edge detection + * operation. */ vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev, const unsigned int &sobelAperture, const float &lowerThreshold = -1.f, const float &upperThreshold = -1.f, - const float &lowerThresholdRatio = 0.6f, const float &upperThresholdRatio = 0.8f); + const float &lowerThresholdRatio = 0.6f, const float &upperThresholdRatio = 0.8f, + const vpImageFilter::vpCannyFilteringAndGradientType &filteringType = vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING); // // Configuration from files #ifdef VISP_HAVE_NLOHMANN_JSON diff --git a/modules/core/include/visp3/core/vpImageFilter.h b/modules/core/include/visp3/core/vpImageFilter.h index 3309f2c30d..98aab64da1 100644 --- a/modules/core/include/visp3/core/vpImageFilter.h +++ b/modules/core/include/visp3/core/vpImageFilter.h @@ -1013,7 +1013,7 @@ class VISP_EXPORT vpImageFilter \tparam FilterType: Either float, to accelerate the computation time, or double, to have greater precision. \param filter : Pointer to a double array already allocated. \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). - \return Scaling factor. + \return Scaling factor to normalize the Scharr kernel. */ template inline static FilterType getScharrKernelX(FilterType *filter, unsigned int size) @@ -1035,7 +1035,7 @@ class VISP_EXPORT vpImageFilter \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. \param filter : Pointer to a double array already allocated. \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). - \return Scaling factor. + \return Scaling factor to normalize the Scharr kernel. */ template inline static FilterType getScharrKernelY(FilterType *filter, unsigned int size) @@ -1063,7 +1063,7 @@ class VISP_EXPORT vpImageFilter \tparam FilterType: Either float, to accelerate the computation time, or double, to have greater precision. \param filter : Pointer to a double array already allocated. \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). - \return Scaling factor. + \return Scaling factor to normalize the Sobel kernel. */ template inline static FilterType getSobelKernelX(FilterType *filter, unsigned int size) @@ -1084,7 +1084,7 @@ class VISP_EXPORT vpImageFilter \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. \param filter : Pointer to a double array already allocated. \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). - \return Scaling factor. + \return Scaling factor to normalize the Sobel kernel. */ template inline static FilterType getSobelKernelY(FilterType *filter, unsigned int size) @@ -1113,28 +1113,33 @@ class VISP_EXPORT vpImageFilter throw vpException(vpException::dimensionError, "Cannot get Sobel kernel of size > 20!"); const unsigned int kernel_size = size * 2 + 1; + double scale = (1. 
/ 8.); // Scale to normalize Sobel3x3 if (kernel_size == 3) { memcpy(filter, SobelY3x3, kernel_size * kernel_size * sizeof(FilterType)); - return 1 / 8.0; + return scale; } + scale *= 1./ 16.; // Sobel5x5 is the convolution of smoothingKernel, which needs 1/16 scale factor, with Sobel3x3 if (kernel_size == 5) { memcpy(filter, SobelY5x5, kernel_size * kernel_size * sizeof(FilterType)); - return 1 / 16.0; + return scale; } + scale *= 1./ 16.; // Sobel7x7 is the convolution of smoothingKernel, which needs 1/16 scale factor, with Sobel5x5 if (kernel_size == 7) { memcpy(filter, SobelY7x7, kernel_size * kernel_size * sizeof(FilterType)); - return 1 / 16.0; + return scale; } vpArray2D sobelY(7, 7); memcpy(sobelY.data, SobelY7x7, sobelY.getRows() * sobelY.getCols() * sizeof(FilterType)); for (unsigned int i = 4; i <= size; i++) { sobelY = vpArray2D::conv2(sobelY, smoothingKernel, "full"); + // Sobel(N+1)x(N+1) is the convolution of smoothingKernel, which needs 1/16 scale factor, with SobelNxN + scale *= 1./ 16.; } memcpy(filter, sobelY.data, sobelY.getRows() * sobelY.getCols() * sizeof(FilterType)); - return 1 / 16.0; + return scale; } static float computeCannyThreshold(const vpImage &I, float &lowerThresh, diff --git a/modules/core/src/image/vpCannyEdgeDetection.cpp b/modules/core/src/image/vpCannyEdgeDetection.cpp index 4329dfcdd0..2f3c3fec6e 100644 --- a/modules/core/src/image/vpCannyEdgeDetection.cpp +++ b/modules/core/src/image/vpCannyEdgeDetection.cpp @@ -54,8 +54,9 @@ vpCannyEdgeDetection::vpCannyEdgeDetection() vpCannyEdgeDetection::vpCannyEdgeDetection(const int &gaussianKernelSize, const float &gaussianStdev , const unsigned int &sobelAperture, const float &lowerThreshold, const float &upperThreshold , const float &lowerThresholdRatio, const float &upperThresholdRatio + , const vpImageFilter::vpCannyFilteringAndGradientType &filteringType ) - : m_filteringAndGradientType(vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) + : m_filteringAndGradientType(filteringType) , m_gaussianKernelSize(gaussianKernelSize) , m_gaussianStdev(gaussianStdev) , m_areGradientAvailable(false) @@ -109,7 +110,7 @@ vpCannyEdgeDetection::initGaussianFilters() throw(vpException(vpException::badValue, "The Gaussian kernel size should be odd")); } m_fg.resize(1, (m_gaussianKernelSize + 1)/2); - vpImageFilter::getGaussianKernel(m_fg.data, m_gaussianKernelSize, m_gaussianStdev, false); + vpImageFilter::getGaussianKernel(m_fg.data, m_gaussianKernelSize, m_gaussianStdev, true); } void diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 554db5e55a..2fcb5402e9 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -631,12 +631,16 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p // Compute the gradient of the blurred image if (filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { - cv::Sobel(img_blur, dIx, CV_16S, 1, 0, apertureGradient, 1, 0); - cv::Sobel(img_blur, dIy, CV_16S, 0, 1, apertureGradient, 1, 0); + double scale = 1. / 8.; + if (apertureGradient > 3) { + scale *= std::pow(1./16., ((apertureGradient -1.)/2.) 
- 1.); + } + cv::Sobel(img_blur, dIx, CV_16S, 1, 0, apertureGradient, 1, 0, scale); + cv::Sobel(img_blur, dIy, CV_16S, 0, 1, apertureGradient, 1, 0, scale); } else if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { - cv::Scharr(img_blur, dIx, CV_16S, 1, 0); - cv::Scharr(img_blur, dIy, CV_16S, 0, 1); + cv::Scharr(img_blur, dIx, CV_16S, 1, 0, 1.f/32.f); + cv::Scharr(img_blur, dIy, CV_16S, 0, 1, 1.f/32.f); } } else { @@ -959,14 +963,18 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage 3) { + scale *= std::pow(1./16., ((apertureGradient -1.)/2.) - 1.); + } + cv::Sobel(cv_I_blur, cv_dx, CV_16S, 1, 0, apertureGradient, scale); + cv::Sobel(cv_I_blur, cv_dy, CV_16S, 0, 1, apertureGradient, scale); } else if (cannyFilteringSteps == CANNY_GBLUR_SCHARR_FILTERING) { cv::Mat cv_I_blur; cv::GaussianBlur(img_cvmat, cv_I_blur, cv::Size((int)gaussianFilterSize, (int)gaussianFilterSize), gaussianStdev, 0); - cv::Scharr(cv_I_blur, cv_dx, CV_16S, 1, 0); - cv::Scharr(cv_I_blur, cv_dy, CV_16S, 0, 1); + cv::Scharr(cv_I_blur, cv_dx, CV_16S, 1, 0, 1.f/32.f); + cv::Scharr(cv_I_blur, cv_dy, CV_16S, 0, 1, 1.f/32.f); } else { std::string errMsg("[vpImageFilter::canny]Other types of Canny filtering steps have not been implemented"); @@ -976,7 +984,8 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage &Isrc, vpImage dIx, dIy; if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING || cannyFilteringSteps == CANNY_GBLUR_SCHARR_FILTERING) { - // Computing the Gaussian blur + gradients of the image + // Computing the Gaussian blur vpImage Iblur; - vpImageFilter::gaussianBlur(Isrc, Iblur, gaussianFilterSize, gaussianStdev); + vpArray2D fg(1, (gaussianFilterSize + 1)/2); + vpImageFilter::getGaussianKernel(fg.data, gaussianFilterSize, gaussianStdev, true); + vpImage GIx; + vpImageFilter::filterX(Isrc, GIx, fg.data, gaussianFilterSize); + vpImageFilter::filterY(GIx, Iblur, fg.data, gaussianFilterSize); // Compute the gradient filters vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis @@ -1040,13 +1053,14 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage Date: Wed, 18 Oct 2023 17:40:56 +0200 Subject: [PATCH 09/43] [TUTO] Added a tutorial for vpCannyEdgeDetection --- tutorial/image/CMakeLists.txt | 5 + tutorial/image/drawingHelpers.cpp | 90 +++++++++ tutorial/image/drawingHelpers.h | 103 +++++++++++ tutorial/image/tutorial-canny.cpp | 296 ++++++++++++++++++++++++++++++ 4 files changed, 494 insertions(+) create mode 100644 tutorial/image/drawingHelpers.cpp create mode 100644 tutorial/image/drawingHelpers.h create mode 100644 tutorial/image/tutorial-canny.cpp diff --git a/tutorial/image/CMakeLists.txt b/tutorial/image/CMakeLists.txt index 370c300e55..7fd6ad0b04 100644 --- a/tutorial/image/CMakeLists.txt +++ b/tutorial/image/CMakeLists.txt @@ -43,6 +43,11 @@ foreach(cpp ${tutorial_cpp}) endif() endforeach() +visp_add_target(tutorial-canny.cpp drawingHelpers.cpp) +if(COMMAND visp_add_dependency) + visp_add_dependency(tutorial-canny.cpp "tutorials") +endif() + # Copy the data files to the same location than the target foreach(data ${tutorial_data}) visp_copy_data(tutorial-viewer.cpp ${data}) diff --git a/tutorial/image/drawingHelpers.cpp b/tutorial/image/drawingHelpers.cpp new file mode 100644 index 0000000000..9345b1b6aa --- /dev/null +++ b/tutorial/image/drawingHelpers.cpp @@ -0,0 +1,90 @@ +/* + * ViSP, open source Visual Servoing Platform software. + * Copyright (C) 2005 - 2023 by Inria. All rights reserved. 
+ * + * This software is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * See the file LICENSE.txt at the root directory of this source + * distribution for additional information about the GNU GPL. + * + * For using ViSP with software that can not be combined with the GNU + * GPL, please contact Inria about acquiring a ViSP Professional + * Edition License. + * + * See https://visp.inria.fr for more information. + * + * This software was developed at: + * Inria Rennes - Bretagne Atlantique + * Campus Universitaire de Beaulieu + * 35042 Rennes Cedex + * France + * + * If you have questions regarding the use of this file, please contact + * Inria at visp@inria.fr + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +#include "drawingHelpers.h" + +#include + +#if defined(VISP_HAVE_X11) + vpDisplayX drawingHelpers::d; +#elif defined(HAVE_OPENCV_HIGHGUI) + vpDisplayOpenCV drawingHelpers::d; +#elif defined(VISP_HAVE_GTK) + vpDisplayGTK drawingHelpers::d; +#elif defined(VISP_HAVE_GDI) + vpDisplayGDI drawingHelpers::d; +#elif defined(VISP_HAVE_D3D9) + vpDisplayD3D drawingHelpers::d; +#endif + +vpImage drawingHelpers::I_disp; + +bool drawingHelpers::display(vpImage &I, const std::string &title, const bool &blockingMode) +{ + I_disp = I; + d.init(I_disp); + vpDisplay::setTitle(I_disp, title.c_str()); + + vpDisplay::display(I_disp); + vpDisplay::displayText(I_disp, 15, 15, "Left click to continue...", vpColor::red); + vpDisplay::displayText(I_disp, 35, 15, "Right click to stop...", vpColor::red); + vpDisplay::flush(I_disp); + vpMouseButton::vpMouseButtonType button; + vpDisplay::getClick(I_disp, button, blockingMode); + bool hasToContinue = true; + if (button == vpMouseButton::button3) + { + // Right click => stop the program + hasToContinue = false; + } + + return hasToContinue; +} + +bool drawingHelpers::display(vpImage &D, const std::string &title, const bool &blockingMode) +{ + vpImage I; // Image to display + vpImageConvert::convert(D, I); + return display(I, title, blockingMode); +} + +bool drawingHelpers::display(vpImage &D, const std::string &title, const bool &blockingMode) +{ + vpImage I; // Image to display + vpImageConvert::convert(D, I); + return display(I, title, blockingMode); +} + +bool drawingHelpers::display(vpImage &F, const std::string &title, const bool &blockingMode) +{ + vpImage I; // Image to display + vpImageConvert::convert(F, I); + return display(I, title, blockingMode); +} diff --git a/tutorial/image/drawingHelpers.h b/tutorial/image/drawingHelpers.h new file mode 100644 index 0000000000..ad3b4e57aa --- /dev/null +++ b/tutorial/image/drawingHelpers.h @@ -0,0 +1,103 @@ +/* + * ViSP, open source Visual Servoing Platform software. + * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * + * This software is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * See the file LICENSE.txt at the root directory of this source + * distribution for additional information about the GNU GPL. 
+ * + * For using ViSP with software that can not be combined with the GNU + * GPL, please contact Inria about acquiring a ViSP Professional + * Edition License. + * + * See https://visp.inria.fr for more information. + * + * This software was developed at: + * Inria Rennes - Bretagne Atlantique + * Campus Universitaire de Beaulieu + * 35042 Rennes Cedex + * France + * + * If you have questions regarding the use of this file, please contact + * Inria at visp@inria.fr + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ +#ifndef _drawingHelpers_h_ +#define _drawingHelpers_h_ + +#include +#include +#include +#include + +namespace drawingHelpers +{ + #if defined(VISP_HAVE_X11) + extern vpDisplayX d; + #elif defined(HAVE_OPENCV_HIGHGUI) + extern vpDisplayOpenCV d; + #elif defined(VISP_HAVE_GTK) + extern vpDisplayGTK d; + #elif defined(VISP_HAVE_GDI) + extern vpDisplayGDI d; + #elif defined(VISP_HAVE_D3D9) + extern vpDisplayD3D d; + #endif + + extern vpImage I_disp; /*!< Displayed image.*/ + + /** + * \brief Display a RGB image and catch the user clicks to know if + * the user wants to stop the program. + * + * \param[out] I The RGB image to display. + * \param[in] title The title of the window. + * \param[in] blockingMode If true, wait for a click to switch to the next image. + * \return true The user wants to continue the application. + * \return false The user wants to stop the application. + */ + bool display(vpImage &I, const std::string &title, const bool &blockingMode); + + /** + * \brief Display a gray-scale image and catch the user clicks to know if + * the user wants to stop the program. + * + * \param[out] I The gray-scale image to display. + * \param[in] title The title of the window. + * \param[in] blockingMode If true, wait for a click to switch to the next image. + * \return true The user wants to continue the application. + * \return false The user wants to stop the application. + */ + bool display(vpImage &I, const std::string &title, const bool &blockingMode); + + /** + * \brief Display a double precision image and catch the user clicks to know if + * the user wants to stop the program. + * + * \param[out] I The double precision image to display. + * \param[in] title The title of the window. + * \param[in] blockingMode If true, wait for a click to switch to the next image. + * \return true The user wants to continue the application. + * \return false The user wants to stop the application. + */ + bool display(vpImage &D, const std::string &title, const bool &blockingMode); + + /** + * \brief Display a floating-point precision image and catch the user clicks to know if + * the user wants to stop the program. + * + * \param[out] I The floating-point precision image to display. + * \param[in] title The title of the window. + * \param[in] blockingMode If true, wait for a click to switch to the next image. + * \return true The user wants to continue the application. + * \return false The user wants to stop the application. + */ + bool display(vpImage &F, const std::string &title, const bool &blockingMode); +} + +#endif diff --git a/tutorial/image/tutorial-canny.cpp b/tutorial/image/tutorial-canny.cpp new file mode 100644 index 0000000000..10aabe4f85 --- /dev/null +++ b/tutorial/image/tutorial-canny.cpp @@ -0,0 +1,296 @@ +/**************************************************************************** + * + * ViSP, open source Visual Servoing Platform software. 
+ * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * + * This software is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * See the file LICENSE.txt at the root directory of this source + * distribution for additional information about the GNU GPL. + * + * For using ViSP with software that can not be combined with the GNU + * GPL, please contact Inria about acquiring a ViSP Professional + * Edition License. + * + * See https://visp.inria.fr for more information. + * + * This software was developed at: + * Inria Rennes - Bretagne Atlantique + * Campus Universitaire de Beaulieu + * 35042 Rennes Cedex + * France + * + * If you have questions regarding the use of this file, please contact + * Inria at visp@inria.fr + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * +*****************************************************************************/ +#include + +#include +#include +#include + +#ifdef HAVE_OPENCV_IMGPROC +#include +#endif + +#include "drawingHelpers.h" + +template +void computeMeanMaxStdev(const vpImage &I, float &mean, float &max, float &stdev) +{ + max = std::numeric_limits::epsilon(); + mean = 0.; + stdev = 0.; + unsigned int nbRows = I.getRows(); + unsigned int nbCols = I.getCols(); + float scale = 1. / ((float)nbRows * (float)nbCols); + for (unsigned int r = 0; r < nbRows; r++) { + for (unsigned int c = 0; c < nbCols; c++) { + mean += I[r][c]; + max = std::max(max, static_cast(I[r][c])); + } + } + mean *= scale; + for (unsigned int r = 0; r < nbRows; r++) { + for (unsigned int c = 0; c < nbCols; c++) { + stdev += (I[r][c] - mean) * (I[r][c] - mean); + } + } + stdev *= scale; + stdev = std::sqrt(stdev); +} + +void setGradientOutsideClass(const vpImage &I, const int &gaussianKernelSize, const float &gaussianStdev, vpCannyEdgeDetection &cannyDetector, + const unsigned int apertureSize, const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) +{ + // Get the Gaussian blur kernel + if ((gaussianKernelSize % 2) == 0) { + throw(vpException(vpException::badValue, "The Gaussian kernel size should be odd")); + } + vpArray2D fg(1, (gaussianKernelSize + 1)/2); + vpImageFilter::getGaussianKernel(fg.data, gaussianKernelSize, gaussianStdev, true); + + // Get the gradient filters kernel + if ((apertureSize % 2) != 1) { + throw vpException(vpException::badValue, "Gradient filters kernel size should be odd."); + } + vpArray2D gradientFilterX(apertureSize, apertureSize); + vpArray2D gradientFilterY(apertureSize, apertureSize); + + auto scaleFilter = [](vpArray2D &filter, const float &scale) { + for (unsigned int r = 0; r < filter.getRows(); r++) { + for (unsigned int c = 0; c < filter.getCols(); c++) { + filter[r][c] = filter[r][c] * scale; + } + }}; + + float scaleX = 1.f; + float scaleY = 1.f; + + if (filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureSize - 1)/2); + scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureSize - 1)/2); + } + else if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { + // Compute the Scharr filters + scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureSize - 1)/2); + scaleY = 
vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureSize - 1)/2); + } + else { + std::string errMsg = "Error: gradient filtering method \""; + errMsg += vpImageFilter::vpCannyFilteringAndGradientTypeToString(filteringType); + errMsg += "\" has not been implemented yet\n"; + throw vpException(vpException::notImplementedError, errMsg); + } + + scaleFilter(gradientFilterX, scaleX); + scaleFilter(gradientFilterY, scaleY); + + // Perform Gaussian blur + vpImage Iblur; + vpImage GIx; + vpImageFilter::filterX(I, GIx, fg.data, gaussianKernelSize); + vpImageFilter::filterY(GIx, Iblur, fg.data, gaussianKernelSize); + + // Computing the gradients + vpImage dIx, dIy; + vpImageFilter::filter(Iblur, dIx, gradientFilterX); + vpImageFilter::filter(Iblur, dIy, gradientFilterY); + + // Set the gradients of the vpCannyEdgeDetection + cannyDetector.setGradients(dIx, dIy); + + // Display the gradients + float mean, max, stdev; + computeMeanMaxStdev(dIx, mean, max, stdev); + std::string title = "Gradient along the horizontal axis. Mean = " + std::to_string(mean) + + "+/-" + std::to_string(stdev) + " Max = " + std::to_string(max); + drawingHelpers::display(dIx, title, true); + computeMeanMaxStdev(dIy, mean, max, stdev); + title = "Gradient along the vertical axis. Mean = " + std::to_string(mean) + + "+/-" + std::to_string(stdev) + " Max = " + std::to_string(max); + drawingHelpers::display(dIy, title, true); +} + +void usage(const std::string &softName) +{ + std::cout << "NAME" << std::endl; + std::cout << softName << ": software to test the vpCannyEdgeDetection class and vpImageFilter::canny method" << std::endl; + std::cout << "SYNOPSIS" << std::endl; + std::cout << "\t" << softName + << " [-i, --image ]" + << " [-g, --gradient ]" + << " [-t, --thresh ]" + << " [-a, --aperture ]" + << " [-f, --filter ]" + << " [-r, --ratio ]" + << " [-b, --backend ]" + << " [-h, --help]" + << std::endl; + std::cout << "DESCRIPTION" << std::endl; + std::cout << "\t-i, --image" << std::endl + << "\t\tPermits to load an image on which will be tested the vpCanny class." + << std::endl; + std::cout << "\t-g, --gradient" << std::endl + << "\t\tPermits to compute the gradients of the image outside the vpCanny class." + << "\t\tFirst parameter is the size of the Gaussian kernel used to compute the gradients." + << "\t\tSecond parameter is the standard deviation of the Gaussian kernel used to compute the gradients." + << std::endl; + std::cout << "\t-t, --thresh" << std::endl + << "\t\tPermits to set the lower and upper thresholds of the vpCanny class." + << "\t\tFirst parameter is the lower threshold." + << "\t\tSecond parameter is the upper threshold." + << std::endl; + std::cout << "\t-a, --aperture" << std::endl + << "\t\tPermits to set the size of the gradient filter kernel." + << "\t\tParameter must be odd and positive." + << std::endl; + std::cout << "\t-f, --filter" << std::endl + << "\t\tPermits to choose the type of filter to apply to compute the gradient." + << std::endl; + std::cout << "\t-r, --ratio" << std::endl + << "\t\tPermits to set the lower and upper thresholds ratio of the vpCanny class." + << "\t\tFirst parameter is the lower threshold ratio." + << "\t\tSecond parameter is the upper threshold ratio." + << std::endl; + std::cout << "\t-b, --backend" << std::endl + << "\t\tPermits to use the vpImageFilter::canny method for comparison." + << std::endl; + std::cout << "\t-h, --help" << std::endl + << "\t\tPermits to display the different arguments this software handles."
+ << std::endl; +} + +int main(int argc, const char *argv[]) +{ + std::string opt_img; + bool opt_gradientOutsideClass = false; + bool opt_useVpImageFilterCanny = false; + int opt_gaussianKernelSize = 3; + int opt_apertureSize = 3; + vpImageFilter::vpCannyFilteringAndGradientType opt_filteringType = vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING; + float opt_gaussianStdev = 1.; + float opt_lowerThresh = -1.; + float opt_upperThresh = -1.; + float opt_lowerThreshRatio = 0.6f; + float opt_upperThreshRatio = 0.8f; + vpImageFilter::vpCannyBackendType opt_backend = vpImageFilter::CANNY_VISP_BACKEND; + for (int i = 1; i < argc; i++) { + std::string argv_str = std::string(argv[i]); + if ((argv_str == "-i" || argv_str == "--image") && i + 1 < argc) { + opt_img = std::string(argv[i + 1]); + i++; + } + else if ((argv_str == "-g" || argv_str == "--gradient") && i + 2 < argc) { + opt_gradientOutsideClass = true; + opt_gaussianKernelSize = atoi(argv[i + 1]); + opt_gaussianStdev = atof(argv[i + 2]); + i += 2; + } + else if ((argv_str == "-t" || argv_str == "--thresh") && i + 2 < argc) { + opt_lowerThresh = atof(argv[i + 1]); + opt_upperThresh = atof(argv[i + 2]); + i += 2; + } + else if ((argv_str == "-a" || argv_str == "--aperture") && i + 1 < argc) { + opt_apertureSize = std::atoi(argv[i + 1]); + i++; + } + else if ((argv_str == "-f" || argv_str == "--filter") && i + 1 < argc) { + opt_filteringType = vpImageFilter::vpCannyFilteringAndGradientTypeFromString(std::string(argv[i + 1])); + i++; + } + else if ((argv_str == "-r" || argv_str == "--ratio") && i + 2 < argc) { + opt_lowerThreshRatio = std::atof(argv[i + 1]); + opt_upperThreshRatio = std::atof(argv[i + 2]); + i += 2; + } + else if ((argv_str == "-b" || argv_str == "--backend") && i + 1 < argc) { + opt_useVpImageFilterCanny = true; + opt_backend = vpImageFilter::vpCannyBackendTypeFromString(std::string(argv[i+1])); + i++; + } + else if (argv_str == "-h" || argv_str == "--help") { + usage(std::string(argv[0])); + return EXIT_SUCCESS; + } + else { + std::cerr << "Argument \"" << argv_str << "\" is unknown."
<< std::endl; + return EXIT_FAILURE; + } + } + + std::string configAsTxt("Canny Configuration:\n"); + configAsTxt += "\tFiltering + gradient operators = " + vpImageFilter::vpCannyFilteringAndGradientTypeToString(opt_filteringType) + "\n"; + configAsTxt += "\tGaussian filter kernel size = " + std::to_string(opt_gaussianKernelSize) + "\n"; + configAsTxt += "\tGaussian filter standard deviation = " + std::to_string(opt_gaussianStdev) + "\n"; + configAsTxt += "\tGradient filter kernel size = " + std::to_string(opt_apertureSize) + "\n"; + configAsTxt += "\tCanny edge filter thresholds = [" + std::to_string(opt_lowerThresh) + " ; " + std::to_string(opt_upperThresh) + "]\n"; + configAsTxt += "\tCanny edge filter thresholds ratio (for auto-thresholding) = [" + std::to_string(opt_lowerThreshRatio) + " ; " + std::to_string(opt_upperThreshRatio) + "]\n"; + std::cout << configAsTxt << std::endl; + + vpCannyEdgeDetection cannyDetector(opt_gaussianKernelSize, opt_gaussianStdev, opt_apertureSize, + opt_lowerThresh, opt_upperThresh, opt_lowerThreshRatio, opt_upperThreshRatio, + opt_filteringType); + vpImage I_canny_input; + if (!opt_img.empty()) { + // Detection on the user image + vpImageIo::read(I_canny_input, opt_img); + } + else { + // Detection on a fake image of a square + I_canny_input.resize(500, 500, 0); + for (unsigned int r = 150; r < 350; r++) { + for (unsigned int c = 150; c < 350; c++) { + I_canny_input[r][c] = 125; + } + } + } + + if (opt_gradientOutsideClass) { + setGradientOutsideClass(I_canny_input, opt_gaussianKernelSize, opt_gaussianStdev, cannyDetector, opt_apertureSize, opt_filteringType); + } + vpImage I_canny = cannyDetector.detect(I_canny_input); + float mean, max, stdev; + computeMeanMaxStdev(I_canny_input, mean, max, stdev); + std::string title("Input of the Canny edge detector. 
Mean = " + std::to_string(mean) + "+/-" + std::to_string(stdev) + " Max = " + std::to_string(max)); + drawingHelpers::display(I_canny_input, title, true); + drawingHelpers::display(I_canny, "Canny results on image " + opt_img, true); + + if (opt_useVpImageFilterCanny) { + float cannyThresh = opt_upperThresh; + float lowerThresh(opt_lowerThresh); + vpImageFilter::canny(I_canny_input, I_canny, opt_gaussianKernelSize, lowerThresh, cannyThresh, + opt_apertureSize, opt_gaussianStdev, opt_lowerThreshRatio, opt_upperThreshRatio, + opt_backend, opt_filteringType); + drawingHelpers::display(I_canny, "Canny results with \"" + vpImageFilter::vpCannyBackendTypeToString(opt_backend) + "\" backend", true); + } + + return EXIT_SUCCESS; +} From fc14b3269c9c51920118851df295320581248cc1 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Thu, 19 Oct 2023 10:57:45 +0200 Subject: [PATCH 10/43] [CORPS] Added a method to compute the partial gradients from an image depending on the vpCannyFilteringAndGradientType and vpCannyBackendType --- .../core/include/visp3/core/vpImageConvert.h | 2 + .../core/include/visp3/core/vpImageFilter.h | 241 +++++++++- modules/core/src/image/vpImageConvert.cpp | 34 ++ modules/core/src/image/vpImageFilter.cpp | 434 ++++++++---------- .../imgproc/src/vpCircleHoughTransform.cpp | 2 +- tutorial/image/tutorial-canny.cpp | 55 +-- 6 files changed, 463 insertions(+), 305 deletions(-) diff --git a/modules/core/include/visp3/core/vpImageConvert.h b/modules/core/include/visp3/core/vpImageConvert.h index 2c7c9c57fb..cb045254b3 100644 --- a/modules/core/include/visp3/core/vpImageConvert.h +++ b/modules/core/include/visp3/core/vpImageConvert.h @@ -111,11 +111,13 @@ class VISP_EXPORT vpImageConvert static void convert(const cv::Mat &src, vpImage &dest, bool flip = false); static void convert(const cv::Mat &src, vpImage &dest, bool flip = false, unsigned int nThreads = 0); static void convert(const cv::Mat &src, vpImage &dest, bool flip = false); + static void convert(const cv::Mat &src, vpImage &dest, bool flip = false); static void convert(const cv::Mat &src, vpImage &dest, bool flip = false); static void convert(const cv::Mat &src, vpImage &dest, bool flip = false); static void convert(const vpImage &src, cv::Mat &dest); static void convert(const vpImage &src, cv::Mat &dest, bool copyData = true); static void convert(const vpImage &src, cv::Mat &dest, bool copyData = true); + static void convert(const vpImage &src, cv::Mat &dest, bool copyData = true); static void convert(const vpImage &src, cv::Mat &dest); #endif diff --git a/modules/core/include/visp3/core/vpImageFilter.h b/modules/core/include/visp3/core/vpImageFilter.h index 98aab64da1..2af1b1513c 100644 --- a/modules/core/include/visp3/core/vpImageFilter.h +++ b/modules/core/include/visp3/core/vpImageFilter.h @@ -45,7 +45,10 @@ #include #include +#include +#include #include +#include #include #include #include @@ -101,8 +104,8 @@ class VISP_EXPORT vpImageFilter static void canny(const vpImage &I, vpImage &Ic, const unsigned int &gaussianFilterSize, const float &lowerThresholdCanny, const float &higherThresholdCanny, const unsigned int &apertureSobel, const float &gaussianStdev, const float &lowerThresholdRatio, - const float &upperThresholdRatio, const vpCannyBackendType &cannyBackend, - const vpCannyFilteringAndGradientType &cannyFilteringSteps); + const float &upperThresholdRatio, const bool &normalizeGradients, + const vpCannyBackendType &cannyBackend, const vpCannyFilteringAndGradientType &cannyFilteringSteps); /*! 
Apply a 1x3 derivative filter to an image pixel. @@ -1142,19 +1145,233 @@ class VISP_EXPORT vpImageFilter return scale; } - static float computeCannyThreshold(const vpImage &I, float &lowerThresh, - const vpImage *p_dIx = nullptr, const vpImage *p_dIy = nullptr, - const unsigned int gaussianKernelSize = 5, - const float gaussianStdev = 2.f, const unsigned int apertureSobel = 3, - const float lowerThresholdRatio = 0.6, const float upperThresholdRatio = 0.8, - const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); - #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) static float computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, - float &lowerThresh, const unsigned int gaussianKernelSize = 5, - const float gaussianStdev = 2.f, const unsigned int apertureSobel = 3, - const float lowerThresholdRatio = 0.6, const float upperThresholdRatio = 0.8, + float &lowerThresh, const unsigned int &gaussianKernelSize = 5, + const float &gaussianStdev = 2.f, const unsigned int &apertureGradient = 3, + const float &lowerThresholdRatio = 0.6, const float &upperThresholdRatio = 0.8, const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); + + static void computePartialDerivatives(const cv::Mat &cv_I, + cv::Mat &cv_dIx, cv::Mat &cv_dIy, + const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, + const unsigned int &gaussianKernelSize = 5, const float &gaussianStdev = 2.f, + const unsigned int &apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); +#endif + + /** + * \brief Compute the partial derivatives (i.e. horizontal and vertical gradients) of the input image. + * + * \tparam ImageType Either unsigned char, float or double + * \tparam FilterType Either float or double. + * \param[in] I The input image we want the partial derivatives. + * \param[out] dIx The horizontal partial derivative, i.e. horizontal gradient. + * \param[out] dIy The vertical partial derivative, i.e. vertical gradient. + * \param[in] computeDx Indicate if we must compute the horizontal gradient. + * \param[in] computeDy Indicate if we must compute the vertical gradient. + * \param[in] normalize Indicate if we must normalize the gradient filters. + * \param[in] gaussianKernelSize The size of the kernel of the Gaussian filter used to blur the image. + * \param[in] gaussianStdev The standard deviation of the Gaussian filter used to blur the image. + * \param[in] apertureGradient The size of the kernel of the gradient filter. + * \param[in] filteringType The type of filters to apply to compute the gradients. + * \param[in] backend The type of backend to use to compute the gradients.
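+ *
+ * A possible usage, given as a minimal illustrative sketch (the image \b I below is assumed to be
+ * already initialized elsewhere; all optional arguments keep the default values declared above):
+ * \code
+ * vpImage<unsigned char> I; // input gray-scale image
+ * vpImage<float> dIx, dIy; // will receive the horizontal and vertical gradients
+ * vpImageFilter::computePartialDerivatives(I, dIx, dIy);
+ * \endcode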
+ */ + template + inline static void computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, + const unsigned int &gaussianKernelSize = 5, const FilterType &gaussianStdev = 2.f, + const unsigned int &apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, + const vpCannyBackendType &backend = CANNY_VISP_BACKEND) + { + if (backend == CANNY_OPENCV_BACKEND) { +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) + cv::Mat cv_I, cv_dIx, cv_dIy; + vpImageConvert::convert(I, cv_I); + computePartialDerivatives(cv_I, cv_dIx, cv_dIy, computeDx, computeDy, normalize, gaussianKernelSize, + gaussianStdev, apertureGradient, filteringType); + if (computeDx) { + vpImageConvert::convert(cv_dIx, dIx); + } + if (computeDy) { + vpImageConvert::convert(cv_dIy, dIy); + } +#else + throw(vpException(vpException::badValue, "You need to compile ViSP with OpenCV to use CANNY_OPENCV_BACKEND")); +#endif + } + else { + if (filteringType == CANNY_GBLUR_SCHARR_FILTERING || filteringType == CANNY_GBLUR_SOBEL_FILTERING) { + dIx.resize(I.getHeight(), I.getWidth()); + dIy.resize(I.getHeight(), I.getWidth()); + + // Computing the Gaussian blur + gradients of the image + vpImage Iblur; + vpImageFilter::gaussianBlur(I, Iblur, gaussianKernelSize, gaussianStdev); + + vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis + vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis + + // Helper to apply the scale to the raw values of the filters + auto scaleFilter = [](vpArray2D &filter, const float &scale) { + for (unsigned int r = 0; r < filter.getRows(); r++) { + for (unsigned int c = 0; c < filter.getCols(); c++) { + filter[r][c] = filter[r][c] * scale; + } + }}; + + // Scales to apply to the filters to get a normalized gradient filter that gives a gradient + // between 0 and 255 for an vpImage + float scaleX = 1.f; + float scaleY = 1.f; + + if (filteringType == CANNY_GBLUR_SOBEL_FILTERING) { + if (computeDx) { + scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); + } + if (computeDy) { + scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); + } + } + else if (filteringType == CANNY_GBLUR_SCHARR_FILTERING) { + if (computeDx) { + scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); + } + if (computeDy) { + scaleY = vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); + } + } + + // Scale the gradient filters to have a normalized gradient filter + if (normalize) { + if (computeDx) { + scaleFilter(gradientFilterX, scaleX); + } + if (computeDy) { + scaleFilter(gradientFilterY, scaleY); + } + } + + // Apply the gradient filters to get the gradients + if (computeDx) { + vpImageFilter::filter(Iblur, dIx, gradientFilterX); + } + + if (computeDy) { + vpImageFilter::filter(Iblur, dIy, gradientFilterY); + } + } + else { + std::string errMsg = "[vpImageFilter::computePartialDerivatives] Filtering + gradient method \""; + errMsg += vpCannyFilteringAndGradientTypeToString(filteringType); + errMsg += "\" is not implemented yet\n"; + throw(vpException(vpException::notImplementedError, errMsg)); + } + } + } + + template + inline static void computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx = true, const bool 
&computeDy = true, const bool &normalize = true, + const unsigned int &gaussianKernelSize = 5, const FilterType &gaussianStdev = 2.f, + const unsigned int &apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, + const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; + + template + inline static void computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, + const unsigned int &gaussianKernelSize = 5, const unsigned char &gaussianStdev = 2.f, + const unsigned int &apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, + const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; + + template + inline static void computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, + const unsigned int gaussianKernelSize = 5, const vpRGBa gaussianStdev = vpRGBa(), + const unsigned int apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, + const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; + + /** + * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel or + Scharr operators to compute + * the gradient of the image. + * + * \tparam OutType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param[in] I : The gray-scale image, in ViSP format. + * \param[in] p_dIx : If different from nullptr, must contain the gradient of the image with regard to the horizontal axis. + * \param[in] p_dIy : If different from nullptr, must contain the gradient of the image with regard to the vertical axis. + * \param[in] lowerThresh : Canny lower threshold. + * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). + * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. + * \param[in] apertureGradient : Size of the mask for the Sobel operator (odd number). + * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. + * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to define + * the upper threshold. + * \param[in] filteringType : The gradient filter to apply to compute the gradient, if \b p_dIx and \b p_dIy are + * nullptr. + * \return The upper Canny edge filter threshold. 
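+ *
+ * A possible usage, given as a minimal illustrative sketch (the image \b I is assumed to be already
+ * initialized and the default filtering parameters are kept; the template argument selects the
+ * precision used for the gradients):
+ * \code
+ * vpImage<unsigned char> I; // input gray-scale image
+ * float lowerThresh = -1.f; // will receive the computed lower threshold
+ * float upperThresh = vpImageFilter::computeCannyThreshold<float>(I, lowerThresh);
+ * \endcode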
+ */ + template + inline static float computeCannyThreshold(const vpImage &I, float &lowerThresh, + const vpImage *p_dIx = nullptr, const vpImage *p_dIy = nullptr, + const unsigned int &gaussianKernelSize = 5, + const OutType &gaussianStdev = 2.f, const unsigned int &apertureGradient = 3, + const float &lowerThresholdRatio = 0.6, const float &upperThresholdRatio = 0.8, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING) + { + double w = I.getWidth(); + double h = I.getHeight(); + + vpImage dI(h, w); + vpImage dIx(h, w), dIy(h, w); + if (p_dIx != nullptr && p_dIy != nullptr) { + dIx = *p_dIx; + dIy = *p_dIy; + } + else { + computePartialDerivatives(I, dIx, dIy, true, true, true, gaussianKernelSize, gaussianStdev, + apertureGradient, filteringType); + } + + // Computing the absolute gradient of the image G = |dIx| + |dIy| + for (unsigned int r = 0; r < h; r++) { + for (unsigned int c = 0; c < w; c++) { + float dx = (float)dIx[r][c]; + float dy = (float)dIy[r][c]; + float gradient = std::abs(dx) + std::abs(dy); + float gradientClamped = std::min(gradient, (float)std::numeric_limits::max()); + dI[r][c] = gradientClamped; + } + } + + // Compute the histogram + vpHistogram hist; + const unsigned int nbBins = 256; + hist.calculate(dI, nbBins); + float accu = 0; + float t = (float)(upperThresholdRatio * w * h); + float bon = 0; + for (unsigned int i = 0; i < nbBins; i++) { + float tf = hist[i]; + accu = accu + tf; + if (accu > t) { + bon = (float)i; + break; + } + } + float upperThresh = std::max(bon, 1.f); + lowerThresh = lowerThresholdRatio * bon; + return upperThresh; + } + +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) static float median(const cv::Mat &cv_I); static float median(const vpImage &Isrc); static std::vector median(const vpImage &Isrc); diff --git a/modules/core/src/image/vpImageConvert.cpp b/modules/core/src/image/vpImageConvert.cpp index f1d6df0bf0..3fe7de60e8 100644 --- a/modules/core/src/image/vpImageConvert.cpp +++ b/modules/core/src/image/vpImageConvert.cpp @@ -497,6 +497,27 @@ void vpImageConvert::convert(const cv::Mat &src, vpImage &dest, bool flip } } +/*! + * Converts cv::Mat CV_32FC1 format to ViSP vpImage. + * + * \param[in] src : OpenCV image in CV_32FC1 format. + * \param[out] dest : ViSP image in double format. + * \param[in] flip : When true during conversion flip image vertically. + */ +void vpImageConvert::convert(const cv::Mat &src, vpImage &dest, bool flip) +{ + vpImage I_float; + convert(src, I_float, flip); + unsigned int nbRows = (unsigned int)src.rows; + unsigned int nbCols = (unsigned int)src.cols; + dest.resize(nbRows, nbCols); + for (unsigned int i = 0; i < nbRows; ++i) { + for (unsigned int j = 0; j < nbCols; ++j) { + dest[i][j] = I_float[i][j]; + } + } +} + /*! * Converts cv::Mat CV_16UC1 format to ViSP vpImage. 
* @@ -662,6 +683,19 @@ void vpImageConvert::convert(const vpImage &src, cv::Mat &dest, bool copy } } +void vpImageConvert::convert(const vpImage &src, cv::Mat &dest, bool copyData) +{ + unsigned int nbRows = src.getRows(); + unsigned int nbCols = src.getCols(); + vpImage I_float(nbRows, nbCols); + for (unsigned int i = 0; i < nbRows; ++i) { + for (unsigned int j = 0; j < nbCols; ++j) { + I_float[i][j] = src[i][j]; + } + } + convert(I_float, dest, copyData); +} + void vpImageConvert::convert(const vpImage &src, cv::Mat &dest) { cv::Mat vpToMat((int)src.getRows(), (int)src.getCols(), CV_32FC3, (void *)src.bitmap); diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 2fcb5402e9..f2fc84a2da 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -34,9 +34,6 @@ *****************************************************************************/ #include -#include -#include -#include #include #include #include @@ -134,32 +131,32 @@ vpImageFilter::vpCannyFilteringAndGradientType vpImageFilter::vpCannyFilteringAn /** * \cond DO_NOT_DOCUMENT */ -/*template<>*/template +template void vpImageFilter::filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve); -/*template<>*/template +template void vpImageFilter::filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve); -template <> +template void vpImageFilter::filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve); -template <> +template void vpImageFilter::filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve); -/*template<>*/template +template void vpImageFilter::filter(const vpImage &I, vpImage &GI, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filter(const vpImage &I, vpImage &GI, const double *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filter(const vpImage &I, vpImage &GI, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filter(const vpImage &I, vpImage &GI, const double *filter, unsigned int size); /** * \endcond @@ -251,18 +248,18 @@ void vpImageFilter::sepFilter(const vpImage &I, vpImage & /** * \cond DO_NOT_DOCUMENT */ -/*template<>*/template +template void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); /** * \endcond @@ -293,18 +290,18 @@ void vpImageFilter::filterX(const vpImage &I, vpImage &dIx, cons /** * \cond DO_NOT_DOCUMENT */ -/*template<>*/template +template void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); /** * \endcond @@ 
-339,16 +336,16 @@ void vpImageFilter::filterY(const vpImage &I, vpImage &dIy, cons /** * \cond DO_NOT_DOCUMENT */ -/*template<>*/template +template void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, float sigma, bool normalize); -/*template<>*/template +template void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, double sigma, bool normalize); -/*template<>*/template +template void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, float sigma, bool normalize); -/*template<>*/template +template void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size, double sigma, bool normalize); /** * \endcond @@ -379,80 +376,80 @@ void vpImageFilter::gaussianBlur(const vpImage &I, vpImage &GI, /** * \cond DO_NOT_DOCUMENT */ -/*template<>*/template +template void vpImageFilter::getGaussianKernel(float *filter, unsigned int size, float sigma, bool normalize); -template <> +template void vpImageFilter::getGaussianDerivativeKernel(float *filter, unsigned int size, float sigma, bool normalize); -template <> +template void vpImageFilter::getGaussianDerivativeKernel(double *filter, unsigned int size, double sigma, bool normalize); -/*template<>*/template +template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx); -/*template<>*/template +template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx); -/*template<>*/template +template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy); -/*template<>*/template +template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy); -/*template<>*/template +template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradX(const vpImage &I, vpImage &dIx, const double *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy, const float *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradY(const vpImage &I, vpImage &dIy, const double *filter, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradXGauss2D(const vpImage &I, vpImage &dIx, const float *gaussianKernel, const float *gaussianDerivativeKernel, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradXGauss2D(const vpImage &I, vpImage &dIx, const double *gaussianKernel, const double *gaussianDerivativeKernel, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradXGauss2D(const vpImage &I, vpImage &dIx, const float *gaussianKernel, const float *gaussianDerivativeKernel, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradXGauss2D(const vpImage &I, vpImage &dIx, const double *gaussianKernel, const double *gaussianDerivativeKernel, unsigned int size); -/*template<>*/template +template void 
vpImageFilter::getGradYGauss2D(const vpImage &I, vpImage &dIy, const float *gaussianKernel, const float *gaussianDerivativeKernel, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradYGauss2D(const vpImage &I, vpImage &dIy, const double *gaussianKernel, const double *gaussianDerivativeKernel, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradYGauss2D(const vpImage &I, vpImage &dIy, const float *gaussianKernel, const float *gaussianDerivativeKernel, unsigned int size); -/*template<>*/template +template void vpImageFilter::getGradYGauss2D(const vpImage &I, vpImage &dIy, const double *gaussianKernel, const double *gaussianDerivativeKernel, unsigned int size); /** @@ -512,16 +509,16 @@ void vpImageFilter::getGaussYPyramidal(const vpImage &I, vpImage< /** * \cond DO_NOT_DOCUMENT */ -/*template<>*/template +template double vpImageFilter::getSobelKernelX(double *filter, unsigned int size); -/*template<>*/template +template float vpImageFilter::getSobelKernelX(float *filter, unsigned int size); -/*template<>*/template +template double vpImageFilter::getSobelKernelY(double *filter, unsigned int size); -/*template<>*/template +template float vpImageFilter::getSobelKernelY(float *filter, unsigned int size); /** * \endcond @@ -613,9 +610,9 @@ std::vector vpImageFilter::median(const vpImage &Isrc) * \return The upper Canny edge filter threshold. */ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, - float &lowerThresh, const unsigned int gaussianKernelSize, - const float gaussianStdev, const unsigned int apertureGradient, - const float lowerThresholdRatio, const float upperThresholdRatio, + float &lowerThresh, const unsigned int &gaussianKernelSize, + const float &gaussianStdev, const unsigned int &apertureGradient, + const float &lowerThresholdRatio, const float &upperThresholdRatio, const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) { double w = cv_I.cols; @@ -624,24 +621,8 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p cv::Mat dI, dIx, dIy, dIx_abs, dIy_abs; if (p_cv_dIx == nullptr || p_cv_dIy == nullptr) { - cv::Mat img_blur; - // Apply Gaussian blur to the image - cv::Size gsz(gaussianKernelSize, gaussianKernelSize); - cv::GaussianBlur(cv_I, img_blur, gsz, gaussianStdev); - - // Compute the gradient of the blurred image - if (filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { - double scale = 1. / 8.; - if (apertureGradient > 3) { - scale *= std::pow(1./16., ((apertureGradient -1.)/2.) - 1.); - } - cv::Sobel(img_blur, dIx, CV_16S, 1, 0, apertureGradient, 1, 0, scale); - cv::Sobel(img_blur, dIy, CV_16S, 0, 1, apertureGradient, 1, 0, scale); - } - else if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { - cv::Scharr(img_blur, dIx, CV_16S, 1, 0, 1.f/32.f); - cv::Scharr(img_blur, dIy, CV_16S, 0, 1, 1.f/32.f); - } + computePartialDerivatives(cv_I, dIx, dIy, true, true, true, gaussianKernelSize, gaussianStdev, apertureGradient, + filteringType); } else { dIx = *p_cv_dIx; @@ -679,111 +660,143 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p lowerThresh = lowerThresholdRatio * bon; return upperThresh; } -#endif /** - * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel or + Scharr operators to compute - * the gradient of the image. + * \brief Compute the partial derivatives (i.e. horizontal and vertical gradients) of the input image. 
* - \param[in] I : The gray-scale image, in ViSP format. - * \param[in] p_dIx : If different from nullptr, must contain the gradient of the image with regard to the horizontal axis. - * \param[in] p_dIy : If different from nullptr, must contain the gradient of the image with regard to the vertical axis. - * \param[in] lowerThresh : Canny lower threshold. - * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). - * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. - * \param[in] apertureGradient : Size of the mask for the Sobel operator (odd number). - * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. - * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to define - * the upper threshold. - * \param[in] filteringType : The gradient filter to apply to compute the gradient, if \b p_dIx and \b p_dIy are - * nullptr. - * \return The upper Canny edge filter threshold. + * \param[in] cv_I The input image we want the partial derivatives. + * \param[out] cv_dIx The horizontal partial derivative, i.e. horizontal gradient. + * \param[out] cv_dIy The vertical partial derivative, i.e. vertical gradient. + * \param[in] computeDx Indicate if we must compute the horizontal gradient. + * \param[in] computeDy Indicate if we must compute the vertical gradient. + * \param[in] normalize Indicate if we must normalize the gradient filters. + * \param[in] gaussianKernelSize The size of the kernel of the Gaussian filter used to blur the image. + * \param[in] gaussianStdev The standard deviation of the Gaussian filter used to blur the image. + * \param[in] apertureGradient The size of the kernel of the gradient filter. + * \param[in] filteringType The type of filters to apply to compute the gradients.
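+ *
+ * A possible usage, given as a minimal illustrative sketch (the input file name below is only an
+ * example; the gradients are computed with the default Gaussian blur and Sobel settings):
+ * \code
+ * cv::Mat cv_I = cv::imread("image.png", cv::IMREAD_GRAYSCALE); // hypothetical input image
+ * cv::Mat cv_dIx, cv_dIy; // will receive the horizontal and vertical gradients
+ * vpImageFilter::computePartialDerivatives(cv_I, cv_dIx, cv_dIy);
+ * \endcode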
*/ -float vpImageFilter::computeCannyThreshold(const vpImage &I, float &lowerThresh, - const vpImage *p_dIx, const vpImage *p_dIy, - const unsigned int gaussianKernelSize, - const float gaussianStdev, const unsigned int apertureGradient, - const float lowerThresholdRatio, const float upperThresholdRatio, - const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) +void vpImageFilter::computePartialDerivatives(const cv::Mat &cv_I, + cv::Mat &cv_dIx, cv::Mat &cv_dIy, + const bool &computeDx, const bool &computeDy, const bool &normalize, + const unsigned int &gaussianKernelSize, const float &gaussianStdev, + const unsigned int &apertureGradient, + const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) { - double w = I.getWidth(); - double h = I.getHeight(); - - vpImage dI(h, w); - vpImage dIx(h, w), dIy(h, w); - if (p_dIx != nullptr && p_dIy != nullptr) { - dIx = *p_dIx; - dIy = *p_dIy; - } - else { - // Computing the Gaussian blur + gradients of the image - vpImage Iblur; - vpImageFilter::gaussianBlur(I, Iblur, gaussianKernelSize, gaussianStdev); - - vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis - vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis - - // Helper to apply the scale to the raw values of the filters - auto scaleFilter = [](vpArray2D &filter, const float &scale) { - for (unsigned int r = 0; r < filter.getRows(); r++) { - for (unsigned int c = 0; c < filter.getCols(); c++) { - filter[r][c] = filter[r][c] * scale; - } - }}; - - // Scales to apply to the filters to get a normalized gradient filter that gives a gradient - // between 0 and 255 for an vpImage - float scaleX = 1.f; - float scaleY = 1.f; - - if (filteringType == CANNY_GBLUR_SOBEL_FILTERING) { - scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Sobel kernel along X - scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Sobel kernel along Y - } - else if (filteringType == CANNY_GBLUR_SCHARR_FILTERING) { - scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Scharr kernel along X - scaleY = vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Scharr kernel along Y - } - - // Scale the gradient filters to have a normalized gradient filter - scaleFilter(gradientFilterX, scaleX); - scaleFilter(gradientFilterY, scaleY); - - // Apply the gradient filters to get the gradients - vpImageFilter::filter(Iblur, dIx, gradientFilterX); - vpImageFilter::filter(Iblur, dIy, gradientFilterY); - } + if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING + || filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + cv::Mat img_blur; + // Apply Gaussian blur to the image + cv::Size gsz(gaussianKernelSize, gaussianKernelSize); + cv::GaussianBlur(cv_I, img_blur, gsz, gaussianStdev); - // Computing the absolute gradient of the image G = |dIx| + |dIy| - for (unsigned int r = 0; r < h; r++) { - for (unsigned int c = 0; c < w; c++) { - float dx = (float)dIx[r][c]; - float dy = (float)dIy[r][c]; - float gradient = std::abs(dx) + std::abs(dy); - float gradientClamped = std::min(gradient, (float)std::numeric_limits::max()); - dI[r][c] = gradientClamped; + // Compute the gradient of the blurred image + if (filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { + double scale = 1.; + if (normalize) { + scale = 1. 
/ 8.; + if (apertureGradient > 3) { + scale *= std::pow(1./16., ((apertureGradient -1.)/2.) - 1.); + } + } + if (computeDx) { + cv::Sobel(img_blur, cv_dIx, CV_16S, 1, 0, apertureGradient, 1, 0, scale); + } + if (computeDy) { + cv::Sobel(img_blur, cv_dIy, CV_16S, 0, 1, apertureGradient, 1, 0, scale); + } } - } - - // Compute the histogram - vpHistogram hist; - const unsigned int nbBins = 256; - hist.calculate(dI, nbBins); - float accu = 0; - float t = (float)(upperThresholdRatio * w * h); - float bon = 0; - for (unsigned int i = 0; i < nbBins; i++) { - float tf = hist[i]; - accu = accu + tf; - if (accu > t) { - bon = (float)i; - break; + else if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { + double scale = 1.; + if (normalize) { + scale = 1. / 32.; + } + if (computeDx) { + cv::Scharr(img_blur, cv_dIx, CV_16S, 1, 0, scale); + } + if (computeDy) { + cv::Scharr(img_blur, cv_dIy, CV_16S, 0, 1, scale); + } } } - float upperThresh = std::max(bon, 1.f); - lowerThresh = lowerThresholdRatio * bon; - return upperThresh; } +#endif + +/** + * \cond DO_NOT_DOCUMENT + */ +template +void vpImageFilter::computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx, const bool &computeDy, const bool &normalize, + const unsigned int &gaussianKernelSize, const float &gaussianStdev, + const unsigned int &apertureGradient, + const vpCannyFilteringAndGradientType &filteringType, + const vpCannyBackendType &backend); + +template +void vpImageFilter::computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx, const bool &computeDy, const bool &normalize, + const unsigned int &gaussianKernelSize, const double &gaussianStdev, + const unsigned int &apertureGradient, + const vpCannyFilteringAndGradientType &filteringType, + const vpCannyBackendType &backend); + +template +void vpImageFilter::computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx, const bool &computeDy, const bool &normalize, + const unsigned int &gaussianKernelSize, const float &gaussianStdev, + const unsigned int &apertureGradient, + const vpCannyFilteringAndGradientType &filteringType, + const vpCannyBackendType &backend); + +template +void vpImageFilter::computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx, const bool &computeDy, const bool &normalize, + const unsigned int &gaussianKernelSize, const double &gaussianStdev, + const unsigned int &apertureGradient, + const vpCannyFilteringAndGradientType &filteringType, + const vpCannyBackendType &backend); + +template +void vpImageFilter::computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx, const bool &computeDy, const bool &normalize, + const unsigned int &gaussianKernelSize, const float &gaussianStdev, + const unsigned int &apertureGradient, + const vpCannyFilteringAndGradientType &filteringType, + const vpCannyBackendType &backend); + +template +void vpImageFilter::computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx, const bool &computeDy, const bool &normalize, + const unsigned int &gaussianKernelSize, const double &gaussianStdev, + const unsigned int &apertureGradient, + const vpCannyFilteringAndGradientType &filteringType, + const vpCannyBackendType &backend); + +template +float vpImageFilter::computeCannyThreshold(const vpImage &I, float &lowerThresh, + const vpImage *p_dIx, const vpImage *p_dIy, + const unsigned int 
&gaussianKernelSize, + const double &gaussianStdev, const unsigned int &apertureGradient, + const float &lowerThresholdRatio, const float &upperThresholdRatio, + const vpImageFilter::vpCannyFilteringAndGradientType &filteringType); + +template +float vpImageFilter::computeCannyThreshold(const vpImage &I, float &lowerThresh, + const vpImage *p_dIx, const vpImage *p_dIy, + const unsigned int &gaussianKernelSize, + const float &gaussianStdev, const unsigned int &apertureGradient, + const float &lowerThresholdRatio, const float &upperThresholdRatio, + const vpImageFilter::vpCannyFilteringAndGradientType &filteringType); +/** + * \endcond +*/ /*! Apply the Canny edge operator on the image \e Isrc and return the resulting @@ -889,7 +902,7 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage Isrc; @@ -923,7 +937,8 @@ int main() // Apply the Canny edge operator and set the Icanny image. vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, lowerThresholdCanny, upperThresholdCanny, apertureSobel, - gaussianStdev, lowerThresholdRatio, upperThresholdRatio, cannyBackend, cannyFilteringSteps); + gaussianStdev, lowerThresholdRatio, upperThresholdRatio, normalizeGradient, + cannyBackend, filteringType); return (0); } \endcode @@ -938,48 +953,33 @@ int main() \param[in] upperThreshold : The upper threshold for the Canny operator. Only value greater than this value are marked as an edge. If negative, it will be automatically computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third - of the thresholdCanny . + of the upper threshold . \param[in] apertureGradient : Size of the mask for the gardient (Sobel or Scharr) operator (odd number). \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. It is used only if the user asks to compute the Canny thresholds. \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to define the upper threshold. It is used only if the user asks to compute the Canny thresholds. + \param[in] normalizeGradients : Needs to be true if asking to compute the \b upperThreshold , otherwise it depends on + the user application and user-defined thresholds. \param[in] cannyBackend : The backend to use to perform the Canny edge filtering. \param[in] cannyFilteringSteps : The filtering + gradient operators to apply to compute the gradient in the early stage of the Canny algoritgm. - */ void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, const unsigned int &gaussianFilterSize, const float &lowerThreshold, const float &upperThreshold, const unsigned int &apertureGradient, const float &gaussianStdev, const float &lowerThresholdRatio, const float &upperThresholdRatio, - const vpCannyBackendType &cannyBackend, const vpCannyFilteringAndGradientType &cannyFilteringSteps) + const bool &normalizeGradients, + const vpCannyBackendType &cannyBackend, const vpCannyFilteringAndGradientType &cannyFilteringSteps +) { if (cannyBackend == CANNY_OPENCV_BACKEND) { #if defined(HAVE_OPENCV_IMGPROC) cv::Mat img_cvmat, cv_dx, cv_dy, edges_cvmat; vpImageConvert::convert(Isrc, img_cvmat); - if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING) { - cv::Mat cv_I_blur; - cv::GaussianBlur(img_cvmat, cv_I_blur, cv::Size((int)gaussianFilterSize, (int)gaussianFilterSize), gaussianStdev, 0); - double scale = 1. 
/ 8.; - if (apertureGradient > 3) { - scale *= std::pow(1./16., ((apertureGradient -1.)/2.) - 1.); - } - cv::Sobel(cv_I_blur, cv_dx, CV_16S, 1, 0, apertureGradient, scale); - cv::Sobel(cv_I_blur, cv_dy, CV_16S, 0, 1, apertureGradient, scale); - } - else if (cannyFilteringSteps == CANNY_GBLUR_SCHARR_FILTERING) { - cv::Mat cv_I_blur; - cv::GaussianBlur(img_cvmat, cv_I_blur, cv::Size((int)gaussianFilterSize, (int)gaussianFilterSize), gaussianStdev, 0); - cv::Scharr(cv_I_blur, cv_dx, CV_16S, 1, 0, 1.f/32.f); - cv::Scharr(cv_I_blur, cv_dy, CV_16S, 0, 1, 1.f/32.f); - } - else { - std::string errMsg("[vpImageFilter::canny]Other types of Canny filtering steps have not been implemented"); - throw(vpException(vpException::functionNotImplementedError, errMsg)); - } + computePartialDerivatives(img_cvmat, cv_dx, cv_dy, true, true, normalizeGradients, gaussianFilterSize, + gaussianStdev, apertureGradient, cannyFilteringSteps); float upperCannyThresh = upperThreshold; float lowerCannyThresh = lowerThreshold; if (upperCannyThresh < 0) { @@ -1002,54 +1002,8 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage dIx, dIy; - if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING - || cannyFilteringSteps == CANNY_GBLUR_SCHARR_FILTERING) { - // Computing the Gaussian blur - vpImage Iblur; - vpArray2D fg(1, (gaussianFilterSize + 1)/2); - vpImageFilter::getGaussianKernel(fg.data, gaussianFilterSize, gaussianStdev, true); - vpImage GIx; - vpImageFilter::filterX(Isrc, GIx, fg.data, gaussianFilterSize); - vpImageFilter::filterY(GIx, Iblur, fg.data, gaussianFilterSize); - - // Compute the gradient filters - vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis - vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis - - // Helper to apply the scale to the raw values of the filters - auto scaleFilter = [](vpArray2D &filter, const float &scale) { - for (unsigned int r = 0; r < filter.getRows(); r++) { - for (unsigned int c = 0; c < filter.getCols(); c++) { - filter[r][c] = filter[r][c] * scale; - } - }}; - - // Scales to apply to the filters to get a normalized gradient filter that gives a gradient - // between 0 and 255 for an vpImage - float scaleX = 1.f; - float scaleY = 1.f; - - if (cannyFilteringSteps == CANNY_GBLUR_SOBEL_FILTERING) { - scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); // Sobel kernel along X - scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); // Sobel kernel along Y - } - else if (cannyFilteringSteps == CANNY_GBLUR_SCHARR_FILTERING) { - scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); - scaleY = vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); - } - - // Scale the gradient filters to have a normalized gradient filter - scaleFilter(gradientFilterX, scaleX); - scaleFilter(gradientFilterY, scaleY); - - // Apply the gradient filters to get the gradients - vpImageFilter::filter(Iblur, dIx, gradientFilterX); - vpImageFilter::filter(Iblur, dIy, gradientFilterY); - } - else { - std::string errMsg("[vpImageFilter::canny]Other types of Canny filtering steps have not been implemented"); - throw(vpException(vpException::functionNotImplementedError, errMsg)); - } + computePartialDerivatives(Isrc, dIx, dIy, true, true, normalizeGradients, gaussianFilterSize, + gaussianStdev, apertureGradient, cannyFilteringSteps, cannyBackend); if (upperCannyThresh < 0) { 
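      // Note: when upperThreshold is negative, both thresholds are derived from the gradient
      // histogram (see computeCannyThreshold): the upper threshold is the gradient value under
      // which a ratio upperThresholdRatio of the pixels lie, and the lower threshold is
      // lowerThresholdRatio times that upper threshold.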
upperCannyThresh = computeCannyThreshold(Isrc, lowerCannyThresh, &dIx, &dIy, gaussianFilterSize, gaussianStdev, diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index 17e3a720c2..ea971438eb 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -288,7 +288,7 @@ vpCircleHoughTransform::edgeDetection(const vpImage &I) // the vpImageFilter::canny method take care of it vpImageFilter::canny(I, m_edgeMap, m_algoParams.m_gaussianKernelSize, m_algoParams.m_lowerCannyThresh, m_algoParams.m_upperCannyThresh, m_algoParams.m_gradientFilterKernelSize, m_algoParams.m_gaussianStdev, - m_algoParams.m_lowerCannyThreshRatio, m_algoParams.m_upperCannyThreshRatio, + m_algoParams.m_lowerCannyThreshRatio, m_algoParams.m_upperCannyThreshRatio, true, m_algoParams.m_cannyBackendType, m_algoParams.m_filteringAndGradientType); } diff --git a/tutorial/image/tutorial-canny.cpp b/tutorial/image/tutorial-canny.cpp index 10aabe4f85..6e133f5da9 100644 --- a/tutorial/image/tutorial-canny.cpp +++ b/tutorial/image/tutorial-canny.cpp @@ -69,59 +69,10 @@ void computeMeanMaxStdev(const vpImage &I, float &mean, float &max, float &st void setGradientOutsideClass(const vpImage &I, const int &gaussianKernelSize, const float &gaussianStdev, vpCannyEdgeDetection &cannyDetector, const unsigned int apertureSize, const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) { - // Get the Gaussian blur kernel - if ((gaussianKernelSize % 2) == 0) { - throw(vpException(vpException::badValue, "The Gaussian kernel size should be odd")); - } - vpArray2D fg(1, (gaussianKernelSize + 1)/2); - vpImageFilter::getGaussianKernel(fg.data, gaussianKernelSize, gaussianStdev, true); - - // Get the gradient filters kernel - if ((apertureSize % 2) != 1) { - throw vpException(vpException::badValue, "Gradient filters kernel size should be odd."); - } - vpArray2D gradientFilterX(apertureSize, apertureSize); - vpArray2D gradientFilterY(apertureSize, apertureSize); - - auto scaleFilter = [](vpArray2D &filter, const float &scale) { - for (unsigned int r = 0; r < filter.getRows(); r++) { - for (unsigned int c = 0; c < filter.getCols(); c++) { - filter[r][c] = filter[r][c] * scale; - } - }}; - - float scaleX = 1.f; - float scaleY = 1.f; - - if (filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { - scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureSize - 1)/2); - scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureSize - 1)/2); - } - else if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING) { - // Compute the Scharr filters - scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureSize - 1)/2); - scaleY = vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureSize - 1)/2); - } - else { - std::string errMsg = "Error: gradient filtering method \""; - errMsg += vpImageFilter::vpCannyFilteringAndGradientTypeToString(filteringType); - errMsg += "\" has not been implemented yet\n"; - throw vpException(vpException::notImplementedError, errMsg); - } - - scaleFilter(gradientFilterX, scaleX); - scaleFilter(gradientFilterY, scaleY); - - // Perform Gaussian blur - vpImage Iblur; - vpImage GIx; - vpImageFilter::filterX(I, GIx, fg.data, gaussianKernelSize); - vpImageFilter::filterY(GIx, Iblur, fg.data, gaussianKernelSize); - // Computing the gradients vpImage dIx, dIy; - vpImageFilter::filter(Iblur, dIx, gradientFilterX); - vpImageFilter::filter(Iblur, dIy, 
gradientFilterY); + vpImageFilter::computePartialDerivatives(I, dIx, dIy, true, true, true, gaussianKernelSize, gaussianStdev, + apertureSize, filteringType); // Set the gradients of the vpCannyEdgeDetection cannyDetector.setGradients(dIx, dIy); @@ -287,7 +238,7 @@ int main(int argc, const char *argv[]) float cannyThresh = opt_upperThresh; float lowerThresh(opt_lowerThresh); vpImageFilter::canny(I_canny_input, I_canny, opt_gaussianKernelSize, lowerThresh, cannyThresh, - opt_apertureSize, opt_gaussianStdev, opt_lowerThreshRatio, opt_upperThreshRatio, + opt_apertureSize, opt_gaussianStdev, opt_lowerThreshRatio, opt_upperThreshRatio, true, opt_backend, opt_filteringType); drawingHelpers::display(I_canny, "Canny results with \"" + vpImageFilter::vpCannyBackendTypeToString(opt_backend) + "\" backend", true); } From adedf6015294a17059c31953326f093257cbf230 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Thu, 19 Oct 2023 11:22:41 +0200 Subject: [PATCH 11/43] [CORPS] Added a method to know the list of available backend and filtering types for Canny --- .../core/include/visp3/core/vpImageFilter.h | 6 +++ modules/core/src/image/vpImageFilter.cpp | 46 +++++++++++++++++++ tutorial/image/tutorial-canny.cpp | 2 + 3 files changed, 54 insertions(+) diff --git a/modules/core/include/visp3/core/vpImageFilter.h b/modules/core/include/visp3/core/vpImageFilter.h index 2af1b1513c..a977d73509 100644 --- a/modules/core/include/visp3/core/vpImageFilter.h +++ b/modules/core/include/visp3/core/vpImageFilter.h @@ -78,6 +78,9 @@ class VISP_EXPORT vpImageFilter CANNY_COUNT_BACKEND = 2 //! Number of supported backends } vpCannyBackendType; + static std::string vpCannyBackendTypeList(const std::string &pref = "<", const std::string &sep = " , ", + const std::string &suf = ">"); + static std::string vpCannyBackendTypeToString(const vpCannyBackendType &type); static vpCannyBackendType vpCannyBackendTypeFromString(const std::string &name); @@ -90,6 +93,9 @@ class VISP_EXPORT vpImageFilter CANNY_COUNT_FILTERING = 2 //! Number of supported backends } vpCannyFilteringAndGradientType; + static std::string vpCannyFilteringAndGradientTypeList(const std::string &pref = "<", const std::string &sep = " , ", + const std::string &suf = ">"); + static std::string vpCannyFilteringAndGradientTypeToString(const vpCannyFilteringAndGradientType &type); static vpCannyFilteringAndGradientType vpCannyFilteringAndGradientTypeFromString(const std::string &name); diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index f2fc84a2da..ad33a28fd3 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -38,6 +38,29 @@ #include #include +/** + * \brief Get the list of available vpCannyBackendType. + * + * \param[in] pref The prefix of the list. + * \param[in] sep The separator between two elements of the list. + * \param[in] suf The suffix of the list. + * \return std::string The list of available items. 
+ */ +std::string vpImageFilter::vpCannyBackendTypeList(const std::string &pref, const std::string &sep, + const std::string &suf) +{ + std::string list(pref); + for (unsigned int i = 0; i < vpCannyBackendType::CANNY_COUNT_BACKEND - 1; i++) { + vpCannyBackendType type = (vpCannyBackendType)i; + list += vpCannyBackendTypeToString(type); + list += sep; + } + vpCannyBackendType type = (vpCannyBackendType)(vpCannyBackendType::CANNY_COUNT_BACKEND - 1); + list += vpCannyBackendTypeToString(type); + list += suf; + return list; +} + /** * \brief Cast a \b vpImageFilter::vpCannyBackendTypeToString into a string, to know its name. * @@ -83,6 +106,29 @@ vpImageFilter::vpCannyBackendType vpImageFilter::vpCannyBackendTypeFromString(co return type; } +/** + * \brief Get the list of available vpCannyFilteringAndGradientType. + * + * \param[in] pref The prefix of the list. + * \param[in] sep The separator between two elements of the list. + * \param[in] suf The suffix of the list. + * \return std::string The list of available items. + */ +std::string vpImageFilter::vpCannyFilteringAndGradientTypeList(const std::string &pref, const std::string &sep, + const std::string &suf) +{ + std::string list(pref); + for (unsigned int i = 0; i < vpCannyFilteringAndGradientType::CANNY_COUNT_FILTERING - 1; i++) { + vpCannyFilteringAndGradientType type = (vpCannyFilteringAndGradientType)i; + list += vpCannyFilteringAndGradientTypeToString(type); + list += sep; + } + vpCannyFilteringAndGradientType type = (vpCannyFilteringAndGradientType)(CANNY_COUNT_FILTERING - 1); + list += vpCannyFilteringAndGradientTypeToString(type); + list += suf; + return list; +} + /** * \brief Cast a \b vpImageFilter::vpCannyFilteringAndGradientType into a string, to know its name. * diff --git a/tutorial/image/tutorial-canny.cpp b/tutorial/image/tutorial-canny.cpp index 6e133f5da9..e75e6c16cb 100644 --- a/tutorial/image/tutorial-canny.cpp +++ b/tutorial/image/tutorial-canny.cpp @@ -124,6 +124,7 @@ void usage(const std::string &softName) << std::endl; std::cout << "\t-f, --filter" << std::endl << "\t\tPermits to choose the type of filter to apply to compute the gradient." + << "\t\tAvailable values = " << vpImageFilter::vpCannyFilteringAndGradientTypeList() << std::endl << std::endl; std::cout << "\t-r, --ratio" << std::endl << "\t\tPermits to set the lower and upper thresholds ratio of the vpCanny class." @@ -132,6 +133,7 @@ void usage(const std::string &softName) << std::endl; std::cout << "\t-b, --backend" << std::endl << "\t\tPermits to use the vpImageFilter::canny method for comparison." + << "\t\tAvailable values = " << vpImageFilter::vpCannyBackendTypeList() << std::endl << std::endl; std::cout << "\t-h, --help" << std::endl << "\t\tPermits to display the different arguments this software handles." From f44b4b7037a8e0a5fda7c31b5d42cc70c2fd27d7 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Sat, 21 Oct 2023 18:27:50 +0200 Subject: [PATCH 12/43] Improve cmake timestamp and git csv retrieval to avoid useless rebuild - Before, visp-build-info.tmp was modified after each call to cmake leading to rebuilding vpIoTools.cpp and thus visp_core... 
- Now visp-build-info.tmp is updated only if necessary - This commit removes also all specific cmake code when cmake < 2.8.12 --- CMakeLists.txt | 22 +++++--------- cmake/VISPModule.cmake | 10 ------- cmake/VISPUtils.cmake | 35 +++++++++++++++++------ platforms/android/android.toolchain.cmake | 3 -- 4 files changed, 34 insertions(+), 36 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f187a62160..d8110b8b79 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1262,16 +1262,7 @@ vp_system_information(NUMBER_OF_LOGICAL_CORES NUMBER_OF_PHYSICAL_CORES TOTAL_PHY find_host_package(Git QUIET) if(NOT DEFINED VISP_VCSVERSION AND GIT_FOUND) - execute_process(COMMAND "${GIT_EXECUTABLE}" describe --tags --always --dirty --match "v[0-9].[0-9].[0-9]*" - WORKING_DIRECTORY "${VISP_SOURCE_DIR}" - OUTPUT_VARIABLE VISP_VCSVERSION - RESULT_VARIABLE GIT_RESULT - ERROR_QUIET - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if(NOT GIT_RESULT EQUAL 0) - set(VISP_VCSVERSION "unknown") - endif() + vp_git_describe(VISP_VCSVERSION "${VISP_SOURCE_DIR}") elseif(NOT DEFINED VISP_VCSVERSION) # We don't have git: set(VISP_VCSVERSION "unknown") @@ -1322,11 +1313,12 @@ endif() # ========================== build platform ========================== status("") status(" Platform:") -if(NOT CMAKE_VERSION VERSION_LESS 2.8.11) - string(TIMESTAMP TIMESTAMP "" UTC) - if(TIMESTAMP) - status(" Timestamp:" ${TIMESTAMP}) - endif() +if(NOT DEFINED VISP_TIMESTAMP AND NOT BUILD_INFO_SKIP_TIMESTAMP) + string(TIMESTAMP VISP_TIMESTAMP "" UTC) + set(VISP_TIMESTAMP "${VISP_TIMESTAMP}" CACHE STRING "Timestamp of ViSP build configuration" FORCE) +endif() +if(VISP_TIMESTAMP) + status(" Timestamp:" ${VISP_TIMESTAMP}) endif() status(" Host:" ${CMAKE_HOST_SYSTEM_NAME} ${CMAKE_HOST_SYSTEM_VERSION} ${CMAKE_HOST_SYSTEM_PROCESSOR}) if(CMAKE_CROSSCOMPILING) diff --git a/cmake/VISPModule.cmake b/cmake/VISPModule.cmake index eee1761ba1..0089104ad9 100644 --- a/cmake/VISPModule.cmake +++ b/cmake/VISPModule.cmake @@ -590,11 +590,6 @@ macro(vp_target_include_modules target) if(d MATCHES "^visp_" AND HAVE_${d}) if (EXISTS "${VISP_MODULE_${d}_LOCATION}/include") vp_target_include_directories(${target} "${VISP_MODULE_${d}_LOCATION}/include") - # Work around to be able to build the modules without INTERFACE_INCLUDE_DIRECTORIES - # that was only introduces since CMake 2.8.12 - if (CMAKE_VERSION VERSION_LESS 2.8.12) - vp_target_include_directories(${target} "${VISP_MODULE_${d}_INC_DEPS}") - endif() endif() elseif(EXISTS "${d}") # FS keep external deps inc @@ -992,11 +987,6 @@ macro(vp_add_tests) foreach(d ${VISP_TEST_${the_module}_DEPS}) list(APPEND test_deps ${d}) list(APPEND test_deps ${VISP_MODULE_${d}_DEPS}) - # Work around to be able to build the modules without INTERFACE_INCLUDE_DIRECTORIES - # that was only introduces since CMake 2.8.12 - if(CMAKE_VERSION VERSION_LESS 2.8.12) - list(APPEND test_deps "${VISP_MODULE_${__m}_INC_DEPS}") - endif() endforeach() vp_check_dependencies(${test_deps}) diff --git a/cmake/VISPUtils.cmake b/cmake/VISPUtils.cmake index 3fa45576b3..3b8eebf23c 100644 --- a/cmake/VISPUtils.cmake +++ b/cmake/VISPUtils.cmake @@ -195,15 +195,11 @@ function(vp_target_include_directories target) endif() endforeach() if(__params) - if(CMAKE_VERSION VERSION_LESS 2.8.11) - include_directories(${__params}) + if(TARGET ${target}) + target_include_directories(${target} PRIVATE ${__params}) else() - if(TARGET ${target}) - target_include_directories(${target} PRIVATE ${__params}) - else() - set(__new_inc 
"${VP_TARGET_INCLUDE_DIRS_${target}};${__params}") - set(VP_TARGET_INCLUDE_DIRS_${target} "${__new_inc}" CACHE INTERNAL "") - endif() + set(__new_inc "${VP_TARGET_INCLUDE_DIRS_${target}};${__params}") + set(VP_TARGET_INCLUDE_DIRS_${target} "${__new_inc}" CACHE INTERNAL "") endif() endif() endfunction() @@ -1969,3 +1965,26 @@ macro(vp_list_replace_string list_in list_out regular_expression replacement_exp endforeach() set(${list_out} ${__list_out}) endmacro() + +macro(vp_git_describe var_name path) + if(GIT_FOUND) + execute_process(COMMAND "${GIT_EXECUTABLE}" describe --tags --exact-match --dirty + WORKING_DIRECTORY "${path}" + OUTPUT_VARIABLE ${var_name} + RESULT_VARIABLE GIT_RESULT + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if(NOT GIT_RESULT EQUAL 0) + execute_process(COMMAND "${GIT_EXECUTABLE}" describe --tags --always --dirty --match "[0-9].[0-9].[0-9]*" + WORKING_DIRECTORY "${path}" + OUTPUT_VARIABLE ${var_name} + RESULT_VARIABLE GIT_RESULT + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + endif() + else() + set(${var_name} "unknown") + endif() +endmacro() diff --git a/platforms/android/android.toolchain.cmake b/platforms/android/android.toolchain.cmake index 6645af3d1a..0096a29c99 100644 --- a/platforms/android/android.toolchain.cmake +++ b/platforms/android/android.toolchain.cmake @@ -1156,9 +1156,6 @@ if( NOT CMAKE_C_COMPILER ) endif() set( _CMAKE_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_MACHINE_NAME}-" ) -if( CMAKE_VERSION VERSION_LESS 2.8.5 ) - set( CMAKE_ASM_COMPILER_ARG1 "-c" ) -endif() if( APPLE ) find_program( CMAKE_INSTALL_NAME_TOOL NAMES install_name_tool ) if( NOT CMAKE_INSTALL_NAME_TOOL ) From 4842e5441b2f6ccafdb487517091ec6db1d95765 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Mon, 23 Oct 2023 10:01:00 +0200 Subject: [PATCH 13/43] Fix warnings detected during build on ci with Visual 2019 --- modules/core/src/image/vpGaussianFilter.cpp | 12 +- modules/core/src/image/vpImageCircle.cpp | 85 ++--- modules/core/src/image/vpImageFilter.cpp | 8 +- .../test/tools/geometry/testImageCircle.cpp | 308 +++++++++--------- 4 files changed, 208 insertions(+), 205 deletions(-) diff --git a/modules/core/src/image/vpGaussianFilter.cpp b/modules/core/src/image/vpGaussianFilter.cpp index 0f77e22807..4e116085ab 100644 --- a/modules/core/src/image/vpGaussianFilter.cpp +++ b/modules/core/src/image/vpGaussianFilter.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. 
* @@ -30,8 +29,7 @@ * * Description: * Gaussian filter class - * -*****************************************************************************/ + */ #include #include @@ -88,7 +86,8 @@ class vpGaussianFilter::Impl if (!m_deinterleave) { SimdGaussianBlurRun(m_funcPtrRGBa, reinterpret_cast(I.bitmap), I.getWidth() * 4, reinterpret_cast(I_blur.bitmap), I_blur.getWidth() * 4); - } else { + } + else { vpImageConvert::split(I, &m_red, &m_green, &m_blue); SimdGaussianBlurRun(m_funcPtrGray, m_red.bitmap, m_red.getWidth(), m_redBlurred.bitmap, m_redBlurred.getWidth()); SimdGaussianBlurRun(m_funcPtrGray, m_green.bitmap, m_green.getWidth(), m_greenBlurred.bitmap, @@ -125,8 +124,7 @@ class vpGaussianFilter::Impl */ vpGaussianFilter::vpGaussianFilter(unsigned int width, unsigned int height, float sigma, bool deinterleave) : m_impl(new Impl(width, height, sigma, deinterleave)) -{ -} +{ } vpGaussianFilter::~vpGaussianFilter() { delete m_impl; } diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index b8e49af9bd..b0508f9b37 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Image circle, i.e. circle in the image space. - * -*****************************************************************************/ + */ #include @@ -53,30 +51,28 @@ vpImageCircle::vpImageCircle(const vpImagePoint ¢er, const float &radius) vpImageCircle::vpImageCircle(const cv::Vec3f &vec) : m_center(vec[1], vec[0]) , m_radius(vec[2]) -{ - -} +{ } #endif vpImageCircle::~vpImageCircle() -{ - -} +{ } /*! - * \brief Express \b theta between - Pi and Pi . + * \brief Express \b theta between \f$-\pi\f$ and \f$\pi\f$. * - * \param[in] theta The input angle we want to ensure it is in the interval [-Pi ; Pi] - * \return float The input angle in the interval [-Pi ; Pi] + * \param[in] theta The input angle we want to ensure it is in the interval \f$[-\pi ; \pi]\f$. + * \return The input angle in the interval \f$[-\pi ; \pi]\f$. */ float getAngleBetweenMinPiAndPi(const float &theta) { float theta1 = theta; - if (theta1 > M_PI) { - theta1 -= 2.0 * M_PI; + float pi = static_cast(M_PI); + + if (theta1 > pi) { + theta1 -= 2.0f * pi; } - else if (theta1 < -M_PI) { - theta1 += 2.0 * M_PI; + else if (theta1 < -pi) { + theta1 += 2.0f * pi; } return theta1; } @@ -122,7 +118,8 @@ void computeIntersectionsRightBorderOnly(const float &u_c, const float &umax_roi float theta2 = -1.f * theta1; float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); - delta_theta = 2.f * M_PI - (theta_max - theta_min); + float pi = static_cast(M_PI); + delta_theta = 2.f * pi - (theta_max - theta_min); } /*! 
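The helpers patched in this file all feed vpImageCircle::computeArcLengthInRoI(), which the updated tests further below exercise. A minimal usage sketch, assuming the usual visp3/core headers and purely illustrative image dimensions (640x480) and circle parameters:

#include <iostream>

#include <visp3/core/vpImageCircle.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/core/vpRect.h>

int main()
{
  // vpImagePoint takes (v, u): circle centered at u = 320, v = 240, with a 100-pixel radius
  vpImageCircle circle(vpImagePoint(240, 320), 100.f);
  // Region of interest covering a whole 640x480 image: (left, top, width, height)
  vpRect roi(0, 0, 640, 480);
  // The circle lies fully inside the RoI, so the visible arc length is 2 * pi * radius
  float arcLength = circle.computeArcLengthInRoI(roi);
  std::cout << "Visible arc length: " << arcLength << " pixels" << std::endl;
  return 0;
}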
@@ -140,24 +137,25 @@ void computeIntersectionsTopBorderOnly(const float &v_c, const float &vmin_roi, // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta1 = std::asin((v_c - vmin_roi) / radius); + float pi = static_cast(M_PI); theta1 = getAngleBetweenMinPiAndPi(theta1); float theta2 = 0.f; if (theta1 >= 0.f) { - theta2 = M_PI - theta1; + theta2 = pi - theta1; } else { - theta2 = -theta1 - M_PI; + theta2 = -theta1 - pi; } float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); if (std::abs(theta_max - theta_min) * radius < 1.f) { // Between the maximum and minimum theta there is less than 1 pixel of difference // It meens that the full circle is visible - delta_theta = 2.f * M_PI; + delta_theta = 2.f * pi; } else if (theta1 > 0.f) { - delta_theta = 2.f * M_PI - (theta_max - theta_min); + delta_theta = 2.f * pi - (theta_max - theta_min); } else { delta_theta = theta_max - theta_min; @@ -179,27 +177,28 @@ void computeIntersectionsBottomBorderOnly(const float &v_c, const float &vmax_ro // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta1 = std::asin((v_c - vmax_roi) / radius); + float pi = static_cast(M_PI); theta1 = getAngleBetweenMinPiAndPi(theta1); float theta2 = 0.f; if (theta1 >= 0.f) { - theta2 = M_PI - theta1; + theta2 = pi - theta1; } else { - theta2 = -theta1 - M_PI; + theta2 = -theta1 - pi; } float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); if (std::abs(theta_max - theta_min) * radius < 1.f) { // Between the maximum and minimum theta there is less than 1 pixel of difference - // It meens that the full circle is visible - delta_theta = 2.f * M_PI; + // It means that the full circle is visible + delta_theta = 2.f * pi; } else if (theta1 > 0.f) { delta_theta = theta_max - theta_min; } else { - delta_theta = 2.f * M_PI - (theta_max - theta_min); + delta_theta = 2.f * pi - (theta_max - theta_min); } } @@ -226,13 +225,14 @@ void computePerpendicularAxesIntersections(const float &u_c, const float &v_c, c // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta_u_cross = std::asin((v_c - crossing_u)/radius); + float pi = static_cast(M_PI); theta_u_cross = getAngleBetweenMinPiAndPi(theta_u_cross); float theta_u_cross_2 = 0.f; if (theta_u_cross > 0) { - theta_u_cross_2 = M_PI - theta_u_cross; + theta_u_cross_2 = pi - theta_u_cross; } else { - theta_u_cross_2 = -M_PI - theta_u_cross; + theta_u_cross_2 = -pi - theta_u_cross; } // Computing the corresponding u-coordinates at which the u-axis is crossed float u_ucross = u_c + radius * std::cos(theta_u_cross); @@ -352,19 +352,20 @@ void computeIntersectionsTopRight(const float &u_c, const float &v_c, const floa float u_umax = crossing_theta_u_max.second; float v_vmin = crossing_theta_v_min.second; float v_vmax = crossing_theta_v_max.second; + float pi = static_cast(M_PI); if (u_umin <= umax_roi && v_vmin < vmin_roi && u_umax >= umax_roi && v_vmax >= vmin_roi) { // The circle crosses only once each axis and the center is below the top border //Case crossing once delta_theta = theta_v_max - theta_u_min; if (delta_theta < 0) { // The arc cannot be negative - delta_theta += 2.f * M_PI; + delta_theta += 2.f * pi; } } else if (u_umin <= umax_roi && v_vmin >= vmin_roi && u_umax <= umax_roi && v_vmax >= vmin_roi) { // The circle crosses twice each axis //Case crossing twice - delta_theta = 2 * M_PI - ((theta_u_min - theta_u_max)+(theta_v_min - theta_v_max)); + 
delta_theta = 2.f * pi - ((theta_u_min - theta_u_max)+(theta_v_min - theta_v_max)); } else if (u_umin >= umax_roi && v_vmin >= vmin_roi && u_umax >= umax_roi && v_vmax >= vmin_roi) { // The circle crosses the u-axis outside the roi @@ -458,19 +459,20 @@ void computeIntersectionsBottomRight(const float &u_c, const float &v_c, const f float u_umax = crossing_theta_u_max.second; float v_vmin = crossing_theta_v_min.second; float v_vmax = crossing_theta_v_max.second; + float pi = static_cast(M_PI); if (u_umin <= umax_roi && u_umax > umax_roi && v_vmin <= vmax_roi && v_vmax > vmax_roi) { // The circle crosses only once each axis //Case crossing once delta_theta = theta_u_min - theta_v_min; if (delta_theta < 0) { // An arc length cannot be negative it means that theta_u_max was comprise in the bottom left quadrant of the circle - delta_theta += 2.f * M_PI; + delta_theta += 2.f * pi; } } else if (u_umin <= umax_roi && u_umax <= umax_roi && v_vmin <= vmax_roi && v_vmax <= vmax_roi) { // The circle crosses twice each axis //Case crossing twice - delta_theta = 2.f * M_PI - ((theta_v_min - theta_v_max) + (theta_u_max - theta_u_min)); + delta_theta = 2.f * pi - ((theta_v_min - theta_v_max) + (theta_u_max - theta_u_min)); } else if (u_umin > umax_roi && u_umax > umax_roi && v_vmin <= vmax_roi && v_vmax <= vmax_roi) { // The circle crosses the u-axis outside the roi @@ -586,9 +588,10 @@ void computeIntersectionsTopRightBottom(const float &u_c, const float &v_c, cons float theta_u_max_bottom = crossing_theta_u_max.first; float u_umin_bottom = crossing_theta_u_min.second; float u_umax_bottom = crossing_theta_u_max.second; + float pi = static_cast(M_PI); if (u_umax_top <= umax_roi && u_umax_bottom <= umax_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { // case intersection top + right + bottom twice - delta_theta = 2.f * M_PI - ((theta_u_min_top - theta_u_max_top) + (theta_v_min - theta_v_max) + (theta_u_max_bottom - theta_u_min_bottom)); + delta_theta = 2.f * pi - ((theta_u_min_top - theta_u_max_top) + (theta_v_min - theta_v_max) + (theta_u_max_bottom - theta_u_min_bottom)); } else if (u_umin_top <= umax_roi && u_umax_top > umax_roi && v_vmin <= vmin_roi && u_umin_bottom <= umax_roi && u_umax_bottom > umax_roi && v_vmax >= vmax_roi) { // case intersection top and bottom @@ -628,11 +631,12 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const float theta_u_cross_top = std::asin((v_c - vmin_roi)/radius); theta_u_cross_top = getAngleBetweenMinPiAndPi(theta_u_cross_top); float theta_u_cross_top_2 = 0.f; + float pi = static_cast(M_PI); if (theta_u_cross_top > 0) { - theta_u_cross_top_2 = M_PI - theta_u_cross_top; + theta_u_cross_top_2 = pi - theta_u_cross_top; } else { - theta_u_cross_top_2 = -M_PI - theta_u_cross_top; + theta_u_cross_top_2 = -pi - theta_u_cross_top; } // Computing the corresponding u-coordinates at which the u-axis is crossed @@ -656,10 +660,10 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const theta_u_cross_bottom = getAngleBetweenMinPiAndPi(theta_u_cross_bottom); float theta_u_cross_bottom_2 = 0.f; if (theta_u_cross_bottom > 0) { - theta_u_cross_bottom_2 = M_PI - theta_u_cross_bottom; + theta_u_cross_bottom_2 = pi - theta_u_cross_bottom; } else { - theta_u_cross_bottom_2 = -M_PI - theta_u_cross_bottom; + theta_u_cross_bottom_2 = -pi - theta_u_cross_bottom; } // Computing the corresponding u-coordinates at which the u-axis is crossed @@ -679,7 +683,7 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, 
const // Computing the the length of the angular interval of the circle when it intersects // only with the top and bottom borders of the Region of Interest (RoI) - delta_theta = 2.f * M_PI - ((theta_u_cross_top_min - theta_u_cross_top_max) + (theta_u_cross_bottom_max - theta_u_cross_bottom_min)); + delta_theta = 2.f * pi - ((theta_u_cross_top_min - theta_u_cross_top_max) + (theta_u_cross_bottom_max - theta_u_cross_bottom_min)); } /*! @@ -938,11 +942,12 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const bool touchBottomBorder = (v_c + radius) >= vmax_roi; bool isHorizontallyOK = (!touchLeftBorder && !touchRightBorder); bool isVerticallyOK = (!touchTopBorder && !touchBottomBorder); + float pi = static_cast(M_PI); if (isHorizontallyOK && isVerticallyOK && roi.isInside(m_center)) { // Easy case // The circle has its center in the image and its radius is not too great // to make it fully contained in the RoI - delta_theta = 2.f * M_PI; + delta_theta = 2.f * pi; } else if (touchBottomBorder && !touchLeftBorder && !touchRightBorder && !touchTopBorder) { // Touches/intersects only the bottom border of the RoI diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 63da6cc2e2..f7a0119d16 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Various image tools, convolution, ... - * -*****************************************************************************/ + */ #include #include @@ -373,7 +371,7 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage #include @@ -47,11 +45,12 @@ bool compareAngles(const float &actualVal, const float &theoreticalVal) float ensureIsBetweenMinPiAndPi(const float &theta) { float theta1 = theta; - if (theta1 > M_PI) { - theta1 -= 2.0 * M_PI; + float pi = static_cast(M_PI); + if (theta1 > pi) { + theta1 -= 2.0 * pi; } - else if (theta1 < -M_PI) { - theta1 += 2.0 * M_PI; + else if (theta1 < -pi) { + theta1 += 2.0 * pi; } return theta1; } @@ -67,12 +66,15 @@ int main() const float HEIGHT_SWITCHED = WIDTH; // The RoI must be inverted in order to cross left and right axes while crossing only the top axis vpRect switchedRoI(OFFSET, OFFSET, WIDTH_SWITCHED, HEIGHT_SWITCHED); bool hasSucceeded = true; + float pi = static_cast(M_PI); + float pi_2 = static_cast(M_PI_2); + float pi_4 = static_cast(M_PI_4); // Test with no intersections { vpImageCircle circle(vpImagePoint(HEIGHT / 2.f, WIDTH / 2.f), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -94,7 +96,7 @@ int main() vpRect roiSquare(OFFSET, OFFSET, HEIGHT, HEIGHT); vpImageCircle circle(vpImagePoint(OFFSET + HEIGHT / 2.f, OFFSET + HEIGHT / 2.f), HEIGHT / 2.f); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * HEIGHT / 2.f; + float theoreticalValue = 2.f * pi * HEIGHT / 2.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -119,7 +121,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), 
RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * M_PI * RADIUS /3.f; + float theoreticalValue = 4.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -144,7 +146,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS /3.f; + float theoreticalValue = 2.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -169,7 +171,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -194,7 +196,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * M_PI * RADIUS /3.f; + float theoreticalValue = 4.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -219,7 +221,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS /3.f; + float theoreticalValue = 2.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -244,7 +246,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -265,12 +267,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = M_PI / 3.f; + float theta = pi / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * M_PI * RADIUS /3.f; + float theoreticalValue = 5.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -291,12 +293,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = -2.f * M_PI/3.f; + float theta = -2.f * pi/3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI * RADIUS /3.f; + float theoreticalValue = pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -317,12 +319,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = M_PI_2; + float theta = pi_2; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float 
arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -343,12 +345,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = -M_PI / 3.f; + float theta = -pi / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * M_PI * RADIUS /3.f; + float theoreticalValue = 5.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -369,12 +371,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = M_PI / 3.f; + float theta = pi / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI * RADIUS /3.f; + float theoreticalValue = pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -398,7 +400,7 @@ int main() float vc = OFFSET + HEIGHT - RADIUS; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -425,7 +427,7 @@ int main() float vc = OFFSET; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI_2 * RADIUS; + float theoreticalValue = pi_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -451,13 +453,13 @@ int main() // (4): umin = uc + r cos(theta_v_max) ; v_cross_max = vc - r sin(theta_v_max) >= vmin && <= vmin + height // (3) & (4) => uc = umin - r cos(theta_v_min) = umin - r cos(theta_v_max) <=> theta_v_min = - theta_v_max // (3) & (4) => vc >= vmin + r sin(theta_v_min) && vc >= vmin + r sin (theta_v_max) - float theta_v_min = M_PI / 4.f; + float theta_v_min = pi / 4.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + RADIUS * std::sin(theta_v_min) + 1.f; vc = std::max(vc, OFFSET + RADIUS * std::sin(-theta_v_min) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI_2 * RADIUS; + float theoreticalValue = pi_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -485,13 +487,13 @@ int main() // (1) => uc + r cos(theta_u_top_min) >= umin <=> uc >= umin - r cos(theta_u_top_min) // (2) => uc + r cos(theta_u_top_max) >= umin <=> uc >= umin - r cos(theta_u_top_max) - float theta_u_top_min = -1.1f * M_PI_2; + float theta_u_top_min = -1.1f * pi_2; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos(pi - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * 
std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 0.2f * M_PI_2 * RADIUS; + float theoreticalValue = 0.2f * pi_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -521,10 +523,10 @@ int main() // (3) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (3) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = pi - theta_u_top_min; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos(pi - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET - uc)/RADIUS); theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); @@ -564,13 +566,13 @@ int main() // (1) => vc = vmin + r sin(theta_u_top_min) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_v_max = -M_PI_2; + float theta_u_top_min = 2.f * pi / 3.f; + float theta_v_max = -pi_2; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; + float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -599,13 +601,13 @@ int main() // (1) <=> asin((vc - vmin)/r) >= acos[(umin + width - uc)/r] <=> vc >= r sin(acos[(umin + width - uc)/r]) + vmin // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_max = -7.f * M_PI / 8.f; + float theta_v_max = -7.f * pi / 8.f; float theta_v_min = -theta_v_max; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = RADIUS * std::sin(std::acos((OFFSET + WIDTH - uc)/RADIUS)) + OFFSET + 1.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * RADIUS; + float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -634,8 +636,8 @@ int main() // Choice: theta_u_top_min = -0.9 * PI / 2 // (1) => vc = vmin + r sin(theta_u_top_min) // (2) vc - r sin(theta_v_min) <= vmin => asin((vc - vmin)/r) <= theta_v_min - float theta_u_top_min = -0.9f * M_PI_2; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = -0.9f * pi_2; + float theta_u_top_max = pi - theta_u_top_min; theta_u_top_max = ensureIsBetweenMinPiAndPi(theta_u_top_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::asin((vc - OFFSET)/RADIUS) + 1.f; @@ -672,10 +674,10 @@ int main() // (2) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (2) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * 
M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = pi - theta_u_top_min; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_u_top_min) - 1.f; - uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI - theta_u_top_min) - 1.f); + uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos(pi - theta_u_top_min) - 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET + WIDTH - uc)/RADIUS); theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); @@ -687,7 +689,7 @@ int main() } vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -715,13 +717,13 @@ int main() // (3) => vc = vmin + height + r sin(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = M_PI_2; - float theta_u_bot_max = -M_PI / 3.f; + float theta_v_min = pi_2; + float theta_u_bot_max = -pi / 3.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_max);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; + float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -751,7 +753,7 @@ int main() // (4) => vc <= vmin + height + r sin(theta_v_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = M_PI_4 / 2.f; + float theta_v_min = pi_4 / 2.f; float theta_v_max = -theta_v_min; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(theta_v_max) - 1.f); @@ -787,8 +789,8 @@ int main() // (1) => uc >= umin - r cos(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 5.f * M_PI_4 / 2.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_u_bot_min = 5.f * pi_4 / 2.f; + float theta_u_bot_max = pi - theta_u_bot_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = std::max(OFFSET - RADIUS * std::cos(theta_u_bot_min) + 1.f, OFFSET - RADIUS * std::cos(theta_u_bot_max) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); @@ -823,10 +825,10 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -5.f * M_PI / 8.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_u_bot_min = -5.f * pi / 8.f; + float theta_u_bot_max = pi - theta_u_bot_min; theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_min = 7.f * M_PI / 8.f; + float theta_v_min = 7.f * pi / 8.f; float theta_v_max = -theta_v_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = 
OFFSET - RADIUS * std::cos(theta_v_min); @@ -860,13 +862,13 @@ int main() // (1) => vc = vmin + height + r sin(theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -2.f * M_PI / 3.f; - float theta_v_min = M_PI_2; + float theta_u_bot_min = -2.f * pi / 3.f; + float theta_v_min = pi_2; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; + float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -895,12 +897,12 @@ int main() // (2) & (4) => vc <= vmin + height + r sin(theta_v_min) & vc <= vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = 5.f * M_PI / 6.f; + float theta_v_min = 5.f * pi / 6.f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(-theta_v_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f + float theoreticalValue = (pi / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -929,12 +931,12 @@ int main() // (1) & (3) => uc < umin + width - r cos(theta_u_bot_min) & uc <= umin + width - r cos(PI - theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 4.f * M_PI / 6.f; + float theta_u_bot_min = 4.f * pi / 6.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); - float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI -theta_u_bot_min) - 1.f); + float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)pi -theta_u_bot_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f + float theoreticalValue = (pi / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -964,16 +966,16 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -7.f * M_PI / 8.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_u_bot_min = -7.f * pi / 8.f; + float theta_u_bot_max = pi - theta_u_bot_min; theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_max = -3.f * M_PI / 8.f; + float theta_v_max = -3.f * pi / 8.f; float theta_v_min = -theta_v_max; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = 
circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; + float theoreticalValue = (2.f * pi - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1002,12 +1004,12 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = 3.f * M_PI / 8.f; - float theta_v_min = 7.f * M_PI / 8.f; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = 3.f * pi / 8.f; + float theta_v_min = 7.f * pi / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * M_PI / 8.f; - float theta_u_bottom_max = -3.f * M_PI / 8.f; + float theta_u_bottom_min = -5.f * pi / 8.f; + float theta_u_bottom_max = -3.f * pi / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET - radius * std::cos(theta_v_min); @@ -1042,9 +1044,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = M_PI / 6.f; - float theta_u_top_min = M_PI - theta_u_top_max; - float theta_v_min = M_PI / 3.f; + float theta_u_top_max = pi / 6.f; + float theta_u_top_min = pi - theta_u_top_max; + float theta_v_min = pi / 3.f; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1080,9 +1082,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * M_PI / 6.f; - float theta_u_top_max = M_PI - theta_u_top_min; - float theta_v_min = M_PI; + float theta_u_top_min = 4.f * pi / 6.f; + float theta_u_top_max = pi - theta_u_top_min; + float theta_v_min = pi; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT / (2.f * std::sin(theta_u_top_min)); // vmin + h - vmin = (vc - r sin(-theta_u_top_min)) - (vc - r sin(theta_top_min)) @@ -1090,7 +1092,7 @@ int main() float uc = OFFSET - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1119,8 +1121,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = M_PI_4; + float theta_u_top_min = pi_2; + float theta_v_min = pi_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1156,8 +1158,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - 
theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = 3.f * M_PI_4; + float theta_u_top_min = pi_2; + float theta_v_min = 3.f * pi_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1194,8 +1196,8 @@ int main() // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max float theta_u_top_max = 0.f; - float theta_u_bot_max = -M_PI / 3.f; - float theta_v_max = -M_PI / 6.f; + float theta_u_bot_max = -pi / 3.f; + float theta_v_max = -pi / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1230,9 +1232,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = M_PI / 3.f; + float theta_u_top_max = pi / 3.f; float theta_u_bot_max = 0.f; - float theta_v_min = M_PI / 6.f; + float theta_v_min = pi / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1267,18 +1269,18 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = 3.f * M_PI / 8.f; - float theta_v_min = 1.f * M_PI / 8.f; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = 3.f * pi / 8.f; + float theta_v_min = 1.f * pi / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * M_PI / 8.f; - float theta_u_bottom_max = -3.f * M_PI / 8.f; + float theta_u_bottom_min = -5.f * pi / 8.f; + float theta_u_bottom_max = -3.f * pi / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1307,15 +1309,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 6.f; - float theta_v_min = 2.f * M_PI / 3.f; + float theta_u_top_min = 5.f * pi / 6.f; + float theta_v_min = 2.f * pi / 3.f; float theta_u_bottom_min = -theta_u_top_min; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_u_bottom_min)) * radius; + float theoreticalValue = (2.f * pi - (theta_u_top_min - theta_u_bottom_min)) * radius; bool isValueOK = compareAngles(arcLengthCircle, 
theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1344,8 +1346,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * M_PI / 6.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 4.f * pi / 6.f; + float theta_u_top_max = pi - theta_u_top_min; float theta_v_min = 0; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; @@ -1354,7 +1356,7 @@ int main() float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1383,15 +1385,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = 3.f * M_PI_4; + float theta_u_top_min = pi_2; + float theta_v_min = 3.f * pi_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1420,15 +1422,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = M_PI_4; + float theta_u_top_min = pi_2; + float theta_v_min = pi_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1457,15 +1459,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI; - float theta_u_bot_min = -2.f * M_PI / 3.f; - float theta_v_max = -5.f * M_PI / 6.f; + float theta_u_top_min = pi; + float theta_u_bot_min = -2.f * pi / 3.f; + float theta_v_max = -5.f * pi / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = 
circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * pi - (theta_u_top_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1494,9 +1496,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_u_bot_min = M_PI; - float theta_v_min = 5.f * M_PI / 6.f; + float theta_u_top_min = 2.f * pi / 3.f; + float theta_u_bot_min = pi; + float theta_v_min = 5.f * pi / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1533,12 +1535,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * pi / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 8.f; + float theta_v_right_min = pi / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = pi - theta_u_top_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1574,12 +1576,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * M_PI / 3.f; + float theta_u_top_min = -2.f * pi / 3.f; float uc = OFFSET + WIDTH_SWITCHED/2.f; float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); - float theoreticalValue = (M_PI/3.f) * RADIUS; + float theoreticalValue = (pi/3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1610,9 +1612,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_max = -5.f * M_PI / 8.f; - float theta_v_right_max = -3.f *M_PI / 8.f; - float theta_u_top_min = -7.f * M_PI / 8.f; + float theta_v_left_max = -5.f * pi / 8.f; + float theta_v_right_max = -3.f *pi / 8.f; + float theta_u_top_min = -7.f * pi / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET - radius * std::cos(theta_v_left_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1649,9 +1651,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_max = -M_PI / 3.f; + float theta_u_top_max = -pi / 3.f; float theta_v_right_max = 0.f; - float theta_v_left_max = -M_PI_2; + float theta_v_left_max = -pi_2; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET; float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1688,9 +1690,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) 
theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * M_PI / 3.f; - float theta_v_left_max = M_PI; - float theta_v_right_max = -M_PI_2; + float theta_u_top_min = -2.f * pi / 3.f; + float theta_v_left_max = pi; + float theta_v_right_max = -pi_2; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1727,12 +1729,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * pi / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 8.f; + float theta_v_right_min = pi / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_bot_min = -5.f * M_PI / 8.f; - float theta_u_bot_max = -M_PI - theta_u_bot_min; + float theta_u_bot_min = -5.f * pi / 8.f; + float theta_u_bot_max = -pi - theta_u_bot_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1768,10 +1770,10 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * M_PI / 3.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; - float theta_v_left_min = 5.f * M_PI / 6.f; - float theta_v_right_min = M_PI / 6.f; + float theta_u_bot_min = 2.f * pi / 3.f; + float theta_u_bot_max = pi - theta_u_bot_min; + float theta_v_left_min = 5.f * pi / 6.f; + float theta_v_right_min = pi / 6.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1807,9 +1809,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 7.f * M_PI / 8.f; - float theta_v_left_min = 5.f * M_PI / 8.f; - float theta_v_right_min = 3.f * M_PI / 8.f; + float theta_u_bot_min = 7.f * pi / 8.f; + float theta_v_left_min = 5.f * pi / 8.f; + float theta_v_right_min = 3.f * pi / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1845,8 +1847,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_max = M_PI / 3.f; - float theta_v_left_min = M_PI_2; + float theta_u_bot_max = pi / 3.f; + float theta_v_left_min = pi_2; float theta_v_right_min = 0.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET; @@ -1883,9 +1885,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * M_PI / 3.f; - float theta_v_right_min = M_PI_2; - float theta_v_left_min = M_PI; + float theta_u_bot_min = 2.f * 
pi / 3.f; + float theta_v_right_min = pi_2; + float theta_v_left_min = pi; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1917,17 +1919,17 @@ int main() // (6): u_cross_bot_max = uc + r cos(theta_u_bottom_max) <= umin_roi + width ; vmin_roi + height = vc - r sin(theta_u_bottom_max) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_u_top_max = M_PI / 3.f; - float theta_u_bottom_min = -2.f * M_PI / 3.f; - float theta_u_bottom_max = -M_PI / 3.f; + float theta_u_top_min = 2.f * pi / 3.f; + float theta_u_top_max = pi / 3.f; + float theta_u_bottom_min = -2.f * pi / 3.f; + float theta_u_bottom_max = -pi / 3.f; float uc = OFFSET + WIDTH / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1953,9 +1955,9 @@ int main() // (6): u_min + width = uc + r cos(theta_v_right_max); v_cross_right_max = vc - r sin(theta_v_right_max) // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 5.f * M_PI / 6.f; + float theta_v_left_min = 5.f * pi / 6.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 6.f; + float theta_v_right_min = pi / 6.f; float theta_v_right_max = -theta_v_right_min; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + WIDTH / 2.f; @@ -1985,14 +1987,14 @@ int main() // Choosing theta_v_left_min = 7 PI / 8 and circle at the center of the RoI // umin = uc + r cos(theta_v_left_min) => r = (umin - uc) / cos(theta_v_left_min) vpRect squareRoI(OFFSET, OFFSET, HEIGHT, HEIGHT); - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * pi / 8.f; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = (OFFSET - uc) / std::cos(theta_v_left_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(squareRoI); - float theoreticalValue = M_PI * radius; + float theoreticalValue = pi * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { From c5a0be4a34c3a667e25069e33851c355e97cec0d Mon Sep 17 00:00:00 2001 From: Samuel Felton Date: Tue, 24 Oct 2023 17:06:05 +0200 Subject: [PATCH 14/43] initial rework of opencv includes, some issues with nonfree module ? 
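The hunks below add explicit, guarded includes of the OpenCV modules each test or tutorial actually uses. A minimal sketch of the pattern, assuming the imgcodecs and imgproc headers are the ones required (the exact header names are not visible in this excerpt):

    #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) && defined(HAVE_OPENCV_IMGPROC)
    // Assumed per-module headers matching the guard macros above.
    #include <opencv2/imgcodecs.hpp> // cv::imread / cv::imwrite
    #include <opencv2/imgproc.hpp>   // cv::cvtColor, cv::resize, ...
    #endif

Guarding on the per-module HAVE_OPENCV_* flags keeps the code building against OpenCV installations that ship only a subset of modules (e.g. when the nonfree module mentioned in the subject is unavailable).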
--- cmake/templates/vpConfig.h.in | 2 +- .../perfColorConversion.cpp | 27 +++++++------ .../image-with-dataset/perfGaussianFilter.cpp | 15 ++++--- .../image-with-dataset/perfImageResize.cpp | 27 +++++++------ .../image-with-dataset/testConversion.cpp | 39 +++++++++++-------- .../keypoint-with-dataset/testKeyPoint-5.cpp | 26 +++++++++---- tutorial/image/tutorial-image-converter.cpp | 8 +++- 7 files changed, 91 insertions(+), 53 deletions(-) diff --git a/cmake/templates/vpConfig.h.in b/cmake/templates/vpConfig.h.in index 8190f0c319..26c9ef50d4 100644 --- a/cmake/templates/vpConfig.h.in +++ b/cmake/templates/vpConfig.h.in @@ -167,7 +167,7 @@ // OpenCV version in hexadecimal (for example 2.1.0 gives 0x020100). #ifdef VISP_HAVE_OPENCV # define VISP_HAVE_OPENCV_VERSION ${VISP_HAVE_OPENCV_VERSION} -# include +# include #endif // For compat with previous releases diff --git a/modules/core/test/image-with-dataset/perfColorConversion.cpp b/modules/core/test/image-with-dataset/perfColorConversion.cpp index f4333b6e87..b8b26970cc 100644 --- a/modules/core/test/image-with-dataset/perfColorConversion.cpp +++ b/modules/core/test/image-with-dataset/perfColorConversion.cpp @@ -45,6 +45,11 @@ #include #include +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) && defined(HAVE_OPENCV_IMGPROC) +#include +#include +#endif + static std::string ipath = vpIoTools::getViSPImagesDataPath(); static std::string imagePathColor = vpIoTools::createFilePath(ipath, "Klimt/Klimt.ppm"); static std::string imagePathGray = vpIoTools::createFilePath(ipath, "Klimt/Klimt.pgm"); @@ -312,14 +317,14 @@ int main(int argc, char *argv[]) // Build a new parser on top of Catch's using namespace Catch::clara; auto cli = session.cli() // Get Catch's composite command line parser - | Opt(runBenchmark) // bind variable to a new option, with a hint string - ["--benchmark"] // the option names it will respond to - ("run benchmark?") // description string for the help output - | Opt(imagePathColor, "imagePathColor")["--imagePathColor"]("Path to color image") | - Opt(imagePathGray, "imagePathColor")["--imagePathGray"]("Path to gray image") | - Opt(nThreads, "nThreads")["--nThreads"]("Number of threads"); - - // Now pass the new composite back to Catch so it uses that + | Opt(runBenchmark) // bind variable to a new option, with a hint string + ["--benchmark"] // the option names it will respond to + ("run benchmark?") // description string for the help output + | Opt(imagePathColor, "imagePathColor")["--imagePathColor"]("Path to color image") | + Opt(imagePathGray, "imagePathColor")["--imagePathGray"]("Path to gray image") | + Opt(nThreads, "nThreads")["--nThreads"]("Number of threads"); + +// Now pass the new composite back to Catch so it uses that session.cli(cli); // Let Catch (using Clara) parse the command line @@ -329,14 +334,14 @@ int main(int argc, char *argv[]) vpImage I_color; vpImageIo::read(I_color, imagePathColor); std::cout << "imagePathColor:\n\t" << imagePathColor << "\n\t" << I_color.getWidth() << "x" << I_color.getHeight() - << std::endl; + << std::endl; vpImage I_gray; vpImageIo::read(I_gray, imagePathGray); std::cout << "imagePathGray:\n\t" << imagePathGray << "\n\t" << I_gray.getWidth() << "x" << I_gray.getHeight() - << std::endl; + << std::endl; std::cout << "nThreads: " << nThreads << " / available threads: " << std::thread::hardware_concurrency() - << std::endl; + << std::endl; int numFailed = session.run(); diff --git a/modules/core/test/image-with-dataset/perfGaussianFilter.cpp 
b/modules/core/test/image-with-dataset/perfGaussianFilter.cpp index 11059e3bda..570ca6340d 100644 --- a/modules/core/test/image-with-dataset/perfGaussianFilter.cpp +++ b/modules/core/test/image-with-dataset/perfGaussianFilter.cpp @@ -45,6 +45,11 @@ #include #include +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) && defined(HAVE_OPENCV_IMGPROC) +#include +#include +#endif + static const std::string ipath = vpIoTools::getViSPImagesDataPath(); static std::string imagePath = vpIoTools::createFilePath(ipath, "faces/1280px-Solvay_conference_1927.png"); @@ -166,12 +171,12 @@ int main(int argc, char *argv[]) // Build a new parser on top of Catch's using namespace Catch::clara; auto cli = session.cli() // Get Catch's composite command line parser - | Opt(runBenchmark) // bind variable to a new option, with a hint string - ["--benchmark"] // the option names it will respond to - ("run benchmark?") // description string for the help output - ; + | Opt(runBenchmark) // bind variable to a new option, with a hint string + ["--benchmark"] // the option names it will respond to + ("run benchmark?") // description string for the help output + ; - // Now pass the new composite back to Catch so it uses that +// Now pass the new composite back to Catch so it uses that session.cli(cli); // Let Catch (using Clara) parse the command line diff --git a/modules/core/test/image-with-dataset/perfImageResize.cpp b/modules/core/test/image-with-dataset/perfImageResize.cpp index 28ccd8d048..06e60928bb 100644 --- a/modules/core/test/image-with-dataset/perfImageResize.cpp +++ b/modules/core/test/image-with-dataset/perfImageResize.cpp @@ -46,6 +46,11 @@ #include #include +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) && defined(HAVE_OPENCV_IMGPROC) +#include +#include +#endif + static const std::string ipath = vpIoTools::getViSPImagesDataPath(); static std::string imagePathColor = vpIoTools::createFilePath(ipath, "Klimt/Klimt.ppm"); static std::string imagePathGray = vpIoTools::createFilePath(ipath, "Klimt/Klimt.pgm"); @@ -368,15 +373,15 @@ int main(int argc, char *argv[]) // Build a new parser on top of Catch's using namespace Catch::clara; auto cli = session.cli() // Get Catch's composite command line parser - | Opt(runBenchmark) // bind variable to a new option, with a hint string - ["--benchmark"] // the option names it will respond to - ("run benchmark?") // description string for the help output - | Opt(imagePathColor, "imagePathColor")["--imagePathColor"]("Path to color image") | - Opt(imagePathGray, "imagePathColor")["--imagePathGray"] | - Opt(g_resize_width, "g_resize_width")["--width"]("Resize width") | - Opt(g_resize_height, "g_resize_height")["--height"]("Resize height"); - - // Now pass the new composite back to Catch so it uses that + | Opt(runBenchmark) // bind variable to a new option, with a hint string + ["--benchmark"] // the option names it will respond to + ("run benchmark?") // description string for the help output + | Opt(imagePathColor, "imagePathColor")["--imagePathColor"]("Path to color image") | + Opt(imagePathGray, "imagePathColor")["--imagePathGray"] | + Opt(g_resize_width, "g_resize_width")["--width"]("Resize width") | + Opt(g_resize_height, "g_resize_height")["--height"]("Resize height"); + +// Now pass the new composite back to Catch so it uses that session.cli(cli); // Let Catch (using Clara) parse the command line @@ -386,12 +391,12 @@ int main(int argc, char *argv[]) vpImage I_color; vpImageIo::read(I_color, imagePathColor); std::cout << 
"imagePathColor:\n\t" << imagePathColor << "\n\t" << I_color.getWidth() << "x" << I_color.getHeight() - << std::endl; + << std::endl; vpImage I_gray; vpImageIo::read(I_gray, imagePathGray); std::cout << "imagePathGray:\n\t" << imagePathGray << "\n\t" << I_gray.getWidth() << "x" << I_gray.getHeight() - << std::endl; + << std::endl; std::cout << "Resize to: " << g_resize_width << "x" << g_resize_height << std::endl; int numFailed = session.run(); diff --git a/modules/core/test/image-with-dataset/testConversion.cpp b/modules/core/test/image-with-dataset/testConversion.cpp index a8fee2ac6b..a61f87d6f8 100644 --- a/modules/core/test/image-with-dataset/testConversion.cpp +++ b/modules/core/test/image-with-dataset/testConversion.cpp @@ -45,6 +45,11 @@ #include #include +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) && defined(HAVE_OPENCV_IMGPROC) +#include +#include +#endif + /*! \example testConversion.cpp @@ -74,7 +79,7 @@ SYNOPSIS\n\ %s [-i ] [-o ] [-n ]\n\ [-h]\n\ ", - name); +name); fprintf(stdout, "\n\ OPTIONS: Default\n\ @@ -208,7 +213,8 @@ int main(int argc, const char **argv) try { // Create the dirname vpIoTools::makeDirectory(opath); - } catch (...) { + } + catch (...) { usage(argv[0], NULL, ipath, opt_opath, username, nbIterations); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; @@ -223,8 +229,8 @@ int main(int argc, const char **argv) if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; std::cout << " Since -i " - << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl - << " we skip the environment variable." << std::endl; + << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl + << " we skip the environment variable." << std::endl; } } @@ -233,9 +239,9 @@ int main(int argc, const char **argv) usage(argv[0], NULL, ipath, opt_opath, username, nbIterations); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl - << " environment variable to specify the location of the " << std::endl - << " image path where test images are located." << std::endl - << std::endl; + << " environment variable to specify the location of the " << std::endl + << " image path where test images are located." 
<< std::endl + << std::endl; return EXIT_FAILURE; } @@ -278,7 +284,7 @@ int main(int argc, const char **argv) // Convert a YUV pixel value to a RGB value vpImageConvert::YUVToRGB(y, u, v, r, g, b); std::cout << " y(" << (int)y << ") u(" << (int)u << ") v(" << (int)v << ") = r(" << (int)r << ") g(" << (int)g - << ") b(" << (int)b << ")" << std::endl; + << ") b(" << (int)b << ")" << std::endl; vpChrono chrono; @@ -475,8 +481,8 @@ int main(int argc, const char **argv) // HSV color //////////////////////////////////// std::cout << "** Convert a vpImage in RGB color space to a " - "vpImage in HSV color" - << std::endl; + "vpImage in HSV color" + << std::endl; unsigned int size = Ic.getSize(); unsigned int w = Ic.getWidth(), h = Ic.getHeight(); std::vector hue(size); @@ -512,11 +518,11 @@ int main(int argc, const char **argv) for (unsigned int j = 0; j < Ic.getWidth(); j++) { if (Ic[i][j].R != I_HSV2RGBa[i][j].R || Ic[i][j].G != I_HSV2RGBa[i][j].G || Ic[i][j].B != I_HSV2RGBa[i][j].B) { std::cerr << "Ic[i][j].R=" << static_cast(Ic[i][j].R) - << " ; I_HSV2RGBa[i][j].R=" << static_cast(I_HSV2RGBa[i][j].R) << std::endl; + << " ; I_HSV2RGBa[i][j].R=" << static_cast(I_HSV2RGBa[i][j].R) << std::endl; std::cerr << "Ic[i][j].G=" << static_cast(Ic[i][j].G) - << " ; I_HSV2RGBa[i][j].G=" << static_cast(I_HSV2RGBa[i][j].G) << std::endl; + << " ; I_HSV2RGBa[i][j].G=" << static_cast(I_HSV2RGBa[i][j].G) << std::endl; std::cerr << "Ic[i][j].B=" << static_cast(Ic[i][j].B) - << " ; I_HSV2RGBa[i][j].B=" << static_cast(I_HSV2RGBa[i][j].B) << std::endl; + << " ; I_HSV2RGBa[i][j].B=" << static_cast(I_HSV2RGBa[i][j].B) << std::endl; throw vpException(vpException::fatalError, "Problem with conversion between RGB <==> HSV"); } } @@ -586,9 +592,9 @@ int main(int argc, const char **argv) cv::Mat colorMat_crop = colorMat(rect_roi); cv::Mat colorMat_crop_continuous = colorMat(rect_roi).clone(); std::cout << " colorMat_crop: " << colorMat_crop.cols << "x" << colorMat_crop.rows << " is continuous? " - << colorMat_crop.isContinuous() << std::endl; + << colorMat_crop.isContinuous() << std::endl; std::cout << " colorMat_crop_continuous: " << colorMat_crop_continuous.cols << "x" << colorMat_crop_continuous.rows - << " is continuous? " << colorMat_crop_continuous.isContinuous() << std::endl; + << " is continuous? " << colorMat_crop_continuous.isContinuous() << std::endl; vpImage I_color_crop((unsigned int)(rect_roi.height - rect_roi.y), (unsigned int)(rect_roi.width - rect_roi.x)); @@ -636,7 +642,8 @@ int main(int argc, const char **argv) } return EXIT_SUCCESS; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e.getMessage() << std::endl; return EXIT_FAILURE; } diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp index ebe751d8c2..fbb3ddb5cf 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp @@ -48,6 +48,15 @@ #include #include +#include +#include +#include +#include +#include + + + + // List of allowed command line options #define GETOPTARGS "cdh" @@ -165,9 +174,9 @@ void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_dis // features that are scale invariant to detect potential problem in ViSP. 
std::cout << "INFORMATION: " << std::endl; std::cout << "Here, we want to test feature detection on a pyramid of images even for features " - "that are scale invariant to detect potential problem in ViSP." - << std::endl - << std::endl; + "that are scale invariant to detect potential problem in ViSP." + << std::endl + << std::endl; vpKeyPoint keyPoints; // Will test the different types of keypoints detection to see if there is @@ -274,11 +283,11 @@ void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_dis keyPoints.detect(I, kpts); std::cout << "Nb keypoints detected: " << kpts.size() << " for " - << mapOfDetectorNames[(vpKeyPoint::vpFeatureDetectorType)i] << " method." << std::endl; + << mapOfDetectorNames[(vpKeyPoint::vpFeatureDetectorType)i] << " method." << std::endl; if (kpts.empty()) { std::stringstream ss; ss << "No keypoints detected with " << mapOfDetectorNames[(vpKeyPoint::vpFeatureDetectorType)i] - << " method and image: " << filename << "." << std::endl; + << " method and image: " << filename << "." << std::endl; throw(vpException(vpException::fatalError, ss.str())); } @@ -325,8 +334,8 @@ int main(int argc, const char **argv) if (env_ipath.empty()) { std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment " - "variable value." - << std::endl; + "variable value." + << std::endl; return EXIT_FAILURE; } @@ -344,7 +353,8 @@ int main(int argc, const char **argv) run_test(env_ipath, opt_click_allowed, opt_display, Iinput, I); } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << e.what() << std::endl; return EXIT_FAILURE; } diff --git a/tutorial/image/tutorial-image-converter.cpp b/tutorial/image/tutorial-image-converter.cpp index 4bfcdc68e2..15c96d7037 100644 --- a/tutorial/image/tutorial-image-converter.cpp +++ b/tutorial/image/tutorial-image-converter.cpp @@ -2,6 +2,11 @@ #include #include +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) && defined(HAVE_OPENCV_IMGPROC) +#include +#include +#endif + int main() { #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_IMGCODECS) @@ -23,7 +28,8 @@ int main() #ifdef VISP_HAVE_PNG vpImageIo::write(I, "monkey.png"); // Gray #endif - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } #endif From 44ec8e987c6b38af54de0fa8f461cf2d25133ffd Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Tue, 24 Oct 2023 18:47:29 +0200 Subject: [PATCH 15/43] Improve code quality by fixing all the warnings when -Wsuggest-destructor-override is used - Changes done to add override keyword when necessary, removed useless destructors - Doc improvement in the same commit --- 3rdparty/pthreads4w/ChangeLog | 210 ++-- .../tutorial-tracking-mb-generic-json.dox | 2 +- .../visual-servo/tutorial-pixhawk-vs.dox | 2 +- example/math/BSpline.cpp | 2 +- .../servoAfma6FourPoints2DArtVelocity.cpp | 4 +- ...ervoAfma6FourPoints2DCamVelocityLs_cur.cpp | 2 +- ...ervoAfma6FourPoints2DCamVelocityLs_des.cpp | 2 +- ...oViper850FourPoints2DArtVelocityLs_cur.cpp | 2 +- ...oViper850FourPoints2DArtVelocityLs_des.cpp | 4 +- ...oViper850FourPoints2DCamVelocityLs_cur.cpp | 2 +- .../servoViper850FourPointsKinect.cpp | 2 +- modules/ar/CMakeLists.txt | 2 +- .../include/visp3/core/vpCameraParameters.h | 130 +-- modules/core/include/visp3/core/vpCircle.h | 93 +- modules/core/include/visp3/core/vpClient.h | 276 +++-- modules/core/include/visp3/core/vpColVector.h | 4 - 
modules/core/include/visp3/core/vpCylinder.h | 185 ++-- modules/core/include/visp3/core/vpDisplay.h | 940 +++++++++--------- modules/core/include/visp3/core/vpException.h | 12 +- .../include/visp3/core/vpForceTwistMatrix.h | 7 +- .../include/visp3/core/vpForwardProjection.h | 223 ++--- .../core/include/visp3/core/vpFrameGrabber.h | 112 +-- .../include/visp3/core/vpHistogramValey.h | 9 +- .../include/visp3/core/vpHomogeneousMatrix.h | 4 - modules/core/include/visp3/core/vpLine.h | 197 ++-- .../core/vpLinearKalmanFilterInstantiation.h | 15 +- modules/core/include/visp3/core/vpMath.h | 92 +- modules/core/include/visp3/core/vpMatrix.h | 3 - .../include/visp3/core/vpMatrixException.h | 2 +- .../visp3/core/vpMeterPixelConversion.h | 36 +- .../core/include/visp3/core/vpMomentAlpha.h | 327 +++--- .../core/include/visp3/core/vpMomentArea.h | 30 +- .../visp3/core/vpMomentAreaNormalized.h | 218 ++-- .../core/include/visp3/core/vpMomentBasic.h | 1 - .../include/visp3/core/vpMomentCInvariant.h | 21 +- .../include/visp3/core/vpMomentCentered.h | 3 +- .../core/include/visp3/core/vpMomentCommon.h | 22 +- .../include/visp3/core/vpMomentDatabase.h | 23 +- .../visp3/core/vpMomentGravityCenter.h | 11 +- .../core/vpMomentGravityCenterNormalized.h | 1 - .../core/include/visp3/core/vpMomentObject.h | 2 +- .../visp3/core/vpPixelMeterConversion.h | 42 +- modules/core/include/visp3/core/vpPoint.h | 25 +- .../core/include/visp3/core/vpPoseVector.h | 4 - .../include/visp3/core/vpQuaternionVector.h | 27 +- modules/core/include/visp3/core/vpRequest.h | 2 +- .../include/visp3/core/vpRotationMatrix.h | 5 - .../include/visp3/core/vpRotationVector.h | 11 +- modules/core/include/visp3/core/vpRowVector.h | 19 +- .../core/include/visp3/core/vpRxyzVector.h | 3 - .../core/include/visp3/core/vpRzyxVector.h | 4 - .../core/include/visp3/core/vpRzyzVector.h | 3 - modules/core/include/visp3/core/vpServer.h | 10 +- modules/core/include/visp3/core/vpSphere.h | 85 +- .../core/include/visp3/core/vpSubColVector.h | 41 +- modules/core/include/visp3/core/vpSubMatrix.h | 4 +- .../core/include/visp3/core/vpSubRowVector.h | 42 +- .../core/include/visp3/core/vpThetaUVector.h | 2 - modules/core/include/visp3/core/vpTracker.h | 45 +- .../include/visp3/core/vpTranslationVector.h | 4 +- modules/core/include/visp3/core/vpUDPClient.h | 220 ++-- .../visp3/core/vpVelocityTwistMatrix.h | 9 +- .../core/src/camera/vpCameraParameters.cpp | 673 ++++++------- .../src/camera/vpMeterPixelConversion.cpp | 10 +- .../src/camera/vpPixelMeterConversion.cpp | 16 +- modules/core/src/image/vpRGBa.cpp | 2 +- modules/core/src/image/vpRGBf.cpp | 2 +- .../vpLinearKalmanFilterInstantiation.cpp | 12 +- modules/core/src/math/matrix/vpColVector.cpp | 2 +- .../core/src/math/matrix/vpSubColVector.cpp | 108 +- .../core/src/math/matrix/vpSubRowVector.cpp | 100 +- modules/core/src/tools/network/vpClient.cpp | 39 +- .../forward-projection/vpCylinder.cpp | 5 - .../tracking/forward-projection/vpSphere.cpp | 11 +- .../src/tracking/moments/vpMomentCommon.cpp | 118 +-- modules/core/test/tools/xml/testXmlParser.cpp | 139 ++- .../visp3/detection/vpDetectorAprilTag.h | 352 +++---- .../include/visp3/detection/vpDetectorBase.h | 57 +- .../visp3/detection/vpDetectorDNNOpenCV.h | 48 +- .../detection/vpDetectorDataMatrixCode.h | 118 ++- .../include/visp3/detection/vpDetectorFace.h | 80 +- .../visp3/detection/vpDetectorQRCode.h | 123 ++- modules/gui/include/visp3/gui/vpD3DRenderer.h | 27 +- modules/gui/include/visp3/gui/vpDisplayD3D.h | 122 ++- modules/gui/include/visp3/gui/vpDisplayGDI.h | 158 
++- modules/gui/include/visp3/gui/vpDisplayGTK.h | 81 +- .../gui/include/visp3/gui/vpDisplayOpenCV.h | 246 +++-- .../gui/include/visp3/gui/vpDisplayWin32.h | 111 +-- modules/gui/include/visp3/gui/vpDisplayX.h | 80 +- modules/gui/include/visp3/gui/vpGDIRenderer.h | 2 +- modules/gui/include/visp3/gui/vpPlot.h | 141 ++- modules/gui/src/display/vpDisplayGTK.cpp | 105 +- .../gui/src/display/windows/vpDisplayD3D.cpp | 9 +- .../gui/src/display/windows/vpDisplayGDI.cpp | 9 +- .../imgproc/src/vpCircleHoughTransform.cpp | 2 +- modules/io/include/visp3/io/vpDiskGrabber.h | 126 +-- modules/io/include/visp3/io/vpVideoReader.h | 343 +++---- modules/io/src/video/vpDiskGrabber.cpp | 2 - modules/io/src/video/vpVideoReader.cpp | 126 ++- modules/io/src/video/vpVideoWriter.cpp | 2 +- modules/robot/include/visp3/robot/vpAfma4.h | 4 +- modules/robot/include/visp3/robot/vpAfma6.h | 4 +- modules/robot/include/visp3/robot/vpPioneer.h | 150 ++- .../robot/include/visp3/robot/vpPioneerPan.h | 187 ++-- .../robot/include/visp3/robot/vpQbSoftHand.h | 93 +- .../robot/include/visp3/robot/vpRobotAfma4.h | 10 +- .../robot/include/visp3/robot/vpRobotAfma6.h | 14 +- .../include/visp3/robot/vpRobotBiclops.h | 16 +- .../robot/include/visp3/robot/vpRobotCamera.h | 15 +- .../include/visp3/robot/vpRobotFlirPtu.h | 12 +- .../robot/include/visp3/robot/vpRobotFranka.h | 347 ++++--- .../robot/include/visp3/robot/vpRobotKinova.h | 14 +- .../include/visp3/robot/vpRobotPioneer.h | 94 +- .../robot/include/visp3/robot/vpRobotPtu46.h | 10 +- .../include/visp3/robot/vpRobotSimulator.h | 54 +- .../include/visp3/robot/vpRobotTemplate.h | 46 +- .../visp3/robot/vpRobotUniversalRobots.h | 12 +- .../include/visp3/robot/vpRobotViper650.h | 15 +- .../include/visp3/robot/vpRobotViper850.h | 17 +- .../visp3/robot/vpRobotWireFrameSimulator.h | 193 ++-- .../include/visp3/robot/vpSimulatorAfma6.h | 286 +++--- .../include/visp3/robot/vpSimulatorCamera.h | 133 ++- .../include/visp3/robot/vpSimulatorPioneer.h | 122 ++- .../visp3/robot/vpSimulatorPioneerPan.h | 125 ++- .../include/visp3/robot/vpSimulatorViper850.h | 336 ++++--- .../robot/include/visp3/robot/vpUnicycle.h | 96 +- modules/robot/include/visp3/robot/vpViper.h | 4 +- .../robot/include/visp3/robot/vpViper650.h | 123 ++- .../robot/include/visp3/robot/vpViper850.h | 124 ++- .../haptic-device/qbdevice/vpQbSoftHand.cpp | 23 +- .../robot/src/real-robot/viper/vpViper.cpp | 2 +- .../robot/src/real-robot/viper/vpViper650.cpp | 2 +- .../robot/src/real-robot/viper/vpViper850.cpp | 2 +- .../src/robot-simulator/vpRobotCamera.cpp | 8 +- .../vpRobotWireFrameSimulator.cpp | 47 +- .../src/robot-simulator/vpSimulatorCamera.cpp | 6 - .../robot-simulator/vpSimulatorPioneer.cpp | 10 +- .../robot-simulator/vpSimulatorPioneerPan.cpp | 10 +- .../sensor/vpForceTorqueAtiNetFTSensor.h | 111 +-- .../visp3/sensor/vpForceTorqueAtiSensor.h | 89 +- .../sensor/include/visp3/sensor/vpSickLDMRS.h | 108 +- .../tracker/blob/include/visp3/blob/vpDot.h | 282 +++--- .../tracker/blob/include/visp3/blob/vpDot2.h | 392 ++++---- modules/tracker/blob/src/dots/vpDot2.cpp | 166 ++-- .../include/visp3/mbt/vpMbDepthDenseTracker.h | 46 +- .../visp3/mbt/vpMbDepthNormalTracker.h | 46 +- .../include/visp3/mbt/vpMbEdgeKltTracker.h | 67 +- .../mbt/include/visp3/mbt/vpMbEdgeTracker.h | 478 +++++---- .../include/visp3/mbt/vpMbGenericTracker.h | 155 ++- .../mbt/include/visp3/mbt/vpMbHiddenFaces.h | 46 +- .../mbt/include/visp3/mbt/vpMbKltTracker.h | 52 +- .../mbt/include/visp3/mbt/vpMbTracker.h | 43 +- .../mbt/include/visp3/mbt/vpMbtMeEllipse.h | 27 +- 
.../mbt/include/visp3/mbt/vpMbtMeLine.h | 56 +- .../mbt/include/visp3/mbt/vpMbtPolygon.h | 102 +- .../tracker/mbt/src/edge/vpMbtMeEllipse.cpp | 4 - .../tracker/mbt/src/vpMbGenericTracker.cpp | 2 - modules/tracker/mbt/src/vpMbTracker.cpp | 4 +- modules/tracker/mbt/src/vpMbtPolygon.cpp | 20 +- .../tracker/me/include/visp3/me/vpMeEllipse.h | 3 +- .../tracker/me/include/visp3/me/vpMeLine.h | 2 +- .../tracker/me/include/visp3/me/vpMeNurbs.h | 5 - .../tracker/me/include/visp3/me/vpMeTracker.h | 2 +- modules/tracker/me/include/visp3/me/vpNurbs.h | 4 - .../tracker/me/src/moving-edges/vpMeNurbs.cpp | 2 - .../tracker/me/src/moving-edges/vpNurbs.cpp | 2 - modules/tracker/me/test/testNurbs.cpp | 2 +- .../tt/vpTemplateTrackerWarpHomographySL3.h | 28 +- .../vpTemplateTrackerWarpHomographySL3.cpp | 7 +- .../include/visp3/tt_mi/vpTemplateTrackerMI.h | 37 +- .../include/visp3/vision/vpHomography.h | 2 - .../include/visp3/vision/vpPoseFeatures.h | 20 +- .../visp3/visual_features/vpBasicFeature.h | 19 +- .../visp3/visual_features/vpFeatureDepth.h | 239 +++-- .../visp3/visual_features/vpFeatureEllipse.h | 39 +- .../visp3/visual_features/vpFeatureLine.h | 328 +++--- .../visual_features/vpFeatureLuminance.h | 55 +- .../visp3/visual_features/vpFeatureMoment.h | 320 +++--- .../visual_features/vpFeatureMomentAlpha.h | 161 ++- .../visual_features/vpFeatureMomentArea.h | 71 +- .../vpFeatureMomentAreaNormalized.h | 279 +++--- .../visual_features/vpFeatureMomentBasic.h | 90 +- .../vpFeatureMomentCInvariant.h | 383 ++++--- .../visual_features/vpFeatureMomentCentered.h | 102 +- .../visual_features/vpFeatureMomentCommon.h | 371 ++++--- .../visual_features/vpFeatureMomentDatabase.h | 236 +++-- .../vpFeatureMomentGravityCenter.h | 365 ++++--- .../vpFeatureMomentGravityCenterNormalized.h | 402 ++++---- .../visp3/visual_features/vpFeaturePoint.h | 278 +++--- .../visp3/visual_features/vpFeaturePoint3D.h | 337 +++---- .../visual_features/vpFeaturePointPolar.h | 432 ++++---- .../visp3/visual_features/vpFeatureSegment.h | 241 +++-- .../visp3/visual_features/vpFeatureThetaU.h | 358 ++++--- .../visual_features/vpFeatureTranslation.h | 474 +++++---- .../visual_features/vpFeatureVanishingPoint.h | 71 +- .../visp3/visual_features/vpGenericFeature.h | 277 +++--- .../vpFeatureBuilderSegment.cpp | 88 +- .../src/visual-feature/vpFeatureMoment.cpp | 147 ++- .../visual-feature/vpFeatureMomentAlpha.cpp | 49 +- .../vpFeatureMomentAreaNormalized.cpp | 57 +- .../visual-feature/vpFeatureMomentBasic.cpp | 64 +- .../vpFeatureMomentCInvariant.cpp | 672 +++++++------ .../vpFeatureMomentCentered.cpp | 111 +-- .../visual-feature/vpFeatureMomentCommon.cpp | 39 +- .../vpFeatureMomentDatabase.cpp | 58 +- .../vpFeatureMomentGravityCenter.cpp | 56 +- ...vpFeatureMomentGravityCenterNormalized.cpp | 65 +- .../src/visual-feature/vpFeatureSegment.cpp | 588 ++++++----- .../src/visual-feature/vpGenericFeature.cpp | 50 +- 209 files changed, 10114 insertions(+), 10585 deletions(-) diff --git a/3rdparty/pthreads4w/ChangeLog b/3rdparty/pthreads4w/ChangeLog index 6f0913ef4d..d36a4b66a2 100644 --- a/3rdparty/pthreads4w/ChangeLog +++ b/3rdparty/pthreads4w/ChangeLog @@ -30,7 +30,7 @@ * implement.h (NEED_FTIME): remove conditionals. * pthread.h: Remove Borland compiler time types no longer needed. * configure.ac (NEED_FTIME): Removed check. - + 2018-08-07 Ross Johnson * GNUmakefile.in (DLL_VER): rename as PTW32_VER. @@ -63,7 +63,7 @@ all-tests-cflags. * Makefile (all-tests-cflags): retain; require all-tests-md and all-tests-mt. 
- + 2016-12-25 Ross Johnson * Change all license notices to the Apache License 2.0 @@ -110,11 +110,11 @@ * _ptw32.h: MINGW(all) include stdint.h to define all specific size integers (int64_t etc). - + 2016-12-17 Kyle Schwarz * _ptw32.h: MINGW6464 define pid_t as __int64. - + 2016-04-01 Ross Johnson * _ptw32.h: Move more header stuff into here. @@ -154,7 +154,7 @@ * pthread_mutex_init.c: Memory allocation of robust mutex element was not being checked. - + 2016-02-29 Ross Johnson * GNUmakefile (MINGW_HAVE_SECURE_API): Moved to config.h. Undefined @@ -223,7 +223,7 @@ * create.c: Don't apply cpu affinity from thread attributes for WINCE; bug fix. - + 2013-07-23 Ross Johnson * config.h (HAVE_CPU_AFFINITY): Defined. @@ -304,7 +304,7 @@ 2012-10-24 Stephane Clairet * pthread_key_delete.c: Bug fix - move keylock release to after the - while loop. (This bug first was introduced at release 2.9.1) + while loop. (This bug first was introduced at release 2.9.1) 2012-10-16 Ross Johnson @@ -327,7 +327,7 @@ * sched.h (cpu_set_t): Redefined. * implement.h (_sched_cpu_set_vector_): Created as the private equivalent of cpu_set. - (pthread_thread_t_.cpuset): Type change. + (pthread_thread_t_.cpuset): Type change. * sched_setaffinity.c: Reflect changes to cpu_set_t and _sched_cpu_set_vector_. * pthread_setaffinity.c: Likewise. * create.c: Likewise. @@ -461,14 +461,14 @@ * sched.h (DWORD_PTR): As above; other changes. * sem_post.c: Fix errno handling and restructure. * sem_getvalue.c: Fix return value and restructure. - + 2012-09-18 Ross Johnson * sched_setaffinity.c: New API to set process CPU affinity in POSIX context; compatibility with Linux. * pthread_setaffinity.c: Likewise. * implement.h (pthread_t_): Added cpuset element. - * sched.h: Added new prototypes. + * sched.h: Added new prototypes. * sched.h (cpu_set_t): Support for new process and thread affinity API. * pthread.h: Added new prototypes. @@ -580,7 +580,7 @@ * implement.h (__ptw32_spinlock_check_need_init): added missing forward declaration. - + 2012-07-19 Daniel Richard. G * common.mk: New; macros common to all build environment makefiles. @@ -714,7 +714,7 @@ mandatory for implementations that don't support PROCESS_SHARED mutexes, nevertheless it was considered useful both functionally and for source-level compatibility. - + 2011-03-26 Ross Johnson * pthread_getunique_np.c: New non-POSIX interface for compatibility @@ -878,7 +878,7 @@ observed when there are greater than barrier-count threads attempting to cross. * pthread_barrier_destroy: Added an MCS guard lock. - + 2009-03-03 Stephan O'Farrill * pthread_attr_getschedpolicy.c: Add "const" to function parameter @@ -923,7 +923,7 @@ * ptw32_semwait.c: Add check for invalid sem_t after acquiring the sem_t state guard mutex and before affecting changes to sema state. - + 2007-01-06 Marcel Ruff * error.c: Fix reference to pthread handle exitStatus member for @@ -1144,7 +1144,7 @@ 2005-04-25 Ross Johnson - * ptw32_relmillisecs.c: New module; converts future abstime to + * ptw32_relmillisecs.c: New module; converts future abstime to milliseconds relative to 'now'. * pthread_mutex_timedlock.c: Use new __ptw32_relmillisecs routine in place of internal code; remove the NEED_SEM code - this routine is now @@ -1246,7 +1246,7 @@ in speed. So, in the final design with cancelability, an uncontested once_control operation involves a minimum of five interlocked operations (including the LeaveCS operation). 
- + ALTERNATIVES: An alternative design from Alexander Terekhov proposed using a named mutex, as sketched below:- @@ -1259,13 +1259,13 @@ } once_control = true; } - + A more detailed description of this can be found here:- http://groups.yahoo.com/group/boost/message/15442 [Although the definition of a suitable PTHREAD_ONCE_INIT precludes use of the TLS located flag, this is not critical.] - + There are three primary concerns though:- 1) The [named] mutex is 'created' even in the uncontended case. 2) A system wide unique name must be generated. @@ -1411,7 +1411,7 @@ 2004-10-19 Ross Johnson * sem_init.c (sem_init): New semaphore model based on the same idea - as mutexes, i.e. user space interlocked check to avoid + as mutexes, i.e. user space interlocked check to avoid unnecessarily entering kernel space. Wraps the Win32 semaphore and keeps it's own counter. Although the motivation to do this has existed for a long time, credit goes to Alexander Terekhov for providing @@ -1443,7 +1443,7 @@ * pthread_mutex_trylock.c: Likewise. * pthread_mutex_timedlock.c: Likewise. * pthread_mutex_unlock.c: Set the event. - + 2004-10-14 Ross Johnson * pthread_mutex_lock.c (pthread_mutex_lock): New algorithm using @@ -1600,7 +1600,7 @@ * pthread.h (PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP): Likewise. * pthread.h (PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP): Likewise. - * ptw32_mutex_check_need_init.c (__ptw32_mutex_check_need_init): + * ptw32_mutex_check_need_init.c (__ptw32_mutex_check_need_init): Add new initialisers. * pthread_mutex_lock.c (pthread_mutex_lock): Check for new @@ -1682,7 +1682,7 @@ * ptw32_getprocessors.c: Some Win32 environments don't have GetProcessAffinityMask(), so always return CPU count = 1 for them. * config.h (NEED_PROCESSOR_AFFINITY_MASK): Define for WinCE. - + 2003-10-15 Ross Johnson * Re-indented all .c files using default GNU style to remove assorted @@ -1882,7 +1882,7 @@ 2002-09-20 Michael Johnson - * pthread_cond_destroy.c (pthread_cond_destroy): + * pthread_cond_destroy.c (pthread_cond_destroy): When two different threads exist, and one is attempting to destroy a condition variable while the other is attempting to initialize a condition variable that was created with @@ -1900,7 +1900,7 @@ 2002-07-30 Alexander Terekhov - * pthread_cond_wait.c (__ptw32_cond_wait_cleanup): + * pthread_cond_wait.c (__ptw32_cond_wait_cleanup): Remove code designed to avoid/prevent spurious wakeup problems. It is believed that the sem_timedwait() call is consuming a CV signal that it shouldn't and this is @@ -1967,7 +1967,7 @@ * pthread_rwlock_timedrdlock.c: New - untested. * pthread_rwlock_timedwrlock.c: New - untested. - + * Testsuite passed (except known MSVC++ problems) * pthread_cond_destroy.c: Expand the time change @@ -2081,7 +2081,7 @@ * pthread_mutex_timedlock.c: Likewise. * pthread_mutex_trylock.c: Likewise. * pthread_mutex_unlock.c: Likewise. - + * private.c: Split file into function segments. * ptw32_InterlockedCompareExchange.c: Separated routine from private.c * ptw32_callUserDestroyRoutines.c: Likewise. @@ -2138,7 +2138,7 @@ * ptw32_calloc.c: Likewise. * ptw32_new.c: Likewise. * w32_CancelableWait.c: Likewise. - + 2002-02-09 Ross Johnson * nonportable.c (pthread_delay_np): Make a true @@ -2212,7 +2212,7 @@ it must be possible to NOT include the header and related definitions with some combination of symbol definitions. 
Secondly, it should be possible - that additional definitions should be limited to POSIX + that additional definitions should be limited to POSIX compliant symbols by the definition of appropriate symbols. * pthread.h: POSIX conditionals. @@ -2253,7 +2253,7 @@ 2002-02-04 Ross Johnson The following extends the idea above to the rest of pthreads-win32 - rpj - + * attr.c: All routines are now in separate compilation units; This file is used to congregate the separate modules for potential inline optimisation and backward build compatibility. @@ -2394,7 +2394,7 @@ Solaris pthreads implementation. * implement.h (pthread_mutex_t_): Remove critical section element - no longer needed. - + 2002-01-04 Ross Johnson @@ -2435,7 +2435,7 @@ implementation of InterlockedCompareExchange. * Makefile (TEST_ICE): Likewise. * private.c (TEST_ICE): Likewise. - + 2001-10-24 Ross Johnson * attr.c (pthread_attr_setstacksize): Quell warning @@ -2626,7 +2626,7 @@ * semaphore.c: Added sem_post_multiple; this is a useful routine, but it doesn't appear to be standard. For now it's not an exported function. - + 2001-06-25 Ross Johnson * create.c (pthread_create): Add priority inheritance @@ -2742,9 +2742,9 @@ 2001-06-06 Ross Johnson - * mutex.c (pthread_mutexattr_init): Remove + * mutex.c (pthread_mutexattr_init): Remove __ptw32_mutex_default_kind. - + 2001-06-05 Ross Johnson * nonportable.c (pthread_mutex_setdefaultkind_np): @@ -2794,14 +2794,14 @@ * GNUmakefile (OPT): Leave symbolic information out of the library and increase optimisation level - for smaller faster prebuilt dlls. - + 2001-05-29 Milan Gardian * Makefile: fix typo. * pthreads.h: Fix problems with stdcall/cdecl conventions, in particular remove the need for PT_STDCALL everywhere; remove warning supression. * (errno): Fix the longstanding "inconsistent dll linkage" problem - with errno; now also works with /MD debugging libs - + with errno; now also works with /MD debugging libs - warnings emerged when compiling pthreads library with /MD (or /MDd) compiler switch, instead of /MT (or /MTd) (i.e. when compiling pthreads using Multithreaded DLL CRT instead of Multithreaded statically linked @@ -2945,13 +2945,13 @@ Mingw32 built library. 2000-10-10 Steven Reddie - + * misc.c (pthread_self): Restore Win32 "last error" cleared by TlsGetValue() call in pthread_getspecific() - + 2000-09-20 Arthur Kantor - + * mutex.c (pthread_mutex_lock): Record the owner of the mutex. This requires also keeping count of recursive locks ourselves rather than leaving it @@ -3075,7 +3075,7 @@ * pthread.h (__PTW32_BUILD): Only redefine __except and catch compiler keywords if we aren't building - the library (ie. __PTW32_BUILD is not defined) - + the library (ie. __PTW32_BUILD is not defined) - this is safer than defining and then undefining if not building the library. * implement.h: Remove __except and catch undefines. @@ -3250,7 +3250,7 @@ * private.c (__ptw32_threadStart): Initialise ei[]. (__ptw32_threadStart): When beginthread is used to start the - thread, force waiting until the creator thread had the + thread, force waiting until the creator thread had the thread handle. * cancel.c (__ptw32_cancel_thread): Include context switch @@ -3292,7 +3292,7 @@ Making this mutex a static could reduce the number of mutexes used by an application since it is actually created only at first use and it's often destroyed soon after. - + 2000-07-22 Ross Johnson * FAQ: Added Q5 and Q6. 
@@ -3388,7 +3388,7 @@ 1999-11-21 Ross Johnson - * global.c (__ptw32_exception_services): Declare new variable. + * global.c (__ptw32_exception_services): Declare new variable. * private.c (__ptw32_threadStart): Destroy thread's cancelLock mutex; make 'catch' and '__except' usageimmune to @@ -3462,7 +3462,7 @@ * pthread.h (winsock.h): Include unconditionally. (ETIMEDOUT): Change fallback value to that defined by winsock.h. - + * general: Patched for portability to WinCE. The details are described in the file WinCE-PORT. Follow the instructions in README.WinCE to make the appropriate changes in config.h. @@ -3594,14 +3594,14 @@ Tue Aug 17 20:00:08 1999 Mumit Khan * exit.c (pthread_exit): Don't call pthread_self() but get thread handle directly from TSD for efficiency. - + 1999-08-12 Ross Johnson * private.c (__ptw32_threadStart): ei[] only declared if _MSC_VER. * exit.c (pthread_exit): Check for implicitly created threads to avoid raising an unhandled exception. - + 1999-07-12 Peter Slacik * condvar.c (pthread_cond_destroy): Add critical section. @@ -3640,7 +3640,7 @@ Sun May 30 00:25:02 1999 Ross Johnson Fri May 28 13:33:05 1999 Mark E. Armstrong * condvar.c (pthread_cond_broadcast): Fix possible memory fault - + Thu May 27 13:08:46 1999 Peter Slacik * condvar.c (pthread_cond_broadcast): Fix logic bug @@ -3670,7 +3670,7 @@ Thu Apr 8 01:16:23 1999 Ross Johnson (sem_getvalue): ditto. * semaphore.h (_POSIX_SEMAPHORES): define. - + Wed Apr 7 14:09:52 1999 Ross Johnson * errno.c (_REENTRANT || _MT): Invert condition. @@ -3715,7 +3715,7 @@ Fri Apr 2 11:08:50 1999 Ross Johnson * semaphore.c (__ptw32_sem_timedwait): Moved to private.c. - * pthread.h (__ptw32_sem_t): Change to sem_t. + * pthread.h (__ptw32_sem_t): Change to sem_t. * private.c (__ptw32_sem_timedwait): Moved from semaphore.c; set errno on error. @@ -3758,7 +3758,7 @@ Fri Mar 19 09:12:59 1999 Ross Johnson Tue Mar 16 1999 Ross Johnson * all: Add GNU LGPL and Copyright and Warranty. - + Mon Mar 15 00:20:13 1999 Ross Johnson * condvar.c (pthread_cond_init): fix possible uninitialised use @@ -3818,7 +3818,7 @@ Sun Mar 7 12:31:14 1999 Ross Johnson * implement.h (__ptw32_cond_test_init_lock): Add extern. - * global.c (__ptw32_cond_test_init_lock): Add declaration. + * global.c (__ptw32_cond_test_init_lock): Add declaration. * condvar.c (pthread_cond_destroy): check for valid initialised CV; flag destroyed CVs as invalid. @@ -3924,7 +3924,7 @@ Wed Feb 3 13:04:44 1999 Ross Johnson * cleanup.c: Rename __ptw32_*_cleanup() to pthread_*_cleanup(). * pthread.def: Ditto. - + * pthread.h: Ditto. * pthread.def (pthread_cleanup_push): Remove from export list; @@ -3998,7 +3998,7 @@ Fri Jan 29 11:56:28 1999 Ross Johnson Sun Jan 24 01:34:52 1999 Ross Johnson - * semaphore.c (sem_wait): Remove second arg to + * semaphore.c (sem_wait): Remove second arg to pthreadCancelableWait() call. Sat Jan 23 17:36:40 1999 Ross Johnson @@ -4072,7 +4072,7 @@ Tue Jan 19 18:27:42 1999 Ross Johnson * pthread.h (pthreadCancelableTimedWait): New prototype. (pthreadCancelableWait): Remove second argument. - * misc.c (CancelableWait): New static function is + * misc.c (CancelableWait): New static function is pthreadCancelableWait() renamed. (pthreadCancelableWait): Now just calls CancelableWait() with INFINITE timeout. @@ -4083,18 +4083,18 @@ Tue Jan 19 18:27:42 1999 Scott Lightner * private.c (__ptw32_sem_timedwait): 'abstime' arg really is absolute time. 
Calculate relative time to wait from current - time before passing timeout to new routine + time before passing timeout to new routine pthreadCancelableTimedWait(). Tue Jan 19 10:27:39 1999 Ross Johnson * pthread.h (pthread_mutexattr_setforcecs_np): New prototype. - + * mutex.c (pthread_mutexattr_init): Init 'pshared' and 'forcecs' attributes to 0. (pthread_mutexattr_setforcecs_np): New function (not portable). - * pthread.h (pthread_mutex_t): + * pthread.h (pthread_mutex_t): Add 'mutex' element. Set to NULL in PTHREAD_MUTEX_INITIALIZER. The pthread_mutex_*() routines will try to optimise performance by choosing either mutexes or critical sections as the basis @@ -4102,7 +4102,7 @@ Tue Jan 19 10:27:39 1999 Ross Johnson (pthread_mutexattr_t_): Add 'forcecs' element. Some applications may choose to force use of critical sections if they know that:- - the mutex is PROCESS_PRIVATE and, + the mutex is PROCESS_PRIVATE and, either the OS supports TryEnterCriticalSection() or pthread_mutex_trylock() will never be called on the mutex. This attribute will be setable via a non-portable routine. @@ -4131,11 +4131,11 @@ Sun Jan 17 12:01:26 1999 Ross Johnson * mutex.c (_mutex_check_need_init): New static function to test and init PTHREAD_MUTEX_INITIALIZER mutexes. Provides serialised - access to the internal state of the uninitialised static mutex. + access to the internal state of the uninitialised static mutex. Called from pthread_mutex_trylock() and pthread_mutex_lock() which do a quick unguarded test to check if _mutex_check_need_init() needs to be called. This is safe as the test is conservative - and is repeated inside the guarded section of + and is repeated inside the guarded section of _mutex_check_need_init(). Thus in all calls except the first calls to lock static mutexes, the additional overhead to lock any mutex is a single memory fetch and test for zero. @@ -4249,7 +4249,7 @@ Mon Jan 11 20:33:19 1999 Ross Johnson * pthread.h: Re-arrange conditional compile of pthread_cleanup-* macros. - * cleanup.c (__ptw32_push_cleanup): Provide conditional + * cleanup.c (__ptw32_push_cleanup): Provide conditional compile of cleanup->prev. 1999-01-11 Tor Lillqvist @@ -4291,13 +4291,13 @@ Tue Dec 29 13:11:16 1998 Ross Johnson pthread_mutexattr_t_, pthread_key_t_, pthread_cond_t_, pthread_condattr_t_, pthread_once_t_. - * pthread.h: Add "_" prefix to pthread_push_cleanup and + * pthread.h: Add "_" prefix to pthread_push_cleanup and pthread_pop_cleanup internal routines, and associated struct and typedefs. * buildlib.bat: Add compile command for semaphore.c - * pthread.def: Comment out pthread_atfork routine name. + * pthread.def: Comment out pthread_atfork routine name. Now unimplemented. * tsd.c (pthread_setspecific): Rename tkAssocCreate to @@ -4380,7 +4380,7 @@ Sun Dec 20 14:51:58 1998 Ross Johnson (pthread_condattr_getpshared): Replaced by John Bossom's version. (pthread_condattr_setpshared): Replaced by John Bossom's version. (pthread_cond_init): Replaced by John Bossom's version. - Fix comment (refered to mutex rather than condition variable). + Fix comment (referred to mutex rather than condition variable). (pthread_cond_destroy): Replaced by John Bossom's version. (pthread_cond_wait): Replaced by John Bossom's version. (pthread_cond_timedwait): Replaced by John Bossom's version. @@ -4400,7 +4400,7 @@ Mon Dec 7 09:44:40 1998 John Bossom (pthread_setcanceltype): Replaced. (pthread_testcancel): Replaced. (pthread_cancel): Replaced. - + * exit.c (pthread_exit): Replaced. 
* misc.c (pthread_self): Replaced. @@ -4648,9 +4648,9 @@ Mon Oct 5 14:25:08 1998 Ross Johnson macro. Passes. * tests/create1.c: New file; test pthread_create(). Passes. - + * tests/equal.c: Poor test; remove. - + * tests/equal1.c New file; test pthread_equal(). Passes. * tests/once1.c: New file; test for pthread_once(). Passes. @@ -4664,7 +4664,7 @@ Mon Oct 5 14:25:08 1998 Ross Johnson * tests/self3.c: New file. Test pthread_self() with a couple of threads to ensure their thread IDs differ. Passes. - + 1998-10-04 Ben Elliston * tests/mutex2.c: Test pthread_mutex_trylock(). Passes. @@ -4692,14 +4692,14 @@ Mon Oct 5 14:25:08 1998 Ross Johnson * config.h.in: Regenerate. * create.c (__ptw32_start_call): Add STDCALL prefix. - + * mutex.c (pthread_mutex_init): Correct function signature. * attr.c (pthread_attr_init): Only zero out the `sigmask' member if we have the sigset_t type. * pthread.h: No need to include . It doesn't even exist - on Win32! Again, an artifact of cross-compilation. + on Win32! Again, an artifact of cross-compilation. (pthread_sigmask): Only provide if we have the sigset_t type. * process.h: Remove. This was a stand-in before we started doing @@ -4717,7 +4717,7 @@ Mon Oct 5 14:25:08 1998 Ross Johnson * configure.in: Test for required system features. - * configure: Generate. + * configure: Generate. * acconfig.h: New file. @@ -4733,7 +4733,7 @@ Mon Oct 5 14:25:08 1998 Ross Johnson * install-sh: Likewise. - * config.h: Remove. + * config.h: Remove. * Makefile: Likewise. @@ -4746,7 +4746,7 @@ Mon Oct 5 14:25:08 1998 Ross Johnson Sat Sep 12 20:09:24 1998 Ross Johnson * windows.h: Remove error number definitions. These are in - + * tsd.c: Add comment explaining rationale for not building POSIX TSD on top of Win32 TLS. @@ -4756,10 +4756,10 @@ Sat Sep 12 20:09:24 1998 Ross Johnson * signal.c (pthread_sigmask): Only provide if HAVE_SIGSET_T is defined. - + * config.h: #undef features, don't #define them. This will be generated by autoconf very soon. - + 1998-08-11 Ben Elliston * Makefile (LIB): Define. @@ -4770,7 +4770,7 @@ Sat Sep 12 20:09:24 1998 Ross Johnson already have one. * windows.c (TlsGetValue): Bug fix. - + Thu Aug 6 15:19:22 1998 Ross Johnson * misc.c (pthread_once): Fix arg 1 of EnterCriticalSection() @@ -4779,7 +4779,7 @@ Thu Aug 6 15:19:22 1998 Ross Johnson * fork.c (pthread_atfork): Typecast (void (*)(void *)) funcptr in each __ptw32_handler_push() call. - * exit.c (__ptw32_exit): Fix attr arg in + * exit.c (__ptw32_exit): Fix attr arg in pthread_attr_getdetachstate() call. * private.c (__ptw32_new_thread): Typecast (HANDLE) NULL. @@ -4789,13 +4789,13 @@ Thu Aug 6 15:19:22 1998 Ross Johnson changing in an attempt to make thread administration data types opaque and cleanup DLL startup. - * dll.c (PthreadsEntryPoint): + * dll.c (PthreadsEntryPoint): (__ptw32_virgins): Remove malloc() and free() calls. (__ptw32_reuse): Ditto. (__ptw32_win32handle_map): Ditto. (__ptw32_threads_mutex_table): Ditto. - * global.c (_POSIX_THREAD_THREADS_MAX): Initialise with + * global.c (_POSIX_THREAD_THREADS_MAX): Initialise with PTW32_MAX_THREADS. (__ptw32_virgins): Ditto. (__ptw32_reuse): Ditto. @@ -4808,9 +4808,9 @@ Thu Aug 6 15:19:22 1998 Ross Johnson * condvar.c (pthread_cond_init): Add address-of operator & to arg 1 of pthread_mutex_init() call. (pthread_cond_destroy): Add address-of operator & to - arg 1 of pthread_mutex_destroy() call. + arg 1 of pthread_mutex_destroy() call. 
- * cleanup.c (__ptw32_destructor_pop_all): Add (int) cast to + * cleanup.c (__ptw32_destructor_pop_all): Add (int) cast to pthread_getspecific() arg. (__ptw32_destructor_pop): Add (void *) cast to "if" conditional. (__ptw32_destructor_push): Add (void *) cast to @@ -4849,13 +4849,13 @@ Tue Aug 4 16:57:58 1998 Ross Johnson * private.c (__ptw32_delete_thread): Fix typo. Add missing ';'. - * global.c (__ptw32_virgins): Change types from pointer to + * global.c (__ptw32_virgins): Change types from pointer to array pointer. (__ptw32_reuse): Ditto. (__ptw32_win32handle_map): Ditto. (__ptw32_threads_mutex_table): Ditto. - * implement.h(__ptw32_virgins): Change types from pointer to + * implement.h(__ptw32_virgins): Change types from pointer to array pointer. (__ptw32_reuse): Ditto. (__ptw32_win32handle_map): Ditto. @@ -4925,7 +4925,7 @@ Mon Aug 3 21:19:57 1998 Ross Johnson * windows.h (THREAD_PRIORITY_NORMAL): Add. - * pthread.h (sched_param): Add missing ';' to struct definition. + * pthread.h (sched_param): Add missing ';' to struct definition. * attr.c (pthread_attr_init): Remove obsolete pthread_attr_t member initialisation - cancelstate, canceltype, cancel_pending. @@ -4938,7 +4938,7 @@ Mon Aug 3 21:19:57 1998 Ross Johnson 1998-08-02 Ben Elliston - * windows.h: Remove duplicate TlsSetValue() prototype. Add + * windows.h: Remove duplicate TlsSetValue() prototype. Add TlsGetValue() prototype. (FALSE): Define. (TRUE): Likewise. @@ -5115,7 +5115,7 @@ Mon Jul 27 00:20:37 1998 Ross Johnson * exit.c (__ptw32_exit): Fix incorrect check for detachedstate. - * implement.h (__ptw32_call_t): Remove env member. + * implement.h (__ptw32_call_t): Remove env member. Sun Jul 26 13:06:12 1998 Ross Johnson @@ -5229,7 +5229,7 @@ Sat Jul 25 00:00:13 1998 Ross Johnson * create.c (__ptw32_start_call): Set thread priority. Ensure our thread entry is removed from the thread table but only if pthread_detach() was called and there are no waiting joins. - (pthread_create): Set detach flag in thread entry if the + (pthread_create): Set detach flag in thread entry if the thread is created PTHREAD_CREATE_DETACHED. * pthread.h (pthread_attr_t): Rename member "detachedstate". @@ -5307,7 +5307,7 @@ Fri Jul 24 21:13:55 1998 Ross Johnson (pthread_create): New threads inherit their creator's signal mask. Copy the signal mask to the new thread structure if we know about signals. - + Fri Jul 24 16:33:17 1998 Ross Johnson * fork.c (pthread_atfork): Add all the necessary push calls. @@ -5351,12 +5351,12 @@ Fri Jul 24 03:00:25 1998 Ross Johnson (pthread_create): _beginthreadex() now passes a pointer to our thread table entry instead of just the call member of that entry. - * implement.h (__ptw32_threads_thread): New member + * implement.h (__ptw32_threads_thread): New member void ** joinvalueptr. (__ptw32_call_t): New member jmpbuf env. * exit.c (pthread_exit): Major rewrite to handle joins and handing - value pointer to joining thread. Uses longjmp() back to + value pointer to joining thread. Uses longjmp() back to __ptw32_start_call(). * create.c (pthread_create): Ensure values of new attribute members @@ -5426,7 +5426,7 @@ Fri Jul 24 00:21:21 1998 Ross Johnson (SCHED_MAX): Likewise, the maximum possible value. (PTHREAD_CANCEL_ASYNCHRONOUS): Redefine. (PTHREAD_CANCEL_DEFERRED): Likewise. - + * sched.c: New file. (pthread_setschedparam): Implement. (pthread_getschedparam): Implement. @@ -5492,7 +5492,7 @@ Wed Jul 22 00:16:22 1998 Ross Johnson all new threads. It allows us to do some cleanup when the thread returns, ie. 
that is otherwise only done if the thread is cancelled. - * exit.c (__ptw32_vacuum): New function contains code from + * exit.c (__ptw32_vacuum): New function contains code from pthread_exit() that we need in the new __ptw32_start_call() as well. @@ -5563,7 +5563,7 @@ Wed Jul 22 00:16:22 1998 Ross Johnson up to multiple of DWORD. Add function prototypes. - * private.c (__ptw32_getthreadindex): "*thread" should have been + * private.c (__ptw32_getthreadindex): "*thread" should have been "thread". Detect empty slot fail condition. 1998-07-20 Ben Elliston @@ -5571,13 +5571,13 @@ Wed Jul 22 00:16:22 1998 Ross Johnson * misc.c (pthread_once): Implement. Don't use a per-application flag and mutex--make `pthread_once_t' contain these elements in their structure. The earlier version had incorrect semantics. - + * pthread.h (__ptw32_once_flag): Add new variable. Remove. (__ptw32_once_lock): Add new mutex lock to ensure integrity of access to __ptw32_once_flag. Remove. (pthread_once): Add function prototype. (pthread_once_t): Define this type. - + Mon Jul 20 02:31:05 1998 Ross Johnson * private.c (__ptw32_getthreadindex): Implement. @@ -5594,7 +5594,7 @@ Mon Jul 20 02:31:05 1998 Ross Johnson * create.c (pthread_create): Add thread to thread table. Keep a thread-private copy of the attributes with default values - filled in when necessary. Same for the cleanup stack. Make + filled in when necessary. Same for the cleanup stack. Make pthread_create C run-time library friendly by using _beginthreadex() instead of CreateThread(). Fix error returns. @@ -5661,9 +5661,9 @@ Sun Jul 19 16:26:23 1998 Ross Johnson across processes for now. (pthread_mutex_t): Use a Win32 CRITICAL_SECTION type for better performance. - + * implement.h (__ptw32_mutexattr_t): Remove shared attribute. - + * mutex.c (pthread_mutexattr_setpshared): This optional function is no longer supported, since we want to implement POSIX mutex variables using the much more efficient Win32 critical section @@ -5688,7 +5688,7 @@ Sun Jul 19 16:26:23 1998 Ross Johnson (pthread_attr_getstackaddr): Likewise. (pthread_attr_init): Likewise. (pthread_attr_destroy): Likewise. - + * condvar.c (pthread_condattr_init): Add `_cond' to function name. * mutex.c (pthread_mutex_lock): Add `_mutex' to function name. @@ -5702,7 +5702,7 @@ Sun Jul 19 16:26:23 1998 Ross Johnson (pthread_attr_getstacksize): Likewise. (pthread_attr_setstackaddr): Likewise. (pthread_attr_getstackaddr): Likewise. - + Mon Jul 13 01:09:55 1998 Ross Johnson * implement.h: Wrap in #ifndef _IMPLEMENT_H @@ -5717,7 +5717,7 @@ Mon Jul 13 01:09:55 1998 Ross Johnson (pthread_condattr_destroy): Likewise. (pthread_condattr_setpshared): Likewise. (pthread_condattr_getpshared): Likewise. - + * implement.h (PTHREAD_THREADS_MAX): Remove trailing semicolon. (PTHREAD_STACK_MIN): Specify; needs confirming. (__ptw32_attr_t): Define this type. @@ -5769,7 +5769,7 @@ Mon Jul 13 01:09:55 1998 Ross Johnson (insert_attr): New function; very preliminary implementation! (is_attr): Likewise. (remove_attr): Likewise. - + Sat Jul 11 14:48:54 1998 Ross Johnson * implement.h: Preliminary implementation specific defines. @@ -5781,10 +5781,10 @@ Sat Jul 11 14:48:54 1998 Ross Johnson * sync.c (pthread_join): Implement. * misc.c (pthread_equal): Likewise. - + * pthread.h (pthread_join): Add function prototype. (pthread_equal): Likewise. - + 1998-07-10 Ben Elliston * misc.c (pthread_self): Implement. 
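The 2004-10 ChangeLog entries above describe mutex and semaphore implementations that perform a user-space interlocked check so that the Win32 kernel object is only entered under contention. The sketch below illustrates that general idea (sometimes called a "benaphore"); it is illustrative only, does not reproduce the actual pthreads-win32 data structures or algorithms, and every name in it is made up.

#include <windows.h>
#include <limits.h>

/* Hypothetical fast lock: layout and names are illustrative, not the real
 * pthreads-win32 types. The semaphore is created eagerly here for brevity;
 * the library defers and guards that initialisation. */
typedef struct {
  volatile LONG waiters;  /* interlocked counter, checked in user space   */
  HANDLE sem;             /* Win32 semaphore, used only under contention  */
} benaphore_t;

static void benaphore_init(benaphore_t *b)
{
  b->waiters = 0;
  b->sem = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
}

static void benaphore_lock(benaphore_t *b)
{
  /* Uncontended case: a single interlocked increment, no kernel call. */
  if (InterlockedIncrement(&b->waiters) > 1) {
    /* Contended case: block on the kernel semaphore. */
    WaitForSingleObject(b->sem, INFINITE);
  }
}

static void benaphore_unlock(benaphore_t *b)
{
  /* Wake exactly one waiter if any thread queued up while we held the lock. */
  if (InterlockedDecrement(&b->waiters) > 0) {
    ReleaseSemaphore(b->sem, 1, NULL);
  }
}

Compared with unconditionally calling WaitForSingleObject(), the fast path costs one atomic instruction, which is the motivation stated in the entries above.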
diff --git a/doc/tutorial/tracking/tutorial-tracking-mb-generic-json.dox b/doc/tutorial/tracking/tutorial-tracking-mb-generic-json.dox index 1c132a25db..2ef9352ead 100644 --- a/doc/tutorial/tracking/tutorial-tracking-mb-generic-json.dox +++ b/doc/tutorial/tracking/tutorial-tracking-mb-generic-json.dox @@ -116,7 +116,7 @@ stating that this tracker uses both edge (see the vpMe class) and KLT (see vpKlt The next important definition is: \snippet realsense-color-and-depth.json.example Transformation Describing the transformation between this camera and the reference camera. It can also be given as a vpPoseVector JSON representation. -If the current camera is the reference, then "camTref" may be ommitted or set as the identity transformation. +If the current camera is the reference, then "camTref" may be omitted or set as the identity transformation. Next, we must define the camera intrinsics (see vpCameraParameters): \snippet realsense-color-and-depth.json.example Camera diff --git a/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox b/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox index a34dd17df7..fc67ffae45 100644 --- a/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox +++ b/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox @@ -272,7 +272,7 @@ In order to do this part, make sure you add a camera to your drone. We added a i Jetson through USB. The code servoPixhawkDroneIBVS.cpp is an example that needs to be run on the Jetson and that allows to do visual servoing with the drone. -This program establishes a rigid link between the drone (equiped with a camera) and an Apriltag. +This program establishes a rigid link between the drone (equipped with a camera) and an Apriltag. Depending on where the camera is placed, the matrices expressing the transformation between the FLU body frame of the drone and the camera frame need to be modified. Here is a picture of the drone showing where the D405 camera was attached. diff --git a/example/math/BSpline.cpp b/example/math/BSpline.cpp index 0a3d2caf36..5056dfded9 100644 --- a/example/math/BSpline.cpp +++ b/example/math/BSpline.cpp @@ -29,7 +29,7 @@ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Description: - * Exemple of a B-Spline curve. + * Example of a B-Spline curve. * *****************************************************************************/ /*! 
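The multi-camera JSON tutorial patched above states that "camTref" describes the transformation between a camera and the reference camera and may be omitted (or set to the identity) when the camera is the reference. The short sketch below only illustrates how such a transformation can be composed from two known poses with vpHomogeneousMatrix; the pose values are made up and the sketch is not part of the tutorial's example files.

#include <iostream>
#include <visp3/core/vpHomogeneousMatrix.h>

int main()
{
  // Hypothetical poses of the two cameras w.r.t. a common frame w
  // (translation in meters, rotation as theta-u in radians); values are made up.
  vpHomogeneousMatrix wMref(0.0, 0.0, 0.0, 0.0, 0.0, 0.0);   // reference camera
  vpHomogeneousMatrix wMcam(0.05, 0.0, 0.02, 0.0, 0.0, 0.0); // current camera

  // camTref: frame change from the reference camera frame to the current camera frame.
  vpHomogeneousMatrix camTref = wMcam.inverse() * wMref;

  std::cout << camTref << std::endl;
  return 0;
}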
diff --git a/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp b/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp index 4a35a880f5..0b9729103c 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp @@ -120,7 +120,7 @@ int main() try { // Define the square CAD model -// Square dimention +// Square dimension #define L 0.075 // Distance between the camera and the square at the desired // position after visual servoing convergence @@ -154,7 +154,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp index 3904882893..5cdc8de3cf 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp @@ -203,7 +203,7 @@ int main() std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; std::cout << " Interaction matrix computed with the current features " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp index f9fd160600..5d715572b3 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp @@ -153,7 +153,7 @@ int main() std::cout << " Use of the Afma6 robot " << std::endl; std::cout << " Interaction matrix computed with the desired features " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_cur.cpp b/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_cur.cpp index dac1df8176..43fd7e2edb 100644 --- a/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_cur.cpp +++ b/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_cur.cpp @@ -188,7 +188,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << 
"-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_des.cpp b/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_des.cpp index 0e2ac786db..456295e6ee 100644 --- a/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_des.cpp +++ b/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_des.cpp @@ -108,7 +108,7 @@ int main() try { // Define the square CAD model -// Square dimention +// Square dimension // #define L 0.075 #define L 0.05 // Distance between the camera and the square at the desired @@ -150,7 +150,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-viper850/servoViper850FourPoints2DCamVelocityLs_cur.cpp b/example/servo-viper850/servoViper850FourPoints2DCamVelocityLs_cur.cpp index 3b7f4588b7..896985320a 100644 --- a/example/servo-viper850/servoViper850FourPoints2DCamVelocityLs_cur.cpp +++ b/example/servo-viper850/servoViper850FourPoints2DCamVelocityLs_cur.cpp @@ -189,7 +189,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera space" << std::endl; std::cout << " Use of the Viper850 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-viper850/servoViper850FourPointsKinect.cpp b/example/servo-viper850/servoViper850FourPointsKinect.cpp index ba8a431c35..2e4f567e3d 100644 --- a/example/servo-viper850/servoViper850FourPointsKinect.cpp +++ b/example/servo-viper850/servoViper850FourPointsKinect.cpp @@ -200,7 +200,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera space" << std::endl; std::cout << " Use of the Viper850 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/modules/ar/CMakeLists.txt b/modules/ar/CMakeLists.txt index d5a5394cbb..004b702c55 100644 --- a/modules/ar/CMakeLists.txt +++ b/modules/ar/CMakeLists.txt @@ -173,7 +173,7 @@ if(USE_COIN3D) endif() endif() -vp_add_module(ar visp_core OPTIONAL visp_io) +vp_add_module(ar visp_core) vp_glob_module_sources() if(USE_OGRE) diff --git a/modules/core/include/visp3/core/vpCameraParameters.h b/modules/core/include/visp3/core/vpCameraParameters.h index 06b36d5e25..ada6c7074d 100644 --- a/modules/core/include/visp3/core/vpCameraParameters.h +++ b/modules/core/include/visp3/core/vpCameraParameters.h @@ -341,7 +341,7 @@ class 
VISP_EXPORT vpCameraParameters \return True if the fov has been computed, False otherwise. */ - inline bool isFovComputed() const { return isFov; } + inline bool isFovComputed() const { return m_isFov; } void computeFov(const unsigned int &w, const unsigned int &h); @@ -354,7 +354,7 @@ class VISP_EXPORT vpCameraParameters */ inline double getHorizontalFovAngle() const { - if (!isFov) { + if (!m_isFov) { vpTRACE("Warning: The FOV is not computed, getHorizontalFovAngle() " "won't be significant."); } @@ -370,7 +370,7 @@ class VISP_EXPORT vpCameraParameters */ inline double getVerticalFovAngle() const { - if (!isFov) { + if (!m_isFov) { vpTRACE("Warning: The FOV is not computed, getVerticalFovAngle() won't " "be significant."); } @@ -391,24 +391,24 @@ class VISP_EXPORT vpCameraParameters */ inline std::vector getFovNormals() const { - if (!isFov) { + if (!m_isFov) { vpTRACE("Warning: The FOV is not computed, getFovNormals() won't be " "significant."); } - return fovNormals; + return m_fovNormals; } - inline double get_px() const { return px; } - inline double get_px_inverse() const { return inv_px; } - inline double get_py_inverse() const { return inv_py; } - inline double get_py() const { return py; } - inline double get_u0() const { return u0; } - inline double get_v0() const { return v0; } - inline double get_kud() const { return kud; } - inline double get_kdu() const { return kdu; } + inline double get_px() const { return m_px; } + inline double get_px_inverse() const { return m_inv_px; } + inline double get_py_inverse() const { return m_inv_py; } + inline double get_py() const { return m_py; } + inline double get_u0() const { return m_u0; } + inline double get_v0() const { return m_v0; } + inline double get_kud() const { return m_kud; } + inline double get_kdu() const { return m_kdu; } inline std::vector getKannalaBrandtDistortionCoefficients() const { return m_dist_coefs; } - inline vpCameraParametersProjType get_projModel() const { return projModel; } + inline vpCameraParametersProjType get_projModel() const { return m_projModel; } vpMatrix get_K() const; vpMatrix get_K_inverse() const; @@ -425,22 +425,22 @@ class VISP_EXPORT vpCameraParameters static const double DEFAULT_KDU_PARAMETER; static const vpCameraParametersProjType DEFAULT_PROJ_TYPE; - double px, py; //!< Pixel size - double u0, v0; //!< Principal point - double kud; //!< Radial distortion (from undistorted to distorted) - double kdu; //!< Radial distortion (from distorted to undistorted) - std::vector m_dist_coefs; //!< Coefficients for Kannala-Brandt distortion model + double m_px, m_py; //!< Pixel size + double m_u0, m_v0; //!< Principal point + double m_kud; //!< Radial distortion (from undistorted to distorted) + double m_kdu; //!< Radial distortion (from distorted to undistorted) + std::vector m_dist_coefs; //!< Coefficients for Kannala-Brandt distortion model - unsigned int width; //!< Width of the image used for the fov computation - unsigned int height; //!< Height of the image used for the fov computation - bool isFov; //!< Boolean to specify if the fov has been computed - double m_hFovAngle; //!< Field of view horizontal angle - double m_vFovAngle; //!< Field of view vertical angle - std::vector fovNormals; //!< Normals of the planes describing the fov + unsigned int m_width; //!< Width of the image used for the fov computation + unsigned int m_height; //!< Height of the image used for the fov computation + bool m_isFov; //!< Boolean to specify if the fov has been computed + double m_hFovAngle; //!< Field of view 
horizontal angle + double m_vFovAngle; //!< Field of view vertical angle + std::vector m_fovNormals; //!< Normals of the planes describing the fov - double inv_px, inv_py; + double m_inv_px, m_inv_py; - vpCameraParametersProjType projModel; //!< used projection model + vpCameraParametersProjType m_projModel; //!< used projection model #ifdef VISP_HAVE_NLOHMANN_JSON friend void to_json(nlohmann::json &j, const vpCameraParameters &cam); friend void from_json(const nlohmann::json &j, vpCameraParameters &cam); @@ -454,26 +454,26 @@ NLOHMANN_JSON_SERIALIZE_ENUM(vpCameraParameters::vpCameraParametersProjType, { {vpCameraParameters::perspectiveProjWithDistortion, "perspectiveWithDistortion"}, {vpCameraParameters::ProjWithKannalaBrandtDistortion, "kannalaBrandtDistortion"} }); + /** * \brief Converts camera parameters into a JSON representation. - * \sa from_json for more information on the content - * \param j the resulting JSON object - * \param cam the camera to serialize - * + * \sa from_json() for more information on the content. + * \param j The resulting JSON object. + * \param cam The camera to serialize. */ inline void to_json(nlohmann::json &j, const vpCameraParameters &cam) { - j["px"] = cam.px; - j["py"] = cam.py; - j["u0"] = cam.u0; - j["v0"] = cam.v0; - j["model"] = cam.projModel; + j["px"] = cam.m_px; + j["py"] = cam.m_py; + j["u0"] = cam.m_u0; + j["v0"] = cam.m_v0; + j["model"] = cam.m_projModel; - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithDistortion: { - j["kud"] = cam.kud; - j["kdu"] = cam.kdu; + j["kud"] = cam.m_kud; + j["kdu"] = cam.m_kdu; break; } case vpCameraParameters::ProjWithKannalaBrandtDistortion: @@ -487,33 +487,33 @@ inline void to_json(nlohmann::json &j, const vpCameraParameters &cam) break; } } -/*! - \brief Deserialize a JSON object into camera parameters. - The minimal required properties are: - - Pixel size: px, py - - Principal point: u0, v0 - - If a projection model (\ref vpCameraParameters::vpCameraParametersProjType) is supplied, then other parameters may be expected: - - In the case of perspective projection with distortion, ku, and kud must be supplied. - - In the case of Kannala-Brandt distortion, the list of coefficients must be supplied. - - An example of a JSON object representing a camera is: - \code{.json} - { - "px": 300.0, - "py": 300.0, - "u0": 120.5, - "v0": 115.0, - "model": "perspectiveWithDistortion", // one of ["perspectiveWithoutDistortion", "perspectiveWithDistortion", "kannalaBrandtDistortion"]. If ommitted, camera is assumed to have no distortion - "kud": 0.5, // required since "model" == perspectiveWithDistortion - "kdu": 0.5 - } - \endcode - - \param j The json object to deserialize. - \param cam The modified camera. -*/ +/*! + * \brief Deserialize a JSON object into camera parameters. + * The minimal required properties are: + * - Pixel size: px, py + * - Principal point: u0, v0 + * + * If a projection model (\ref vpCameraParameters::vpCameraParametersProjType) is supplied, then other parameters may be expected: + * - In the case of perspective projection with distortion, ku, and kud must be supplied. + * - In the case of Kannala-Brandt distortion, the list of coefficients must be supplied. + * + * An example of a JSON object representing a camera is: + * \code{.json} + * { + * "px": 300.0, + * "py": 300.0, + * "u0": 120.5, + * "v0": 115.0, + * "model": "perspectiveWithDistortion", // one of ["perspectiveWithoutDistortion", "perspectiveWithDistortion", "kannalaBrandtDistortion"]. 
If omitted, camera is assumed to have no distortion + * "kud": 0.5, // required since "model" == perspectiveWithDistortion + * "kdu": 0.5 + * } + * \endcode + * + * \param j The json object to deserialize. + * \param cam The modified camera. + */ inline void from_json(const nlohmann::json &j, vpCameraParameters &cam) { const double px = j.at("px").get(); diff --git a/modules/core/include/visp3/core/vpCircle.h b/modules/core/include/visp3/core/vpCircle.h index 3407516fdc..c491f08985 100644 --- a/modules/core/include/visp3/core/vpCircle.h +++ b/modules/core/include/visp3/core/vpCircle.h @@ -46,63 +46,62 @@ #include /*! - \class vpCircle - \ingroup group_core_geometry - \brief Class that defines a 3D circle in the object frame and allows forward projection of a 3D circle in the - camera frame and in the 2D image plane by perspective projection. - All the parameters must be set in meter. - - Note that a 3D circle is defined from the intersection between a 3D plane and a 3D sphere. - - A 3D circle has the followings parameters: - - **in the object frame**: the parameters oA, oB, oC corresponding to the 3D plane with equation - oA*(X-oX)+oB*(Y-oY)+oC*(Z-oZ)=0 where (X,Y,Z) are the coordinates of a 3D point belonging to the plane passing through - the 3D sphere center (oX,oY,oZ) and the 3D coordinates oX, oY, oZ of the center and radius R of the 3D sphere. These - parameters registered in vpForwardProjection::oP internal 7-dim vector are set using the constructors - vpCircle(double oA, double oB, double oC, double oX, double oY, double oZ, double R), - vpCircle(const vpColVector &oP) or the functions - setWorldCoordinates(double oA, double oB, double oC, double oX, double oY, double oZ, double R) - and setWorldCoordinates(const vpColVector &oP). To get theses parameters use get_oP(). - - - **in the camera frame**: the parameters cA, cB, cC corresponding to the 3D plane cAx+cBy+cCz+D=0 - and the coordinates cX, cY, cZ of the center and radius R of the 3D sphere. These - parameters registered in vpTracker::cP internal 7-dim vector are computed using - changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const or changeFrame(const vpHomogeneousMatrix &cMo). - These parameters could be retrieved using getA(), getB(), getC(), getX(), getY(), getZ() and getR(). - To get theses parameters use get_cP(). - - - **in the image plane**: here we consider the parameters of the ellipse corresponding - to the perspective projection of the 3D circle. The parameters are the ellipse centroid (x, y) - and n20, n11, n02 which are the second order centered moments of - the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where - \f$\mu_{ij}\f$ are the centered moments and a the area). - These parameters are registered in vpTracker::p internal 5-dim vector - and computed using projection() and - projection(const vpColVector &cP, vpColVector &p) const. They could be retrieved using get_x(), get_y(), get_n20(), - get_n11() and get_n02(). They correspond to 2D normalized circle parameters with values expressed in meters. - To get theses parameters use get_p(). - -*/ + * \class vpCircle + * \ingroup group_core_geometry + * \brief Class that defines a 3D circle in the object frame and allows forward projection of a 3D circle in the + * camera frame and in the 2D image plane by perspective projection. + * All the parameters must be set in meter. + * + * Note that a 3D circle is defined from the intersection between a 3D plane and a 3D sphere. 
+ * + * A 3D circle has the followings parameters: + * - **in the object frame**: the parameters oA, oB, oC corresponding to the 3D plane with equation + * oA*(X-oX)+oB*(Y-oY)+oC*(Z-oZ)=0 where (X,Y,Z) are the coordinates of a 3D point belonging to the plane passing through + * the 3D sphere center (oX,oY,oZ) and the 3D coordinates oX, oY, oZ of the center and radius R of the 3D sphere. These + * parameters registered in vpForwardProjection::oP internal 7-dim vector are set using the constructors + * vpCircle(double oA, double oB, double oC, double oX, double oY, double oZ, double R), + * vpCircle(const vpColVector &oP) or the functions + * setWorldCoordinates(double oA, double oB, double oC, double oX, double oY, double oZ, double R) + * and setWorldCoordinates(const vpColVector &oP). To get theses parameters use get_oP(). + * + * - **in the camera frame**: the parameters cA, cB, cC corresponding to the 3D plane cAx+cBy+cCz+D=0 + * and the coordinates cX, cY, cZ of the center and radius R of the 3D sphere. These + * parameters registered in vpTracker::cP internal 7-dim vector are computed using + * changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const or changeFrame(const vpHomogeneousMatrix &cMo). + * These parameters could be retrieved using getA(), getB(), getC(), getX(), getY(), getZ() and getR(). + * To get theses parameters use get_cP(). + * + * - **in the image plane**: here we consider the parameters of the ellipse corresponding + * to the perspective projection of the 3D circle. The parameters are the ellipse centroid (x, y) + * and n20, n11, n02 which are the second order centered moments of + * the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where + * \f$\mu_{ij}\f$ are the centered moments and a the area). + * These parameters are registered in vpTracker::p internal 5-dim vector + * and computed using projection() and + * projection(const vpColVector &cP, vpColVector &p) const. They could be retrieved using get_x(), get_y(), get_n20(), + * get_n11() and get_n02(). They correspond to 2D normalized circle parameters with values expressed in meters. + * To get theses parameters use get_p(). 
+ */ class VISP_EXPORT vpCircle : public vpForwardProjection { public: vpCircle(); explicit vpCircle(const vpColVector &oP); vpCircle(double oA, double oB, double oC, double oX, double oY, double oZ, double R); - virtual ~vpCircle(); + virtual ~vpCircle() override; - void changeFrame(const vpHomogeneousMatrix &noMo, vpColVector &noP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &noMo, vpColVector &noP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpCircle *duplicate() const; + vpCircle *duplicate() const override; double get_x() const { return p[0]; } double get_y() const { return p[1]; } @@ -121,10 +120,10 @@ class VISP_EXPORT vpCircle : public vpForwardProjection double getR() const { return cP[6]; } - void projection(); - void projection(const vpColVector &cP, vpColVector &p) const; + void projection() override; + void projection(const vpColVector &cP, vpColVector &p) const override; - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; void setWorldCoordinates(double oA, double oB, double oC, double oX, double oY, double oZ, double R); //################### @@ -134,7 +133,7 @@ class VISP_EXPORT vpCircle : public vpForwardProjection const double &theta, double &i, double &j); protected: - void init(); + void init() override; public: #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) diff --git a/modules/core/include/visp3/core/vpClient.h b/modules/core/include/visp3/core/vpClient.h index 10ae2923ad..6550491ad5 100644 --- a/modules/core/include/visp3/core/vpClient.h +++ b/modules/core/include/visp3/core/vpClient.h @@ -45,169 +45,161 @@ #ifdef VISP_HAVE_FUNC_INET_NTOP /*! - \class vpClient - - \ingroup group_core_com_ethernet - - \brief This class represents a Transmission Control Protocol (TCP) client. - - TCP provides reliable, ordered delivery of a stream of bytes from a program - on one computer to another program on another computer. 
- - Exemple of client's code, receiving and sending basic message - It corresponds to the client used in the first example of vpServer class' - documentation: - - \code -#include -#include - -int main() -{ - std::string servername = "localhost"; - unsigned int port = 35000; - - vpClient client; - client.connectToHostname(servername, port); - //client.connectToIP("127.0.0.1",port); - - int val = 0; - - while(1) - { - // Sending the new value to the first client - if(client.send(&val) != sizeof(int)) - std::cout << "Error while sending" << std::endl; - else - std::cout << "Sending : " << val << std::endl; - - // Receiving a value from the first client - if(client.receive(&val) != sizeof(int)) - std::cout << "Error while receiving" << std::endl; - else - std::cout << "Received : " << val << std::endl; - } - - return 0; -} - \endcode - - Exemple of client's code, sending a vpImage on request form. - It correspond to the server used in the second example of vpServer class' - documentation. - - \code -#include -#include -#include -#include -#include -#include - -#include "vpRequestImage.h" //See vpRequest class documentation - -int main(int argc, char **argv) -{ -#if defined(VISP_HAVE_V4L2) - std::string servername = "localhost"; - unsigned int port = 35000; - - vpImage I; // Create a gray level image container - - // Create a grabber based on v4l2 third party lib (for usb cameras under - // Linux) - vpV4l2Grabber g; - g.setScale(1); - g.setInput(0); - g.open(I); - - // Create an image viewer -#if defined(VISP_HAVE_X11) - vpDisplayX d(I, -1, -1, "Camera frame"); -#elif defined(VISP_HAVE_GDI) //Win32 - vpDisplayGDI d(I, -1, -1, "Camera frame"); -#endif - - vpClient client; - client.connectToHostname(servername, port); - //client.connectToIP("127.0.0.1",port); - - vpRequestImage reqImage(&I); - - while(1) - { - double t = vpTime::measureTimeMs(); - // Acquire a new image - g.acquire(I); - - vpDisplay::display(I); - vpDisplay::flush(I); - - client.sendAndEncodeRequest(reqImage); - - // A click in the viewer to exit - if ( vpDisplay::getClick(I, false) ) - break; - } - - return 0; -#endif -} - \endcode - - \sa vpClient - \sa vpRequest - \sa vpNetwork -*/ + * \class vpClient + * + * \ingroup group_core_com_ethernet + * + * \brief This class represents a Transmission Control Protocol (TCP) client. + * + * TCP provides reliable, ordered delivery of a stream of bytes from a program + * on one computer to another program on another computer. + * + * Example of client's code, receiving and sending basic message + * It corresponds to the client used in the first example of vpServer class' + * documentation: + * + * \code + * #include + * #include + * + * int main() + * { + * std::string servername = "localhost"; + * unsigned int port = 35000; + * + * vpClient client; + * client.connectToHostname(servername, port); + * //client.connectToIP("127.0.0.1",port); + * + * int val = 0; + * + * while(1) + * { + * // Sending the new value to the first client + * if(client.send(&val) != sizeof(int)) + * std::cout << "Error while sending" << std::endl; + * else + * std::cout << "Sending : " << val << std::endl; + * + * // Receiving a value from the first client + * if(client.receive(&val) != sizeof(int)) + * std::cout << "Error while receiving" << std::endl; + * else + * std::cout << "Received : " << val << std::endl; + * } + * + * return 0; + * } + * \endcode + * + * Example of client's code, sending a vpImage on request form. 
+ * It correspond to the server used in the second example of vpServer class' + * documentation. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * #include "vpRequestImage.h" //See vpRequest class documentation + * + * int main(int argc, char **argv) + * { + * #if defined(VISP_HAVE_V4L2) + * std::string servername = "localhost"; + * unsigned int port = 35000; + * + * vpImage I; // Create a gray level image container + * + * // Create a grabber based on v4l2 third party lib (for usb cameras under + * // Linux) + * vpV4l2Grabber g; + * g.setScale(1); + * g.setInput(0); + * g.open(I); + * + * // Create an image viewer + * #if defined(VISP_HAVE_X11) + * vpDisplayX d(I, -1, -1, "Camera frame"); + * #elif defined(VISP_HAVE_GDI) //Win32 + * vpDisplayGDI d(I, -1, -1, "Camera frame"); + * #endif + * + * vpClient client; + * client.connectToHostname(servername, port); + * //client.connectToIP("127.0.0.1",port); + * + * vpRequestImage reqImage(&I); + * + * while(1) + * { + * double t = vpTime::measureTimeMs(); + * // Acquire a new image + * g.acquire(I); + * + * vpDisplay::display(I); + * vpDisplay::flush(I); + * + * client.sendAndEncodeRequest(reqImage); + * + * // A click in the viewer to exit + * if ( vpDisplay::getClick(I, false) ) + * break; + * } + * + * return 0; + * #endif + * } + * \endcode + * + * \sa vpClient + * \sa vpRequest + * \sa vpNetwork + */ class VISP_EXPORT vpClient : public vpNetwork { private: - //######## PARAMETERS ######## - //# # - //############################ - - unsigned int numberOfAttempts; - - //######## Private Functions ######## - //# # - //################################### + unsigned int m_numberOfAttempts; bool connectServer(vpNetwork::vpReceptor &serv); public: vpClient(); - virtual ~vpClient(); + virtual ~vpClient() override; bool connectToHostname(const std::string &hostname, const unsigned int &port_serv); bool connectToIP(const std::string &ip, const unsigned int &port_serv); void deconnect(const unsigned int &index = 0); /*! - Get the actual number of attempts to connect to the server. - - \sa vpClient::setNumberOfAttempts() - - \return Actual number of attempts. - */ - unsigned int getNumberOfAttempts() { return numberOfAttempts; } + * Get the actual number of attempts to connect to the server. + * + * \sa vpClient::setNumberOfAttempts() + * + * \return Actual number of attempts. + */ + unsigned int getNumberOfAttempts() { return m_numberOfAttempts; } /*! - Get the number of server that the client is connected on. - - \return Number of servers. - */ + * Get the number of server that the client is connected on. + * + * \return Number of servers. + */ unsigned int getNumberOfServers() { return (unsigned int)receptor_list.size(); } void print(); /*! - Set the number of attempts to connect to the server. - - \sa vpClient::getNumberOfAttempts() - - \param nb : Number of attempts. - */ - void setNumberOfAttempts(const unsigned int &nb) { numberOfAttempts = nb; } + * Set the number of attempts to connect to the server. + * + * \sa vpClient::getNumberOfAttempts() + * + * \param nb : Number of attempts. 
+ */ + void setNumberOfAttempts(const unsigned int &nb) { m_numberOfAttempts = nb; } void stop(); }; diff --git a/modules/core/include/visp3/core/vpColVector.h b/modules/core/include/visp3/core/vpColVector.h index 0fb123573b..0236d46bc1 100644 --- a/modules/core/include/visp3/core/vpColVector.h +++ b/modules/core/include/visp3/core/vpColVector.h @@ -254,10 +254,6 @@ class VISP_EXPORT vpColVector : public vpArray2D std::copy(list.begin(), list.end(), data); } #endif - /*! - * Destructor. - */ - virtual ~vpColVector() { } /*! * Removes all elements from the vector (which are destroyed), diff --git a/modules/core/include/visp3/core/vpCylinder.h b/modules/core/include/visp3/core/vpCylinder.h index 5347c78658..6f402f7226 100644 --- a/modules/core/include/visp3/core/vpCylinder.h +++ b/modules/core/include/visp3/core/vpCylinder.h @@ -46,58 +46,60 @@ #include /*! - \class vpCylinder - \ingroup group_core_geometry - \brief Class that defines a 3D cylinder in the object frame and allows forward projection of a 3D cylinder in the - camera frame and in the 2D image plane by perspective projection. - All the parameters must be set in meter. - - A 3D cylinder of radius R is defined by the set of circles of radius R whose center belongs - to a straight line perpendicular to the plane of the circles. - - A 3D cylinder has the followings parameters: - - **in the object frame**: the cylinder is represented by the equation: - \f$ (X - oX)^2 + (Y - oY)^2 + (Z - oZ)^2 - (oA \; X + oB \; Y + oC \; Z)^2 - - R^2 = 0 \f$ with - \f[ - \left\{ \begin{array}{l} - oA^2 + oB^2 + oC^2 = 1 \\ - oA \; oX + oB \; oY + oC \; oZ = 0 - \end{array} \right. - \f] - where R is the radius of the cylinder, oA, oB, oC are the - coordinates of its direction vector and oX, oY, oZ are the - coordinates of the nearest point belonging to the cylinder axis from the - projection center. - The corresponding parameters are located in vpForwardProjection::oP 7-dim internal vector. They correspond - to oP = (oA, oB, oC, oX, oY, oZ, R). - Setting the cylinder parameters is achieved through the constructors with - parameters or setWorldCoordinates() methods. - To get theses parameters use get_oP(). - - - **in the camera frame**: parameters are saved in vpTracker::cP 7-dim internal vector - with cP =(cA, cB, cC, cX, cY, cZ, R). Considering the set of parameters oP expressed in the object - frame, cylinder coordinates expressed in the camera frame are obtained using - changeFrame(). To get these parameters use get_cP(). - - - **in the 2D image plane**: parameters are saved in vpTracker::p 4-dim vector. - They correspond to p = (\f$\rho_1\f$, \f$\theta_1\f$, \f$\rho_2\f$, \f$\theta_2\f$), noting - that for non-degenerated cases, the perspective projection of a cylinder on the image plane is a set of two - straight lines with equation: - \f[ - \left\{ \begin{array}{lll} - x \;\cos\theta_1 + x \;\sin\theta_1 - \rho_1 = 0 \\ - y \;\cos\theta_2 + y \;\sin\theta_2 - \rho_2 = 0 - \end{array} \right. - \f] - Perspective projection is achieved using projection() methods. The methods - get_p(), getRho1(), getTheta1() and getRho2(), getTheta2() allow to access to the - projected line parameters. -*/ + * \class vpCylinder + * \ingroup group_core_geometry + * \brief Class that defines a 3D cylinder in the object frame and allows forward projection of a 3D cylinder in the + * camera frame and in the 2D image plane by perspective projection. + * All the parameters must be set in meter. 
+ * + * A 3D cylinder of radius R is defined by the set of circles of radius R whose center belongs + * to a straight line perpendicular to the plane of the circles. + * + * A 3D cylinder has the followings parameters: + * - **in the object frame**: the cylinder is represented by the equation: + * \f$ (X - oX)^2 + (Y - oY)^2 + (Z - oZ)^2 - (oA \; X + oB \; Y + oC \; Z)^2 - + * R^2 = 0 \f$ with + * \f[ + * \left\{ \begin{array}{l} + * oA^2 + oB^2 + oC^2 = 1 \\ + * oA \; oX + oB \; oY + oC \; oZ = 0 + * \end{array} \right. + * \f] + * where R is the radius of the cylinder, oA, oB, oC are the + * coordinates of its direction vector and oX, oY, oZ are the + * coordinates of the nearest point belonging to the cylinder axis from the + * projection center. + * The corresponding parameters are located in vpForwardProjection::oP 7-dim internal vector. They correspond + * to oP = (oA, oB, oC, oX, oY, oZ, R). + * Setting the cylinder parameters is achieved through the constructors with + * parameters or setWorldCoordinates() methods. + * To get theses parameters use get_oP(). + * + * - **in the camera frame**: parameters are saved in vpTracker::cP 7-dim internal vector + * with cP =(cA, cB, cC, cX, cY, cZ, R). Considering the set of parameters oP expressed in the object + * frame, cylinder coordinates expressed in the camera frame are obtained using + * changeFrame(). To get these parameters use get_cP(). + * + * - **in the 2D image plane**: parameters are saved in vpTracker::p 4-dim vector. + * They correspond to p = (\f$\rho_1\f$, \f$\theta_1\f$, \f$\rho_2\f$, \f$\theta_2\f$), noting + * that for non-degenerated cases, the perspective projection of a cylinder on the image plane is a set of two + * straight lines with equation: + * \f[ + * \left\{ \begin{array}{lll} + * x \;\cos\theta_1 + x \;\sin\theta_1 - \rho_1 = 0 \\ + * y \;\cos\theta_2 + y \;\sin\theta_2 - \rho_2 = 0 + * \end{array} \right. + * \f] + * + * Perspective projection is achieved using projection() methods. The methods + * get_p(), getRho1(), getTheta1() and getRho2(), getTheta2() allow to access to the + * projected line parameters. + */ class VISP_EXPORT vpCylinder : public vpForwardProjection { public: - typedef enum { + typedef enum + { line1, /*!< First limb of the cylinder. */ line2 /*!< Second limb of the cylinder. 
*/ } vpLineCylinderType; @@ -105,85 +107,90 @@ class VISP_EXPORT vpCylinder : public vpForwardProjection vpCylinder(); explicit vpCylinder(const vpColVector &oP); vpCylinder(double oA, double oB, double oC, double oX, double oY, double oZ, double R); - virtual ~vpCylinder(); - void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; double computeZ(double x, double y) const; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpCylinder *duplicate() const; + vpCylinder *duplicate() const override; /*! - Return the \f$\rho_1\f$ parameter of the line corresponding to the - projection of the cylinder in the image plane. - \sa getTheta1() - */ + * Return the \f$\rho_1\f$ parameter of the line corresponding to the + * projection of the cylinder in the image plane. + * \sa getTheta1() + */ double getRho1() const { return p[0]; } /*! - Return the \f$\theta_1\f$ parameter of the line corresponding to the - projection of the cylinder in the image plane. - \sa getRho1() - */ + * Return the \f$\theta_1\f$ parameter of the line corresponding to the + * projection of the cylinder in the image plane. + * \sa getRho1() + */ double getTheta1() const { return p[1]; } /*! - Return the \f$\rho_2\f$ parameter of the line corresponding to the - projection of the cylinder in the image plane. - \sa getTheta2() - */ + * Return the \f$\rho_2\f$ parameter of the line corresponding to the + * projection of the cylinder in the image plane. + * \sa getTheta2() + */ double getRho2() const { return p[2]; } /*! - Return the \f$\theta_2\f$ parameter of the line corresponding to the - projection of the cylinder in the image plane. - \sa getRho2() - */ + * Return the \f$\theta_2\f$ parameter of the line corresponding to the + * projection of the cylinder in the image plane. + * \sa getRho2() + */ double getTheta2() const { return p[3]; } /*! - Return cylinder cA parameter expressed in the camera frame. - */ + * Return cylinder cA parameter expressed in the camera frame. + */ double getA() const { return cP[0]; } + /*! - Return cylinder cB parameter expressed in the camera frame. - */ + * Return cylinder cB parameter expressed in the camera frame. + */ double getB() const { return cP[1]; } + /*! - Return cylinder cC parameter expressed in the camera frame. - */ + * Return cylinder cC parameter expressed in the camera frame. + */ double getC() const { return cP[2]; } + /*! - Return cylinder cX parameter expressed in the camera frame. - */ + * Return cylinder cX parameter expressed in the camera frame. + */ double getX() const { return cP[3]; } + /*! - Return cylinder cY parameter expressed in the camera frame. 
- */ + * Return cylinder cY parameter expressed in the camera frame. + */ double getY() const { return cP[4]; } + /*! - Return cylinder cZ parameter expressed in the camera frame. - */ + * Return cylinder cZ parameter expressed in the camera frame. + */ double getZ() const { return cP[5]; } + /*! - Return cylinder R parameter corresponding to the cylinder radius. - */ + * Return cylinder R parameter corresponding to the cylinder radius. + */ double getR() const { return cP[6]; } - void init(); + void init() override; - void projection(); - void projection(const vpColVector &cP, vpColVector &p) const; + void projection() override; + void projection(const vpColVector &cP, vpColVector &p) const override; - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; void setWorldCoordinates(double oA, double oB, double oC, double oX, double oY, double oZ, double R); }; diff --git a/modules/core/include/visp3/core/vpDisplay.h b/modules/core/include/visp3/core/vpDisplay.h index 326a7e8ea4..2e1327b895 100644 --- a/modules/core/include/visp3/core/vpDisplay.h +++ b/modules/core/include/visp3/core/vpDisplay.h @@ -48,127 +48,126 @@ #include /*! - \file vpDisplay.h - \brief Generic class for image display, also provide the interface - with the image. -*/ + * \file vpDisplay.h + * \brief Generic class for image display, also provide the interface + * with the image. + */ /*! - - \class vpDisplay - - \ingroup group_core_gui - - \brief Class that defines generic functionalities for display. - - The \ref tutorial-getting-started is a good starting point to know - how to use this class to display an image in a window. - - \warning Since ViSP 3.3.1 or higher we introduce the alpha channel support for color - transparency. This new feature is only supported yet using vpDisplayOpenCV. See vpColor - header documentation and displayOpenCV.cpp example for usage when displaying filled - transparent circles and rectangles. - - The example below shows how to use this class. - - \code -#include -#include -#include -#include -#include -#include -#include - -int main() -{ - vpImage I; // Grey level image - - // Read an image in PGM P5 format -#ifdef _WIN32 - vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); -#else - vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); -#endif - - vpDisplay *d; - - // Depending on the detected third party libraries, we instantiate here the - // first video device which is available -#if defined(VISP_HAVE_X11) - d = new vpDisplayX; -#elif defined(VISP_HAVE_GTK) - d = new vpDisplayGTK; -#elif defined(VISP_HAVE_GDI) - d = new vpDisplayGDI; -#elif defined(VISP_HAVE_D3D9) - d = new vpDisplayD3D; -#elif defined(HAVE_OPENCV_HIGHGUI) - d = new vpDisplayOpenCV; -#endif - - // Initialize the display with the image I. Display and image are - // now link together. 
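
As an illustration of the vpCylinder hunk that closes above, and not part of the patch itself: the class documentation describes the oP (object frame) -> cP (camera frame) -> p (image plane) chain that ends in the two limb lines (rho1, theta1) and (rho2, theta2). A minimal sketch of that chain, with arbitrary cylinder and pose values chosen only so that the constraints on (oA, oB, oC) and (oX, oY, oZ) hold:

#include <visp3/core/vpCylinder.h>
#include <visp3/core/vpHomogeneousMatrix.h>

int main()
{
  // oP = (oA, oB, oC, oX, oY, oZ, R): axis along the object-frame Y axis,
  // passing through the origin, radius 0.1 m; oA^2 + oB^2 + oC^2 = 1 and
  // oA*oX + oB*oY + oC*oZ = 0 hold as required.
  vpCylinder cylinder(0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.1);

  // Arbitrary pose of the object frame, 0.5 m in front of the camera.
  vpHomogeneousMatrix cMo(0.0, 0.0, 0.5, 0.0, 0.0, 0.0);

  cylinder.changeFrame(cMo); // fills cP = (cA, cB, cC, cX, cY, cZ, R)
  cylinder.projection();     // fills p  = (rho1, theta1, rho2, theta2)

  double rho1 = cylinder.getRho1(), theta1 = cylinder.getTheta1();
  double rho2 = cylinder.getRho2(), theta2 = cylinder.getTheta2();
  (void)rho1; (void)theta1; (void)rho2; (void)theta2; // silence unused-variable warnings
  return 0;
}
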
-#ifdef VISP_HAVE_DISPLAY - d->init(I); -#endif - - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My image"); - - // To initialize the video device, it is also possible to replace - // the 3 previous lines by: - // d->init(I, 400, 100, "My image"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(50); - topLeftCorner.set_j(10); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, - true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Get non blocking keyboard events - std::cout << "Check keyboard events..." << std::endl; - char key[10]; sprintf(key, "\0"); - bool ret; - for (int i=0; i< 200; i++) { - bool ret = vpDisplay::getKeyboardEvent(I, key, false); - if (ret) - std::cout << "keyboard event: key: " << "\"" << key - << "\"" << std::endl; - vpTime::wait(40); - } - - // Get a blocking keyboard event - std::cout << "Wait for a keyboard event..." << std::endl; - ret = vpDisplay::getKeyboardEvent(I, key, true); - std::cout << "keyboard event: " << ret << std::endl; - if (ret) - std::cout << "key: " << "\"" << key << "\"" << std::endl; - - // Wait for a click in the display window - std::cout << "Wait for a button click..." << std::endl; - vpDisplay::getClick(I); - - delete d; -} - \endcode - - Other examples are available in tutorial-image-viewer.cpp and - tutorial-viewer.cpp. -*/ + * \class vpDisplay + * + * \ingroup group_core_gui + * + * \brief Class that defines generic functionalities for display. + * + * The \ref tutorial-getting-started is a good starting point to know + * how to use this class to display an image in a window. + * + * \warning Since ViSP 3.3.1 or higher we introduce the alpha channel support for color + * transparency. This new feature is only supported yet using vpDisplayOpenCV. See vpColor + * header documentation and displayOpenCV.cpp example for usage when displaying filled + * transparent circles and rectangles. + * + * The example below shows how to use this class. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpImage I; // Grey level image + * + * // Read an image in PGM P5 format + * #ifdef _WIN32 + * vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); + * #else + * vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); + * #endif + * + * vpDisplay *d; + * + * // Depending on the detected third party libraries, we instantiate here the + * // first video device which is available + * #if defined(VISP_HAVE_X11) + * d = new vpDisplayX; + * #elif defined(VISP_HAVE_GTK) + * d = new vpDisplayGTK; + * #elif defined(VISP_HAVE_GDI) + * d = new vpDisplayGDI; + * #elif defined(VISP_HAVE_D3D9) + * d = new vpDisplayD3D; + * #elif defined(HAVE_OPENCV_HIGHGUI) + * d = new vpDisplayOpenCV; + * #endif + * + * // Initialize the display with the image I. Display and image are + * // now link together. 
+ * #ifdef VISP_HAVE_DISPLAY + * d->init(I); + * #endif + * + * // Specify the window location + * vpDisplay::setWindowPosition(I, 400, 100); + * + * // Set the display window title + * vpDisplay::setTitle(I, "My image"); + * + * // To initialize the video device, it is also possible to replace + * // the 3 previous lines by: + * // d->init(I, 400, 100, "My image"); + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpImagePoint topLeftCorner; + * topLeftCorner.set_i(50); + * topLeftCorner.set_j(10); + * vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, + * true); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // Get non blocking keyboard events + * std::cout << "Check keyboard events..." << std::endl; + * char key[10]; sprintf(key, "\0"); + * bool ret; + * for (int i=0; i< 200; i++) { + * bool ret = vpDisplay::getKeyboardEvent(I, key, false); + * if (ret) + * std::cout << "keyboard event: key: " << "\"" << key + * << "\"" << std::endl; + * vpTime::wait(40); + * } + * + * // Get a blocking keyboard event + * std::cout << "Wait for a keyboard event..." << std::endl; + * ret = vpDisplay::getKeyboardEvent(I, key, true); + * std::cout << "keyboard event: " << ret << std::endl; + * if (ret) + * std::cout << "key: " << "\"" << key << "\"" << std::endl; + * + * // Wait for a click in the display window + * std::cout << "Wait for a button click..." << std::endl; + * vpDisplay::getClick(I); + * + * delete d; + * } + * \endcode + * + * Other examples are available in tutorial-image-viewer.cpp and + * tutorial-viewer.cpp. + */ class VISP_EXPORT vpDisplay { public: @@ -225,37 +224,37 @@ class VISP_EXPORT vpDisplay //@{ unsigned int computeAutoScale(unsigned int width, unsigned int height); /*! - Return the value of the down scale factor applied to the image in order to - reduce the size of the window used to display the image. + * Return the value of the down scale factor applied to the image in order to + * reduce the size of the window used to display the image. */ unsigned int getDownScalingFactor() { return m_scale; } /*! - Return the display height. - \sa getWidth() - */ + * Return the display height. + * \sa getWidth() + */ inline unsigned int getHeight() const { return m_height; } /*! - Return the display width. - \sa getHeight() - */ + * Return the display width. + * \sa getHeight() + */ inline unsigned int getWidth() const { return m_width; } /*! - Return the position (along the horizontal axis) on the screen of the - display window. \sa getWindowYPosition() + * Return the position (along the horizontal axis) on the screen of the + * display window. \sa getWindowYPosition() */ int getWindowXPosition() const { return m_windowXPosition; } /*! - Return the position (along the vertical axis) on the screen of the display - window. \sa getWindowXPosition() + * Return the position (along the vertical axis) on the screen of the display + * window. \sa getWindowXPosition() */ int getWindowYPosition() const { return m_windowYPosition; } /*! - Check if the display has been initialised - - \return True if the display has been initialised, otherwise False - */ + * Check if the display has been initialised. 
+ * + * \return True if the display has been initialised, otherwise False + */ inline bool isInitialised() { return m_displayHasBeenInitialized; } virtual void setDownScalingFactor(unsigned int scale); virtual void setDownScalingFactor(vpScaleType scaleType); @@ -265,47 +264,47 @@ class VISP_EXPORT vpDisplay /** @name vpDisplay pure virtual functions */ //@{ /*! - Set the window backgroud to \e color. - \param color : Background color. - */ + * Set the window background to \e color. + * \param color : Background color. + */ virtual void clearDisplay(const vpColor &color = vpColor::white) = 0; /*! - Close the window. - */ + * Close the window. + */ virtual void closeDisplay() = 0; /*! - Display an arrow from image point \e ip1 to image point \e ip2. - \param ip1 : Initial image point. - \param ip2 : Final image point. - \param color : Arrow color. - \param w : Arrow width. - \param h : Arrow height. - \param thickness : Thickness of the lines used to display the arrow. - */ + * Display an arrow from image point \e ip1 to image point \e ip2. + * \param ip1 : Initial image point. + * \param ip2 : Final image point. + * \param color : Arrow color. + * \param w : Arrow width. + * \param h : Arrow height. + * \param thickness : Thickness of the lines used to display the arrow. + */ virtual void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) = 0; /*! - Display a string at the image point \e ip location. - - To select the font used to display the string, use setFont(). - - \param ip : Upper left image point location of the string in the display. - \param text : String to display in overlay. - \param color : String color. - - \sa setFont() - */ + * Display a string at the image point \e ip location. + * + * To select the font used to display the string, use setFont(). + * + * \param ip : Upper left image point location of the string in the display. + * \param text : String to display in overlay. + * \param color : String color. + * + * \sa setFont() + */ virtual void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) = 0; /*! - Display a circle. - \param circle : Circle to display. - \param color : Circle color. - \param fill : When set to true fill the circle. - \param thickness : Thickness of the circle. This parameter is only useful - when \e fill is set to false. - */ + * Display a circle. + * \param circle : Circle to display. + * \param color : Circle color. + * \param fill : When set to true fill the circle. + * \param thickness : Thickness of the circle. This parameter is only useful + * when \e fill is set to false. + */ inline virtual void displayCircle(const vpImageCircle &circle, const vpColor &color, bool fill = false, unsigned int thickness = 1) { @@ -313,68 +312,60 @@ class VISP_EXPORT vpDisplay } /*! - Display a circle. - \param center : Circle center position. - \param radius : Circle radius. - \param color : Circle color. - \param fill : When set to true fill the circle. - \param thickness : Thickness of the circle. This parameter is only useful - when \e fill is set to false. - */ + * Display a circle. + * \param center : Circle center position. + * \param radius : Circle radius. + * \param color : Circle color. + * \param fill : When set to true fill the circle. + * \param thickness : Thickness of the circle. This parameter is only useful + * when \e fill is set to false. 
+ */ virtual void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, unsigned int thickness = 1) = 0; /*! - Display a cross at the image point \e ip location. - \param ip : Cross location. - \param size : Size (width and height) of the cross. - \param color : Cross color. - \param thickness : Thickness of the lines used to display the cross. - */ + * Display a cross at the image point \e ip location. + * \param ip : Cross location. + * \param size : Size (width and height) of the cross. + * \param color : Cross color. + * \param thickness : Thickness of the lines used to display the cross. + */ virtual void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) = 0; /*! - Display a dashed line from image point \e ip1 to image point \e ip2. - \param ip1 : Initial image point. - \param ip2 : Final image point. - \param color : Line color. - \param thickness : Dashed line thickness. - */ + * Display a dashed line from image point \e ip1 to image point \e ip2. + * \param ip1 : Initial image point. + * \param ip2 : Final image point. + * \param color : Line color. + * \param thickness : Dashed line thickness. + */ virtual void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) = 0; /*! - Display a line from image point \e ip1 to image point \e ip2. - \param ip1 : Initial image point. - \param ip2 : Final image point. - \param color : Line color. - \param thickness : Line thickness. - */ + * Display a line from image point \e ip1 to image point \e ip2. + * \param ip1 : Initial image point. + * \param ip2 : Final image point. + * \param color : Line color. + * \param thickness : Line thickness. + */ virtual void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) = 0; /*! - Display the gray level image \e I (8bits). - - \warning Display has to be initialized. - - \warning Suppress the overlay drawing. - - \param I : Image to display. - - \sa init(), closeDisplay() - */ + * Display the gray level image \e I (8bits). + * \warning Display has to be initialized. + * \warning Suppress the overlay drawing. + * \param I : Image to display. + * \sa init(), closeDisplay() + */ virtual void displayImage(const vpImage &I) = 0; /*! - Display the color image \e I in RGBa format (32bits). - - \warning Display has to be initialized. - - \warning Suppress the overlay drawing. - - \param I : Image to display. - - \sa init(), closeDisplay() - */ + * Display the color image \e I in RGBa format (32bits). + * \warning Display has to be initialized. + * \warning Suppress the overlay drawing. + * \param I : Image to display. + * \sa init(), closeDisplay() + */ virtual void displayImage(const vpImage &I) = 0; virtual void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, @@ -383,339 +374,334 @@ class VISP_EXPORT vpDisplay unsigned int height) = 0; /*! - Display a point at the image point \e ip location. - \param ip : Point location. - \param color : Point color. - \param thickness : Point thickness. - */ + * Display a point at the image point \e ip location. + * \param ip : Point location. + * \param color : Point color. + * \param thickness : Point thickness. + */ virtual void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) = 0; /*! 
- Display a rectangle with \e topLeft as the top-left corner and \e - width and \e height the rectangle size. - - \param topLeft : Top-left corner of the rectangle. - \param width : Rectangle width. - \param height : Rectangle height. - \param color : Rectangle color. - \param fill : When set to true fill the rectangle. - - \param thickness : Thickness of the four lines used to display the - rectangle. This parameter is only useful when \e fill is set to - false. - */ + * Display a rectangle with \e topLeft as the top-left corner and \e + * width and \e height the rectangle size. + * + * \param topLeft : Top-left corner of the rectangle. + * \param width : Rectangle width. + * \param height : Rectangle height. + * \param color : Rectangle color. + * \param fill : When set to true fill the rectangle. + * \param thickness : Thickness of the four lines used to display the + * rectangle. This parameter is only useful when \e fill is set to + * false. + */ virtual void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill = false, unsigned int thickness = 1) = 0; /*! - Display a rectangle with \e topLeft as the top-left corner and \e - width and \e height the rectangle size. - - \param topLeft : Top-left corner of the rectangle. - \param bottomRight : Bottom-right corner of the rectangle. - \param color : Rectangle color. - \param fill : When set to true fill the rectangle. - - \param thickness : Thickness of the four lines used to display the - rectangle. This parameter is only useful when \e fill is set to - false. - */ + * Display a rectangle with \e topLeft as the top-left corner and \e + * width and \e height the rectangle size. + * + * \param topLeft : Top-left corner of the rectangle. + * \param bottomRight : Bottom-right corner of the rectangle. + * \param color : Rectangle color. + * \param fill : When set to true fill the rectangle. + * \param thickness : Thickness of the four lines used to display the + * rectangle. This parameter is only useful when \e fill is set to + * false. + */ virtual void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, bool fill = false, unsigned int thickness = 1) = 0; /*! - Display a rectangle with \e topLeft as the top-left corner and \e - width and \e height the rectangle size. - - \param rectangle : Rectangle characteristics. - \param color : Rectangle color. - \param fill : When set to true fill the rectangle. - - \param thickness : Thickness of the four lines used to display the - rectangle. This parameter is only useful when \e fill is set to - false. - - */ + * Display a rectangle with \e topLeft as the top-left corner and \e + * width and \e height the rectangle size. + * + * \param rectangle : Rectangle characteristics. + * \param color : Rectangle color. + * \param fill : When set to true fill the rectangle. + * \param thickness : Thickness of the four lines used to display the + * rectangle. This parameter is only useful when \e fill is set to + * false. + */ virtual void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) = 0; /*! - Flushes the display. - It's necessary to use this function to see the results of any drawing. - */ + * Flushes the display. + * It's necessary to use this function to see the results of any drawing. + */ virtual void flushDisplay() = 0; /*! - Flushes the display. - It's necessary to use this function to see the results of any drawing. 
- */ + * Flushes the display. + * It's necessary to use this function to see the results of any drawing. + */ virtual void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) = 0; /* Simple interface with the mouse event */ /*! - Wait for a click from one of the mouse button. - - \param blocking [in] : Blocking behavior. - - When set to true, this method waits until a mouse button is - pressed and then returns always true. - - When set to false, returns true only if a mouse button is - pressed, otherwise returns false. - - \return - - true if a button was clicked. This is always the case if blocking is set - to \e true. - - false if no button was clicked. This can occur if blocking is set - to \e false. - */ + * Wait for a click from one of the mouse button. + * + * \param blocking [in] : Blocking behavior. + * - When set to true, this method waits until a mouse button is + * pressed and then returns always true. + * - When set to false, returns true only if a mouse button is + * pressed, otherwise returns false. + * + * \return + * - true if a button was clicked. This is always the case if blocking is set + * to \e true. + * - false if no button was clicked. This can occur if blocking is set + * to \e false. + */ virtual bool getClick(bool blocking = true) = 0; /*! - Wait for a click from one of the mouse button and get the position - of the clicked image point. - - \param ip [out] : The coordinates of the clicked image point. - - \param blocking [in] : true for a blocking behaviour waiting a mouse - button click, false for a non blocking behaviour. - - \return - - true if a button was clicked. This is always the case if blocking is set - to \e true. - - false if no button was clicked. This can occur if blocking is set - to \e false. - */ + * Wait for a click from one of the mouse button and get the position + * of the clicked image point. + * + * \param ip [out] : The coordinates of the clicked image point. + * + * \param blocking [in] : true for a blocking behaviour waiting a mouse + * button click, false for a non blocking behaviour. + * + * \return + * - true if a button was clicked. This is always the case if blocking is set + * to \e true. + * - false if no button was clicked. This can occur if blocking is set + * to \e false. + */ virtual bool getClick(vpImagePoint &ip, bool blocking = true) = 0; /*! - Wait for a mouse button click and get the position of the clicked - pixel. The button used to click is also set. - - \param ip [out] : The coordinates of the clicked image point. - - \param button [out] : The button used to click. - - \param blocking [in] : - - When set to true, this method waits until a mouse button is - pressed and then returns always true. - - When set to false, returns true only if a mouse button is - pressed, otherwise returns false. - - \return true if a mouse button is pressed, false otherwise. If a - button is pressed, the location of the mouse pointer is updated in - \e ip. - */ + * Wait for a mouse button click and get the position of the clicked + * pixel. The button used to click is also set. + * + * \param ip [out] : The coordinates of the clicked image point. + * + * \param button [out] : The button used to click. + * + * \param blocking [in] : + * - When set to true, this method waits until a mouse button is + * pressed and then returns always true. + * - When set to false, returns true only if a mouse button is + * pressed, otherwise returns false. + * + * \return true if a mouse button is pressed, false otherwise. 
If a + * button is pressed, the location of the mouse pointer is updated in + * \e ip. + */ virtual bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) = 0; /*! - Wait for a mouse button click release and get the position of the - image point were the click release occurs. The button used to click is - also set. Same method as getClick(unsigned int&, unsigned int&, - vpMouseButton::vpMouseButtonType &, bool). - - \param ip [out] : Position of the clicked image point. - - \param button [in] : Button used to click. - - \param blocking [in] : true for a blocking behaviour waiting a mouse - button click, false for a non blocking behaviour. - - \return - - true if a button was clicked. This is always the case if blocking is set - to \e true. - - false if no button was clicked. This can occur if blocking is set - to \e false. - - \sa getClick(vpImagePoint &, vpMouseButton::vpMouseButtonType &, bool) - - */ + * Wait for a mouse button click release and get the position of the + * image point were the click release occurs. The button used to click is + * also set. Same method as getClick(unsigned int&, unsigned int&, + * vpMouseButton::vpMouseButtonType &, bool). + * + * \param ip [out] : Position of the clicked image point. + * + * \param button [in] : Button used to click. + * + * \param blocking [in] : true for a blocking behaviour waiting a mouse + * button click, false for a non blocking behaviour. + * + * \return + * - true if a button was clicked. This is always the case if blocking is set + * to \e true. + * - false if no button was clicked. This can occur if blocking is set + * to \e false. + * + * \sa getClick(vpImagePoint &, vpMouseButton::vpMouseButtonType &, bool) + * + */ virtual bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) = 0; /*! - Get a keyboard event. - - \param blocking [in] : Blocking behavior. - - When set to true, this method waits until a key is - pressed and then returns always true. - - When set to false, returns true only if a key is - pressed, otherwise returns false. - - \return - - true if a key was pressed. This is always the case if blocking is set - to \e true. - - false if no key was pressed. This can occur if blocking is set - to \e false. - */ + * Get a keyboard event. + * + * \param blocking [in] : Blocking behavior. + * - When set to true, this method waits until a key is + * pressed and then returns always true. + * - When set to false, returns true only if a key is + * pressed, otherwise returns false. + * + * \return + * - true if a key was pressed. This is always the case if blocking is set + * to \e true. + * - false if no key was pressed. This can occur if blocking is set + * to \e false. + */ virtual bool getKeyboardEvent(bool blocking = true) = 0; /*! - - Get a keyboard event. - - \param blocking [in] : Blocking behavior. - - When set to true, this method waits until a key is - pressed and then returns always true. - - When set to false, returns true only if a key is - pressed, otherwise returns false. - - \param key [out]: If possible, an ISO Latin-1 character - corresponding to the keyboard key. - - \return - - true if a key was pressed. This is always the case if blocking is set - to \e true. - - false if no key was pressed. This can occur if blocking is set - to \e false. - */ + * Get a keyboard event. + * + * \param blocking [in] : Blocking behavior. + * - When set to true, this method waits until a key is + * pressed and then returns always true. 
+ * - When set to false, returns true only if a key is + * pressed, otherwise returns false. + * + * \param key [out]: If possible, an ISO Latin-1 character + * corresponding to the keyboard key. + * + * \return + * - true if a key was pressed. This is always the case if blocking is set + * to \e true. + * - false if no key was pressed. This can occur if blocking is set + * to \e false. + */ virtual bool getKeyboardEvent(std::string &key, bool blocking = true) = 0; /*! - Get the coordinates of the mouse pointer. - - \param ip [out] : The coordinates of the mouse pointer. - - \return true if a pointer motion event was received, false otherwise. - - \exception vpDisplayException::notInitializedError : If the display - was not initialized. - */ + * Get the coordinates of the mouse pointer. + * + * \param ip [out] : The coordinates of the mouse pointer. + * + * \return true if a pointer motion event was received, false otherwise. + * + * \exception vpDisplayException::notInitializedError : If the display + * was not initialized. + */ virtual bool getPointerMotionEvent(vpImagePoint &ip) = 0; /*! - Get the coordinates of the mouse pointer. - - \param ip [out] : The coordinates of the mouse pointer. - - \return true. - - \exception vpDisplayException::notInitializedError : If the display - was not initialized. - */ + * Get the coordinates of the mouse pointer. + * + * \param ip [out] : The coordinates of the mouse pointer. + * + * \return true. + * + * \exception vpDisplayException::notInitializedError : If the display + * was not initialized. + */ virtual bool getPointerPosition(vpImagePoint &ip) = 0; /*! - Gets the screen vertical resolution in pixel. + * Gets the screen vertical resolution in pixel. */ virtual unsigned int getScreenHeight() = 0; /*! - Gets the screen resolution in pixel. - \param width, height : Screen resolution in pixels. + * Gets the screen resolution in pixel. + * \param width, height : Screen resolution in pixels. */ virtual void getScreenSize(unsigned int &width, unsigned int &height) = 0; /*! - Gets the screen horizontal resolution in pixel. + * Gets the screen horizontal resolution in pixel. */ virtual unsigned int getScreenWidth() = 0; /*! - Initialize the display (size, position and title) of a gray level image. + * Initialize the display (size, position and title) of a gray level image. - \param I : Image to be displayed (not that image has to be initialized). - \param x : Horizontal position of the upper/left window corner. - \param y : Vertical position of the upper/left window corner. - \param title : Window title. - */ + * \param I : Image to be displayed (not that image has to be initialized). + * \param x : Horizontal position of the upper/left window corner. + * \param y : Vertical position of the upper/left window corner. + * \param title : Window title. + */ virtual void init(vpImage &I, int x = -1, int y = -1, const std::string &title = "") = 0; /*! - Initialize the display (size, position and title) of a color - image in RGBa format. - - \param I : Image to be displayed (not that image has to be initialized). - \param x : Horizontal position of the upper/left window corner. - \param y : Vertical position of the upper/left window corner. - \param title : Window title. - */ + * Initialize the display (size, position and title) of a color + * image in RGBa format. + * + * \param I : Image to be displayed (not that image has to be initialized). + * \param x : Horizontal position of the upper/left window corner. 
+ * \param y : Vertical position of the upper/left window corner. + * \param title : Window title. + */ virtual void init(vpImage &I, int x = -1, int y = -1, const std::string &title = "") = 0; /*! - Initialize the display size, position and title. - - \param width : Window width. - \param height : Window height. - \param x : Horizontal position of the upper/left window corner. - \param y : Vertical position of the upper/left window corner. - \param title : Window title. - - The following example shows how to use this function - \code - #include - #include - #include - #include - #include - #include - - int main() - { - #ifdef VISP_HAVE_DISPLAY - vpImage I; - vpImageIo::read(I, "lena.pgm"); - - vpDisplay *d; - - #if defined(VISP_HAVE_X11) - d = new vpDisplayX; - #elif defined(VISP_HAVE_GTK) - d = new vpDisplayGTK; - #elif defined(VISP_HAVE_GDI) - d = new vpDisplayGDI; - #elif defined(VISP_HAVE_D3D9) - d = new vpDisplayD3D; - #elif defined(HAVE_OPENCV_HIGHGUI) - d = new vpDisplayOpenCV; - #else - std::cout << "Sorry, no video device is available" << std::endl; - return -1; - #endif - - d->init(I.getWidth(), I.getHeight(), 10, 20, "viewer"); - - // Now associate the display to the image - I.display = d; - - // Set the display background with image I content - vpDisplay::display(I); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // wait for a mouse clink in the display to exit - vpDisplay::getClick(I); - - delete d; - #endif - } - \endcode - */ + * Initialize the display size, position and title. + * + * \param width : Window width. + * \param height : Window height. + * \param x : Horizontal position of the upper/left window corner. + * \param y : Vertical position of the upper/left window corner. + * \param title : Window title. + * + * The following example shows how to use this function + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_DISPLAY + * vpImage I; + * vpImageIo::read(I, "lena.pgm"); + * + * vpDisplay *d; + * + * #if defined(VISP_HAVE_X11) + * d = new vpDisplayX; + * #elif defined(VISP_HAVE_GTK) + * d = new vpDisplayGTK; + * #elif defined(VISP_HAVE_GDI) + * d = new vpDisplayGDI; + * #elif defined(VISP_HAVE_D3D9) + * d = new vpDisplayD3D; + * #elif defined(HAVE_OPENCV_HIGHGUI) + * d = new vpDisplayOpenCV; + * #else + * std::cout << "Sorry, no video device is available" << std::endl; + * return -1; + * #endif + * + * d->init(I.getWidth(), I.getHeight(), 10, 20, "viewer"); + * + * // Now associate the display to the image + * I.display = d; + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // wait for a mouse clink in the display to exit + * vpDisplay::getClick(I); + * + * delete d; + * #endif + * } + * \endcode + */ virtual void init(unsigned int width, unsigned int height, int x = -1, int y = -1, const std::string &title = "") = 0; /*! - Set the font used to display a text in overlay. The display is - performed using displayCharString(). - - \param font : The expected font name. The available fonts are given by - the "xlsfonts" binary. To choose a font you can also use the - "xfontsel" binary. - - \note Under UNIX, to know all the available fonts, use the - "xlsfonts" binary in a terminal. You can also use the "xfontsel" binary. - - \sa displayCharString() - */ + * Set the font used to display a text in overlay. 
The display is + * performed using displayCharString(). + * + * \param font : The expected font name. The available fonts are given by + * the "xlsfonts" binary. To choose a font you can also use the + * "xfontsel" binary. + * + * \note Under UNIX, to know all the available fonts, use the + * "xlsfonts" binary in a terminal. You can also use the "xfontsel" binary. + * + * \sa displayCharString() + */ virtual void setFont(const std::string &font) = 0; /*! - Set the window title. - \param title : Window title. - */ + * Set the window title. + * \param title : Window title. + */ virtual void setTitle(const std::string &title) = 0; /*! - Set the window position in the screen. + * Set the window position in the screen. + * + * \param x : Horizontal position of the upper/left window corner. + * \param y : Vertical position of the upper/left window corner. - \param x : Horizontal position of the upper/left window corner. - \param y : Vertical position of the upper/left window corner. - - */ + */ virtual void setWindowPosition(int x, int y) = 0; //@} #endif // ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - @name Static public vpDisplay functionalities on gray level images. - */ + * @name Static public vpDisplay functionalities on gray level images. + */ //@{ static void close(vpImage &I); static void display(const vpImage &I); @@ -823,8 +809,8 @@ class VISP_EXPORT vpDisplay //@} /*! - @name Static public vpDisplay functionalities on 32 bits color images. - */ + * @name Static public vpDisplay functionalities on 32 bits color images. + */ //@{ static void close(vpImage &I); static void display(const vpImage &I); @@ -925,7 +911,7 @@ class VISP_EXPORT vpDisplay //@} private: - //! get the window pixmap and put it in vpRGBa image + //! Get the window pixmap and put it in vpRGBa image. virtual void getImage(vpImage &I) = 0; }; diff --git a/modules/core/include/visp3/core/vpException.h b/modules/core/include/visp3/core/vpException.h index 6d96c0fb62..6dffe6105e 100644 --- a/modules/core/include/visp3/core/vpException.h +++ b/modules/core/include/visp3/core/vpException.h @@ -68,7 +68,7 @@ class VISP_EXPORT vpException : public std::exception void setMessage(const char *format, va_list args); //! forbid the empty constructor (protected) - vpException() : code(notInitialized), message("") { }; + vpException() : code(notInitialized), message("") { } public: enum generalExceptionEnum @@ -105,16 +105,6 @@ class VISP_EXPORT vpException : public std::exception */ explicit vpException(int code); - /*! - * Destructor. Do nothing but implemented to fit the inheritance from - * std::exception - */ -#if VISP_CXX_STANDARD > VISP_CXX_STANDARD_98 - virtual ~vpException() { } -#else - virtual ~vpException() throw() { } -#endif - /** @name Inherited functionalities from vpException */ //@{ /*! diff --git a/modules/core/include/visp3/core/vpForceTwistMatrix.h b/modules/core/include/visp3/core/vpForceTwistMatrix.h index 993e66ef33..8f01f4ce91 100644 --- a/modules/core/include/visp3/core/vpForceTwistMatrix.h +++ b/modules/core/include/visp3/core/vpForceTwistMatrix.h @@ -177,11 +177,6 @@ class VISP_EXPORT vpForceTwistMatrix : public vpArray2D vpForceTwistMatrix(const vpRotationMatrix &R); vpForceTwistMatrix(const vpThetaUVector &thetau); - /*! - Destructor. 
- */ - virtual ~vpForceTwistMatrix(){} - vpForceTwistMatrix buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R); vpForceTwistMatrix buildFrom(const vpTranslationVector &t, const vpThetaUVector &thetau); vpForceTwistMatrix buildFrom(const vpHomogeneousMatrix &M, bool full = true); @@ -224,7 +219,7 @@ class VISP_EXPORT vpForceTwistMatrix : public vpArray2D \deprecated Provided only for compat with previous releases. This function does nothing. */ - vp_deprecated void init(){} + vp_deprecated void init() { } /*! \deprecated You should rather use eye(). */ diff --git a/modules/core/include/visp3/core/vpForwardProjection.h b/modules/core/include/visp3/core/vpForwardProjection.h index 3cf257c87b..97e98c2cf6 100644 --- a/modules/core/include/visp3/core/vpForwardProjection.h +++ b/modules/core/include/visp3/core/vpForwardProjection.h @@ -35,9 +35,9 @@ #define vpForwardProjection_H /*! - \file vpForwardProjection.h - \brief class that defines what is a generic geometric feature -*/ + * \file vpForwardProjection.h + * \brief class that defines what is a generic geometric feature + */ #include #include @@ -46,114 +46,107 @@ #include /*! - \class vpForwardProjection - \brief Class that defines what is a generic geometric feature. - - Each geometric feature has parameters expressed: - - - in the object frame \e oP. These parameters are located in the public - attribute vpForwardProjection::oP. - - in the camera frame \e cP. These parameters are located in the public - attribute vpTracker::cP. - - in the image plane \e p. These parameters are located in the public - attribute vpTracker::p. They correspond to normalized coordinates - of the feature expressed in meters. -*/ + * \class vpForwardProjection + * \brief Class that defines what is a generic geometric feature. + * + * Each geometric feature has parameters expressed: + * + * - in the object frame \e oP. These parameters are located in the public + * attribute vpForwardProjection::oP. + * - in the camera frame \e cP. These parameters are located in the public + * attribute vpTracker::cP. + * - in the image plane \e p. These parameters are located in the public + * attribute vpTracker::p. They correspond to normalized coordinates + * of the feature expressed in meters. + */ class VISP_EXPORT vpForwardProjection : public vpTracker { public: /*! - Used for memory issue especially in the vpServo class. - */ + * Used for memory issue especially in the vpServo class. + */ typedef enum { user, vpDisplayForwardProjection } vpForwardProjectionDeallocatorType; /** @name Public Member Functions Inherited from vpForwardProjection */ //@{ - vpForwardProjection() : oP(), deallocate(user) {} - - //! Destructor that does nothing. - virtual ~vpForwardProjection() { ; } + vpForwardProjection() : oP(), deallocate(user) { } /*! - - Computes the features parameters in the camera frame (\e cP) thanks - to the parameters given in the object frame - (vpForwardProjection::oP) and the homogeneous matrix relative to - the pose (\e cMo) between the object frame and the camera frame. - - To set the parameters in the object frame you need to call - setWorldCoordinates(). - - \param cMo : The homogeneous matrix corresponding to the pose - between the camera frame and the object frame. - - \param cP : The vector which will contain the feature parameters - expressed in the camera frame. - - With this method, the vpTracker::cP public attribute is not updated. 
- - */ + * Computes the features parameters in the camera frame (\e cP) thanks + * to the parameters given in the object frame + * (vpForwardProjection::oP) and the homogeneous matrix relative to + * the pose (\e cMo) between the object frame and the camera frame. + * + * To set the parameters in the object frame you need to call + * setWorldCoordinates(). + * + * \param cMo : The homogeneous matrix corresponding to the pose + * between the camera frame and the object frame. + * + * \param cP : The vector which will contain the feature parameters + * expressed in the camera frame. + * + * With this method, the vpTracker::cP public attribute is not updated. + */ virtual void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const = 0; - /*! - - Computes the features parameters in the camera frame (\e cP) thanks - to the parameters given in the object frame - (vpForwardProjection::oP) and the homogeneous matrix relative to - the pose (\e cMo) between the object frame and the camera frame. - To set the parameters in the object frame you need to call - setWorldCoordinates(). - - \param cMo : The homogeneous matrix corresponding to the pose - between the camera frame and the object frame. - - The features parameters in the camera frame (cP) are updated in - the vpTracker::cP public attribute. - */ + /*! + * Computes the features parameters in the camera frame (\e cP) thanks + * to the parameters given in the object frame + * (vpForwardProjection::oP) and the homogeneous matrix relative to + * the pose (\e cMo) between the object frame and the camera frame. + * + * To set the parameters in the object frame you need to call + * setWorldCoordinates(). + * + * \param cMo : The homogeneous matrix corresponding to the pose + * between the camera frame and the object frame. + * + * The features parameters in the camera frame (cP) are updated in + * the vpTracker::cP public attribute. + */ virtual void changeFrame(const vpHomogeneousMatrix &cMo) = 0; /*! - - Displays the feature in the image \e I thanks to the 2D feature - parameters in the image plane (vpTracker::p) and the camera - parameters which enable to convert the features from meter to pixel. - - \param I : The image where the feature must be displayed in overlay. - - \param cam : The camera parameters to enable the conversion from - meter to pixel. - - \param color : The desired color to display the line in the image. - \param thickness : Thickness of the feature representation. - */ + * Displays the feature in the image \e I thanks to the 2D feature + * parameters in the image plane (vpTracker::p) and the camera + * parameters which enable to convert the features from meter to pixel. + * + * \param I : The image where the feature must be displayed in overlay. + * + * \param cam : The camera parameters to enable the conversion from + * meter to pixel. + * + * \param color : The desired color to display the line in the image. + * \param thickness : Thickness of the feature representation. + */ virtual void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1) = 0; /*! - - Displays the feature in the image \e I thanks to the features in - the object frame (vpForwardProjection::oP), the homogeneous matrix - relative to the pose between the object frame and the camera frame and the - camera parameters which enable to convert the features from meter - to pixel. - - \param I : The image where the line must be displayed in overlay. 
- - \param cMo : The homogeneous matrix corresponding to the pose - between the camera frame and the object frame. - - \param cam : The camera parameters to enable the conversion from - meter to pixel. - - \param color : The desired color to display the line in the image. - \param thickness : Thickness of the feature representation. + * Displays the feature in the image \e I thanks to the features in + * the object frame (vpForwardProjection::oP), the homogeneous matrix + * relative to the pose between the object frame and the camera frame and the + * camera parameters which enable to convert the features from meter + * to pixel. + * + * \param I : The image where the line must be displayed in overlay. + * + * \param cMo : The homogeneous matrix corresponding to the pose + * between the camera frame and the object frame. + * + * \param cam : The camera parameters to enable the conversion from + * meter to pixel. + * + * \param color : The desired color to display the line in the image. + * \param thickness : Thickness of the feature representation. */ virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1) = 0; /*! - Create an object with the same type. - */ + * Create an object with the same type. + */ virtual vpForwardProjection *duplicate() const = 0; //! Return object parameters expressed in the 3D object frame. @@ -164,25 +157,24 @@ class VISP_EXPORT vpForwardProjection : public vpTracker virtual void print() const; /*! - - Computes the feature parameters in the image plane from the - parameters expressed in the camera frame. - - \param cP [input] : Feature parameters expressed in the camera frame. - - \param p [output] : Feature parameters expressed in the image plane. - */ + * Computes the feature parameters in the image plane from the + * parameters expressed in the camera frame. + * + * \param cP [input] : Feature parameters expressed in the camera frame. + * + * \param p [output] : Feature parameters expressed in the image plane. + */ virtual void projection(const vpColVector &cP, vpColVector &p) const = 0; /*! - Computes the feature parameters in the image plane. These - parameters are than updated in the vpTracker::p public attribute. - - \warning To compute these parameters, the method exploit the - feature parameters in the camera frame. Thus, vpTracker::cP need - to be updated before the call of this method. For that, a call to - changeFrame(const vpHomogeneousMatrix &) is requested. - */ + * Computes the feature parameters in the image plane. These + * parameters are than updated in the vpTracker::p public attribute. + * + * \warning To compute these parameters, the method exploit the + * feature parameters in the camera frame. Thus, vpTracker::cP need + * to be updated before the call of this method. For that, a call to + * changeFrame(const vpHomogeneousMatrix &) is requested. + */ virtual void projection() = 0; void project(); @@ -191,12 +183,11 @@ class VISP_EXPORT vpForwardProjection : public vpTracker void setDeallocate(vpForwardProjectionDeallocatorType d) { deallocate = d; } /*! - Sets the parameters which define the feature in the object frame. - - \param oP : Feature parameters expressed in the object frame used - to set the vpForwardProjection::oP public attribute. - - */ + * Sets the parameters which define the feature in the object frame. 
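
As an illustration of the vpForwardProjection interface restyled above, and not part of the patch itself: the documentation distinguishes the const changeFrame(cMo, cP) overload, which fills a user-supplied vector, from changeFrame(cMo), which updates the internal vpTracker::cP attribute before projection() fills vpTracker::p. A minimal sketch of that pipeline, with vpCylinder standing in for any derived feature; the numeric values are arbitrary and get_p() is the vpTracker accessor referred to in the vpCylinder and vpLine documentation:

#include <iostream>

#include <visp3/core/vpColVector.h>
#include <visp3/core/vpCylinder.h>
#include <visp3/core/vpForwardProjection.h>
#include <visp3/core/vpHomogeneousMatrix.h>

int main()
{
  vpCylinder cylinder(0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.1); // illustrative values
  vpForwardProjection *feature = &cylinder;               // generic interface

  vpHomogeneousMatrix cMo(0.0, 0.0, 0.5, 0.0, 0.0, 0.0);  // arbitrary pose

  // Const overload: camera-frame parameters go into a user vector, the
  // vpTracker::cP attribute is left untouched.
  vpColVector cP;
  feature->changeFrame(cMo, cP);

  // Attribute-updating pipeline: cP then p are stored in the object itself.
  feature->changeFrame(cMo); // oP (object frame) -> cP (camera frame)
  feature->projection();     // cP (camera frame) -> p (image plane)

  std::cout << "p: " << feature->get_p().t() << std::endl;
  return 0;
}
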
+ * + * \param oP : Feature parameters expressed in the object frame used + * to set the vpForwardProjection::oP public attribute. + */ virtual void setWorldCoordinates(const vpColVector &oP) = 0; void track(const vpHomogeneousMatrix &cMo); @@ -206,11 +197,11 @@ class VISP_EXPORT vpForwardProjection : public vpTracker /** @name Protected Member Functions Inherited from vpForwardProjection */ //@{ /*! - Default initialisation of the feature parameters: - - in the object frame: \e oP - - in the camera frame: \e cP - - in the image plane: \e p. - */ + * Default initialisation of the feature parameters: + * - in the object frame: \e oP + * - in the camera frame: \e cP + * - in the image plane: \e p. + */ virtual void init() = 0; //@} @@ -218,8 +209,8 @@ class VISP_EXPORT vpForwardProjection : public vpTracker /** @name Public Attributes Inherited from vpForwardProjection */ //@{ /*! - Feature coordinates expressed in the object frame. - */ + * Feature coordinates expressed in the object frame. + */ vpColVector oP; //@} diff --git a/modules/core/include/visp3/core/vpFrameGrabber.h b/modules/core/include/visp3/core/vpFrameGrabber.h index 0806476bd0..91ab6fd7c6 100644 --- a/modules/core/include/visp3/core/vpFrameGrabber.h +++ b/modules/core/include/visp3/core/vpFrameGrabber.h @@ -38,61 +38,58 @@ #include /*! - \file vpFrameGrabber.h - \brief Base class for all video devices. It is - designed to provide a generic front end to video sources. -*/ + * \file vpFrameGrabber.h + * \brief Base class for all video devices. It is + * designed to provide a generic front end to video sources. + */ /*! - \class vpFrameGrabber - - \brief Base class for all video devices. It is designed to provide a front - end to video sources. - - This class should provide a virtual function that allows the acquisition - of an image. - - The example below shows how to use this class. - \code -#include -#include -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) - vpImage I; - vpFrameGrabber *g; // Generic framegrabber - -#if defined( VISP_HAVE_DC1394 ) - vp1394TwoGrabber *g_1394_2 = new vp1394TwoGrabber; - // specific settings for firewire grabber based on libdc1394-2.x version - g_1394_2->setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_320x240_YUV422); - g_1394_2->setFramerate(vp1394TwoGrabber::vpFRAMERATE_30); - g = g_1394_2; -#elif defined( VISP_HAVE_V4L2 ) - vpV4l2Grabber *g_v4l2 = new vpV4l2Grabber; - // specific settings for Video For Linux Two grabber - g_v4l2->setInput(2); // Input 2 on the board - g_v4l2->setFramerate(vpV4l2Grabber::framerate_50fps); // 50 fps - g_v4l2->setWidth(384); // Acquired images are 768 width - g_v4l2->setHeight(288); // Acquired images are 576 height - g_v4l2->setNBuffers(3); // 3 ring buffers to ensure real-time acquisition - g = g_v4l2; -#endif - - g->open(I); // Open the framegrabber - g->acquire(I); // Acquire an image - vpImageIo::write(I, "image.pgm"); // Write image on the disk -#endif -} - \endcode - - - \author Eric Marchand (Eric.Marchand@irisa.fr), Irisa / Inria Rennes -*/ + * \class vpFrameGrabber + * + * \brief Base class for all video devices. It is designed to provide a front + * end to video sources. + * + * This class should provide a virtual function that allows the acquisition + * of an image. + * + * The example below shows how to use this class. 
+ * \code + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) + * vpImage I; + * vpFrameGrabber *g; // Generic framegrabber + * + * #if defined( VISP_HAVE_DC1394 ) + * vp1394TwoGrabber *g_1394_2 = new vp1394TwoGrabber; + * // specific settings for firewire grabber based on libdc1394-2.x version + * g_1394_2->setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_320x240_YUV422); + * g_1394_2->setFramerate(vp1394TwoGrabber::vpFRAMERATE_30); + * g = g_1394_2; + * #elif defined( VISP_HAVE_V4L2 ) + * vpV4l2Grabber *g_v4l2 = new vpV4l2Grabber; + * // specific settings for Video For Linux Two grabber + * g_v4l2->setInput(2); // Input 2 on the board + * g_v4l2->setFramerate(vpV4l2Grabber::framerate_50fps); // 50 fps + * g_v4l2->setWidth(384); // Acquired images are 768 width + * g_v4l2->setHeight(288); // Acquired images are 576 height + * g_v4l2->setNBuffers(3); // 3 ring buffers to ensure real-time acquisition + * g = g_v4l2; + * #endif + * + * g->open(I); // Open the framegrabber + * g->acquire(I); // Acquire an image + * vpImageIo::write(I, "image.pgm"); // Write image on the disk + * #endif + * } + * \endcode + */ class VISP_EXPORT vpFrameGrabber { public: @@ -112,8 +109,7 @@ class VISP_EXPORT vpFrameGrabber //@} public: - vpFrameGrabber() : init(false), height(0), width(0){}; - virtual ~vpFrameGrabber() { ; } + vpFrameGrabber() : init(false), height(0), width(0) { }; virtual void open(vpImage &I) = 0; virtual void open(vpImage &I) = 0; @@ -122,9 +118,9 @@ class VISP_EXPORT vpFrameGrabber virtual void acquire(vpImage &I) = 0; /*! - This virtual function is used to de-allocate - the memory used by a specific frame grabber - */ + * This virtual function is used to de-allocate + * the memory used by a specific frame grabber + */ virtual void close() = 0; }; diff --git a/modules/core/include/visp3/core/vpHistogramValey.h b/modules/core/include/visp3/core/vpHistogramValey.h index d4ab3f463c..d0a753b17e 100644 --- a/modules/core/include/visp3/core/vpHistogramValey.h +++ b/modules/core/include/visp3/core/vpHistogramValey.h @@ -58,14 +58,11 @@ class VISP_EXPORT vpHistogramValey : vpHistogramPeak { public: - vpHistogramValey() : vpHistogramPeak(){}; + vpHistogramValey() : vpHistogramPeak() { }; - vpHistogramValey(unsigned char lvl, unsigned val) : vpHistogramPeak(lvl, val){}; + vpHistogramValey(unsigned char lvl, unsigned val) : vpHistogramPeak(lvl, val) { }; - vpHistogramValey(const vpHistogramValey &v) : vpHistogramPeak(v){}; - - /*! Destructor that does nothing. */ - virtual ~vpHistogramValey() {} + vpHistogramValey(const vpHistogramValey &v) : vpHistogramPeak(v) { }; vpHistogramValey &operator=(const vpHistogramValey &v); bool operator==(const vpHistogramValey &v) const; diff --git a/modules/core/include/visp3/core/vpHomogeneousMatrix.h b/modules/core/include/visp3/core/vpHomogeneousMatrix.h index 78fe4321c3..6e4291f992 100644 --- a/modules/core/include/visp3/core/vpHomogeneousMatrix.h +++ b/modules/core/include/visp3/core/vpHomogeneousMatrix.h @@ -212,10 +212,6 @@ class VISP_EXPORT vpHomogeneousMatrix : public vpArray2D #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpHomogeneousMatrix(const std::initializer_list &list); #endif - /*! - Destructor. 
- */ - virtual ~vpHomogeneousMatrix() { } void buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R); void buildFrom(const vpTranslationVector &t, const vpThetaUVector &tu); diff --git a/modules/core/include/visp3/core/vpLine.h b/modules/core/include/visp3/core/vpLine.h index 83ae2471b2..6b73e68511 100644 --- a/modules/core/include/visp3/core/vpLine.h +++ b/modules/core/include/visp3/core/vpLine.h @@ -35,9 +35,9 @@ #define vpLine_H /*! - \file vpLine.h - \brief class that defines what is a line -*/ + * \file vpLine.h + * \brief class that defines what is a line + */ #include #include @@ -45,120 +45,117 @@ #include /*! - \class vpLine - \ingroup group_core_geometry - - \brief Class that defines a 3D line in the object frame and allows forward projection - of the line in the camera frame and in the 2D image plane by perspective projection. - All the parameters must be set in meter. - - Note that a 3D line is defined from the intersection between two 3D planes. - - A 3D line has the followings parameters: - - **in the 3D object frame**: parameters are located in vpForwardProjection::oP 8-dim internal vector. They correspond - to the parameters oA1, oB1, oC1, oD1 and oA2, oB2, oC2, oD2 defining the equations of the two planes. - Each point \f$ (X, Y, Z) \f$ which belongs to the 3D line is a solution of those two - equations: - \f[ oA1*X + oB1*Y + oC1*Z + oD1 = 0 \f] - \f[ oA2*X + oB2*Y + oC2*Z + oD2 = 0 \f] - To update these line parameters you may use setWorldCoordinates(). To get theses parameters use get_oP(). - - - **in the 3D camera frame**: parameters are saved in vpTracker::cP 8-dim internal vector. They correspond - to the parameters cA1, cB1, cC1, cD1 and cA2, cB2, cC2, cD2 defining the equations of the two planes. - Each point \f$ (X, Y, Z) \f$ which belongs to the 3D line is a solution of those two - equations: - \f[ cA1*X + cB1*Y + cC1*Z + cD1 = 0 \f] - \f[ cA2*X + cB2*Y + cC2*Z + cD2 = 0 \f] - It is easily possible to compute these parameters thanks to the corresponding 3D parameters oP in the - object frame. But you have to note that four constraints are added in the planes equations. - \f[ cD1 = 0 \f] - \f[ cD2 > 0 \f] - \f[ cA1*cA2 + cB1*cB2 + cC1*cC2 = 0 \f] - \f[ || cA2 || = 1 \f] - To compute these parameters you may use changeFrame(). To get these parameters use get_cP(). - - - **in the 2D image plane**: parameters are saved in vpTracker::p 2-dim vector. They correspond - to the parameters (\f$\rho\f$, \f$\theta\f$). These - 2D parameters are obtained from the perspective projection of the 3D line parameters expressed - in the camera frame. They are defined thanks to the 2D equation of a line. - \f[ x \; cos(\theta) + y \; sin(\theta) -\rho = 0 \f] Here \f$ x - \f$ and \f$ y \f$ are the coordinates of a point belonging to the - line in the image plane while \f$ \rho \f$ and \f$ \theta \f$ are - the parameters used to define the line. The value of \f$ \theta - \f$ is between \f$ -\pi/2 \f$ and \f$ \pi/2 \f$ and the value of - \f$ \rho \f$ can be positive or negative. The conventions used to - choose the sign of \f$ \rho \f$ and the value of \f$ \theta \f$ - are illustrated by the following image. - \image html vpFeatureLine.gif - \image latex vpFeatureLine.ps width=10cm - The line parameters corresponding to the image frame are located - in the vpTracker::p public attribute, where \e p is a vector defined - as: \f[ p = \left[\begin{array}{c} \rho \\ \theta \end{array}\right] \f] - To compute these parameters use projection(). 
To get the corresponding values use get_p(). -*/ + * \class vpLine + * \ingroup group_core_geometry + * + * \brief Class that defines a 3D line in the object frame and allows forward projection + * of the line in the camera frame and in the 2D image plane by perspective projection. + * All the parameters must be set in meter. + * + * Note that a 3D line is defined from the intersection between two 3D planes. + * + * A 3D line has the followings parameters: + * - **in the 3D object frame**: parameters are located in vpForwardProjection::oP 8-dim internal vector. They correspond + * to the parameters oA1, oB1, oC1, oD1 and oA2, oB2, oC2, oD2 defining the equations of the two planes. + * Each point \f$ (X, Y, Z) \f$ which belongs to the 3D line is a solution of those two + * equations: + * \f[ oA1*X + oB1*Y + oC1*Z + oD1 = 0 \f] + * \f[ oA2*X + oB2*Y + oC2*Z + oD2 = 0 \f] + * To update these line parameters you may use setWorldCoordinates(). To get theses parameters use get_oP(). + * + * - **in the 3D camera frame**: parameters are saved in vpTracker::cP 8-dim internal vector. They correspond + * to the parameters cA1, cB1, cC1, cD1 and cA2, cB2, cC2, cD2 defining the equations of the two planes. + * Each point \f$ (X, Y, Z) \f$ which belongs to the 3D line is a solution of those two + * equations: + * \f[ cA1*X + cB1*Y + cC1*Z + cD1 = 0 \f] + * \f[ cA2*X + cB2*Y + cC2*Z + cD2 = 0 \f] + * It is easily possible to compute these parameters thanks to the corresponding 3D parameters oP in the + * object frame. But you have to note that four constraints are added in the planes equations. + * \f[ cD1 = 0 \f] + * \f[ cD2 > 0 \f] + * \f[ cA1*cA2 + cB1*cB2 + cC1*cC2 = 0 \f] + * \f[ || cA2 || = 1 \f] + * To compute these parameters you may use changeFrame(). To get these parameters use get_cP(). + * + * - **in the 2D image plane**: parameters are saved in vpTracker::p 2-dim vector. They correspond + * to the parameters (\f$\rho\f$, \f$\theta\f$). These + * 2D parameters are obtained from the perspective projection of the 3D line parameters expressed + * in the camera frame. They are defined thanks to the 2D equation of a line. + * \f[ x \; cos(\theta) + y \; sin(\theta) -\rho = 0 \f] Here \f$ x + * \f$ and \f$ y \f$ are the coordinates of a point belonging to the + * line in the image plane while \f$ \rho \f$ and \f$ \theta \f$ are + * the parameters used to define the line. The value of \f$ \theta + * \f$ is between \f$ -\pi/2 \f$ and \f$ \pi/2 \f$ and the value of + * \f$ \rho \f$ can be positive or negative. The conventions used to + * choose the sign of \f$ \rho \f$ and the value of \f$ \theta \f$ + * are illustrated by the following image. + * \image html vpFeatureLine.gif + * \image latex vpFeatureLine.ps width=10cm + * + * The line parameters corresponding to the image frame are located + * in the vpTracker::p public attribute, where \e p is a vector defined + * as: \f[ p = \left[\begin{array}{c} \rho \\ \theta \end{array}\right] \f] + * To compute these parameters use projection(). To get the corresponding values use get_p(). + */ class VISP_EXPORT vpLine : public vpForwardProjection { public: vpLine(); - //! 
Destructor - virtual ~vpLine() { ; } - void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpLine *duplicate() const; + vpLine *duplicate() const override; /*! - Gets the \f$ \rho \f$ value corresponding to one of the - two parameters used to define the line parametrization in the - image plane. - - \return Returns the current value of \f$ \rho \f$. - - \sa getTheta() - */ + * Gets the \f$ \rho \f$ value corresponding to one of the + * two parameters used to define the line parametrization in the + * image plane. + * + * \return Returns the current value of \f$ \rho \f$. + * + * \sa getTheta() + */ double getRho() const { return p[0]; } /*! - - Gets the \f$ \theta \f$ angle value corresponding to one of the - two parameters used to define the line parametrization in the - image plane. - - \return Returns the current value of \f$ \theta \f$. - - \sa getRho() - */ + * Gets the \f$ \theta \f$ angle value corresponding to one of the + * two parameters used to define the line parametrization in the + * image plane. + * + * \return Returns the current value of \f$ \theta \f$. + * + * \sa getRho() + */ double getTheta() const { return p[1]; } /*! - - Sets the \f$ \rho \f$ parameter used to define the line in the - image plane. - - \param rho : The desired value for \f$ \rho \f$. - - \sa setTheta() - */ + * Sets the \f$ \rho \f$ parameter used to define the line in the + * image plane. + * + * \param rho : The desired value for \f$ \rho \f$. + * + * \sa setTheta() + */ void setRho(double rho) { p[0] = rho; } /*! - Sets the \f$ \theta \f$ angle value used to define the line in the - image plane. - - \param theta : The desired value for \f$ \theta \f$ angle. - - \sa setRho() - */ + * Sets the \f$ \theta \f$ angle value used to define the line in the + * image plane. + * + * \param theta : The desired value for \f$ \theta \f$ angle. 
+ * + * \sa setRho() + */ void setTheta(double theta) { p[1] = theta; } void setWorldCoordinates(const double &oA1, const double &oB1, const double &oC1, const double &oD1, @@ -166,19 +163,13 @@ class VISP_EXPORT vpLine : public vpForwardProjection void setWorldCoordinates(const vpColVector &oP1, const vpColVector &oP2); - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; - void projection(); - void projection(const vpColVector &cP, vpColVector &p) const; + void projection() override; + void projection(const vpColVector &cP, vpColVector &p) const override; protected: - void init(); + void init() override; }; #endif - -/* - * Local variables: - * c-basic-offset: 2 - * End: - */ diff --git a/modules/core/include/visp3/core/vpLinearKalmanFilterInstantiation.h b/modules/core/include/visp3/core/vpLinearKalmanFilterInstantiation.h index 54e39faa6c..d6d7b207ac 100644 --- a/modules/core/include/visp3/core/vpLinearKalmanFilterInstantiation.h +++ b/modules/core/include/visp3/core/vpLinearKalmanFilterInstantiation.h @@ -55,11 +55,12 @@ class VISP_EXPORT vpLinearKalmanFilterInstantiation : public vpKalmanFilter /*! Selector used to set the Kalman filter state model. */ - typedef enum { - /*! Consider the state as a constant velocity model with white - noise. Measures available are the successive positions of the - target. To know more about this state model, see - initStateConstVel_MeasurePos(). */ + typedef enum + { +/*! Consider the state as a constant velocity model with white + noise. Measures available are the successive positions of the + target. To know more about this state model, see + initStateConstVel_MeasurePos(). */ stateConstVel_MeasurePos, /*! Consider the state as a constant velocity model with colored noise measurements as acceleration terms. Measured available are the @@ -81,10 +82,8 @@ class VISP_EXPORT vpLinearKalmanFilterInstantiation : public vpKalmanFilter By default the state model is unknown and set to vpLinearKalmanFilterInstantiation::unknown. */ - vpLinearKalmanFilterInstantiation() : model(unknown){}; + vpLinearKalmanFilterInstantiation() : model(unknown) { }; - /*! Destructor that does nothng. */ - virtual ~vpLinearKalmanFilterInstantiation(){}; /*! Return the current state model. 
 */
diff --git a/modules/core/include/visp3/core/vpMath.h b/modules/core/include/visp3/core/vpMath.h
index f082ab5d33..237b2decfb 100644
--- a/modules/core/include/visp3/core/vpMath.h
+++ b/modules/core/include/visp3/core/vpMath.h
@@ -218,14 +218,14 @@ class VISP_EXPORT vpMath
 
   static double lineFitting(const std::vector<vpImagePoint> &imPts, double &a, double &b, double &c);
 
-  template <typename _Tp> static inline _Tp saturate(unsigned char v) { return _Tp(v); }
-  template <typename _Tp> static inline _Tp saturate(char v) { return _Tp(v); }
-  template <typename _Tp> static inline _Tp saturate(unsigned short v) { return _Tp(v); }
-  template <typename _Tp> static inline _Tp saturate(short v) { return _Tp(v); }
-  template <typename _Tp> static inline _Tp saturate(unsigned v) { return _Tp(v); }
-  template <typename _Tp> static inline _Tp saturate(int v) { return _Tp(v); }
-  template <typename _Tp> static inline _Tp saturate(float v) { return _Tp(v); }
-  template <typename _Tp> static inline _Tp saturate(double v) { return _Tp(v); }
+  template <typename Tp> static inline Tp saturate(unsigned char v) { return Tp(v); }
+  template <typename Tp> static inline Tp saturate(char v) { return Tp(v); }
+  template <typename Tp> static inline Tp saturate(unsigned short v) { return Tp(v); }
+  template <typename Tp> static inline Tp saturate(short v) { return Tp(v); }
+  template <typename Tp> static inline Tp saturate(unsigned v) { return Tp(v); }
+  template <typename Tp> static inline Tp saturate(int v) { return Tp(v); }
+  template <typename Tp> static inline Tp saturate(float v) { return Tp(v); }
+  template <typename Tp> static inline Tp saturate(double v) { return Tp(v); }
 
   static double getMean(const std::vector<double> &v);
   static double getMedian(const std::vector<double> &v);
@@ -323,13 +323,13 @@ long double vpMath::comb(unsigned int n, unsigned int p)
 int vpMath::round(double x)
 {
 #if defined(VISP_HAVE_FUNC_STD_ROUND)
-  return (int)std::round(x);
+  return static_cast<int>(std::round(x));
 #elif defined(VISP_HAVE_FUNC_ROUND)
   //:: to design the global namespace and avoid to call recursively
   // vpMath::round
-  return (int)::round(x);
+  return static_cast<int>(::round(x));
 #else
-  return (x > 0.0) ? ((int)floor(x + 0.5)) : ((int)ceil(x - 0.5));
+  return (x > 0.0) ? (static_cast<int>(floor(x + 0.5))) : (static_cast<int>(ceil(x - 0.5)));
 #endif
 }
 
@@ -408,26 +408,29 @@ template <> inline unsigned char vpMath::saturate<unsigned char>(char v)
   // On little endian arch, CHAR_MIN=-127 and CHAR_MAX=128 leading to
   // (int)(char -127) = -127.
   if (std::numeric_limits<char>::is_signed)
-    return (unsigned char)(((std::max))((int)v, 0));
+    return static_cast<unsigned char>(std::max(static_cast<int>(v), 0));
   else
-    return (unsigned char)((unsigned int)v > SCHAR_MAX ? 0 : v);
+    return static_cast<unsigned char>(static_cast<unsigned int>(v) > SCHAR_MAX ? 0 : v);
 }
 
 template <> inline unsigned char vpMath::saturate<unsigned char>(unsigned short v)
 {
-  return (unsigned char)((std::min))((unsigned int)v, (unsigned int)UCHAR_MAX);
+  return static_cast<unsigned char>(std::min(static_cast<unsigned int>(v), static_cast<unsigned int>(UCHAR_MAX)));
 }
 
 template <> inline unsigned char vpMath::saturate<unsigned char>(int v)
 {
-  return (unsigned char)((unsigned int)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0);
+  return static_cast<unsigned char>(static_cast<unsigned int>(v) <= UCHAR_MAX ? v : v > 0 ?
UCHAR_MAX : 0);
 }
 
-template <> inline unsigned char vpMath::saturate<unsigned char>(short v) { return saturate<unsigned char>((int)v); }
+template <> inline unsigned char vpMath::saturate<unsigned char>(short v)
+{
+  return saturate<unsigned char>(static_cast<int>(v));
+}
 
 template <> inline unsigned char vpMath::saturate<unsigned char>(unsigned int v)
 {
-  return (unsigned char)((std::min))(v, (unsigned int)UCHAR_MAX);
+  return static_cast<unsigned char>(std::min(v, static_cast<unsigned int>(UCHAR_MAX)));
 }
 
 template <> inline unsigned char vpMath::saturate<unsigned char>(float v)
@@ -443,23 +446,29 @@ template <> inline unsigned char vpMath::saturate<unsigned char>(double v)
 }
 
 // char
-template <> inline char vpMath::saturate<char>(unsigned char v) { return (char)((std::min))((int)v, SCHAR_MAX); }
+template <> inline char vpMath::saturate<char>(unsigned char v)
+{
+  return static_cast<char>(std::min(static_cast<int>(v), SCHAR_MAX));
+}
 
 template <> inline char vpMath::saturate<char>(unsigned short v)
 {
-  return (char)((std::min))((unsigned int)v, (unsigned int)SCHAR_MAX);
+  return static_cast<char>(std::min(static_cast<unsigned int>(v), static_cast<unsigned int>(SCHAR_MAX)));
 }
 
 template <> inline char vpMath::saturate<char>(int v)
 {
-  return (char)((unsigned int)(v - SCHAR_MIN) <= (unsigned int)UCHAR_MAX ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
+  return static_cast<char>(static_cast<unsigned int>(v - SCHAR_MIN) <= static_cast<unsigned int>(UCHAR_MAX) ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
 }
 
-template <> inline char vpMath::saturate<char>(short v) { return saturate<char>((int)v); }
+template <> inline char vpMath::saturate<char>(short v)
+{
+  return saturate<char>((int)v);
+}
 
 template <> inline char vpMath::saturate<char>(unsigned int v)
 {
-  return (char)((std::min))(v, (unsigned int)SCHAR_MAX);
+  return static_cast<char>(std::min(v, static_cast<unsigned int>(SCHAR_MAX)));
 }
 
 template <> inline char vpMath::saturate<char>(float v)
@@ -483,24 +492,24 @@ template <> inline unsigned short vpMath::saturate<unsigned short>(char v)
   // On little endian arch, CHAR_MIN=-127 and CHAR_MAX=128 leading to
   // (int)(char -127) = -127.
   if (std::numeric_limits<char>::is_signed)
-    return (unsigned char)(((std::max))((int)v, 0));
+    return static_cast<unsigned char>(std::max(static_cast<int>(v), 0));
   else
-    return (unsigned char)((unsigned int)v > SCHAR_MAX ? 0 : v);
+    return static_cast<unsigned char>(static_cast<unsigned int>(v) > SCHAR_MAX ? 0 : v);
 }
 
 template <> inline unsigned short vpMath::saturate<unsigned short>(short v)
 {
-  return (unsigned short)((std::max))((int)v, 0);
+  return static_cast<unsigned short>(std::max(static_cast<int>(v), 0));
 }
 
 template <> inline unsigned short vpMath::saturate<unsigned short>(int v)
 {
-  return (unsigned short)((unsigned int)v <= (unsigned int)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0);
+  return static_cast<unsigned short>(static_cast<unsigned int>(v) <= static_cast<unsigned int>(USHRT_MAX) ? v : v > 0 ? USHRT_MAX : 0);
 }
 
 template <> inline unsigned short vpMath::saturate<unsigned short>(unsigned int v)
 {
-  return (unsigned short)((std::min))(v, (unsigned int)USHRT_MAX);
+  return static_cast<unsigned short>(std::min(v, static_cast<unsigned int>(USHRT_MAX)));
 }
 
 template <> inline unsigned short vpMath::saturate<unsigned short>(float v)
@@ -516,14 +525,17 @@ template <> inline unsigned short vpMath::saturate<unsigned short>(double v)
 }
 
 // short
-template <> inline short vpMath::saturate<short>(unsigned short v) { return (short)((std::min))((int)v, SHRT_MAX); }
+template <> inline short vpMath::saturate<short>(unsigned short v)
+{
+  return static_cast<short>(std::min(static_cast<int>(v), SHRT_MAX));
+}
 template <> inline short vpMath::saturate<short>(int v)
 {
-  return (short)((unsigned int)(v - SHRT_MIN) <= (unsigned int)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN);
+  return static_cast<short>(static_cast<unsigned int>(v - SHRT_MIN) <= static_cast<unsigned int>(USHRT_MAX) ? v : v > 0 ?
SHRT_MAX : SHRT_MIN); } template <> inline short vpMath::saturate(unsigned int v) { - return (short)((std::min))(v, (unsigned int)SHRT_MAX); + return static_cast(std::min(v, static_cast(SHRT_MAX))); } template <> inline short vpMath::saturate(float v) { @@ -537,15 +549,27 @@ template <> inline short vpMath::saturate(double v) } // int -template <> inline int vpMath::saturate(float v) { return vpMath::round(v); } +template <> inline int vpMath::saturate(float v) +{ + return vpMath::round(v); +} -template <> inline int vpMath::saturate(double v) { return vpMath::round(v); } +template <> inline int vpMath::saturate(double v) +{ + return vpMath::round(v); +} // unsigned int // (Comment from OpenCV) we intentionally do not clip negative numbers, to // make -1 become 0xffffffff etc. -template <> inline unsigned int vpMath::saturate(float v) { return (unsigned int)vpMath::round(v); } +template <> inline unsigned int vpMath::saturate(float v) +{ + return static_cast(vpMath::round(v)); +} -template <> inline unsigned int vpMath::saturate(double v) { return (unsigned int)vpMath::round(v); } +template <> inline unsigned int vpMath::saturate(double v) +{ + return static_cast(vpMath::round(v)); +} #endif diff --git a/modules/core/include/visp3/core/vpMatrix.h b/modules/core/include/visp3/core/vpMatrix.h index a1f28fd036..f1dac9443c 100644 --- a/modules/core/include/visp3/core/vpMatrix.h +++ b/modules/core/include/visp3/core/vpMatrix.h @@ -208,9 +208,6 @@ vpMatrix M(R); explicit vpMatrix(const std::initializer_list > &lists); #endif - //! Destructor (Memory de-allocation) - virtual ~vpMatrix() { } - /*! Removes all elements from the matrix (which are destroyed), leaving the container with a size of 0. diff --git a/modules/core/include/visp3/core/vpMatrixException.h b/modules/core/include/visp3/core/vpMatrixException.h index c38ee9a49a..edc9e136a1 100644 --- a/modules/core/include/visp3/core/vpMatrixException.h +++ b/modules/core/include/visp3/core/vpMatrixException.h @@ -54,7 +54,7 @@ class VISP_EXPORT vpMatrixException : public vpException */ enum errorCodeEnum { -//! Error returns by a constructor + //! Error returns by a constructor constructionError, //! Something is not initialized notInitializedError, diff --git a/modules/core/include/visp3/core/vpMeterPixelConversion.h b/modules/core/include/visp3/core/vpMeterPixelConversion.h index 29c3f969ed..0334b69d39 100644 --- a/modules/core/include/visp3/core/vpMeterPixelConversion.h +++ b/modules/core/include/visp3/core/vpMeterPixelConversion.h @@ -104,7 +104,7 @@ class VISP_EXPORT vpMeterPixelConversion */ inline static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v) { - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: convertPointWithoutDistortion(cam, x, y, u, v); break; @@ -146,7 +146,7 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, vpImagePoint &iP) { - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: convertPointWithoutDistortion(cam, x, y, iP); break; @@ -172,8 +172,8 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPointWithoutDistortion(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v) { - u = x * cam.px + cam.u0; - v = y * cam.py + cam.v0; + u = x * cam.m_px + cam.m_u0; + v = y * cam.m_py + cam.m_v0; } /*! 
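A minimal sketch of the clamping behaviour implemented by the vpMath::saturate specializations touched above; it only assumes the vpMath.h header shown in this hunk, and the expected values in the comments follow directly from the code of the specializations:

#include <iostream>
#include <visp3/core/vpMath.h>

int main()
{
  // Out-of-range values are clamped to the destination type instead of wrapping around
  unsigned char a = vpMath::saturate<unsigned char>(300); // 255 (UCHAR_MAX)
  unsigned char b = vpMath::saturate<unsigned char>(-12); // 0
  short c = vpMath::saturate<short>(100000);              // 32767 (SHRT_MAX)
  std::cout << static_cast<int>(a) << " " << static_cast<int>(b) << " " << c << std::endl;
  return 0;
}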
@@ -189,8 +189,8 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPointWithoutDistortion(const vpCameraParameters &cam, const double &x, const double &y, vpImagePoint &iP) { - iP.set_u(x * cam.px + cam.u0); - iP.set_v(y * cam.py + cam.v0); + iP.set_u(x * cam.m_px + cam.m_u0); + iP.set_v(y * cam.m_py + cam.m_v0); } /*! @@ -212,9 +212,9 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPointWithDistortion(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v) { - double r2 = 1. + cam.kud * (x * x + y * y); - u = cam.u0 + cam.px * x * r2; - v = cam.v0 + cam.py * y * r2; + double r2 = 1. + cam.m_kud * (x * x + y * y); + u = cam.m_u0 + cam.m_px * x * r2; + v = cam.m_v0 + cam.m_py * y * r2; } /*! @@ -236,9 +236,9 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPointWithDistortion(const vpCameraParameters &cam, const double &x, const double &y, vpImagePoint &iP) { - double r2 = 1. + cam.kud * (x * x + y * y); - iP.set_u(cam.u0 + cam.px * x * r2); - iP.set_v(cam.v0 + cam.py * y * r2); + double r2 = 1. + cam.m_kud * (x * x + y * y); + iP.set_u(cam.m_u0 + cam.m_px * x * r2); + iP.set_v(cam.m_v0 + cam.m_py * y * r2); } /*! @@ -272,7 +272,7 @@ class VISP_EXPORT vpMeterPixelConversion std::vector k = cam.getKannalaBrandtDistortionCoefficients(); double theta2 = theta * theta, theta3 = theta2 * theta, theta4 = theta2 * theta2, theta5 = theta4 * theta, - theta6 = theta3 * theta3, theta7 = theta6 * theta, theta8 = theta4 * theta4, theta9 = theta8 * theta; + theta6 = theta3 * theta3, theta7 = theta6 * theta, theta8 = theta4 * theta4, theta9 = theta8 * theta; double r_d = theta + k[0] * theta3 + k[1] * theta5 + k[2] * theta7 + k[3] * theta9; @@ -281,8 +281,8 @@ class VISP_EXPORT vpMeterPixelConversion double x_d = x * scale; double y_d = y * scale; - u = cam.px * x_d + cam.u0; - v = cam.py * y_d + cam.v0; + u = cam.m_px * x_d + cam.m_u0; + v = cam.m_py * y_d + cam.m_v0; } /*! @@ -315,7 +315,7 @@ class VISP_EXPORT vpMeterPixelConversion std::vector k = cam.getKannalaBrandtDistortionCoefficients(); double theta2 = theta * theta, theta3 = theta2 * theta, theta4 = theta2 * theta2, theta5 = theta4 * theta, - theta6 = theta3 * theta3, theta7 = theta6 * theta, theta8 = theta4 * theta4, theta9 = theta8 * theta; + theta6 = theta3 * theta3, theta7 = theta6 * theta, theta8 = theta4 * theta4, theta9 = theta8 * theta; double r_d = theta + k[0] * theta3 + k[1] * theta5 + k[2] * theta7 + k[3] * theta9; @@ -324,8 +324,8 @@ class VISP_EXPORT vpMeterPixelConversion double x_d = x * scale; double y_d = y * scale; - iP.set_u(cam.px * x_d + cam.u0); - iP.set_v(cam.py * y_d + cam.v0); + iP.set_u(cam.m_px * x_d + cam.m_u0); + iP.set_v(cam.m_py * y_d + cam.m_v0); } #endif // #ifndef DOXYGEN_SHOULD_SKIP_THIS diff --git a/modules/core/include/visp3/core/vpMomentAlpha.h b/modules/core/include/visp3/core/vpMomentAlpha.h index 6c52aeed00..56eada8a9c 100644 --- a/modules/core/include/visp3/core/vpMomentAlpha.h +++ b/modules/core/include/visp3/core/vpMomentAlpha.h @@ -42,163 +42,163 @@ #include /*! - \class vpMomentAlpha - - \ingroup group_core_moments - - \brief This class defines the orientation of the object inside the plane - parallel to the object. - - In general the value of the moment is computed in \f$ [-\pi/2 ; \pi/2] \f$ - interval by the formula \f$ \alpha = \frac{1}{2} - \mathrm{atan2}(2\mu_{11}, \mu_{20}-\mu_{02}) \f$. 
- - To obtain a \f$ [-\pi ; \pi] \f$ precision for non symmetric object, you - have to specify a reference information. This reference information is an - alpha computed using the previous formula in \f$ [-\pi/2 ; \pi/2] \f$. - Obtaining this precision comes from third-order centered moments and this - reference information. - - Therefore there are two modes for vpMomentAlpha and one constructor per - mode: - - Reference mode using the empty constructor vpMomentAlpha(): - The vpMomentAlpha doesn't need any additionnal information, it will compute - its values from available moments in \f$ [-\pi/2 ; \pi/2] \f$. - - Relative mode using non-empty constructor - vpMomentAlpha(std::vector&, double): The vpMomentAlpha is computed in - \f$ [-\pi ; \pi] \f$ from the available moments and the reference - information. By knowing the reference, it may distinguish in-plane rotations - of \f$ \alpha \f$ from rotations of \f$ \alpha + \pi \f$. - - The following code demonstrates a calculation of a reference alpha and then - uses this alpha to estimate the orientation of the same object after - performing a 180 degrees rotation. Therefore the first and second alpha should - have opposite values. - - \code -#include -#include -#include -#include -#include -#include - -//generic function for printing -void print (double i) { std::cout << i << "\t";} - -int main() -{ - vpPoint p; - std::vector vec_p; // Vector that contains the vertices of the contour polygon - p.set_x(1); p.set_y(1); // Coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // Coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(-3); p.set_y(0); // Coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(-3); p.set_y(-1); // Coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - vpMomentObject objRef(3); // Reference object. Must be of order 3 because we will - // need the 3rd order centered moments - - objRef.setType(vpMomentObject::DENSE_POLYGON); // Object is the inner part of a polygon - objRef.fromVector(vec_p); // Init the dense object with the polygon - - vpMomentDatabase dbRef; // Reference database - vpMomentGravityCenter gRef; // Declaration of gravity center - vpMomentCentered mcRef; // Centered moments - vpMomentAlpha alphaRef; // Declare alpha as reference - - gRef.linkTo(dbRef); // Add gravity center to database - mcRef.linkTo(dbRef); // Add centered moments - alphaRef.linkTo(dbRef); // Add alpha depending on centered moments - - dbRef.updateAll(objRef); // All of the moments must be updated, not just alpha - - gRef.compute(); // Compute the moment - mcRef.compute(); // Compute centered moments AFTER gravity center - alphaRef.compute(); // Compute alpha AFTER centered moments. 
- - // The order of values in the vector must be as follows: mu30 mu21 mu12 mu03 - std::vector mu3ref = {mcRef.get(3,0), mcRef.get(2,1), mcRef.get(1,2), mcRef.get(0,3)}; - - std::cout << "--- Reference object ---" << std::endl; - std::cout << "alphaRef=" << vpMath::deg(alphaRef.get()) << " deg" << std::endl << "mu3="; // print reference alpha - std::for_each (mu3ref.begin(), mu3ref.end(), print); - std::cout << std::endl; - - ////////////CURRENT VALUES (same object rotated 180deg - must be - ////////////entered in reverse order)//////////////// - vec_p.clear(); - - p.set_x(-3); p.set_y(1); // Coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - p.set_x(-3); p.set_y(0); // Coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(2); p.set_y(-2); // Coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(1); p.set_y(-1); // Coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - - vpMomentObject obj(3); // Second object. Order 3 is also required because of the Alpha - // will compare third-order centered moments to given reference. - - obj.setType(vpMomentObject::DENSE_POLYGON); // Object is the inner part of a polygon - obj.fromVector(vec_p); // Init the dense object with the polygon - - vpMomentDatabase db; // Database - vpMomentGravityCenter g; // Declaration of gravity center - vpMomentCentered mc; // mc contains centered moments - vpMomentAlpha alpha(mu3ref, alphaRef.get()); // Declare alpha as relative to a reference - - g.linkTo(db); // Add gravity center to database - mc.linkTo(db); // Add centered moments - alpha.linkTo(db); // Add alpha depending on centered moments - - db.updateAll(obj); // All of the moments must be updated - - g.compute(); // Compute the moment - mc.compute(); // Compute centered moments AFTER gravity center - alpha.compute(); // Compute alpha AFTER centered moments. - - std::cout << "--- current object ---" << std::endl; - std::cout << "alpha=" << vpMath::deg(alpha.get()) << " deg" << std::endl; - - return 0; -} - \endcode -This program outputs: -\code ---- Reference object --- -alphaRef=25.3019 deg -mu3=1.80552 0.921882 0.385828 0.122449 ---- current object --- -alpha=-25.3019 deg -\endcode - - There is also testMomentAlpha.cpp example that shows how to compute alpha in the range \f$ [-\pi ; \pi] \f$ - using arrow images as input. The code is given below: - \include testMomentAlpha.cpp - - From the first image we compute the 3rd order centered moments and the value of the reference alpha - that is than used to compute the alpha moment in the range \f$ [-\pi ; \pi] \f$. Running this example you will get: - \code -alpha expected 0 computed -0.128108 deg -alpha expected 45 computed 44.8881 deg -alpha expected 90 computed 89.8719 deg -alpha expected 135 computed 134.888 deg -alpha expected 180 computed 179.872 deg -alpha expected -135 computed -135.112 deg -alpha expected -90 computed -90.1281 deg -alpha expected -45 computed -45.1119 deg - \endcode - - Shortcuts for quickly getting those references exist in vpMomentCommon. - - This moment depends on vpMomentCentered. -*/ + * \class vpMomentAlpha + * + * \ingroup group_core_moments + * + * \brief This class defines the orientation of the object inside the plane + * parallel to the object. + * + * In general the value of the moment is computed in \f$ [-\pi/2 ; \pi/2] \f$ + * interval by the formula \f$ \alpha = \frac{1}{2} + * \mathrm{atan2}(2\mu_{11}, \mu_{20}-\mu_{02}) \f$. 
+ * + * To obtain a \f$ [-\pi ; \pi] \f$ precision for non symmetric object, you + * have to specify a reference information. This reference information is an + * alpha computed using the previous formula in \f$ [-\pi/2 ; \pi/2] \f$. + * Obtaining this precision comes from third-order centered moments and this + * reference information. + * + * Therefore there are two modes for vpMomentAlpha and one constructor per + * mode: + * - Reference mode using the empty constructor vpMomentAlpha(): + * The vpMomentAlpha doesn't need any additional information, it will compute + * its values from available moments in \f$ [-\pi/2 ; \pi/2] \f$. + * - Relative mode using non-empty constructor + * vpMomentAlpha(std::vector&, double): The vpMomentAlpha is computed in + * \f$ [-\pi ; \pi] \f$ from the available moments and the reference + * information. By knowing the reference, it may distinguish in-plane rotations + * of \f$ \alpha \f$ from rotations of \f$ \alpha + \pi \f$. + * + * The following code demonstrates a calculation of a reference alpha and then + * uses this alpha to estimate the orientation of the same object after + * performing a 180 degrees rotation. Therefore the first and second alpha should + * have opposite values. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * //generic function for printing + * void print (double i) { std::cout << i << "\t";} + * + * int main() + * { + * vpPoint p; + * std::vector vec_p; // Vector that contains the vertices of the contour polygon + * p.set_x(1); p.set_y(1); // Coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // Coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * p.set_x(-3); p.set_y(0); // Coordinates in meters in the image plane (vertex 3) + * vec_p.push_back(p); + * p.set_x(-3); p.set_y(-1); // Coordinates in meters in the image plane (vertex 4) + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * vpMomentObject objRef(3); // Reference object. Must be of order 3 because we will + * // need the 3rd order centered moments + * + * objRef.setType(vpMomentObject::DENSE_POLYGON); // Object is the inner part of a polygon + * objRef.fromVector(vec_p); // Init the dense object with the polygon + * + * vpMomentDatabase dbRef; // Reference database + * vpMomentGravityCenter gRef; // Declaration of gravity center + * vpMomentCentered mcRef; // Centered moments + * vpMomentAlpha alphaRef; // Declare alpha as reference + * + * gRef.linkTo(dbRef); // Add gravity center to database + * mcRef.linkTo(dbRef); // Add centered moments + * alphaRef.linkTo(dbRef); // Add alpha depending on centered moments + * + * dbRef.updateAll(objRef); // All of the moments must be updated, not just alpha + * + * gRef.compute(); // Compute the moment + * mcRef.compute(); // Compute centered moments AFTER gravity center + * alphaRef.compute(); // Compute alpha AFTER centered moments. 
+ * + * // The order of values in the vector must be as follows: mu30 mu21 mu12 mu03 + * std::vector mu3ref = {mcRef.get(3,0), mcRef.get(2,1), mcRef.get(1,2), mcRef.get(0,3)}; + * + * std::cout << "--- Reference object ---" << std::endl; + * std::cout << "alphaRef=" << vpMath::deg(alphaRef.get()) << " deg" << std::endl << "mu3="; // print reference alpha + * std::for_each (mu3ref.begin(), mu3ref.end(), print); + * std::cout << std::endl; + * + * ////////////CURRENT VALUES (same object rotated 180deg - must be + * ////////////entered in reverse order)//////////////// + * vec_p.clear(); + * + * p.set_x(-3); p.set_y(1); // Coordinates in meters in the image plane (vertex 4) + * vec_p.push_back(p); + * p.set_x(-3); p.set_y(0); // Coordinates in meters in the image plane (vertex 3) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(-2); // Coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * p.set_x(1); p.set_y(-1); // Coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * + * vpMomentObject obj(3); // Second object. Order 3 is also required because of the Alpha + * // will compare third-order centered moments to given reference. + * + * obj.setType(vpMomentObject::DENSE_POLYGON); // Object is the inner part of a polygon + * obj.fromVector(vec_p); // Init the dense object with the polygon + * + * vpMomentDatabase db; // Database + * vpMomentGravityCenter g; // Declaration of gravity center + * vpMomentCentered mc; // mc contains centered moments + * vpMomentAlpha alpha(mu3ref, alphaRef.get()); // Declare alpha as relative to a reference + * + * g.linkTo(db); // Add gravity center to database + * mc.linkTo(db); // Add centered moments + * alpha.linkTo(db); // Add alpha depending on centered moments + * + * db.updateAll(obj); // All of the moments must be updated + * + * g.compute(); // Compute the moment + * mc.compute(); // Compute centered moments AFTER gravity center + * alpha.compute(); // Compute alpha AFTER centered moments. + * + * std::cout << "--- current object ---" << std::endl; + * std::cout << "alpha=" << vpMath::deg(alpha.get()) << " deg" << std::endl; + * + * return 0; + * } + * \endcode + * This program outputs: + * \code + * --- Reference object --- + * alphaRef=25.3019 deg + * mu3=1.80552 0.921882 0.385828 0.122449 + * --- current object --- + * alpha=-25.3019 deg + * \endcode + * + * There is also testMomentAlpha.cpp example that shows how to compute alpha in the range \f$ [-\pi ; \pi] \f$ + * using arrow images as input. The code is given below: + * \include testMomentAlpha.cpp + * + * From the first image we compute the 3rd order centered moments and the value of the reference alpha + * that is than used to compute the alpha moment in the range \f$ [-\pi ; \pi] \f$. Running this example you will get: + * \code + * alpha expected 0 computed -0.128108 deg + * alpha expected 45 computed 44.8881 deg + * alpha expected 90 computed 89.8719 deg + * alpha expected 135 computed 134.888 deg + * alpha expected 180 computed 179.872 deg + * alpha expected -135 computed -135.112 deg + * alpha expected -90 computed -90.1281 deg + * alpha expected -45 computed -45.1119 deg + * \endcode + * + * Shortcuts for quickly getting those references exist in vpMomentCommon. + * + * This moment depends on vpMomentCentered. 
+ */ class VISP_EXPORT vpMomentAlpha : public vpMoment { private: @@ -211,21 +211,20 @@ class VISP_EXPORT vpMomentAlpha : public vpMoment public: vpMomentAlpha(); vpMomentAlpha(const std::vector &mu3_ref, double alpha_ref, double threshold = 1e-6); - virtual ~vpMomentAlpha(){}; void compute(); /*! - Retrieve the orientation of the object as a single double value. + * Retrieve the orientation of the object as a single double value. */ double get() const { return values[0]; } /*! - Moment name. + * Moment name. */ const char *name() const { return "vpMomentAlpha"; } /*! - Returns true if the alpha moment was constructed as a reference with values in \f$ [-\pi/2 ; \pi/2] \f$, false - otherwise. + * Returns true if the alpha moment was constructed as a reference with values in \f$ [-\pi/2 ; \pi/2] \f$, false + * otherwise. */ inline bool is_ref() const { @@ -236,8 +235,8 @@ class VISP_EXPORT vpMomentAlpha : public vpMoment } /*! - Returns true if the alpha moment is computed on a symmetric object along its two axis. - Symmetry is computed using 3rd order centered moments \f$\mu_{30},\mu_{21},\mu_{12},\mu_{03}\f$. + * Returns true if the alpha moment is computed on a symmetric object along its two axis. + * Symmetry is computed using 3rd order centered moments \f$\mu_{30},\mu_{21},\mu_{12},\mu_{03}\f$. */ inline bool is_symmetric() const { diff --git a/modules/core/include/visp3/core/vpMomentArea.h b/modules/core/include/visp3/core/vpMomentArea.h index d9467c12e4..9a05d6f8b7 100644 --- a/modules/core/include/visp3/core/vpMomentArea.h +++ b/modules/core/include/visp3/core/vpMomentArea.h @@ -39,26 +39,24 @@ class vpMomentObject; class vpMomentCentered; // Required for discrete case of vpMomentObject /*! - \class vpMomentArea - - \ingroup group_core_moments - - \brief Class handling the surface moment. - - For a dense planar object, the area corresponds to the zero-order moment: - \f[ a = m_{00} = \mu_{00} \f] - - When considering a discrete set of points, the moment \f$ m_{00} \f$ simply - corresponds to the number of points. Since this is of no use in a servoing - scheme, this class uses in this case \f$ a = \mu_{20} + \mu_{02} \f$, which is - invariant to planar translation and rotation. - -*/ + * \class vpMomentArea + * + * \ingroup group_core_moments + * + * \brief Class handling the surface moment. + * + * For a dense planar object, the area corresponds to the zero-order moment: + * \f[ a = m_{00} = \mu_{00} \f] + * + * When considering a discrete set of points, the moment \f$ m_{00} \f$ simply + * corresponds to the number of points. Since this is of no use in a servoing + * scheme, this class uses in this case \f$ a = \mu_{20} + \mu_{02} \f$, which is + * invariant to planar translation and rotation. + */ class VISP_EXPORT vpMomentArea : public vpMoment { public: vpMomentArea(); - virtual ~vpMomentArea(){}; /** @name Inherited functionalities from vpMomentArea */ //@{ diff --git a/modules/core/include/visp3/core/vpMomentAreaNormalized.h b/modules/core/include/visp3/core/vpMomentAreaNormalized.h index ab512fd9d7..342bbf6c83 100644 --- a/modules/core/include/visp3/core/vpMomentAreaNormalized.h +++ b/modules/core/include/visp3/core/vpMomentAreaNormalized.h @@ -31,9 +31,9 @@ * 2D normalized surface moment descriptor (usually described as An) */ /*! - \file vpMomentAreaNormalized.h - \brief 2D normalized surface moment descriptor (usually described as An). -*/ + * \file vpMomentAreaNormalized.h + * \brief 2D normalized surface moment descriptor (usually described as An). 
+ */ #ifndef _vpMomentAreaNormalized_h_ #define _vpMomentAreaNormalized_h_ @@ -43,93 +43,92 @@ class vpMomentObject; class vpMomentCentered; /*! - \class vpMomentAreaNormalized - - \ingroup group_core_moments - - \brief Class handling the normalized surface moment that is invariant in -scale and used to estimate depth. - - This moment depends on vpMomentCentered. - - The idea behind vpMomentAreaNormalized is described in \cite Tahri05z. - - During a visual servoing process, a vpMomentAreaNormalized will converge -towards the desired depth when the current surface will converge to the -destination surface. It is defined as follows: \f$ a_n=Z^* -\sqrt{\frac{a^*}{a}} \f$ where \e a is the current surface and \e a* the -destination surface. Consequently, the vpMomentAreaNormalized needs to have -information about the desired depth \e Z* and the desired surface \e a*. - - \warning About the order of the object. - The surface (refered to as \e a in the above paragraph) depends of the -nature of the object. - - In case of a continuous object (when vpMomentObject::getType() is -vpMomentObject::DENSE_FULL_OBJECT or vpMomentObject::DENSE_POLYGON) -\f$a=m_{00}\f$. - - In case of a discrete object (when vpMomentObject::getType() is -vpMomentObject::DISCRETE) \f$a=\mu_{20}+\mu_{02}\f$. - - Therefore, a vpMomentObject has to be of minimum order 2 in order to compute -a vpMomentAreaNormalized moment in the discrete case and of minimum order 0 in -continous cases. - - This example shows a computation in the discrete case. - \code -#include -#include -#include -#include -#include -#include - -int main() -{ - vpPoint p; - std::vector vec_p; // vector that contains object points - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane of point 1 - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane of point 2 - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - vpMomentObject obj(2); // Object where all the moment defined with - // i+j <= 2 will be computed below. Order is - // 2 because in discrete mode, the surface - // computation is a=mu02+mu02 - - - obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object - obj.fromVector(vec_p); // initialize the object with the points coordinates - - vpMomentDatabase db; //reference database - vpMomentGravityCenter g; // declaration of gravity center - vpMomentCentered mc; // centered moments - vpMomentAreaNormalized an(2,1); //declare normalized surface with - //destination depth of 1 meter and - //destination surface of 2 m2 - - g.linkTo(db); //add gravity center to database - mc.linkTo(db); //add centered moments - an.linkTo(db); //add alpha depending on centered moments - - db.updateAll(obj); // All of the moments must be updated, not just an - - g.compute(); // compute the moment - mc.compute(); //compute centered moments AFTER gravity center - an.compute(); //compute alpha AFTER centered moments. - - std::cout << an << std::endl; - - return 0; -} - \endcode - This code produces the following output: - \code -An:1.41421 - \endcode -*/ + * \class vpMomentAreaNormalized + * + * \ingroup group_core_moments + * + * \brief Class handling the normalized surface moment that is invariant in + * scale and used to estimate depth. + * + * This moment depends on vpMomentCentered. + * + * The idea behind vpMomentAreaNormalized is described in \cite Tahri05z. 
+ * + * During a visual servoing process, a vpMomentAreaNormalized will converge + * towards the desired depth when the current surface will converge to the + * destination surface. It is defined as follows: \f$ a_n=Z^* + * \sqrt{\frac{a^*}{a}} \f$ where \e a is the current surface and \e a* the + * destination surface. Consequently, the vpMomentAreaNormalized needs to have + * information about the desired depth \e Z* and the desired surface \e a*. + * + * \warning About the order of the object. + * The surface (referred to as \e a in the above paragraph) depends of the + * nature of the object. + * - In case of a continuous object (when vpMomentObject::getType() is + * vpMomentObject::DENSE_FULL_OBJECT or vpMomentObject::DENSE_POLYGON) + * \f$a=m_{00}\f$. + * - In case of a discrete object (when vpMomentObject::getType() is + * vpMomentObject::DISCRETE) \f$a=\mu_{20}+\mu_{02}\f$. + * + * Therefore, a vpMomentObject has to be of minimum order 2 in order to compute + * a vpMomentAreaNormalized moment in the discrete case and of minimum order 0 in + * continuous cases. + * + * This example shows a computation in the discrete case. + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpPoint p; + * std::vector vec_p; // vector that contains object points + * + * p.set_x(1); p.set_y(1); // coordinates in meters in the image plane of point 1 + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // coordinates in meters in the image plane of point 2 + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * vpMomentObject obj(2); // Object where all the moment defined with + * // i+j <= 2 will be computed below. Order is + * // 2 because in discrete mode, the surface + * // computation is a=mu02+mu02 + * + * obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object + * obj.fromVector(vec_p); // initialize the object with the points coordinates + * + * vpMomentDatabase db; //reference database + * vpMomentGravityCenter g; // declaration of gravity center + * vpMomentCentered mc; // centered moments + * vpMomentAreaNormalized an(2,1); //declare normalized surface with + * //destination depth of 1 meter and + * //destination surface of 2 m2 + * + * g.linkTo(db); //add gravity center to database + * mc.linkTo(db); //add centered moments + * an.linkTo(db); //add alpha depending on centered moments + * + * db.updateAll(obj); // All of the moments must be updated, not just an + * + * g.compute(); // compute the moment + * mc.compute(); //compute centered moments AFTER gravity center + * an.compute(); //compute alpha AFTER centered moments. + * + * std::cout << an << std::endl; + * + * return 0; + * } + * \endcode + * This code produces the following output: + * \code + * An:1.41421 + * \endcode + */ class VISP_EXPORT vpMomentAreaNormalized : public vpMoment { private: @@ -138,45 +137,46 @@ class VISP_EXPORT vpMomentAreaNormalized : public vpMoment public: vpMomentAreaNormalized(double a_star, double Z_star); - virtual ~vpMomentAreaNormalized() { }; void compute(); + /*! - Retrieves the desired surface \e a* as specified in the constructor. - */ + * Retrieves the desired surface \e a* as specified in the constructor. + */ double getDesiredArea() const { return desiredSurface; } + /*! - Retrieves the desired depth \e Z* as specified in the constructor. - */ + * Retrieves the desired depth \e Z* as specified in the constructor. 
+ */ double getDesiredDepth() const { return desiredDepth; } /*! - Set the desired depth \e Z* to a new value than the one specified in the constructor. - This value has to be set before calling compute(). - */ + * Set the desired depth \e Z* to a new value than the one specified in the constructor. + * This value has to be set before calling compute(). + */ void setDesiredDepth(double Z_star) { desiredDepth = Z_star; } + /*! - Set the desired area \e a* to a new value than the one specified in the constructor. - This value has to be set before calling compute(). - */ + * Set the desired area \e a* to a new value than the one specified in the constructor. + * This value has to be set before calling compute(). + */ void setDesiredArea(double a_star) { desiredSurface = a_star; } - #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) /*! - @name Deprecated functions - */ + * @name Deprecated functions + */ //@{ /*! - \deprecated Use rather getDesiredArea() - Retrieves the desired surface \e a* as specified in the constructor. - */ + * \deprecated Use rather getDesiredArea() + * Retrieves the desired surface \e a* as specified in the constructor. + */ vp_deprecated double getDesiredSurface() const { return desiredSurface; } //@} #endif /*! - Moment name. - */ + * Moment name. + */ const char *name() const { return "vpMomentAreaNormalized"; } friend VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpMomentAreaNormalized &v); void printDependencies(std::ostream &os) const; diff --git a/modules/core/include/visp3/core/vpMomentBasic.h b/modules/core/include/visp3/core/vpMomentBasic.h index 5d8acf8dc1..7597478402 100644 --- a/modules/core/include/visp3/core/vpMomentBasic.h +++ b/modules/core/include/visp3/core/vpMomentBasic.h @@ -70,7 +70,6 @@ class VISP_EXPORT vpMomentBasic : public vpMoment { public: vpMomentBasic(); - virtual ~vpMomentBasic(){}; void compute(); const std::vector &get() const; diff --git a/modules/core/include/visp3/core/vpMomentCInvariant.h b/modules/core/include/visp3/core/vpMomentCInvariant.h index 6323fa859f..106d576f61 100644 --- a/modules/core/include/visp3/core/vpMomentCInvariant.h +++ b/modules/core/include/visp3/core/vpMomentCInvariant.h @@ -51,25 +51,25 @@ class vpMomentBasic; \ingroup group_core_moments This class defines several 2D (translation+rotation+scale) invariants for -both symmetric and non-symmetric objects. These moment-based invariants are -described in the following papers \cite Chaumette04a, \cite Tahri05z. + both symmetric and non-symmetric objects. These moment-based invariants are + described in the following papers \cite Chaumette04a, \cite Tahri05z. The descriptions for the invariants \f$C_1\f$ to \f$C_{10}\f$ can be found -in \cite Chaumette04a and for invariants -\f$P_x\f$,\f$P_y\f$,\f$S_x\f$,\f$S_y\f$ in \cite Tahri05z. + in \cite Chaumette04a and for invariants + \f$P_x\f$,\f$P_y\f$,\f$S_x\f$,\f$S_y\f$ in \cite Tahri05z. These invariants are classicaly used in visual servoing to control the -out-of-plane rotations. The C-type or P-type invariants are used for -non-symmetric objects whereas the S-type invariants are used for symmetric -objects. + out-of-plane rotations. The C-type or P-type invariants are used for + non-symmetric objects whereas the S-type invariants are used for symmetric + objects. For most cases of non-symmetric objects, (\f$C_4\f$,\f$C_6\f$) or -(\f$P_x\f$,\f$P_y\f$) couples are widely used to control x and y rotations. + (\f$P_x\f$,\f$P_y\f$) couples are widely used to control x and y rotations. 
For symmetric objects \f$S_x\f$ and \f$S_y\f$ are the only choice. There are 14 translation+rotation+scale invariants (10 C-type, 2 P-type and -2 S-type) that can be accessed from by vpMomentCInvariant::get or any of the -get shortcuts. + 2 S-type) that can be accessed from by vpMomentCInvariant::get or any of the + get shortcuts. The example below shows how to retrieve the \f$C_2\f$ invariant: \code @@ -140,7 +140,6 @@ class VISP_EXPORT vpMomentCInvariant : public vpMoment public: explicit vpMomentCInvariant(bool flg_sxsynormalization = false); - virtual ~vpMomentCInvariant() { }; /*! Shorcut for getting the value of \f$C_1\f$. diff --git a/modules/core/include/visp3/core/vpMomentCentered.h b/modules/core/include/visp3/core/vpMomentCentered.h index 2e0a8043a9..90b0a00f78 100644 --- a/modules/core/include/visp3/core/vpMomentCentered.h +++ b/modules/core/include/visp3/core/vpMomentCentered.h @@ -37,7 +37,7 @@ #include /*! \file vpMomentCentered.h - \brief Centered moment descriptor (also refered to as \f$\mu_{ij}\f$). + \brief Centered moment descriptor (also referred as \f$\mu_{ij}\f$). */ @@ -75,7 +75,6 @@ class VISP_EXPORT vpMomentCentered : public vpMoment { public: vpMomentCentered(); - virtual ~vpMomentCentered(){}; void compute(); double get(unsigned int i, unsigned int j) const; diff --git a/modules/core/include/visp3/core/vpMomentCommon.h b/modules/core/include/visp3/core/vpMomentCommon.h index 5fbae91c2d..33e6a19979 100644 --- a/modules/core/include/visp3/core/vpMomentCommon.h +++ b/modules/core/include/visp3/core/vpMomentCommon.h @@ -68,27 +68,27 @@ class vpMomentObject; - vpMomentAlpha - vpMomentArea - There is no need to do the linkTo operations manually nor is it necessary + There is no need to do the linkTo operations manually nor is it necessary to care about the order of moment computation. - This class carries an vpMomentCommon::updateAll() method capable of + This class carries an vpMomentCommon::updateAll() method capable of updating AND computing moments from an object (see 4-step process in vpMoment). The moments computed by this class are classical moments used in moment-based visual servoing. For more information see \cite Tahri05z. - To initialize this moment set the user needs to compute the following + To initialize this moment set the user needs to compute the following things: - the Mu3 value set: set of third-order centered moments computed for a - reference object. (\f$\mu_{ij}$ with $i+j = 3\f$ ). These values allow the - system to save the reference angular position and to perform planar - rotations of more than 180 degrees if needed. + reference object. (\f$\mu_{ij}$ with $i+j = 3\f$ ). These values allow the + system to save the reference angular position and to perform planar + rotations of more than 180 degrees if needed. - the destination depth. - the surface of the destination object in the end of the visual servoing - process. + process. - the reference alpha: angular position of the object used to obtain the - Mu3 set. + Mu3 set. - Shortcuts for each of these prerequisites are provided by this class + Shortcuts for each of these prerequisites are provided by this class except depth (methods vpMomentCommon::getMu3(), vpMomentCommon::getSurface(), vpMomentCommon::getAlpha()). 
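As a complement, a minimal sketch of how the shortcuts listed above could be used to initialise the common moment set from a reference object; the object order, the desired depth of 1 m and the element type of the mu3 vector (double, as in the vpMomentAlpha example earlier) are assumptions made only for this illustration:

#include <vector>
#include <visp3/core/vpMomentCommon.h>
#include <visp3/core/vpMomentObject.h>

void buildCommonMoments(vpMomentObject &ref_obj, vpMomentObject &cur_obj)
{
  // Prerequisites extracted from the reference object with the static shortcuts
  double ref_surface = vpMomentCommon::getSurface(ref_obj);
  double ref_alpha = vpMomentCommon::getAlpha(ref_obj);
  std::vector<double> ref_mu3 = vpMomentCommon::getMu3(ref_obj);

  // Desired depth Z* = 1 m; all dependent moments are linked internally by the database
  vpMomentCommon moments(ref_surface, ref_mu3, ref_alpha, 1.0);

  // Update and compute every moment of the set in the right dependency order
  moments.updateAll(cur_obj);
}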
@@ -128,12 +128,12 @@ class VISP_EXPORT vpMomentCommon : public vpMomentDatabase public: vpMomentCommon(double dstSurface, const std::vector &ref, double refAlpha, double dstZ = 1.0, bool flg_sxsyfromnormalized = false); - virtual ~vpMomentCommon(); + virtual ~vpMomentCommon() override; static double getAlpha(vpMomentObject &object); static std::vector getMu3(vpMomentObject &object); static double getSurface(vpMomentObject &object); - void updateAll(vpMomentObject &object); + void updateAll(vpMomentObject &object) override; }; #endif // VPCOMMONMOMENTS_H diff --git a/modules/core/include/visp3/core/vpMomentDatabase.h b/modules/core/include/visp3/core/vpMomentDatabase.h index f9b4c80902..bb66dc2525 100644 --- a/modules/core/include/visp3/core/vpMomentDatabase.h +++ b/modules/core/include/visp3/core/vpMomentDatabase.h @@ -52,19 +52,19 @@ class vpMomentObject; \ingroup group_core_moments \brief This class allows to register all vpMoments so they can access each -other according to their dependencies. + other according to their dependencies. Sometimes, a moment needs to have access to other moment's values to be -computed. For example vpMomentCentered needs additionnal information about the -gravity center vpMomentGravityCenter in order to compute the moment's value -from a vpMomentObject. This gravity center should be stored in a -vpMomentDatabase where it can be accessed. + computed. For example vpMomentCentered needs additional information about the + gravity center vpMomentGravityCenter in order to compute the moment's value + from a vpMomentObject. This gravity center should be stored in a + vpMomentDatabase where it can be accessed. All moments in a database can access each other freely at any time. They can -also verify if a moment is present in the database or not. Here is a example -of a dependency between two moments using a vpMomentDatabase: + also verify if a moment is present in the database or not. 
Here is a example + of a dependency between two moments using a vpMomentDatabase: -\code + \code #include #include #include @@ -127,7 +127,8 @@ class VISP_EXPORT vpMomentDatabase { private: #ifndef DOXYGEN_SHOULD_SKIP_THIS - struct vpCmpStr_t { + struct vpCmpStr_t + { bool operator()(char const *a, char const *b) const { return std::strcmp(a, b) < 0; } }; #endif @@ -135,8 +136,8 @@ class VISP_EXPORT vpMomentDatabase void add(vpMoment &moment, const char *name); public: - vpMomentDatabase() : moments() {} - virtual ~vpMomentDatabase() {} + vpMomentDatabase() : moments() { } + virtual ~vpMomentDatabase() { } /** @name Inherited functionalities from vpMomentDatabase */ //@{ diff --git a/modules/core/include/visp3/core/vpMomentGravityCenter.h b/modules/core/include/visp3/core/vpMomentGravityCenter.h index 66e212ca76..39171649ff 100644 --- a/modules/core/include/visp3/core/vpMomentGravityCenter.h +++ b/modules/core/include/visp3/core/vpMomentGravityCenter.h @@ -55,7 +55,7 @@ class vpMomentObject; These coordinates are defined as follows: \f$x_g = \frac{m_{01}}{m_{00}} \f$,\f$y_g = \frac{m_{10}}{m_{00}} \f$ -\code + \code #include #include #include @@ -95,21 +95,20 @@ int main() return 0; } -\endcode + \endcode -This example produces the following results: -\code + This example produces the following results: + \code Xg=0.0166667 Yg=-0.00833333 Xg=0.0166667, Yg=-0.00833333 -\endcode + \endcode */ class VISP_EXPORT vpMomentGravityCenter : public vpMoment { public: vpMomentGravityCenter(); - virtual ~vpMomentGravityCenter(){}; /** @name Inherited functionalities from vpMomentGravityCenter */ //@{ diff --git a/modules/core/include/visp3/core/vpMomentGravityCenterNormalized.h b/modules/core/include/visp3/core/vpMomentGravityCenterNormalized.h index a86e80af28..aeb9d02b5b 100644 --- a/modules/core/include/visp3/core/vpMomentGravityCenterNormalized.h +++ b/modules/core/include/visp3/core/vpMomentGravityCenterNormalized.h @@ -61,7 +61,6 @@ class VISP_EXPORT vpMomentGravityCenterNormalized : public vpMomentGravityCenter { public: vpMomentGravityCenterNormalized(); - virtual ~vpMomentGravityCenterNormalized(){}; void compute(); //! Moment name. const char *name() const { return "vpMomentGravityCenterNormalized"; } diff --git a/modules/core/include/visp3/core/vpMomentObject.h b/modules/core/include/visp3/core/vpMomentObject.h index 1e39cec69d..fb8e1af6ff 100644 --- a/modules/core/include/visp3/core/vpMomentObject.h +++ b/modules/core/include/visp3/core/vpMomentObject.h @@ -98,7 +98,7 @@ class vpCameraParameters; to get the basic moments that are computed and how to compute other classical moments such as the gravity center or the centered moments. 
-\code + \code #include #include #include diff --git a/modules/core/include/visp3/core/vpPixelMeterConversion.h b/modules/core/include/visp3/core/vpPixelMeterConversion.h index 0e546b1149..962b3f8848 100644 --- a/modules/core/include/visp3/core/vpPixelMeterConversion.h +++ b/modules/core/include/visp3/core/vpPixelMeterConversion.h @@ -101,7 +101,7 @@ class VISP_EXPORT vpPixelMeterConversion */ inline static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y) { - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: convertPointWithoutDistortion(cam, u, v, x, y); break; @@ -143,7 +143,7 @@ class VISP_EXPORT vpPixelMeterConversion */ inline static void convertPoint(const vpCameraParameters &cam, const vpImagePoint &iP, double &x, double &y) { - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: convertPointWithoutDistortion(cam, iP, x, y); break; @@ -172,8 +172,8 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithoutDistortion(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y) { - x = (u - cam.u0) * cam.inv_px; - y = (v - cam.v0) * cam.inv_py; + x = (u - cam.m_u0) * cam.m_inv_px; + y = (v - cam.m_v0) * cam.m_inv_py; } /*! @@ -194,8 +194,8 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithoutDistortion(const vpCameraParameters &cam, const vpImagePoint &iP, double &x, double &y) { - x = (iP.get_u() - cam.u0) * cam.inv_px; - y = (iP.get_v() - cam.v0) * cam.inv_py; + x = (iP.get_u() - cam.m_u0) * cam.m_inv_px; + y = (iP.get_v() - cam.m_v0) * cam.m_inv_py; } /*! @@ -215,9 +215,9 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithDistortion(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y) { - double r2 = 1. + cam.kdu * (vpMath::sqr((u - cam.u0) * cam.inv_px) + vpMath::sqr((v - cam.v0) * cam.inv_py)); - x = (u - cam.u0) * r2 * cam.inv_px; - y = (v - cam.v0) * r2 * cam.inv_py; + double r2 = 1. + cam.m_kdu * (vpMath::sqr((u - cam.m_u0) * cam.m_inv_px) + vpMath::sqr((v - cam.m_v0) * cam.m_inv_py)); + x = (u - cam.m_u0) * r2 * cam.m_inv_px; + y = (v - cam.m_v0) * r2 * cam.m_inv_py; } /*! @@ -239,10 +239,10 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithDistortion(const vpCameraParameters &cam, const vpImagePoint &iP, double &x, double &y) { - double r2 = 1. + cam.kdu * (vpMath::sqr((iP.get_u() - cam.u0) * cam.inv_px) + - vpMath::sqr((iP.get_v() - cam.v0) * cam.inv_py)); - x = (iP.get_u() - cam.u0) * r2 * cam.inv_px; - y = (iP.get_v() - cam.v0) * r2 * cam.inv_py; + double r2 = 1. + cam.m_kdu * (vpMath::sqr((iP.get_u() - cam.m_u0) * cam.m_inv_px) + + vpMath::sqr((iP.get_v() - cam.m_v0) * cam.m_inv_py)); + x = (iP.get_u() - cam.m_u0) * r2 * cam.m_inv_px; + y = (iP.get_v() - cam.m_v0) * r2 * cam.m_inv_py; } /*! 
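A minimal sketch of the pixel-to-meter dispatch exercised by this file, assuming a hypothetical perspective camera without distortion with px = py = 600 and principal point (320, 240); the vpCameraParameters constructor used here is not part of this patch and is only an assumption for the example:

#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpPixelMeterConversion.h>

int main()
{
  // Hypothetical intrinsics: px, py, u0, v0 (perspective projection without distortion)
  vpCameraParameters cam(600.0, 600.0, 320.0, 240.0);

  double x = 0.0, y = 0.0; // normalized coordinates in meters
  vpPixelMeterConversion::convertPoint(cam, 400.0, 300.0, x, y);
  // With this model: x = (u - u0) / px = 80 / 600, y = (v - v0) / py = 60 / 600

  return 0;
}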
@@ -268,7 +268,7 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithKannalaBrandtDistortion(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y) { - double x_d = (u - cam.u0) / cam.px, y_d = (v - cam.v0) / cam.py; + double x_d = (u - cam.m_u0) / cam.m_px, y_d = (v - cam.m_v0) / cam.m_py; double scale = 1.0; double r_d = sqrt(vpMath::sqr(x_d) + vpMath::sqr(y_d)); @@ -285,10 +285,10 @@ class VISP_EXPORT vpPixelMeterConversion for (int j = 0; j < 10; j++) { double theta2 = theta * theta, theta4 = theta2 * theta2, theta6 = theta4 * theta2, theta8 = theta6 * theta2; double k0_theta2 = k[0] * theta2, k1_theta4 = k[1] * theta4, k2_theta6 = k[2] * theta6, - k3_theta8 = k[3] * theta8; - /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ + k3_theta8 = k[3] * theta8; + /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ double theta_fix = (theta * (1 + k0_theta2 + k1_theta4 + k2_theta6 + k3_theta8) - r_d) / - (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8); + (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8); theta = theta - theta_fix; if (fabs(theta_fix) < EPS) break; @@ -323,7 +323,7 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithKannalaBrandtDistortion(const vpCameraParameters &cam, const vpImagePoint &iP, double &x, double &y) { - double x_d = (iP.get_u() - cam.u0) / cam.px, y_d = (iP.get_v() - cam.v0) / cam.py; + double x_d = (iP.get_u() - cam.m_u0) / cam.m_px, y_d = (iP.get_v() - cam.m_v0) / cam.m_py; double scale = 1.0; double r_d = sqrt(vpMath::sqr(x_d) + vpMath::sqr(y_d)); @@ -340,10 +340,10 @@ class VISP_EXPORT vpPixelMeterConversion for (int j = 0; j < 10; j++) { double theta2 = theta * theta, theta4 = theta2 * theta2, theta6 = theta4 * theta2, theta8 = theta6 * theta2; double k0_theta2 = k[0] * theta2, k1_theta4 = k[1] * theta4, k2_theta6 = k[2] * theta6, - k3_theta8 = k[3] * theta8; - /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ + k3_theta8 = k[3] * theta8; + /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ double theta_fix = (theta * (1 + k0_theta2 + k1_theta4 + k2_theta6 + k3_theta8) - r_d) / - (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8); + (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8); theta = theta - theta_fix; if (fabs(theta_fix) < EPS) break; diff --git a/modules/core/include/visp3/core/vpPoint.h b/modules/core/include/visp3/core/vpPoint.h index 989859132d..9710e1ecc1 100644 --- a/modules/core/include/visp3/core/vpPoint.h +++ b/modules/core/include/visp3/core/vpPoint.h @@ -82,23 +82,21 @@ class VISP_EXPORT vpPoint : public vpForwardProjection vpPoint(double oX, double oY, double oZ); explicit vpPoint(const vpColVector &oP); explicit vpPoint(const std::vector &oP); - //! Destructor. 
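The loop re-indented above in convertPointWithKannalaBrandtDistortion() is a Newton iteration: it solves \f$ r_d = \theta (1 + k_1\theta^2 + k_2\theta^4 + k_3\theta^6 + k_4\theta^8) \f$ for \f$\theta\f$ with the update \f$\theta \leftarrow \theta - f(\theta)/f'(\theta)\f$, stopping after 10 iterations or once the correction drops below EPS. A standalone sketch of the same solve, where the coefficients and the tolerance are made up and k[0]..k[3] play the role of \f$k_1..k_4\f$:

\code
#include <cmath>
#include <iostream>

// Standalone sketch of the Newton solve used above: find theta such that
// r_d = theta * (1 + k1*theta^2 + k2*theta^4 + k3*theta^6 + k4*theta^8)
double undistortedTheta(double r_d, const double k[4])
{
  double theta = r_d; // initial guess: the distorted radius itself
  for (int j = 0; j < 10; ++j) {
    double t2 = theta * theta, t4 = t2 * t2, t6 = t4 * t2, t8 = t6 * t2;
    double f = theta * (1 + k[0] * t2 + k[1] * t4 + k[2] * t6 + k[3] * t8) - r_d;
    double f_prime = 1 + 3 * k[0] * t2 + 5 * k[1] * t4 + 7 * k[2] * t6 + 9 * k[3] * t8;
    double theta_fix = f / f_prime;
    theta -= theta_fix;
    if (std::fabs(theta_fix) < 1e-8) // arbitrary tolerance for this sketch
      break;
  }
  return theta;
}

int main()
{
  const double k[4] = { -0.01, 0.002, -0.0003, 0.00004 }; // made-up coefficients
  std::cout << undistortedTheta(0.5, k) << std::endl;
  return 0;
}
\endcode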
- virtual ~vpPoint() {} public: // Compute the 3D coordinates _cP (camera frame) - void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpPoint *duplicate() const; + vpPoint *duplicate() const override; // Get coordinates double get_X() const; @@ -119,17 +117,12 @@ class VISP_EXPORT vpPoint : public vpForwardProjection void getWorldCoordinates(std::vector &oP); friend VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpPoint &vpp); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - vpPoint &operator=(const vpPoint &vpp) = default; -#else - vpPoint &operator=(const vpPoint &vpp); -#endif //! Projection onto the image plane of a point. Input: the 3D coordinates in //! the camera frame _cP, output : the 2D coordinates _p. - void projection(const vpColVector &_cP, vpColVector &_p) const; + void projection(const vpColVector &_cP, vpColVector &_p) const override; - void projection(); + void projection() override; // Set coordinates void set_X(double cX); @@ -145,12 +138,12 @@ class VISP_EXPORT vpPoint : public vpForwardProjection void set_w(double w); void setWorldCoordinates(double oX, double oY, double oZ); - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; void setWorldCoordinates(const std::vector &oP); protected: //! Basic construction. - void init(); + void init() override; }; #endif diff --git a/modules/core/include/visp3/core/vpPoseVector.h b/modules/core/include/visp3/core/vpPoseVector.h index e98544a936..fb59bf63f2 100644 --- a/modules/core/include/visp3/core/vpPoseVector.h +++ b/modules/core/include/visp3/core/vpPoseVector.h @@ -201,10 +201,6 @@ class VISP_EXPORT vpPoseVector : public vpArray2D vpPoseVector(const vpTranslationVector &tv, const vpThetaUVector &tu); // constructor convert a translation and a rotation matrix into a pose vpPoseVector(const vpTranslationVector &tv, const vpRotationMatrix &R); - /*! - Destructor. - */ - virtual ~vpPoseVector() { }; vpPoseVector buildFrom(double tx, double ty, double tz, double tux, double tuy, double tuz); // convert an homogeneous matrix in a pose diff --git a/modules/core/include/visp3/core/vpQuaternionVector.h b/modules/core/include/visp3/core/vpQuaternionVector.h index 7b4d177535..93402d38b4 100644 --- a/modules/core/include/visp3/core/vpQuaternionVector.h +++ b/modules/core/include/visp3/core/vpQuaternionVector.h @@ -117,9 +117,6 @@ class VISP_EXPORT vpQuaternionVector : public vpRotationVector explicit vpQuaternionVector(const vpColVector &q); explicit vpQuaternionVector(const std::vector &q); - //! Destructor. 
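The recurring cleanup in these headers, dropping empty user-declared destructors and marking the reimplemented virtuals with override, relies on the base classes (vpForwardProjection, vpArray2D, and so on) already providing a suitable destructor; the compiler-generated one is then equivalent, and the explicit override turns any signature mismatch into a compile error. A small illustration, not ViSP code:

\code
#include <iostream>

struct Base {
  virtual void projection() const { std::cout << "Base" << std::endl; }
  virtual ~Base() = default;
};

struct Derived : Base {
  // OK: matches the base signature exactly
  void projection() const override { std::cout << "Derived" << std::endl; }
  // void projection() override { } // would not compile: missing 'const', nothing to override
};

int main()
{
  Derived d;
  const Base &b = d;
  b.projection(); // prints "Derived"
  return 0;
}
\endcode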
- virtual ~vpQuaternionVector(){} - vpQuaternionVector buildFrom(const double qx, const double qy, const double qz, const double qw); vpQuaternionVector buildFrom(const vpRotationMatrix &R); vpQuaternionVector buildFrom(const vpThetaUVector &tu); @@ -127,15 +124,15 @@ class VISP_EXPORT vpQuaternionVector : public vpRotationVector vpQuaternionVector buildFrom(const std::vector &q); void set(double x, double y, double z, double w); - const double& x() const; - const double& y() const; - const double& z() const; - const double& w() const; + const double &x() const; + const double &y() const; + const double &z() const; + const double &w() const; - double& x(); - double& y(); - double& z(); - double& w(); + double &x(); + double &y(); + double &z(); + double &w(); vpQuaternionVector operator+(const vpQuaternionVector &q) const; vpQuaternionVector operator-(const vpQuaternionVector &q) const; @@ -154,11 +151,11 @@ class VISP_EXPORT vpQuaternionVector : public vpRotationVector double magnitude() const; void normalize(); - static double dot(const vpQuaternionVector& q0, const vpQuaternionVector& q1); + static double dot(const vpQuaternionVector &q0, const vpQuaternionVector &q1); - static vpQuaternionVector lerp(const vpQuaternionVector& q0, const vpQuaternionVector& q1, double t); - static vpQuaternionVector nlerp(const vpQuaternionVector& q0, const vpQuaternionVector& q1, double t); - static vpQuaternionVector slerp(const vpQuaternionVector& q0, const vpQuaternionVector& q1, double t); + static vpQuaternionVector lerp(const vpQuaternionVector &q0, const vpQuaternionVector &q1, double t); + static vpQuaternionVector nlerp(const vpQuaternionVector &q0, const vpQuaternionVector &q1, double t); + static vpQuaternionVector slerp(const vpQuaternionVector &q0, const vpQuaternionVector &q1, double t); }; #endif diff --git a/modules/core/include/visp3/core/vpRequest.h b/modules/core/include/visp3/core/vpRequest.h index c2370de1d6..41aa8c521a 100644 --- a/modules/core/include/visp3/core/vpRequest.h +++ b/modules/core/include/visp3/core/vpRequest.h @@ -49,7 +49,7 @@ \brief This the request that will transit on the network - Exemple request decoding an image on a specific form. + Example request decoding an image on a specific form. First parameter : Height of the image. Second parameter : Width of the image. Thirs parameter : Bitmap of the image (not compress). diff --git a/modules/core/include/visp3/core/vpRotationMatrix.h b/modules/core/include/visp3/core/vpRotationMatrix.h index 0862fa8cbb..09c66521ce 100644 --- a/modules/core/include/visp3/core/vpRotationMatrix.h +++ b/modules/core/include/visp3/core/vpRotationMatrix.h @@ -133,11 +133,6 @@ class VISP_EXPORT vpRotationMatrix : public vpArray2D explicit vpRotationMatrix(const std::initializer_list &list); #endif - /*! - Destructor. - */ - virtual ~vpRotationMatrix() { } - vpRotationMatrix buildFrom(const vpHomogeneousMatrix &M); vpRotationMatrix buildFrom(const vpThetaUVector &v); vpRotationMatrix buildFrom(const vpPoseVector &p); diff --git a/modules/core/include/visp3/core/vpRotationVector.h b/modules/core/include/visp3/core/vpRotationVector.h index 3ded305a11..a2a34b7506 100644 --- a/modules/core/include/visp3/core/vpRotationVector.h +++ b/modules/core/include/visp3/core/vpRotationVector.h @@ -96,21 +96,16 @@ class VISP_EXPORT vpRotationVector : public vpArray2D { public: //! Constructor that constructs a 0-size rotation vector. - vpRotationVector() : vpArray2D(), m_index(0) {} + vpRotationVector() : vpArray2D(), m_index(0) { } //! 
Constructor that constructs a vector of size n and initialize all values //! to zero. - explicit vpRotationVector(unsigned int n) : vpArray2D(n, 1), m_index(0) {} + explicit vpRotationVector(unsigned int n) : vpArray2D(n, 1), m_index(0) { } /*! Copy operator. */ - vpRotationVector(const vpRotationVector &v) : vpArray2D(v), m_index(0) {} - - /*! - Destructor. - */ - virtual ~vpRotationVector(){} + vpRotationVector(const vpRotationVector &v) : vpArray2D(v), m_index(0) { } /** @name Inherited functionalities from vpRotationVector */ //@{ diff --git a/modules/core/include/visp3/core/vpRowVector.h b/modules/core/include/visp3/core/vpRowVector.h index 2e17c4ea18..be20e8f889 100644 --- a/modules/core/include/visp3/core/vpRowVector.h +++ b/modules/core/include/visp3/core/vpRowVector.h @@ -111,15 +111,15 @@ class VISP_EXPORT vpRowVector : public vpArray2D { public: //! Basic constructor that creates an empty 0-size row vector. - vpRowVector() : vpArray2D() {} + vpRowVector() : vpArray2D() { } //! Construct a row vector of size n. All the elements are initialized to //! zero. - explicit vpRowVector(unsigned int n) : vpArray2D(1, n) {} + explicit vpRowVector(unsigned int n) : vpArray2D(1, n) { } //! Construct a row vector of size n. Each element is set to \e val. - vpRowVector(unsigned int n, double val) : vpArray2D(1, n, val) {} + vpRowVector(unsigned int n, double val) : vpArray2D(1, n, val) { } //! Copy constructor that allows to construct a row vector from an other //! one. - vpRowVector(const vpRowVector &v) : vpArray2D(v) {} + vpRowVector(const vpRowVector &v) : vpArray2D(v) { } vpRowVector(const vpRowVector &v, unsigned int c, unsigned int ncols); vpRowVector(const vpMatrix &M); vpRowVector(const vpMatrix &M, unsigned int i); @@ -127,12 +127,8 @@ class VISP_EXPORT vpRowVector : public vpArray2D vpRowVector(const std::vector &v); #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRowVector(vpRowVector &&v); - vpRowVector(const std::initializer_list &list) : vpArray2D(list) {} + vpRowVector(const std::initializer_list &list) : vpArray2D(list) { } #endif - /*! - Destructor. - */ - virtual ~vpRowVector() {} /*! Removes all elements from the vector (which are destroyed), @@ -170,7 +166,8 @@ class VISP_EXPORT vpRowVector : public vpArray2D /*! Extract a sub-row vector from a row vector. \param c : Index of the column corresponding to the first element of the - vector to extract. \param rowsize : Size of the vector to extract. + vector to extract. + \param rowsize : Size of the vector to extract. \exception vpException::fatalError If the vector to extract is not contained in the original one. @@ -310,7 +307,7 @@ class VISP_EXPORT vpRowVector : public vpArray2D \deprecated Provided only for compat with previous releases. This function does nothing. */ - vp_deprecated void init() {} + vp_deprecated void init() { } /*! \deprecated You should rather use stack(const vpRowVector &) */ diff --git a/modules/core/include/visp3/core/vpRxyzVector.h b/modules/core/include/visp3/core/vpRxyzVector.h index da541877ff..aa5d3c086e 100644 --- a/modules/core/include/visp3/core/vpRxyzVector.h +++ b/modules/core/include/visp3/core/vpRxyzVector.h @@ -189,9 +189,6 @@ class VISP_EXPORT vpRxyzVector : public vpRotationVector explicit vpRxyzVector(const vpColVector &rxyz); explicit vpRxyzVector(const std::vector &rxyz); - //! Destructor. 
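For the vpRowVector changes above, a short usage sketch of the sub-vector accessor whose \param description is reflowed; the accessor is assumed here to be named extract(), taking the start index and the number of elements:

\code
#include <iostream>

#include <visp3/core/vpRowVector.h>

int main()
{
  vpRowVector r(5, 1.5); // 5 elements, all initialized to 1.5 (constructor shown above)
  r[2] = 3.0;
  vpRowVector sub = r.extract(1, 3); // 3 elements starting at index 1: {1.5, 3.0, 1.5}
  std::cout << "r  : " << r << std::endl;
  std::cout << "sub: " << sub << std::endl;
  return 0;
}
\endcode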
- virtual ~vpRxyzVector(){} - // convert a rotation matrix into Rxyz vector vpRxyzVector buildFrom(const vpRotationMatrix &R); diff --git a/modules/core/include/visp3/core/vpRzyxVector.h b/modules/core/include/visp3/core/vpRzyxVector.h index 0782f10c3e..da55f25b90 100644 --- a/modules/core/include/visp3/core/vpRzyxVector.h +++ b/modules/core/include/visp3/core/vpRzyxVector.h @@ -175,7 +175,6 @@ int main() \endcode */ - class VISP_EXPORT vpRzyxVector : public vpRotationVector { public: @@ -191,9 +190,6 @@ class VISP_EXPORT vpRzyxVector : public vpRotationVector explicit vpRzyxVector(const vpColVector &rzyx); explicit vpRzyxVector(const std::vector &rzyx); - //! Destructor. - virtual ~vpRzyxVector(){} - // convert a rotation matrix into Rzyx vector vpRzyxVector buildFrom(const vpRotationMatrix &R); diff --git a/modules/core/include/visp3/core/vpRzyzVector.h b/modules/core/include/visp3/core/vpRzyzVector.h index 897522f861..bfc5c3ae09 100644 --- a/modules/core/include/visp3/core/vpRzyzVector.h +++ b/modules/core/include/visp3/core/vpRzyzVector.h @@ -189,9 +189,6 @@ class VISP_EXPORT vpRzyzVector : public vpRotationVector explicit vpRzyzVector(const vpColVector &rzyz); explicit vpRzyzVector(const std::vector &rzyz); - //! Destructor. - virtual ~vpRzyzVector(){} - // convert a rotation matrix into Rzyz vector vpRzyzVector buildFrom(const vpRotationMatrix &R); diff --git a/modules/core/include/visp3/core/vpServer.h b/modules/core/include/visp3/core/vpServer.h index d5eac7597c..83054b6696 100644 --- a/modules/core/include/visp3/core/vpServer.h +++ b/modules/core/include/visp3/core/vpServer.h @@ -52,8 +52,8 @@ TCP provides reliable, ordered delivery of a stream of bytes from a program on one computer to another program on another computer. - Exemple of server's code, receiving and sending basic message. - It corresponds to the client used in the first exemple of vpClient class' + Example of server's code, receiving and sending basic message. + It corresponds to the client used in the first example of vpClient class' documentation. \code @@ -93,8 +93,8 @@ int main(int argc,const char** argv) } \endcode - Exemple of server's code, receiving a vpImage on request form. - It correspond to the client used in the second exemple of vpClient class' + Example of server's code, receiving a vpImage on request form. + It correspond to the client used in the second example of vpClient class' documentation. \code @@ -174,7 +174,7 @@ class VISP_EXPORT vpServer : public vpNetwork explicit vpServer(const int &port); vpServer(const std::string &adress_serv, const int &port_serv); - virtual ~vpServer(); + virtual ~vpServer() override; bool checkForConnections(); diff --git a/modules/core/include/visp3/core/vpSphere.h b/modules/core/include/visp3/core/vpSphere.h index 4c818f7c5e..1992d8695c 100644 --- a/modules/core/include/visp3/core/vpSphere.h +++ b/modules/core/include/visp3/core/vpSphere.h @@ -32,9 +32,9 @@ */ /*! - \file vpSphere.h - \brief forward projection of a sphere -*/ + * \file vpSphere.h + * \brief forward projection of a sphere + */ #ifndef vpSphere_hh #define vpSphere_hh @@ -46,55 +46,54 @@ #include #include /*! - \class vpSphere - \ingroup group_core_geometry - \brief Class that defines a 3D sphere in the object frame and allows forward projection of a 3D sphere in the - camera frame and in the 2D image plane by perspective projection. - All the parameters must be set in meter. 
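The Euler-angle classes touched here (vpRxyzVector, vpRzyxVector, vpRzyzVector) all keep the buildFrom(const vpRotationMatrix &) conversion declared above. A round-trip sketch with example angles, going through the theta-u overload of vpRotationMatrix::buildFrom() listed earlier:

\code
#include <iostream>

#include <visp3/core/vpMath.h>
#include <visp3/core/vpRotationMatrix.h>
#include <visp3/core/vpRxyzVector.h>
#include <visp3/core/vpThetaUVector.h>

int main()
{
  // Build a rotation matrix from a theta-u vector, then recover the Rxyz Euler angles
  vpThetaUVector tu(vpMath::rad(10.), vpMath::rad(-20.), vpMath::rad(30.));
  vpRotationMatrix R;
  R.buildFrom(tu);

  vpRxyzVector rxyz;
  rxyz.buildFrom(R); // convert a rotation matrix into Rxyz angles, as declared above
  std::cout << "Rxyz angles (rad):" << std::endl << rxyz << std::endl;
  return 0;
}
\endcode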
- - A sphere has the followings parameters: - - **in the object frame**: the 3D coordinates oX, oY, oZ of the center and radius R. These - parameters registered in vpForwardProjection::oP internal 4-dim vector are set using the constructors vpSphere(double - oX, double oY, double oZ, double R), vpSphere(const vpColVector &oP) or the functions setWorldCoordinates(double oX, - double oY, double oZ, double R) and setWorldCoordinates(const vpColVector &oP). To get theses parameters use get_oP(). - - - **in the camera frame**: the coordinates cX, cY, cZ of the center and radius R. These - parameters registered in vpTracker::cP internal 4-dim vector are computed using - changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const or changeFrame(const vpHomogeneousMatrix &cMo). - These parameters could be retrieved using getX(), getY(), getZ() and getR(). - To get theses parameters use get_cP(). - - - **in the image plane**: here we consider the parameters of the ellipse corresponding - to the perspective projection of the 3D sphere. The parameters are the ellipse centroid (x, y) - and n20, n11, n02 which are the second order centered moments of - the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where - \f$\mu_{ij}\f$ are the centered moments and a the area). - These parameters are registered in vpTracker::p internal 5-dim vector and computed using projection() and - projection(const vpColVector &cP, vpColVector &p) const. They could be retrieved using get_x(), get_y(), get_n20(), - get_n11() and get_n02(). They correspond to 2D normalized sphere parameters with values expressed in meters. - To get theses parameters use get_p(). -*/ + * \class vpSphere + * \ingroup group_core_geometry + * \brief Class that defines a 3D sphere in the object frame and allows forward projection of a 3D sphere in the + * camera frame and in the 2D image plane by perspective projection. + * All the parameters must be set in meter. + * + * A sphere has the followings parameters: + * - **in the object frame**: the 3D coordinates oX, oY, oZ of the center and radius R. These + *. parameters registered in vpForwardProjection::oP internal 4-dim vector are set using the constructors vpSphere(double + *. oX, double oY, double oZ, double R), vpSphere(const vpColVector &oP) or the functions setWorldCoordinates(double oX, + *. double oY, double oZ, double R) and setWorldCoordinates(const vpColVector &oP). To get theses parameters use get_oP(). + * + * - **in the camera frame**: the coordinates cX, cY, cZ of the center and radius R. These + *. parameters registered in vpTracker::cP internal 4-dim vector are computed using + *. changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const or changeFrame(const vpHomogeneousMatrix &cMo). + *. These parameters could be retrieved using getX(), getY(), getZ() and getR(). + *. To get theses parameters use get_cP(). + * + * - **in the image plane**: here we consider the parameters of the ellipse corresponding + *. to the perspective projection of the 3D sphere. The parameters are the ellipse centroid (x, y) + *. and n20, n11, n02 which are the second order centered moments of + *. the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where + *. \f$\mu_{ij}\f$ are the centered moments and a the area). + *. These parameters are registered in vpTracker::p internal 5-dim vector and computed using projection() and + *. projection(const vpColVector &cP, vpColVector &p) const. 
They could be retrieved using get_x(), get_y(), get_n20(), + *. get_n11() and get_n02(). They correspond to 2D normalized sphere parameters with values expressed in meters. + *. To get theses parameters use get_p(). + */ class VISP_EXPORT vpSphere : public vpForwardProjection { public: vpSphere(); explicit vpSphere(const vpColVector &oP); vpSphere(double oX, double oY, double oZ, double R); - virtual ~vpSphere(); - void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpSphere *duplicate() const; + vpSphere *duplicate() const override; double get_x() const { return p[0]; } double get_y() const { return p[1]; } @@ -108,20 +107,20 @@ class VISP_EXPORT vpSphere : public vpForwardProjection double getZ() const { return cP[2]; } double getR() const { return cP[3]; } - void projection(); - void projection(const vpColVector &cP, vpColVector &p) const; + void projection() override; + void projection(const vpColVector &cP, vpColVector &p) const override; - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; void setWorldCoordinates(double oX, double oY, double oZ, double R); protected: - void init(); + void init() override; public: #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) /*! - @name Deprecated functions - */ + * @name Deprecated functions + */ //@{ /*! * \deprecated You should rather use get_n20(). diff --git a/modules/core/include/visp3/core/vpSubColVector.h b/modules/core/include/visp3/core/vpSubColVector.h index 58f9ec9d79..99fa0579b2 100644 --- a/modules/core/include/visp3/core/vpSubColVector.h +++ b/modules/core/include/visp3/core/vpSubColVector.h @@ -37,40 +37,37 @@ #include /*! - \file vpSubColVector.h - - \brief Definition of the vpSubColVector class -*/ + * \file vpSubColVector.h + * + * \brief Definition of the vpSubColVector class + */ /*! - \class vpSubColVector - \ingroup group_core_matrices - This class provides a mask on a vpColVector. It has internally a - pointer to the parent vpColVector. - All properties of vpColVector are available with - a vpSubColVector. - - \author Jean Laneurit (IRISA - INRIA Rennes) - - \sa vpMatrix vpColvector vpRowVector -*/ + * \class vpSubColVector + * \ingroup group_core_matrices + * This class provides a mask on a vpColVector. It has internally a + * pointer to the parent vpColVector. + * All properties of vpColVector are available with + * a vpSubColVector. + * + * \sa vpMatrix vpColVector vpRowVector + */ class VISP_EXPORT vpSubColVector : public vpColVector { - private: - //! Copy constructor unavaible + //! 
Copy constructor unavailable vpSubColVector(const vpSubColVector & /* m */); protected: - //! Number of row of parent vpColvector at initialization - unsigned int pRowNum; - //! Parent vpColvector - vpColVector *parent; + //! Number of row of parent vpColVector at initialization + unsigned int m_pRowNum; + //! Parent vpColVector + vpColVector *m_parent; public: vpSubColVector(); vpSubColVector(vpColVector &v, const unsigned int &offset, const unsigned int &nrows); - virtual ~vpSubColVector(); + virtual ~vpSubColVector() override; void checkParentStatus() const; diff --git a/modules/core/include/visp3/core/vpSubMatrix.h b/modules/core/include/visp3/core/vpSubMatrix.h index 8cabf02d03..989cf9bf71 100644 --- a/modules/core/include/visp3/core/vpSubMatrix.h +++ b/modules/core/include/visp3/core/vpSubMatrix.h @@ -52,7 +52,7 @@ \author Jean Laneurit (IRISA - INRIA Rennes) - \sa vpMatrix vpColvector vpRowVector + \sa vpMatrix vpColVector vpRowVector */ class VISP_EXPORT vpSubMatrix : public vpMatrix { @@ -77,7 +77,7 @@ class VISP_EXPORT vpSubMatrix : public vpMatrix vpSubMatrix(vpMatrix &m, const unsigned int &row, const unsigned int &col, const unsigned int &nrows, const unsigned int &ncols); //! Destructor - virtual ~vpSubMatrix(); + virtual ~vpSubMatrix() override; //! Initialisation of vpMatrix void init(vpMatrix &m, const unsigned int &row, const unsigned int &col, const unsigned int &nrows, diff --git a/modules/core/include/visp3/core/vpSubRowVector.h b/modules/core/include/visp3/core/vpSubRowVector.h index 21660d7cf5..d5fc68e2b2 100644 --- a/modules/core/include/visp3/core/vpSubRowVector.h +++ b/modules/core/include/visp3/core/vpSubRowVector.h @@ -37,41 +37,37 @@ #include /*! - \file vpSubRowVector.h - - \brief Definition of the vpSubRowVector class -*/ + * \file vpSubRowVector.h + * + * \brief Definition of the vpSubRowVector class + */ /*! - \class vpSubRowVector - \ingroup group_core_matrices - This class provides a mask on a vpRowVector. It has internally a - pointer to the parent vpRowVector. - All properties of vpRowVector are available with - a vpSubRowVector. - - \author Jean Laneurit (IRISA - INRIA Rennes) - - \sa vpMatrix vpColvector vpRowVector -*/ - + * \class vpSubRowVector + * \ingroup group_core_matrices + * This class provides a mask on a vpRowVector. It has internally a + * pointer to the parent vpRowVector. + * All properties of vpRowVector are available with + * a vpSubRowVector. + * + * \sa vpMatrix vpColVector vpRowVector + */ class VISP_EXPORT vpSubRowVector : public vpRowVector { - private: - //! Copy constructor unavaible + //! Copy constructor unavailable vpSubRowVector(const vpSubRowVector & /* m */); protected: - //! Number of row of parent vpColvector at initialization - unsigned int pColNum; - //! Parent vpColvector - vpRowVector *parent; + //! Number of row of parent vpColVector at initialization + unsigned int m_pColNum; + //! 
Parent vpColVector + vpRowVector *m_parent; public: vpSubRowVector(); vpSubRowVector(vpRowVector &v, const unsigned int &offset, const unsigned int &ncols); - virtual ~vpSubRowVector(); + virtual ~vpSubRowVector() override; void checkParentStatus() const; diff --git a/modules/core/include/visp3/core/vpThetaUVector.h b/modules/core/include/visp3/core/vpThetaUVector.h index b940a6d4fa..fb442147ba 100644 --- a/modules/core/include/visp3/core/vpThetaUVector.h +++ b/modules/core/include/visp3/core/vpThetaUVector.h @@ -190,8 +190,6 @@ class VISP_EXPORT vpThetaUVector : public vpRotationVector explicit vpThetaUVector(const std::vector &tu); vpThetaUVector(double tux, double tuy, double tuz); - //! Destructor. - virtual ~vpThetaUVector(){} // convert an homogeneous matrix into Theta U vector vpThetaUVector buildFrom(const vpHomogeneousMatrix &M); diff --git a/modules/core/include/visp3/core/vpTracker.h b/modules/core/include/visp3/core/vpTracker.h index 25662d0f22..0d0d6bd860 100644 --- a/modules/core/include/visp3/core/vpTracker.h +++ b/modules/core/include/visp3/core/vpTracker.h @@ -35,46 +35,45 @@ #define vpTracker_H /*! - \file vpTracker.h - \brief Class that defines what is a generic tracker. -*/ + * \file vpTracker.h + * \brief Class that defines what is a generic tracker. + */ #include #include #include /*! - \class vpTracker - \ingroup group_core_trackers - \brief Class that defines what is a feature generic tracker. - - A tracker is able to track features with parameters expressed in: - - in the camera frame \e cP. These parameters are located in the public - attribute vpTracker::cP. - - in the image plane \e p. These parameters are located in the public - attribute vpTracker::p. They correspond to normalized coordinates - of the feature expressed in meters. - -*/ + * \class vpTracker + * \ingroup group_core_trackers + * \brief Class that defines what is a feature generic tracker. + * + * A tracker is able to track features with parameters expressed in: + * - in the camera frame \e cP. These parameters are located in the public + * attribute vpTracker::cP. + * - in the image plane \e p. These parameters are located in the public + * attribute vpTracker::p. They correspond to normalized coordinates + * of the feature expressed in meters. + */ class VISP_EXPORT vpTracker { public: /** @name Public Attributes Inherited from vpTracker */ //@{ /*! - Feature coordinates expressed in the image plane \e p. They correspond - to 2D normalized coordinates expressed in meters. - */ + * Feature coordinates expressed in the image plane \e p. They correspond + * to 2D normalized coordinates expressed in meters. + */ vpColVector p; /*! - Feature coordinates expressed in the camera frame \e cP. - */ + * Feature coordinates expressed in the camera frame \e cP. + */ vpColVector cP; /*! - Flag used to indicate if the feature parameters \e cP expressed - in the camera frame are available. - */ + * Flag used to indicate if the feature parameters \e cP expressed + * in the camera frame are available. + */ bool cPAvailable; //@} diff --git a/modules/core/include/visp3/core/vpTranslationVector.h b/modules/core/include/visp3/core/vpTranslationVector.h index a48d24b509..d8576c2ab0 100644 --- a/modules/core/include/visp3/core/vpTranslationVector.h +++ b/modules/core/include/visp3/core/vpTranslationVector.h @@ -117,7 +117,7 @@ class VISP_EXPORT vpTranslationVector : public vpArray2D Default constructor. The translation vector is initialized to zero. 
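The reworked vpTracker comment above describes the contract followed by the forward-projectable primitives: changeFrame() fills cP with camera-frame coordinates and projection() fills p with normalized image-plane coordinates in meters. A short sketch with vpPoint, assuming the usual get_x()/get_y() accessors and the 6-double vpHomogeneousMatrix constructor:

\code
#include <iostream>

#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpPoint.h>

int main()
{
  vpPoint point(0.1, -0.05, 0.0); // oX, oY, oZ in the object frame (meters)
  vpHomogeneousMatrix cMo(0.0, 0.0, 0.8, 0.0, 0.0, 0.0); // example camera-to-object pose

  point.changeFrame(cMo); // fills vpTracker::cP (camera frame)
  point.projection();     // fills vpTracker::p (image plane, meters)
  std::cout << "x=" << point.get_x() << " y=" << point.get_y() << std::endl; // x=0.125 y=-0.0625
  return 0;
}
\endcode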
*/ - vpTranslationVector() : vpArray2D(3, 1), m_index(0) { }; + vpTranslationVector() : vpArray2D(3, 1), m_index(0) { } vpTranslationVector(double tx, double ty, double tz); vpTranslationVector(const vpTranslationVector &tv); explicit vpTranslationVector(const vpHomogeneousMatrix &M); @@ -173,7 +173,7 @@ class VISP_EXPORT vpTranslationVector : public vpArray2D (void)ncols; (void)flagNullify; throw(vpException(vpException::fatalError, "Cannot resize a translation vector")); - }; + } void set(double tx, double ty, double tz); diff --git a/modules/core/include/visp3/core/vpUDPClient.h b/modules/core/include/visp3/core/vpUDPClient.h index f48b31a7ef..75d36acd44 100644 --- a/modules/core/include/visp3/core/vpUDPClient.h +++ b/modules/core/include/visp3/core/vpUDPClient.h @@ -52,117 +52,115 @@ #define VP_MAX_UDP_PAYLOAD 508 /*! - \class vpUDPClient - - \ingroup group_core_com_ethernet - - \brief This class implements a basic (IPv4) User Datagram Protocol (UDP) -client. - - More information here, - here - or here: -
- This User Datagram Protocol (UDP) is defined to make available a - datagram mode of packet-switched computer communication in the - environment of an interconnected set of computer networks. This - protocol assumes that the Internet Protocol (IP) [1] is used as the - underlying protocol. - - This protocol provides a procedure for application programs to send - messages to other programs with a minimum of protocol mechanism. The - protocol is transaction oriented, and delivery and duplicate protection - are not guaranteed. Applications requiring ordered reliable delivery of - streams of data should use the Transmission Control Protocol (TCP) [2]. -
- - Example of a client's code, sending a basic message and receiving the - server answer: - - \code -#include -#include -#include - -int main() { - try { - std::string servername = "127.0.0.1"; - unsigned int port = 50037; - vpUDPClient client(servername, port); - - while (true) { - std::cout << "Enter the message to send:" << std::endl; - std::string msg = ""; - std::getline(std::cin, msg); - if (client.send(msg) != (int) msg.size()) - std::cerr << "Error client.send()!" << std::endl; - if (client.receive(msg)) - std::cout << "Receive from the server: " << msg << std::endl; - } - return EXIT_SUCCESS; - } catch (const vpException &e) { - std::cerr << "Catch an exception: " << e.what() << std::endl; - return EXIT_FAILURE; - } -} - \endcode - - If you want to send a complex data type, you can either send the ASCII - representation or send directly the byte data. In the last case, you should - have to handle that both the server and the client have the same data type - representation. Be careful also with the endianness of the network / host. - - Here an example using a structure of data, assuming that both the server and - the client have the same architecture (probably you should write your own - serialization / deserialization functions for the data you want to send / - receive): - - \code -#include -#include -#include -#include - -struct vpDataType_t { - double double_val; - int int_val; - vpDataType_t() : double_val(0.0), int_val(0) {} - vpDataType_t(double dbl, int i) : double_val(dbl), int_val(i) {} -}; - -int main() { - try { - std::string servername = "127.0.0.1"; - unsigned int port = 50037; - vpUDPClient client(servername, port); - vpDataType_t data_type(1234.56789, 123450); - char data[sizeof(data_type.double_val)+sizeof(data_type.int_val)]; - - memcpy(data, &data_type.double_val, sizeof(data_type.double_val)); - memcpy(data+sizeof(data_type.double_val), &data_type.int_val, sizeof(data_type.int_val)); - - std::string msg(data, sizeof(data_type.double_val)+sizeof(data_type.int_val)); - if (client.send(msg) != (int) sizeof(data_type.double_val)+sizeof(data_type.int_val)) - std::cerr << "Error client.send()!" << std::endl; - if (client.receive(msg)) { - data_type.double_val = *reinterpret_cast(msg.c_str()); - data_type.int_val - = *reinterpret_cast(msg.c_str()+sizeof(data_type.double_val)); - std::cout << "Receive from the server double_val: " << data_type.double_val - << " ; int_val: " << data_type.int_val << std::endl; - } - return EXIT_SUCCESS; - } catch (const vpException &e) { - std::cerr << "Catch an exception: " << e.what() << std::endl; - return EXIT_FAILURE; - } -} - \endcode - - \sa vpUDPServer -*/ + * \class vpUDPClient + * + * \ingroup group_core_com_ethernet + * + * \brief This class implements a basic (IPv4) User Datagram Protocol (UDP) client. + * + * More information here, + * here + * or + * here: + *
+ * This User Datagram Protocol (UDP) is defined to make available a + * datagram mode of packet-switched computer communication in the + * environment of an interconnected set of computer networks. This + * protocol assumes that the Internet Protocol (IP) [1] is used as the + * underlying protocol. + * + * This protocol provides a procedure for application programs to send + * messages to other programs with a minimum of protocol mechanism. The + * protocol is transaction oriented, and delivery and duplicate protection + * are not guaranteed. Applications requiring ordered reliable delivery of + * streams of data should use the Transmission Control Protocol (TCP) [2]. + *
+ * + * Example of a client's code, sending a basic message and receiving the + * server answer: + * + * \code + * #include + * #include + * #include + * + * int main() { + * try { + * std::string servername = "127.0.0.1"; + * unsigned int port = 50037; + * vpUDPClient client(servername, port); + * + * while (true) { + * std::cout << "Enter the message to send:" << std::endl; + * std::string msg = ""; + * std::getline(std::cin, msg); + * if (client.send(msg) != (int) msg.size()) + * std::cerr << "Error client.send()!" << std::endl; + * if (client.receive(msg)) + * std::cout << "Receive from the server: " << msg << std::endl; + * } + * return EXIT_SUCCESS; + * } catch (const vpException &e) { + * std::cerr << "Catch an exception: " << e.what() << std::endl; + * return EXIT_FAILURE; + * } + * } + * \endcode + * + * If you want to send a complex data type, you can either send the ASCII + * representation or send directly the byte data. In the last case, you should + * have to handle that both the server and the client have the same data type + * representation. Be careful also with the endianness of the network / host. + * + * Here an example using a structure of data, assuming that both the server and + * the client have the same architecture (probably you should write your own + * serialization / deserialization functions for the data you want to send / + * receive): + * + * \code + * #include + * #include + * #include + * #include + * + * struct vpDataType_t { + * double double_val; + * int int_val; + * vpDataType_t() : double_val(0.0), int_val(0) {} + * vpDataType_t(double dbl, int i) : double_val(dbl), int_val(i) {} + * }; + * + * int main() { + * try { + * std::string servername = "127.0.0.1"; + * unsigned int port = 50037; + * vpUDPClient client(servername, port); + * vpDataType_t data_type(1234.56789, 123450); + * char data[sizeof(data_type.double_val)+sizeof(data_type.int_val)]; + * + * memcpy(data, &data_type.double_val, sizeof(data_type.double_val)); + * memcpy(data+sizeof(data_type.double_val), &data_type.int_val, sizeof(data_type.int_val)); + * + * std::string msg(data, sizeof(data_type.double_val)+sizeof(data_type.int_val)); + * if (client.send(msg) != (int) sizeof(data_type.double_val)+sizeof(data_type.int_val)) + * std::cerr << "Error client.send()!" << std::endl; + * if (client.receive(msg)) { + * data_type.double_val = *reinterpret_cast(msg.c_str()); + * data_type.int_val + * = *reinterpret_cast(msg.c_str()+sizeof(data_type.double_val)); + * std::cout << "Receive from the server double_val: " << data_type.double_val + * << " ; int_val: " << data_type.int_val << std::endl; + * } + * return EXIT_SUCCESS; + * } catch (const vpException &e) { + * std::cerr << "Catch an exception: " << e.what() << std::endl; + * return EXIT_FAILURE; + * } + * } + * \endcode + * + * \sa vpUDPServer + */ class VISP_EXPORT vpUDPClient { public: diff --git a/modules/core/include/visp3/core/vpVelocityTwistMatrix.h b/modules/core/include/visp3/core/vpVelocityTwistMatrix.h index 55530abdd6..d8a3ccf0ab 100644 --- a/modules/core/include/visp3/core/vpVelocityTwistMatrix.h +++ b/modules/core/include/visp3/core/vpVelocityTwistMatrix.h @@ -178,11 +178,6 @@ class VISP_EXPORT vpVelocityTwistMatrix : public vpArray2D vpVelocityTwistMatrix(const vpRotationMatrix &R); vpVelocityTwistMatrix(const vpThetaUVector &thetau); - /*! - Destructor. 
- */ - virtual ~vpVelocityTwistMatrix(){}; - vpVelocityTwistMatrix buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R); vpVelocityTwistMatrix buildFrom(const vpTranslationVector &t, const vpThetaUVector &thetau); vpVelocityTwistMatrix buildFrom(const vpHomogeneousMatrix &M, bool full = true); @@ -217,7 +212,7 @@ class VISP_EXPORT vpVelocityTwistMatrix : public vpArray2D (void)ncols; (void)flagNullify; throw(vpException(vpException::fatalError, "Cannot resize a velocity twist matrix")); - }; + } #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) /*! @@ -228,7 +223,7 @@ class VISP_EXPORT vpVelocityTwistMatrix : public vpArray2D \deprecated Provided only for compat with previous releases. This function does nothing. */ - vp_deprecated void init(){}; + vp_deprecated void init() { } /*! \deprecated You should rather use eye(). */ diff --git a/modules/core/src/camera/vpCameraParameters.cpp b/modules/core/src/camera/vpCameraParameters.cpp index 1ef31179eb..8993f054d2 100644 --- a/modules/core/src/camera/vpCameraParameters.cpp +++ b/modules/core/src/camera/vpCameraParameters.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,12 @@ * * Description: * Camera intrinsic parameters. - * - * Authors: - * Anthony Saunier - * -*****************************************************************************/ + */ /*! \file vpCameraParameters.cpp \brief Definition of the vpCameraParameters class member functions. Class vpCameraParameters define the camera intrinsic parameters - */ #include @@ -60,407 +54,418 @@ const double vpCameraParameters::DEFAULT_V0_PARAMETER = 144.0; const double vpCameraParameters::DEFAULT_KUD_PARAMETER = 0.0; const double vpCameraParameters::DEFAULT_KDU_PARAMETER = 0.0; const vpCameraParameters::vpCameraParametersProjType vpCameraParameters::DEFAULT_PROJ_TYPE = - vpCameraParameters::perspectiveProjWithoutDistortion; +vpCameraParameters::perspectiveProjWithoutDistortion; /*! - Default constructor. - By default, a perspective projection without distortion model is set. - - \sa init() -*/ + * Default constructor. + * By default, a perspective projection without distortion model is set. + * + * \sa init() + */ vpCameraParameters::vpCameraParameters() - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. / DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { init(); } /*! - Copy constructor + * Copy constructor */ vpCameraParameters::vpCameraParameters(const vpCameraParameters &c) - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. 
/ DEFAULT_PX_PARAMETER), inv_py(1. / DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { init(c); } /*! - Constructor for perspective projection without distortion model - - \param cam_px,cam_py : pixel size - \param cam_u0,cam_v0 : principal points - + * Constructor for perspective projection without distortion model + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. */ vpCameraParameters::vpCameraParameters(double cam_px, double cam_py, double cam_u0, double cam_v0) - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. / DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { initPersProjWithoutDistortion(cam_px, cam_py, cam_u0, cam_v0); } /*! - Constructor for perspective projection with distortion model - - \param cam_px,cam_py : pixel size - \param cam_u0,cam_v0 : principal points - \param cam_kud : undistorted to distorted radial distortion - \param cam_kdu : distorted to undistorted radial distortion - + * Constructor for perspective projection with distortion model + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * \param cam_kud : Undistorted to distorted radial distortion. + * \param cam_kdu : Distorted to undistorted radial distortion. */ vpCameraParameters::vpCameraParameters(double cam_px, double cam_py, double cam_u0, double cam_v0, double cam_kud, double cam_kdu) - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. / DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. 
/ DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { initPersProjWithDistortion(cam_px, cam_py, cam_u0, cam_v0, cam_kud, cam_kdu); } /*! - Constructor for projection with Kannala-Brandt distortion model - - \param cam_px,cam_py : pixel size - \param cam_u0,cam_v0 : principal points - \param coefficients : distortion model coefficients - + * Constructor for projection with Kannala-Brandt distortion model + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * \param coefficients : distortion model coefficients */ vpCameraParameters::vpCameraParameters(double cam_px, double cam_py, double cam_u0, double cam_v0, const std::vector &coefficients) - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. / DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { initProjWithKannalaBrandtDistortion(cam_px, cam_py, cam_u0, cam_v0, coefficients); } /*! - \brief basic initialization with the default parameters -*/ + * \brief Basic initialization with the default parameters. + */ void vpCameraParameters::init() { - if (fabs(this->px) < 1e-6) { + if (fabs(this->m_px) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - if (fabs(this->py) < 1e-6) { + if (fabs(this->m_py) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - this->inv_px = 1. / this->px; - this->inv_py = 1. / this->py; + this->m_inv_px = 1. / this->m_px; + this->m_inv_py = 1. / this->m_py; } /*! - Initialization with specific parameters using perpective projection without - distortion model. - \param cam_px,cam_py : the ratio between the focal length and the size of a -pixel. \param cam_u0,cam_v0 : principal point coordinates in pixels. - - The following sample code shows how to use this function: - \code -#include -#include - -int main() -{ - vpImage I(480, 640); - double u0 = I.getWidth() / 2.; - double v0 = I.getHeight() / 2.; - double px = 600; - double py = 600; - vpCameraParameters cam; - cam.initPersProjWithoutDistortion(px, py, u0, v0); - cam.computeFov(I.getWidth(), I.getHeight()); - std::cout << cam << std::endl; - std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) - << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) - << " degrees)" << std::endl; -} - \endcode - It produces the following output: - \code -Camera parameters for perspective projection without distortion: - px = 600 py = 600 - u0 = 320 v0 = 240 - -Field of view (horizontal: 56.145 and vertical: 43.6028 degrees) - \endcode - + * Initialization with specific parameters using perspective projection without + * distortion model. + * + * \param cam_px : Pixel size along x axis (horizontal). 
+ * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * + * The following sample code shows how to use this function: + * \code + * #include + * #include + * + * int main() + * { + * vpImage I(480, 640); + * double u0 = I.getWidth() / 2.; + * double v0 = I.getHeight() / 2.; + * double px = 600; + * double py = 600; + * vpCameraParameters cam; + * cam.initPersProjWithoutDistortion(px, py, u0, v0); + * cam.computeFov(I.getWidth(), I.getHeight()); + * std::cout << cam << std::endl; + * std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) + * << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) + * << " degrees)" << std::endl; + * } + * \endcode + * It produces the following output: + * \code + * Camera parameters for perspective projection without distortion: + * px = 600 py = 600 + * u0 = 320 v0 = 240 + * + * Field of view (horizontal: 56.145 and vertical: 43.6028 degrees) + * \endcode */ void vpCameraParameters::initPersProjWithoutDistortion(double cam_px, double cam_py, double cam_u0, double cam_v0) { - this->projModel = vpCameraParameters::perspectiveProjWithoutDistortion; + this->m_projModel = vpCameraParameters::perspectiveProjWithoutDistortion; - this->px = cam_px; - this->py = cam_py; - this->u0 = cam_u0; - this->v0 = cam_v0; - this->kud = 0; - this->kdu = 0; + this->m_px = cam_px; + this->m_py = cam_py; + this->m_u0 = cam_u0; + this->m_v0 = cam_v0; + this->m_kud = 0; + this->m_kdu = 0; this->m_dist_coefs.clear(); - if (fabs(px) < 1e-6) { + if (fabs(m_px) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - if (fabs(py) < 1e-6) { + if (fabs(m_py) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter py = 0")); } - this->inv_px = 1. / px; - this->inv_py = 1. / py; + this->m_inv_px = 1. / m_px; + this->m_inv_py = 1. / m_py; } /*! - Initialization with specific parameters using perpective projection with - distortion model. - \param cam_px,cam_py : the ratio between the focal length and the size of a pixel. - \param cam_u0,cam_v0 : principal points coordinates in pixels. - \param cam_kud : undistorted to distorted radial distortion. - \param cam_kdu : distorted to undistorted radial distortion. - - The following sample code shows how to use this function: - \code -#include -#include - -int main() -{ - vpImage I(480, 640); - double u0 = I.getWidth() / 2.; - double v0 = I.getHeight() / 2.; - double px = 600; - double py = 600; - double kud = -0.19; - double kdu = 0.20; - vpCameraParameters cam; - cam.initPersProjWithDistortion(px, py, u0, v0, kud, kdu); - cam.computeFov(I.getWidth(), I.getHeight()); - std::cout << cam << std::endl; - std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) - << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) - << " degrees)" << std::endl; -} - \endcode - It produces the following output: - \code -Camera parameters for perspective projection with distortion: - px = 600 py = 600 - u0 = 320 v0 = 240 - kud = -0.19 - kdu = 0.2 - -Field of view (horizontal: 56.14497387 and vertical: 43.60281897 degrees) -\endcode -*/ + * Initialization with specific parameters using perspective projection with + * distortion model. + * + * \param cam_px : Pixel size along x axis (horizontal). 
+ * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * \param cam_kud : Undistorted to distorted radial distortion. + * \param cam_kdu : Distorted to undistorted radial distortion. + * + * The following sample code shows how to use this function: + * \code + * #include + * #include + * + * int main() + * { + * vpImage I(480, 640); + * double u0 = I.getWidth() / 2.; + * double v0 = I.getHeight() / 2.; + * double px = 600; + * double py = 600; + * double kud = -0.19; + * double kdu = 0.20; + * vpCameraParameters cam; + * cam.initPersProjWithDistortion(px, py, u0, v0, kud, kdu); + * cam.computeFov(I.getWidth(), I.getHeight()); + * std::cout << cam << std::endl; + * std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) + * << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) + * << " degrees)" << std::endl; + * } + * \endcode + * It produces the following output: + * \code + * Camera parameters for perspective projection with distortion: + * px = 600 py = 600 + * u0 = 320 v0 = 240 + * kud = -0.19 + * kdu = 0.2 + * + * Field of view (horizontal: 56.14497387 and vertical: 43.60281897 degrees) + * \endcode + */ void vpCameraParameters::initPersProjWithDistortion(double cam_px, double cam_py, double cam_u0, double cam_v0, double cam_kud, double cam_kdu) { - this->projModel = vpCameraParameters::perspectiveProjWithDistortion; - - this->px = cam_px; - this->py = cam_py; - this->u0 = cam_u0; - this->v0 = cam_v0; - this->kud = cam_kud; - this->kdu = cam_kdu; + this->m_projModel = vpCameraParameters::perspectiveProjWithDistortion; + + this->m_px = cam_px; + this->m_py = cam_py; + this->m_u0 = cam_u0; + this->m_v0 = cam_v0; + this->m_kud = cam_kud; + this->m_kdu = cam_kdu; this->m_dist_coefs.clear(); - if (fabs(px) < 1e-6) { + if (fabs(m_px) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - if (fabs(py) < 1e-6) { + if (fabs(m_py) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - this->inv_px = 1. / px; - this->inv_py = 1. / py; + this->m_inv_px = 1. / m_px; + this->m_inv_py = 1. / m_py; } /*! - Initialization with specific parameters using Kannala-Brandt distortion model - \param cam_px,cam_py : The ratio between the focal length and the size of a pixel. - \param cam_u0,cam_v0 : Principal points coordinates in pixels. - \param coefficients : Distortion coefficients. -*/ + * Initialization with specific parameters using Kannala-Brandt distortion model + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * \param coefficients : Distortion coefficients. 
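A usage sketch for the Kannala-Brandt initializer documented above; the intrinsics and coefficients are made up, and the coefficient vector is stored as provided by the implementation below:

\code
#include <iostream>
#include <vector>

#include <visp3/core/vpCameraParameters.h>

int main()
{
  std::vector<double> k = { -0.01, 0.002, -0.0003, 0.00004 }; // made-up distortion coefficients
  vpCameraParameters cam;
  cam.initProjWithKannalaBrandtDistortion(300., 300., 320., 240., k); // px, py, u0, v0, coefficients
  std::cout << cam << std::endl;
  return 0;
}
\endcode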
+ */ void vpCameraParameters::initProjWithKannalaBrandtDistortion(double cam_px, double cam_py, double cam_u0, double cam_v0, const std::vector &coefficients) { - this->projModel = vpCameraParameters::ProjWithKannalaBrandtDistortion; + this->m_projModel = vpCameraParameters::ProjWithKannalaBrandtDistortion; - this->px = cam_px; - this->py = cam_py; - this->u0 = cam_u0; - this->v0 = cam_v0; + this->m_px = cam_px; + this->m_py = cam_py; + this->m_u0 = cam_u0; + this->m_v0 = cam_v0; - this->kud = 0.0; - this->kdu = 0.0; + this->m_kud = 0.0; + this->m_kdu = 0.0; - if (fabs(px) < 1e-6) { + if (fabs(m_px) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - if (fabs(py) < 1e-6) { + if (fabs(m_py) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - this->inv_px = 1. / px; - this->inv_py = 1. / py; + this->m_inv_px = 1. / m_px; + this->m_inv_py = 1. / m_py; this->m_dist_coefs = coefficients; } /*! - destructor - - nothing much to destroy... -*/ -vpCameraParameters::~vpCameraParameters() {} + * Destructor that does nothing. + */ +vpCameraParameters::~vpCameraParameters() { } /*! - initialization from another vpCameraParameters object -*/ + * Initialization from another vpCameraParameters object. + */ void vpCameraParameters::init(const vpCameraParameters &c) { *this = c; } /*! - initialise the camera from a calibration matrix. - Using a calibration matrix leads to a camera without distortion - - The K matrix in parameters must be like: - - \f$ K = \left(\begin{array}{ccc} - p_x & 0 & u_0 \\ - 0 & p_y & v_0 \\ - 0 & 0 & 1 - \end{array} \right) \f$ - - \param _K : the 3by3 calibration matrix -*/ -void vpCameraParameters::initFromCalibrationMatrix(const vpMatrix &_K) + * Initialise the camera from a calibration matrix. + * Using a calibration matrix leads to a camera without distortion. + * + * The K matrix in parameters must be like: + * + * \f$ K = \left(\begin{array}{ccc} + * p_x & 0 & u_0 \\ + * 0 & p_y & v_0 \\ + * 0 & 0 & 1 + * \end{array} \right) \f$ + * + * \param K : the 3-by-3 calibration matrix + */ +void vpCameraParameters::initFromCalibrationMatrix(const vpMatrix &K) { - if (_K.getRows() != 3 || _K.getCols() != 3) { + if (K.getRows() != 3 || K.getCols() != 3) { throw vpException(vpException::dimensionError, "bad size for calibration matrix"); } - if (std::fabs(_K[2][2] - 1.0) > std::numeric_limits::epsilon()) { + if (std::fabs(K[2][2] - 1.0) > std::numeric_limits::epsilon()) { throw vpException(vpException::badValue, "bad value: K[2][2] must be equal to 1"); } - initPersProjWithoutDistortion(_K[0][0], _K[1][1], _K[0][2], _K[1][2]); + initPersProjWithoutDistortion(K[0][0], K[1][1], K[0][2], K[1][2]); } /*! - Initialize the camera model without distortion from the image dimension and -the camera field of view. \param w : Image width. \param h : Image height. - \param hfov : Camera horizontal field of view angle expressed in radians. - \param vfov : Camera vertical field of view angle expressed in radians. 
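initFromCalibrationMatrix() has no usage snippet in its rewritten comment; a minimal sketch with made-up intrinsics, following the K layout given above:

\code
#include <iostream>

#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpMatrix.h>

int main()
{
  vpMatrix K(3, 3); // zero-initialized
  K[0][0] = 600.0; K[0][2] = 320.0; // px, u0 (example values)
  K[1][1] = 600.0; K[1][2] = 240.0; // py, v0
  K[2][2] = 1.0;                    // must be exactly 1, as checked by the implementation

  vpCameraParameters cam;
  cam.initFromCalibrationMatrix(K);
  std::cout << cam << std::endl;
  return 0;
}
\endcode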
- - The following sample code shows how to use this function: - \code -#include -#include - -int main() -{ - vpImage I(480, 640); - vpCameraParameters cam; - double hfov = vpMath::rad(56); - double vfov = vpMath::rad(43); - cam.initFromFov(I.getWidth(), I.getHeight(), hfov, vfov); - - std::cout << cam << std::endl; - std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) - << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) << " degrees)" << std::endl; -} - \endcode - It produces the following output: - \code -Camera parameters for perspective projection without distortion: - px = 601.832 py = 609.275 - u0 = 320 v0 = 240 - -Field of view (horizontal: 56 and vertical: 43 degrees) - \endcode + * Initialize the camera model without distortion from the image dimension and + * the camera field of view. + * \param w : Image width. + * \param h : Image height. + * \param hfov : Camera horizontal field of view angle expressed in radians. + * \param vfov : Camera vertical field of view angle expressed in radians. + * + * The following sample code shows how to use this function: + * \code + * #include + * #include + * + * int main() + * { + * vpImage I(480, 640); + * vpCameraParameters cam; + * double hfov = vpMath::rad(56); + * double vfov = vpMath::rad(43); + * cam.initFromFov(I.getWidth(), I.getHeight(), hfov, vfov); + * + * std::cout << cam << std::endl; + * std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) + * << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) << " degrees)" << std::endl; + * } + * \endcode + * It produces the following output: + * \code + * Camera parameters for perspective projection without distortion: + * px = 601.832 py = 609.275 + * u0 = 320 v0 = 240 + * + * Field of view (horizontal: 56 and vertical: 43 degrees) + * \endcode */ void vpCameraParameters::initFromFov(const unsigned int &w, const unsigned int &h, const double &hfov, const double &vfov) { - projModel = vpCameraParameters::perspectiveProjWithoutDistortion; - u0 = (double)w / 2.; - v0 = (double)h / 2.; - px = u0 / tan(hfov / 2); - py = v0 / tan(vfov / 2); - kud = 0; - kdu = 0; - inv_px = 1. / px; - inv_py = 1. / py; + m_projModel = vpCameraParameters::perspectiveProjWithoutDistortion; + m_u0 = (double)w / 2.; + m_v0 = (double)h / 2.; + m_px = m_u0 / tan(hfov / 2); + m_py = m_v0 / tan(vfov / 2); + m_kud = 0; + m_kdu = 0; + m_inv_px = 1. / m_px; + m_inv_py = 1. / m_py; computeFov(w, h); } /*! - copy operator + * Copy operator. */ vpCameraParameters &vpCameraParameters::operator=(const vpCameraParameters &cam) { - projModel = cam.projModel; - px = cam.px; - py = cam.py; - u0 = cam.u0; - v0 = cam.v0; - kud = cam.kud; - kdu = cam.kdu; + m_projModel = cam.m_projModel; + m_px = cam.m_px; + m_py = cam.m_py; + m_u0 = cam.m_u0; + m_v0 = cam.m_v0; + m_kud = cam.m_kud; + m_kdu = cam.m_kdu; m_dist_coefs = cam.m_dist_coefs; - inv_px = cam.inv_px; - inv_py = cam.inv_py; + m_inv_px = cam.m_inv_px; + m_inv_py = cam.m_inv_py; - isFov = cam.isFov; + m_isFov = cam.m_isFov; m_hFovAngle = cam.m_hFovAngle; m_vFovAngle = cam.m_vFovAngle; - width = cam.width; - height = cam.height; - fovNormals = cam.fovNormals; + m_width = cam.m_width; + m_height = cam.m_height; + m_fovNormals = cam.m_fovNormals; return *this; } /*! - True if the two objects are absolutely identical. + * True if the two objects are absolutely identical. 
*/ bool vpCameraParameters::operator==(const vpCameraParameters &c) const { - if (projModel != c.projModel) + if (m_projModel != c.m_projModel) return false; - if (!vpMath::equal(px, c.px, std::numeric_limits::epsilon()) || - !vpMath::equal(py, c.py, std::numeric_limits::epsilon()) || - !vpMath::equal(u0, c.u0, std::numeric_limits::epsilon()) || - !vpMath::equal(v0, c.v0, std::numeric_limits::epsilon()) || - !vpMath::equal(kud, c.kud, std::numeric_limits::epsilon()) || - !vpMath::equal(kdu, c.kdu, std::numeric_limits::epsilon()) || - !vpMath::equal(inv_px, c.inv_px, std::numeric_limits::epsilon()) || - !vpMath::equal(inv_py, c.inv_py, std::numeric_limits::epsilon())) + if (!vpMath::equal(m_px, c.m_px, std::numeric_limits::epsilon()) || + !vpMath::equal(m_py, c.m_py, std::numeric_limits::epsilon()) || + !vpMath::equal(m_u0, c.m_u0, std::numeric_limits::epsilon()) || + !vpMath::equal(m_v0, c.m_v0, std::numeric_limits::epsilon()) || + !vpMath::equal(m_kud, c.m_kud, std::numeric_limits::epsilon()) || + !vpMath::equal(m_kdu, c.m_kdu, std::numeric_limits::epsilon()) || + !vpMath::equal(m_inv_px, c.m_inv_px, std::numeric_limits::epsilon()) || + !vpMath::equal(m_inv_py, c.m_inv_py, std::numeric_limits::epsilon())) return false; - if(m_dist_coefs.size() != c.m_dist_coefs.size()) + if (m_dist_coefs.size() != c.m_dist_coefs.size()) return false; for (unsigned int i = 0; i < m_dist_coefs.size(); i++) if (!vpMath::equal(m_dist_coefs[i], c.m_dist_coefs[i], std::numeric_limits::epsilon())) return false; - if (isFov != c.isFov || !vpMath::equal(m_hFovAngle, c.m_hFovAngle, std::numeric_limits::epsilon()) || - !vpMath::equal(m_vFovAngle, c.m_vFovAngle, std::numeric_limits::epsilon()) || width != c.width || - height != c.height) + if (m_isFov != c.m_isFov || !vpMath::equal(m_hFovAngle, c.m_hFovAngle, std::numeric_limits::epsilon()) || + !vpMath::equal(m_vFovAngle, c.m_vFovAngle, std::numeric_limits::epsilon()) || m_width != c.m_width || + m_height != c.m_height) return false; - if (fovNormals.size() != c.fovNormals.size()) + if (m_fovNormals.size() != c.m_fovNormals.size()) return false; - std::vector::const_iterator it1 = fovNormals.begin(); - std::vector::const_iterator it2 = c.fovNormals.begin(); - for (; it1 != fovNormals.end() && it2 != c.fovNormals.end(); ++it1, ++it2) { + std::vector::const_iterator it1 = m_fovNormals.begin(); + std::vector::const_iterator it2 = c.m_fovNormals.begin(); + for (; it1 != m_fovNormals.end() && it2 != c.m_fovNormals.end(); ++it1, ++it2) { if (*it1 != *it2) return false; } @@ -469,30 +474,30 @@ bool vpCameraParameters::operator==(const vpCameraParameters &c) const } /*! - False if the two objects are absolutely identical. + * False if the two objects are absolutely identical. */ bool vpCameraParameters::operator!=(const vpCameraParameters &c) const { return !(*this == c); } /*! - Compute angles and normals of the FOV. - - \param w : Width of the image - \param h : Height of the image. -*/ + * Compute angles and normals of the FOV. + * + * \param w : Width of the image + * \param h : Height of the image. 
+ */ void vpCameraParameters::computeFov(const unsigned int &w, const unsigned int &h) { - if ((!isFov || w != width || h != height) && w != 0 && h != 0) { - fovNormals = std::vector(4); + if ((!m_isFov || w != m_width || h != m_height) && w != 0 && h != 0) { + m_fovNormals = std::vector(4); - isFov = true; + m_isFov = true; - double hFovAngle = atan(((double)w - u0) * (1.0 / px)); - double vFovAngle = atan((v0) * (1.0 / py)); - double minushFovAngle = atan((u0) * (1.0 / px)); - double minusvFovAngle = atan(((double)h - v0) * (1.0 / py)); + double hFovAngle = atan(((double)w - m_u0) * (1.0 / m_px)); + double vFovAngle = atan((m_v0) * (1.0 / m_py)); + double minushFovAngle = atan((m_u0) * (1.0 / m_px)); + double minusvFovAngle = atan(((double)h - m_v0) * (1.0 / m_py)); - width = w; - height = h; + m_width = w; + m_height = h; vpColVector n(3); n = 0; @@ -504,10 +509,10 @@ void vpCameraParameters::computeFov(const unsigned int &w, const unsigned int &h vpColVector nLeft, nRight; nLeft = Rleft * (-n); - fovNormals[0] = nLeft.normalize(); + m_fovNormals[0] = nLeft.normalize(); nRight = Rright * n; - fovNormals[1] = nRight.normalize(); + m_fovNormals[1] = nRight.normalize(); n = 0; n[1] = 1.0; @@ -518,10 +523,10 @@ void vpCameraParameters::computeFov(const unsigned int &w, const unsigned int &h vpColVector nUp, nDown; nUp = Rup * (-n); - fovNormals[2] = nUp.normalize(); + m_fovNormals[2] = nUp.normalize(); nDown = Rdown * n; - fovNormals[3] = nDown.normalize(); + m_fovNormals[3] = nDown.normalize(); m_hFovAngle = hFovAngle + minushFovAngle; m_vFovAngle = vFovAngle + minusvFovAngle; @@ -529,72 +534,72 @@ void vpCameraParameters::computeFov(const unsigned int &w, const unsigned int &h } /*! - Return the camera matrix \f$K\f$ given by: - - \f$ K = \left[\begin{array}{ccc} - p_x & 0 & u_0 \\ - 0 & p_y & v_0 \\ - 0 & 0 & 1 - \end{array} \right] \f$ - - \sa get_K_inverse() -*/ + * Return the camera matrix \f$K\f$ given by: + * + * \f$ K = \left[\begin{array}{ccc} + * p_x & 0 & u_0 \\ + * 0 & p_y & v_0 \\ + * 0 & 0 & 1 + * \end{array} \right] \f$ + * + * \sa get_K_inverse() + */ vpMatrix vpCameraParameters::get_K() const { vpMatrix K(3, 3, 0.); - K[0][0] = px; - K[1][1] = py; - K[0][2] = u0; - K[1][2] = v0; + K[0][0] = m_px; + K[1][1] = m_py; + K[0][2] = m_u0; + K[1][2] = m_v0; K[2][2] = 1.0; return K; } /*! - Return the inverted camera matrix \f$K^{-1}\f$ given by: - - \f$ K^{-1} = \left[\begin{array}{ccc} - 1/p_x & 0 & -u_0/p_x \\ - 0 & 1/p_y & -v_0/p_y \\ - 0 & 0 & 1 - \end{array} \right] \f$ - - \sa get_K() -*/ + * Return the inverted camera matrix \f$K^{-1}\f$ given by: + * + * \f$ K^{-1} = \left[\begin{array}{ccc} + * 1/p_x & 0 & -u_0/p_x \\ + * 0 & 1/p_y & -v_0/p_y \\ + * 0 & 0 & 1 + * \end{array} \right] \f$ + * + * \sa get_K() + */ vpMatrix vpCameraParameters::get_K_inverse() const { vpMatrix K_inv(3, 3, 0.); - K_inv[0][0] = inv_px; - K_inv[1][1] = inv_py; - K_inv[0][2] = -u0 * inv_px; - K_inv[1][2] = -v0 * inv_py; + K_inv[0][0] = m_inv_px; + K_inv[1][1] = m_inv_py; + K_inv[0][2] = -m_u0 * m_inv_px; + K_inv[1][2] = -m_v0 * m_inv_py; K_inv[2][2] = 1.0; return K_inv; } /*! - Print the camera parameters on the standard output - - \sa operator<<(std::ostream &, const vpCameraParameters &) -*/ + * Print the camera parameters on the standard output. 
+ * + * \sa operator<<(std::ostream &, const vpCameraParameters &) + */ void vpCameraParameters::printParameters() { std::ios::fmtflags original_flags(std::cout.flags()); - switch (projModel) { + switch (m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: std::cout.precision(10); std::cout << "Camera parameters for perspective projection without distortion:" << std::endl; - std::cout << " px = " << px << "\t py = " << py << std::endl; - std::cout << " u0 = " << u0 << "\t v0 = " << v0 << std::endl; + std::cout << " px = " << m_px << "\t py = " << m_py << std::endl; + std::cout << " u0 = " << m_u0 << "\t v0 = " << m_v0 << std::endl; break; case vpCameraParameters::perspectiveProjWithDistortion: std::cout.precision(10); std::cout << "Camera parameters for perspective projection with distortion:" << std::endl; - std::cout << " px = " << px << "\t py = " << py << std::endl; - std::cout << " u0 = " << u0 << "\t v0 = " << v0 << std::endl; - std::cout << " kud = " << kud << std::endl; - std::cout << " kdu = " << kdu << std::endl; + std::cout << " px = " << m_px << "\t py = " << m_py << std::endl; + std::cout << " u0 = " << m_u0 << "\t v0 = " << m_v0 << std::endl; + std::cout << " kud = " << m_kud << std::endl; + std::cout << " kdu = " << m_kdu << std::endl; break; case vpCameraParameters::ProjWithKannalaBrandtDistortion: std::cout << " Coefficients: "; @@ -606,13 +611,13 @@ void vpCameraParameters::printParameters() // Restore ostream format std::cout.flags(original_flags); } -/*! - Print on the output stream \e os the camera parameters. - - \param os : Output stream. - \param cam : Camera parameters. -*/ +/*! + * Print on the output stream \e os the camera parameters. + * + * \param os : Output stream. + * \param cam : Camera parameters. + */ VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpCameraParameters &cam) { switch (cam.get_projModel()) { diff --git a/modules/core/src/camera/vpMeterPixelConversion.cpp b/modules/core/src/camera/vpMeterPixelConversion.cpp index b8d452ad95..5c0f835b42 100644 --- a/modules/core/src/camera/vpMeterPixelConversion.cpp +++ b/modules/core/src/camera/vpMeterPixelConversion.cpp @@ -58,15 +58,15 @@ void vpMeterPixelConversion::convertLine(const vpCameraParameters &cam, const do { double co = cos(theta_m); double si = sin(theta_m); - double d = sqrt(vpMath::sqr(cam.py * co) + vpMath::sqr(cam.px * si)); + double d = sqrt(vpMath::sqr(cam.m_py * co) + vpMath::sqr(cam.m_px * si)); if (fabs(d) < 1e-6) { vpERROR_TRACE("division by zero"); throw(vpException(vpException::divideByZeroError, "division by zero")); } - theta_p = atan2(cam.px * si, cam.py * co); - rho_p = (cam.px * cam.py * rho_m + cam.u0 * cam.py * co + cam.v0 * cam.px * si); + theta_p = atan2(cam.m_px * si, cam.m_py * co); + rho_p = (cam.m_px * cam.m_py * rho_m + cam.m_u0 * cam.m_py * co + cam.m_v0 * cam.m_px * si); rho_p /= d; } @@ -82,7 +82,7 @@ void vpMeterPixelConversion::convertLine(const vpCameraParameters &cam, const do \param[in] circle : 3D circle with internal vector `circle.p[]` that contains the ellipse parameters expressed in the image plane. These parameters are internally updated after perspective projection of the sphere. \param[out] center_p : Center \f$(u_c, v_c)\f$ of the corresponding ellipse in the image with coordinates expressed in - pixels. + pixels. 
\param[out] n20_p, n11_p, n02_p : Second order centered moments of the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where \f$\mu_{ij}\f$ are the centered moments and a the area) expressed in pixels. @@ -125,7 +125,7 @@ void vpMeterPixelConversion::convertEllipse(const vpCameraParameters &cam, const \param[in] sphere : 3D sphere with internal vector `circle.p[]` that contains the ellipse parameters expressed in the image plane. These parameters are internally updated after perspective projection of the sphere. \param[out] center_p : Center \f$(u_c, v_c)\f$ of the corresponding ellipse in the image with coordinates expressed in - pixels. + pixels. \param[out] n20_p, n11_p, n02_p : Second order centered moments of the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where \f$\mu_{ij}\f$ are the centered moments and a the area) expressed in pixels. diff --git a/modules/core/src/camera/vpPixelMeterConversion.cpp b/modules/core/src/camera/vpPixelMeterConversion.cpp index adc130124a..a55ce1c4a0 100644 --- a/modules/core/src/camera/vpPixelMeterConversion.cpp +++ b/modules/core/src/camera/vpPixelMeterConversion.cpp @@ -80,14 +80,14 @@ void vpPixelMeterConversion::convertLine(const vpCameraParameters &cam, const do { double co = cos(theta_p); double si = sin(theta_p); - double d = vpMath::sqr(cam.px * co) + vpMath::sqr(cam.py * si); + double d = vpMath::sqr(cam.m_px * co) + vpMath::sqr(cam.m_py * si); if (fabs(d) < 1e-6) { vpERROR_TRACE("division by zero"); throw(vpException(vpException::divideByZeroError, "division by zero")); } - theta_m = atan2(si * cam.py, co * cam.px); - rho_m = (rho_p - cam.u0 * co - cam.v0 * si) / sqrt(d); + theta_m = atan2(si * cam.m_py, co * cam.m_px); + rho_m = (rho_p - cam.m_u0 * co - cam.m_v0 * si) / sqrt(d); } /*! 
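[Editor's note] The two convertLine() hunks above (vpMeterPixelConversion and vpPixelMeterConversion) only rename the intrinsics to their m_-prefixed counterparts; the (rho, theta) line conversion itself is unchanged. As a sanity check, the following is a minimal standalone sketch of that conversion written with plain doubles instead of the ViSP API. The intrinsic values and the test line are made-up example numbers, and the two helper functions simply mirror the formulas visible in the hunks; a pixel -> meter -> pixel round trip should return the initial parameters.

\code
#include <cmath>
#include <cstdio>

// Pixel -> meter, mirrors the formulas in vpPixelMeterConversion::convertLine()
void lineP2M(double px, double py, double u0, double v0,
             double rho_p, double theta_p, double &rho_m, double &theta_m)
{
  double co = std::cos(theta_p), si = std::sin(theta_p);
  double d = (px * co) * (px * co) + (py * si) * (py * si);
  theta_m = std::atan2(si * py, co * px);
  rho_m = (rho_p - u0 * co - v0 * si) / std::sqrt(d);
}

// Meter -> pixel, mirrors the formulas in vpMeterPixelConversion::convertLine()
void lineM2P(double px, double py, double u0, double v0,
             double rho_m, double theta_m, double &rho_p, double &theta_p)
{
  double co = std::cos(theta_m), si = std::sin(theta_m);
  double d = std::sqrt((py * co) * (py * co) + (px * si) * (px * si));
  theta_p = std::atan2(px * si, py * co);
  rho_p = (px * py * rho_m + u0 * py * co + v0 * px * si) / d;
}

int main()
{
  // Example intrinsics and an arbitrary line expressed in pixels (not from the patch)
  double px = 600, py = 580, u0 = 320, v0 = 240;
  double rho_p = 100., theta_p = 0.3;
  double rho_m, theta_m, rho_p2, theta_p2;

  lineP2M(px, py, u0, v0, rho_p, theta_p, rho_m, theta_m);
  lineM2P(px, py, u0, v0, rho_m, theta_m, rho_p2, theta_p2);

  // The round trip should recover the initial pixel parameters
  std::printf("rho_p: %g -> %g, theta_p: %g -> %g\n", rho_p, rho_p2, theta_p, theta_p2);
  return 0;
}
\endcode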
@@ -131,8 +131,8 @@ void vpPixelMeterConversion::convertMoment(const vpCameraParameters &cam, unsign const vpMatrix &moment_pixel, vpMatrix &moment_meter) { vpMatrix m(order, order); - double yc = -cam.v0; - double xc = -cam.u0; + double yc = -cam.m_v0; + double xc = -cam.m_u0; for (unsigned int k = 0; k < order; k++) // iteration correspondant e l'ordre du moment { @@ -145,7 +145,7 @@ void vpPixelMeterConversion::convertMoment(const vpCameraParameters &cam, unsign for (unsigned int t = 0; t <= q; t++) // somme interne { m[p][q] += static_cast(vpMath::comb(p, r)) * static_cast(vpMath::comb(q, t)) * - pow(xc, (int)(p - r)) * pow(yc, (int)(q - t)) * moment_pixel[r][t]; + pow(xc, (int)(p - r)) * pow(yc, (int)(q - t)) * moment_pixel[r][t]; } } } @@ -154,7 +154,7 @@ void vpPixelMeterConversion::convertMoment(const vpCameraParameters &cam, unsign for (unsigned int p = 0; p < order; p++) for (unsigned int q = 0; q < order; q++) if (p + q == k) { - m[p][q] *= pow(cam.inv_px, (int)(1 + p)) * pow(cam.inv_py, (int)(1 + q)); + m[p][q] *= pow(cam.m_inv_px, (int)(1 + p)) * pow(cam.m_inv_py, (int)(1 + q)); } for (unsigned int k = 0; k < order; k++) // iteration correspondant e l'ordre du moment @@ -254,7 +254,7 @@ void vpPixelMeterConversion::convertMoment(const cv::Mat &cameraMatrix, unsigned for (unsigned int t = 0; t <= q; t++) // somme interne { m[p][q] += static_cast(vpMath::comb(p, r)) * static_cast(vpMath::comb(q, t)) * - pow(xc, static_cast(p - r)) * pow(yc, static_cast(q - t)) * moment_pixel[r][t]; + pow(xc, static_cast(p - r)) * pow(yc, static_cast(q - t)) * moment_pixel[r][t]; } } } diff --git a/modules/core/src/image/vpRGBa.cpp b/modules/core/src/image/vpRGBa.cpp index 85c60fb65c..69147272c4 100644 --- a/modules/core/src/image/vpRGBa.cpp +++ b/modules/core/src/image/vpRGBa.cpp @@ -91,7 +91,7 @@ vpRGBa &vpRGBa::operator=(const vpRGBa &&v) relation with respectively R, G, B and A. \exception vpException::dimensionError : If v is not a 4 four - dimention vector. + dimension vector. */ vpRGBa &vpRGBa::operator=(const vpColVector &v) { diff --git a/modules/core/src/image/vpRGBf.cpp b/modules/core/src/image/vpRGBf.cpp index dbb0d50afd..7822445047 100644 --- a/modules/core/src/image/vpRGBf.cpp +++ b/modules/core/src/image/vpRGBf.cpp @@ -88,7 +88,7 @@ vpRGBf &vpRGBf::operator=(const vpRGBf &&v) \param v : Input vector. v[0], v[1], v[2] are to make into relation with respectively R, G and B. - \exception vpException::dimensionError : If v is not a 3-dimentional vector. + \exception vpException::dimensionError : If v is not a 3-dimensional vector. */ vpRGBf &vpRGBf::operator=(const vpColVector &v) { diff --git a/modules/core/src/math/kalman/vpLinearKalmanFilterInstantiation.cpp b/modules/core/src/math/kalman/vpLinearKalmanFilterInstantiation.cpp index 9186d0a69c..ad4179240d 100644 --- a/modules/core/src/math/kalman/vpLinearKalmanFilterInstantiation.cpp +++ b/modules/core/src/math/kalman/vpLinearKalmanFilterInstantiation.cpp @@ -152,7 +152,7 @@ int main() // Does the filtering vpColVector vm(2); // Measured velocities for ( ; ; ) { - // Get the two dimentional velocity measures + // Get the two dimensional velocity measures // vm[0] = ...; // vm[1] = ...; @@ -187,7 +187,7 @@ void vpLinearKalmanFilterInstantiation::initFilter(unsigned int n_signal, vpColV } /*! - Modelisation of a constant speed state model with white noise. The + Modelization of a constant speed state model with white noise. The measure is assumed to be the position of the target. 
The considered state model is the following @@ -336,7 +336,7 @@ void vpLinearKalmanFilterInstantiation::initStateConstVel_MeasurePos(unsigned in /*! - Modelisation of a constant speed state model with colored noise. The + Modelization of a constant speed state model with colored noise. The measure is assumed to be the velocity of the target. This state model assume that there is some memory associated with @@ -472,7 +472,7 @@ int main() // Does the filtering vpColVector vm(2); // Measured velocities for ( ; ; ) { - // Get the two dimentional velocity measures + // Get the two dimensional velocity measures // vm[0] = ...; // vm[1] = ...; @@ -544,7 +544,7 @@ void vpLinearKalmanFilterInstantiation::initStateConstVelWithColoredNoise_Measur /*! - Modelisation of a constant acceleration state model with colored noise. The + Modelization of a constant acceleration state model with colored noise. The measure is assumed to be the velocity of the target. This state model assume that there is some memory associated with @@ -691,7 +691,7 @@ int main() // Does the filtering vpColVector vm(2); // Measured velocities for ( ; ; ) { - // Get the two dimentional velocity measures + // Get the two dimensional velocity measures // vm[0] = ...; // vm[1] = ...; diff --git a/modules/core/src/math/matrix/vpColVector.cpp b/modules/core/src/math/matrix/vpColVector.cpp index 7a485eba4a..01fac52a25 100644 --- a/modules/core/src/math/matrix/vpColVector.cpp +++ b/modules/core/src/math/matrix/vpColVector.cpp @@ -658,7 +658,7 @@ vpMatrix vpColVector::skew(const vpColVector &v) { vpMatrix M; if (v.getRows() != 3) { - throw(vpException(vpException::dimensionError, "Cannot compute skew vector of a non 3-dimention vector (%d)", + throw(vpException(vpException::dimensionError, "Cannot compute skew vector of a non 3-dimension vector (%d)", v.getRows())); } diff --git a/modules/core/src/math/matrix/vpSubColVector.cpp b/modules/core/src/math/matrix/vpSubColVector.cpp index 1602c74441..4d5875081e 100644 --- a/modules/core/src/math/matrix/vpSubColVector.cpp +++ b/modules/core/src/math/matrix/vpSubColVector.cpp @@ -42,32 +42,32 @@ #include //! Default constructor that creates an empty vector. -vpSubColVector::vpSubColVector() : vpColVector(), pRowNum(0), parent(NULL) {} +vpSubColVector::vpSubColVector() : vpColVector(), m_pRowNum(0), m_parent(NULL) { } /*! - Construct a sub-column vector from a parent column vector. - \param v : parent column vector. - \param offset : offset where the sub-column vector starts in the parent - column vector. \param nrows : size of the sub-column vector. -*/ + * Construct a sub-column vector from a parent column vector. + * \param v : parent column vector. + * \param offset : offset where the sub-column vector starts in the parent column vector. + * \param nrows : size of the sub-column vector. + */ vpSubColVector::vpSubColVector(vpColVector &v, const unsigned int &offset, const unsigned int &nrows) - : vpColVector(), pRowNum(0), parent(NULL) + : vpColVector(), m_pRowNum(0), m_parent(NULL) { init(v, offset, nrows); } /*! - Initialize a sub-column vector from a parent column vector. - \param v : parent column vector. - \param offset : offset where the sub-column vector starts in the parent - column vector. \param nrows : size of the sub-column vector. -*/ + * Initialize a sub-column vector from a parent column vector. + * \param v : parent column vector. + * \param offset : offset where the sub-column vector starts in the parent column vector. + * \param nrows : size of the sub-column vector. 
+ */ void vpSubColVector::init(vpColVector &v, const unsigned int &offset, const unsigned int &nrows) { if (!v.data) { throw(vpException(vpException::fatalError, "Cannot initialize a " - "sub-column vector from an " - "empty parent column vector")); + "sub-column vector from an " + "empty parent column vector")); } if (offset + nrows <= v.getRows()) { @@ -76,49 +76,51 @@ void vpSubColVector::init(vpColVector &v, const unsigned int &offset, const unsi rowNum = nrows; colNum = 1; - pRowNum = v.getRows(); - parent = &v; + m_pRowNum = v.getRows(); + m_parent = &v; if (rowPtrs) { free(rowPtrs); } - rowPtrs = (double **)malloc(parent->getRows() * sizeof(double *)); + rowPtrs = (double **)malloc(m_parent->getRows() * sizeof(double *)); for (unsigned int i = 0; i < nrows; i++) rowPtrs[i] = v.data + i + offset; dsize = rowNum; - } else { + } + else { throw(vpException(vpException::dimensionError, "Cannot create a sub-column vector that is not " - "completely contained in the parrent column vector")); + "completely contained in the parent column vector")); } } -//! Destructor that set the pointer to the parrent column vector to NULL. +/*! + * Destructor that set the pointer to the parent column vector to NULL. + */ vpSubColVector::~vpSubColVector() { data = NULL; } /*! - This method can be used to detect if the parent column vector - always exits or its size have not changed. - If this not the case an exception is thrown. -*/ + * This method can be used to detect if the parent column vector + * always exits or its size have not changed. + * If this not the case an exception is thrown. + */ void vpSubColVector::checkParentStatus() const { if (!data) { throw(vpException(vpException::fatalError, "The parent of the current sub-column vector has been destroyed")); } - if (pRowNum != parent->getRows()) { + if (m_pRowNum != m_parent->getRows()) { throw(vpException(vpException::dimensionError, "The size of the parent sub-column vector has changed")); } } /*! - Allow to initialize a sub-column vector from an other one using operation A - = B. Notice that the sub-column vector is not resized to the dimension of \e - B. - - \param B : a sub-column vector. -*/ + * Allow to initialize a sub-column vector from an other one using operation A + * = B. Notice that the sub-column vector is not resized to the dimension of \e B. + * + * \param B : a sub-column vector. + */ vpSubColVector &vpSubColVector::operator=(const vpSubColVector &B) { if (rowNum != B.getRows()) { @@ -127,17 +129,18 @@ vpSubColVector &vpSubColVector::operator=(const vpSubColVector &B) "(%dx1) sub-column vector", rowNum, B.getRows())); } - pRowNum = B.pRowNum; + m_pRowNum = B.m_pRowNum; for (unsigned int i = 0; i < rowNum; i++) data[i] = B[i]; return *this; } /*! - Allow to initialize a sub-column vector from a column vector using operation - A = B. Notice that the sub-column vector is not resized to the dimension of - \e B. \param B : a column vector. -*/ + * Allow to initialize a sub-column vector from a column vector using operation + * A = B. Notice that the sub-column vector is not resized to the dimension of \e B. + * + * \param B : a column vector. + */ vpSubColVector &vpSubColVector::operator=(const vpColVector &B) { if (rowNum != B.getRows()) { @@ -154,10 +157,11 @@ vpSubColVector &vpSubColVector::operator=(const vpColVector &B) } /*! - Allow to initialize a sub-column vector from a m-by-1 matrix using operation - A = B. Notice that the sub-column vector is not resized to the dimension of - \e B. \param B : a matrix of size m-by-1. 
-*/ + * Allow to initialize a sub-column vector from a m-by-1 matrix using operation + * A = B. Notice that the sub-column vector is not resized to the dimension of \e B. + * + * \param B : a matrix of size m-by-1. + */ vpSubColVector &vpSubColVector::operator=(const vpMatrix &B) { if ((B.getCols() != 1) || (rowNum != B.getRows())) { @@ -171,9 +175,10 @@ vpSubColVector &vpSubColVector::operator=(const vpMatrix &B) } /*! - Set all the elements of the sub-column vector to \e x. - \param x : a scalar value. -*/ + * Set all the elements of the sub-column vector to \e x. + * + * \param x : a scalar value. + */ vpSubColVector &vpSubColVector::operator=(const double &x) { for (unsigned int i = 0; i < rowNum; i++) @@ -182,7 +187,7 @@ vpSubColVector &vpSubColVector::operator=(const double &x) } /*! - Operator that allows to convert a translation vector into a column vector. + * Operator that allows to convert a translation vector into a column vector. */ vpSubColVector &vpSubColVector::operator=(const vpTranslationVector &tv) { @@ -190,7 +195,8 @@ vpSubColVector &vpSubColVector::operator=(const vpTranslationVector &tv) if (rowNum != k) { try { resize(k); - } catch (...) { + } + catch (...) { throw; } } @@ -198,8 +204,9 @@ vpSubColVector &vpSubColVector::operator=(const vpTranslationVector &tv) memcpy(data, tv.data, rowNum * sizeof(double)); return *this; } + /*! - Operator that allows to convert a rotation vector into a column vector. + * Operator that allows to convert a rotation vector into a column vector. */ vpSubColVector &vpSubColVector::operator=(const vpRotationVector &rv) { @@ -207,7 +214,8 @@ vpSubColVector &vpSubColVector::operator=(const vpRotationVector &rv) if (rowNum != k) { try { resize(k); - } catch (...) { + } + catch (...) { throw; } } @@ -215,8 +223,9 @@ vpSubColVector &vpSubColVector::operator=(const vpRotationVector &rv) memcpy(data, rv.data, rowNum * sizeof(double)); return *this; } + /*! - Operator that allows to convert a pose vector into a column vector. + * Operator that allows to convert a pose vector into a column vector. */ vpSubColVector &vpSubColVector::operator=(const vpPoseVector &p) { @@ -224,7 +233,8 @@ vpSubColVector &vpSubColVector::operator=(const vpPoseVector &p) if (rowNum != k) { try { resize(k); - } catch (...) { + } + catch (...) { throw; } } diff --git a/modules/core/src/math/matrix/vpSubRowVector.cpp b/modules/core/src/math/matrix/vpSubRowVector.cpp index bb0b06f19d..6e64b314e0 100644 --- a/modules/core/src/math/matrix/vpSubRowVector.cpp +++ b/modules/core/src/math/matrix/vpSubRowVector.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Mask on a vpRowVector . - * - * Authors: - * Laneurit Jean - * -*****************************************************************************/ + */ #include @@ -42,32 +37,31 @@ #include //! Default constructor that creates an empty vector. -vpSubRowVector::vpSubRowVector() : vpRowVector(), pColNum(0), parent(NULL) {} +vpSubRowVector::vpSubRowVector() : vpRowVector(), m_pColNum(0), m_parent(NULL) { } /*! - Construct a sub-row vector from a parent row vector. - \param v : parent row vector. - \param offset : offset where the sub-row vector starts in the parent row - vector. \param ncols : size of the sub-row vector. -*/ + * Construct a sub-row vector from a parent row vector. + * \param v : parent row vector. 
+ * \param offset : offset where the sub-row vector starts in the parent row vector. + * \param ncols : size of the sub-row vector. + */ vpSubRowVector::vpSubRowVector(vpRowVector &v, const unsigned int &offset, const unsigned int &ncols) - : vpRowVector(), pColNum(0), parent(NULL) + : vpRowVector(), m_pColNum(0), m_parent(NULL) { init(v, offset, ncols); } /*! - Initialize a sub-row vector from a parent row vector. - \param v : parent row vector. - \param offset : offset where the sub-row vector starts in the parent row - vector. \param ncols : size of the sub-row vector. -*/ + * Initialize a sub-row vector from a parent row vector. + * \param v : parent row vector. + * \param offset : offset where the sub-row vector starts in the parent row vector. + * \param ncols : size of the sub-row vector. + */ void vpSubRowVector::init(vpRowVector &v, const unsigned int &offset, const unsigned int &ncols) { if (!v.data) { throw(vpException(vpException::fatalError, "Cannot initialize a sub-row " - "vector from an empty parent " - "row vector")); + "vector from an empty parent row vector")); } if (offset + ncols <= v.getCols()) { @@ -76,8 +70,8 @@ void vpSubRowVector::init(vpRowVector &v, const unsigned int &offset, const unsi rowNum = 1; colNum = ncols; - pColNum = v.getCols(); - parent = &v; + m_pColNum = v.getCols(); + m_parent = &v; if (rowPtrs) free(rowPtrs); @@ -87,44 +81,47 @@ void vpSubRowVector::init(vpRowVector &v, const unsigned int &offset, const unsi rowPtrs[i] = v.data + i + offset; dsize = colNum; - } else { + } + else { throw(vpException(vpException::dimensionError, "Cannot create a sub-row vector that is not completely " - "contained in the parrent row vector")); + "contained in the parent row vector")); } } -//! Destructor that set the pointer to the parrent row vector to NULL. +/*! + * Destructor that set the pointer to the parent row vector to NULL. + */ vpSubRowVector::~vpSubRowVector() { data = NULL; } /*! - This method can be used to detect if the parent row vector - always exits or its size have not changed. - If this not the case an exception is thrown. -*/ + * This method can be used to detect if the parent row vector + * always exits or its size have not changed. + * If this not the case an exception is thrown. + */ void vpSubRowVector::checkParentStatus() const { if (!data) { throw(vpException(vpException::fatalError, "The parent of the current sub-row vector has been destroyed")); } - if (pColNum != parent->getCols()) { + if (m_pColNum != m_parent->getCols()) { throw(vpException(vpException::dimensionError, "The size of the parent sub-row vector has changed")); } } /*! - Allow to initialize a sub-row vector from an other one using operation A = - B. Notice that the sub-row vector is not resized to the dimension of \e B. - - \param B : a sub-row vector. -*/ + * Allow to initialize a sub-row vector from an other one using operation A = + * B. Notice that the sub-row vector is not resized to the dimension of \e B. + * + * \param B : a sub-row vector. + */ vpSubRowVector &vpSubRowVector::operator=(const vpSubRowVector &B) { if (colNum != B.getCols()) { throw(vpException(vpException::dimensionError, "Cannot initialize (1x%d) sub-row vector from (1x%d) sub-row vector", colNum, B.getCols())); } - pColNum = B.pColNum; - parent = B.parent; + m_pColNum = B.m_pColNum; + m_parent = B.m_parent; for (unsigned int i = 0; i < rowNum; i++) data[i] = B[i]; @@ -132,11 +129,11 @@ vpSubRowVector &vpSubRowVector::operator=(const vpSubRowVector &B) } /*! 
- Allow to initialize a sub-row vector from a row vector using operation A = - B. Notice that the sub-row vector is not resized to the dimension of \e B. - - \param B : a row vector. -*/ + * Allow to initialize a sub-row vector from a row vector using operation A = + * B. Notice that the sub-row vector is not resized to the dimension of \e B. + * + * \param B : a row vector. + */ vpSubRowVector &vpSubRowVector::operator=(const vpRowVector &B) { if (colNum != B.getCols()) { @@ -151,11 +148,11 @@ vpSubRowVector &vpSubRowVector::operator=(const vpRowVector &B) } /*! - Allow to initialize a sub-row vector from a matrix using operation A = B. - Notice that the sub-row vector is not resized to the dimension of \e B. - - \param B : a matrix of size 1-by-n. -*/ + * Allow to initialize a sub-row vector from a matrix using operation A = B. + * Notice that the sub-row vector is not resized to the dimension of \e B. + * + * \param B : a matrix of size 1-by-n. + */ vpSubRowVector &vpSubRowVector::operator=(const vpMatrix &B) { if ((B.getRows() != 1) || (colNum != B.getCols())) { @@ -167,10 +164,11 @@ vpSubRowVector &vpSubRowVector::operator=(const vpMatrix &B) data[i] = B[i][1]; return *this; } + /*! - Set all the elements of the sub-row vector to \e x. - \param x : a scalar value. -*/ + * Set all the elements of the sub-row vector to \e x. + * \param x : a scalar value. + */ vpSubRowVector &vpSubRowVector::operator=(const double &x) { for (unsigned int i = 0; i < rowNum; i++) diff --git a/modules/core/src/tools/network/vpClient.cpp b/modules/core/src/tools/network/vpClient.cpp index a8864d6140..55561dca5b 100644 --- a/modules/core/src/tools/network/vpClient.cpp +++ b/modules/core/src/tools/network/vpClient.cpp @@ -41,7 +41,7 @@ // inet_ntop() not supported on win XP #ifdef VISP_HAVE_FUNC_INET_NTOP -vpClient::vpClient() : vpNetwork(), numberOfAttempts(0) {} +vpClient::vpClient() : vpNetwork(), m_numberOfAttempts(0) { } /*! Disconnect the client from all the servers, and close the sockets. @@ -94,18 +94,18 @@ bool vpClient::connectToHostname(const std::string &hostname, const unsigned int serv.receptorIP = inet_ntoa(*(in_addr *)server->h_addr); return connectServer(serv); -} + } -/*! - Connect to the server represented by the given ip, and at a given port. + /*! + Connect to the server represented by the given ip, and at a given port. - \sa vpClient::connectToHostname() + \sa vpClient::connectToHostname() - \param ip : IP of the server. - \param port_serv : Port used for the connection. + \param ip : IP of the server. + \param port_serv : Port used for the connection. - \return True if the connection has been etablished, false otherwise. -*/ + \return True if the connection has been etablished, false otherwise. + */ bool vpClient::connectToIP(const std::string &ip, const unsigned int &port_serv) { vpNetwork::vpReceptor serv; @@ -127,13 +127,13 @@ bool vpClient::connectToIP(const std::string &ip, const unsigned int &port_serv) serv.receptorAddress.sin_port = htons((unsigned short)port_serv); return connectServer(serv); -} + } -/*! - Deconnect from the server at a specific index. + /*! + Deconnect from the server at a specific index. - \param index : Index of the server. -*/ + \param index : Index of the server. 
+ */ void vpClient::deconnect(const unsigned int &index) { if (index < receptor_list.size()) { @@ -172,15 +172,15 @@ bool vpClient::connectServer(vpNetwork::vpReceptor &serv) { serv.receptorAddressSize = sizeof(serv.receptorAddress); - numberOfAttempts = 15; + m_numberOfAttempts = 15; unsigned int ind = 1; int connectionResult = -1; - while (ind <= numberOfAttempts) { + while (ind <= m_numberOfAttempts) { std::cout << "Attempt number " << ind << "..." << std::endl; connectionResult = - connect(serv.socketFileDescriptorReceptor, (sockaddr *)&serv.receptorAddress, serv.receptorAddressSize); + connect(serv.socketFileDescriptorReceptor, (sockaddr *)&serv.receptorAddress, serv.receptorAddressSize); if (connectionResult >= 0) break; @@ -204,7 +204,8 @@ bool vpClient::connectServer(vpNetwork::vpReceptor &serv) if (serv.socketFileDescriptorReceptor > 0) { int set_option = 1; if (0 == setsockopt(serv.socketFileDescriptorReceptor, SOL_SOCKET, SO_NOSIGPIPE, &set_option, sizeof(set_option))) { - } else { + } + else { std::cout << "Failed to set socket signal option" << std::endl; } } @@ -216,5 +217,5 @@ bool vpClient::connectServer(vpNetwork::vpReceptor &serv) #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpClient.cpp.o) has no symbols -void dummy_vpClient(){}; +void dummy_vpClient() { }; #endif diff --git a/modules/core/src/tracking/forward-projection/vpCylinder.cpp b/modules/core/src/tracking/forward-projection/vpCylinder.cpp index 98a4596368..65b83613f7 100644 --- a/modules/core/src/tracking/forward-projection/vpCylinder.cpp +++ b/modules/core/src/tracking/forward-projection/vpCylinder.cpp @@ -120,11 +120,6 @@ vpCylinder::vpCylinder(double oA, double oB, double oC, double oX, double oY, do setWorldCoordinates(oA, oB, oC, oX, oY, oZ, R); } -/*! - Default constructor. - */ -vpCylinder::~vpCylinder() {} - /*! Perspective projection of the cylinder. diff --git a/modules/core/src/tracking/forward-projection/vpSphere.cpp b/modules/core/src/tracking/forward-projection/vpSphere.cpp index e4ccff3622..d915fe0638 100644 --- a/modules/core/src/tracking/forward-projection/vpSphere.cpp +++ b/modules/core/src/tracking/forward-projection/vpSphere.cpp @@ -109,11 +109,6 @@ vpSphere::vpSphere(double oX, double oY, double oZ, double R) setWorldCoordinates(oX, oY, oZ, R); } -/*! - * Destructor that does nothing. - */ -vpSphere::~vpSphere() {} - /*! * Perspective projection of the sphere. * This method updates internal parameters (cP and p). 
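[Editor's note] Stepping back to the vpClient::connectServer() hunk shown earlier in this patch: apart from the rename to m_numberOfAttempts, its behaviour is a bounded retry loop around connect(). Below is a minimal standalone sketch of that pattern using plain POSIX sockets; it does not use the ViSP network classes, the address, port and one-second wait are example values, and only the 15-attempt default is taken from the hunk.

\code
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main()
{
  const char *ip = "127.0.0.1";        // example server address
  unsigned short port = 35000;         // example port
  const unsigned int nb_attempts = 15; // same default as in the patch

  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) { std::perror("socket"); return 1; }

  sockaddr_in addr;
  std::memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_port = htons(port);
  inet_pton(AF_INET, ip, &addr.sin_addr);

  // Bounded retry loop, in the spirit of vpClient::connectServer()
  int result = -1;
  for (unsigned int attempt = 1; attempt <= nb_attempts && result < 0; ++attempt) {
    std::printf("Attempt number %u...\n", attempt);
    result = connect(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr));
    if (result < 0)
      sleep(1); // wait a bit before retrying
  }

  if (result < 0) {
    std::printf("Connection failed after %u attempts\n", nb_attempts);
    close(fd);
    return 1;
  }
  std::printf("Connected\n");
  close(fd);
  return 0;
}
\endcode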
@@ -172,12 +167,14 @@ void vpSphere::projection(const vpColVector &cP_, vpColVector &p_) const E = e; A = a; B = b; - } else { + } + else { E = -1.0 / e; A = b; B = a; } - } else { + } + else { E = 0.0; A = r / sqrt(s); B = r * sqrt(y0 * y0 + z0 * z0 - r * r) / s; diff --git a/modules/core/src/tracking/moments/vpMomentCommon.cpp b/modules/core/src/tracking/moments/vpMomentCommon.cpp index 06cde95f3c..93082bcd30 100644 --- a/modules/core/src/tracking/moments/vpMomentCommon.cpp +++ b/modules/core/src/tracking/moments/vpMomentCommon.cpp @@ -54,7 +54,7 @@ vpMomentCommon::vpMomentCommon(double dstSurface, const std::vector &ref, double refAlpha, double dstZ, bool flg_sxsyfromnormalized) : vpMomentDatabase(), momentBasic(), momentGravity(), momentCentered(), momentGravityNormalized(), - momentSurfaceNormalized(dstSurface, dstZ), momentCInvariant(), momentAlpha(ref, refAlpha), momentArea() + momentSurfaceNormalized(dstSurface, dstZ), momentCInvariant(), momentAlpha(ref, refAlpha), momentArea() { momentCInvariant = new vpMomentCInvariant(flg_sxsyfromnormalized); @@ -69,59 +69,58 @@ vpMomentCommon::vpMomentCommon(double dstSurface, const std::vector &ref } /*! -Updates all moments in the database with the object and computes all their -values. This is possible because this particular database knows the link -between the moments it contains. The order of computation is as follows: -vpMomentGravityCenter,vpMomentCentered,vpMomentAlpha,vpMomentCInvariant,vpMomentSInvariant,vpMomentAreaNormalized,vpMomentGravityCenterNormalized -\param object : Moment object. - -Example of using a preconfigured database to compute one of the C-invariants: -\code -#include -#include -#include -#include -#include - -int main() -{ - // Define two discrete points - vpPoint p; - std::vector vec_p; // std::vector that contains the vertices of the contour polygon - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(-3); - p.set_y(0); // coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(-3); - p.set_y(1); // coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - - vpMomentObject obj(5); // Object initialized up to order 5 to handle - // all computations required by vpMomentCInvariant - obj.setType(vpMomentObject::DENSE_POLYGON); // object is the inner part of a polygon - obj.fromstd::vector(vec_p); // Init the discrete object with two points - - //initialisation with default values - vpMomentCommon db(vpMomentCommon::getSurface(obj),vpMomentCommon::getMu3(obj),vpMomentCommon::getAlpha(obj),1.); - bool success; - - db.updateAll(obj); // Update AND compute all moments - - //get C-invariant - vpMomentCInvariant& C = static_cast(db.get("vpMomentCInvariant",success)); - if(success) { - std::cout << C.get(0) << std:: std::endl; - } else - std::cout << "vpMomentCInvariant not found." << std::endl; - - return 0; -} - -\endcode + Updates all moments in the database with the object and computes all their + values. This is possible because this particular database knows the link + between the moments it contains. The order of computation is as follows: + vpMomentGravityCenter,vpMomentCentered,vpMomentAlpha,vpMomentCInvariant,vpMomentSInvariant,vpMomentAreaNormalized,vpMomentGravityCenterNormalized + \param object : Moment object. 
+ + Example of using a preconfigured database to compute one of the C-invariants: + \code + #include + #include + #include + #include + #include + + int main() + { + // Define two discrete points + vpPoint p; + std::vector vec_p; // std::vector that contains the vertices of the contour polygon + + p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) + vec_p.push_back(p); + p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) + vec_p.push_back(p); + p.set_x(-3); + p.set_y(0); // coordinates in meters in the image plane (vertex 3) + vec_p.push_back(p); + p.set_x(-3); + p.set_y(1); // coordinates in meters in the image plane (vertex 4) + vec_p.push_back(p); + + vpMomentObject obj(5); // Object initialized up to order 5 to handle + // all computations required by vpMomentCInvariant + obj.setType(vpMomentObject::DENSE_POLYGON); // object is the inner part of a polygon + obj.fromstd::vector(vec_p); // Init the discrete object with two points + + //initialisation with default values + vpMomentCommon db(vpMomentCommon::getSurface(obj),vpMomentCommon::getMu3(obj),vpMomentCommon::getAlpha(obj),1.); + bool success; + + db.updateAll(obj); // Update AND compute all moments + + //get C-invariant + vpMomentCInvariant& C = static_cast(db.get("vpMomentCInvariant",success)); + if(success) { + std::cout << C.get(0) << std:: std::endl; + } else + std::cout << "vpMomentCInvariant not found." << std::endl; + + return 0; + } + \endcode */ void vpMomentCommon::updateAll(vpMomentObject &object) { @@ -137,7 +136,8 @@ void vpMomentCommon::updateAll(vpMomentObject &object) momentGravityNormalized.compute(); momentArea.compute(); - } catch (const char *ex) { + } + catch (const char *ex) { std::cout << "exception:" << ex << std::endl; } } @@ -170,8 +170,8 @@ double vpMomentCommon::getSurface(vpMomentObject &object) } /*! -Gets a reference alpha of an object. -\param object : Moment object. + Gets a reference alpha of an object. + \param object : Moment object. */ double vpMomentCommon::getAlpha(vpMomentObject &object) { @@ -193,8 +193,8 @@ double vpMomentCommon::getAlpha(vpMomentObject &object) } /*! -Gets the reference 3rd order moments of an object. -\param object : Moment object. + Gets the reference 3rd order moments of an object. + \param object : Moment object. */ std::vector vpMomentCommon::getMu3(vpMomentObject &object) { diff --git a/modules/core/test/tools/xml/testXmlParser.cpp b/modules/core/test/tools/xml/testXmlParser.cpp index 1fc6af3d7f..5dbcecbc5f 100644 --- a/modules/core/test/tools/xml/testXmlParser.cpp +++ b/modules/core/test/tools/xml/testXmlParser.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,26 +29,24 @@ * * Description: * Example which describes how to use the xml parser class. - * -*****************************************************************************/ + */ /*! - \example testXmlParser.cpp - - XML parser example. - - This example contains the declaration of a class used to read and write data - in a xml file like: - \code - - 5.5 - 7 - 3 - Object - - \endcode - -*/ + * \example testXmlParser.cpp + * + * XML parser example. 
+ * + * This example contains the declaration of a class used to read and write data + * in a xml file like: + * \code + * + * 5.5 + * 7 + * 3 + * Object + * + * \endcode + */ #include @@ -65,17 +62,11 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS -/* -------------------------------------------------------------------------- +/*! + * \class vpExampleDataParser + * \brief Class example used to show how to implement a xml parser based on the + * vpXmlParser */ - /* CLASS EXAMPLE */ - /* -------------------------------------------------------------------------- - */ - - /*! - \class vpExampleDataParser - \brief Class example used to show how to implement a xml parser based on the - vpXmlParser - */ class vpExampleDataParser : public vpXmlParser { protected: @@ -88,7 +79,6 @@ class vpExampleDataParser : public vpXmlParser public: vpExampleDataParser(); - virtual ~vpExampleDataParser(); // Data accessors. double getRange() const { return m_range; } @@ -107,11 +97,10 @@ class vpExampleDataParser : public vpXmlParser }; /*! - Constructor. - Initialise the map according to the data to parse, and initialise data to - default values. - -*/ + * Constructor. + * Initialise the map according to the data to parse, and initialise data to + * default values. + */ vpExampleDataParser::vpExampleDataParser() : m_range(0.), m_step(0), m_size_filter(0), m_name("") { nodeMap["config"] = config; @@ -122,19 +111,13 @@ vpExampleDataParser::vpExampleDataParser() : m_range(0.), m_step(0), m_size_filt } /*! - Destructor. - -*/ -vpExampleDataParser::~vpExampleDataParser() { } - -/*! - Read the main class. This method corresponds to the parsing of the main - document (which contains the whole data in the class). At this point, the - document exists and is open. - - \param doc : Pointer to the document to parse. - \param node : Pointer to the root node of the document. -*/ + * Read the main class. This method corresponds to the parsing of the main + * document (which contains the whole data in the class). At this point, the + * document exists and is open. + * + * \param doc : Pointer to the document to parse. + * \param node : Pointer to the root node of the document. + */ void vpExampleDataParser::readMainClass(xmlDocPtr doc, xmlNodePtr node) { for (xmlNodePtr dataNode = node->xmlChildrenNode; dataNode != NULL; dataNode = dataNode->next) { @@ -164,12 +147,12 @@ void vpExampleDataParser::readMainClass(xmlDocPtr doc, xmlNodePtr node) } /*! - Write the data in the file. - The file has already been opened or created in the save() method. And the - root node (corresponding to the main tag) has already been writen. - - \param node : Pointer to the root node. -*/ + * Write the data in the file. + * The file has already been opened or created in the save() method. And the + * root node (corresponding to the main tag) has already been written. + * + * \param node : Pointer to the root node. 
+ */ void vpExampleDataParser::writeMainClass(xmlNodePtr node) { xmlWriteDoubleChild(node, (const char *)"range", m_range); @@ -180,11 +163,9 @@ void vpExampleDataParser::writeMainClass(xmlNodePtr node) #endif // doxygen -/* -------------------------------------------------------------------------- - */ - /* COMMAND LINE OPTIONS */ - /* -------------------------------------------------------------------------- - */ +/* -------------------------------------------------------------------------- */ +/* COMMAND LINE OPTIONS */ +/* -------------------------------------------------------------------------- */ // List of allowed command line options #define GETOPTARGS "cdo:h" @@ -193,14 +174,12 @@ void usage(const char *name, const char *badparam, const std::string &opath, con bool getOptions(int argc, const char **argv, std::string &opath, const std::string &user); /*! - -Print the program options. - -\param name : Program name. -\param badparam : Bad parameter name. -\param opath : Output image path. -\param user : Username. - + * Print the program options. + * + * \param name : Program name. + * \param badparam : Bad parameter name. + * \param opath : Output image path. + * \param user : Username. */ void usage(const char *name, const char *badparam, const std::string &opath, const std::string &user) { @@ -230,14 +209,14 @@ OPTIONS: Default\n\ } /*! - Set the program options. - - \param argc : Command line number of parameters. - \param argv : Array of command line parameters. - \param opath : Output data path. - \param user : Username. - \return false if the program has to be stopped, true otherwise. -*/ + * Set the program options. + * + * \param argc : Command line number of parameters. + * \param argv : Array of command line parameters. + * \param opath : Output data path. + * \param user : Username. + * \return false if the program has to be stopped, true otherwise. + */ bool getOptions(int argc, const char **argv, std::string &opath, const std::string &user) { const char *optarg_; @@ -275,11 +254,9 @@ bool getOptions(int argc, const char **argv, std::string &opath, const std::stri return true; } -/* -------------------------------------------------------------------------- - */ - /* MAIN FUNCTION */ - /* -------------------------------------------------------------------------- - */ +/* -------------------------------------------------------------------------- */ +/* MAIN FUNCTION */ +/* -------------------------------------------------------------------------- */ int main(int argc, const char **argv) { diff --git a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h index 33d5748b08..0bb54ee988 100644 --- a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h +++ b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h @@ -47,176 +47,177 @@ #include /*! - \class vpDetectorAprilTag - \ingroup group_detection_tag - Base class for AprilTag detector. This class is a wrapper over AprilTag. There - is no need to download and install AprilTag from source code or existing - pre-built packages since the source code is embedded in ViSP. Reference papers - are AprilTag: A robust and flexible visual fiducial system - (\cite olson2011tags), AprilTag 2: Efficient and robust fiducial - detection (\cite wang2016iros) and Flexible Layouts for Fiducial Tags - (Under Review) (\cite krogius2019iros). - - The detect() function allows to detect multiple tags in an image. 
Once - detected, for each tag it is possible to retrieve the location of the corners - using getPolygon(), the encoded message using getMessage(), the bounding box - using getBBox() and the center of gravity using getCog(). - - If camera parameters and the size of the tag are provided, you can also estimate - the 3D pose of the tag in terms of position and orientation wrt the camera considering 2 cases: - - 1. If all the tags have the same size use - detect(const vpImage &, const double, const vpCameraParameters &, std::vector &) - - 2. If tag sizes differ, use rather getPose() - - The following sample code shows how to use this class to detect the location - of 36h11 AprilTag patterns in an image. -\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_APRILTAG - vpImage I; - vpImageIo::read(I, "image-tag36h11.pgm"); - - vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); - - bool status = detector.detect(I); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Tag code " << i << ":" << std::endl; - std::vector p = detector.getPolygon(i); - for(size_t j=0; j < p.size(); j++) - std::cout << " Point " << j << ": " << p[j] << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - } - } -#endif -} - \endcode - - The previous example may produce results like: - \code -Tag code 0: - Point 0: 124.008, 442.226 - Point 1: 194.614, 441.237 - Point 2: 184.833, 540.386 - Point 3: 111.948, 533.634 - Message: "36h11 id: 0" -Tag code 1: - Point 0: 245.327, 438.801 - Point 1: 338.116, 437.221 - Point 2: 339.341, 553.539 - Point 3: 238.954, 543.855 - Message: "36h11 id: 1" - \endcode - - This other example shows how to estimate the 3D pose of 36h11 AprilTag - patterns considering that all the tags have the same size (in our example 0.053 m). -\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_APRILTAG - vpImage I; - vpImageIo::read(I, "image-tag36h11.pgm"); - - vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); - std::vector cMo; - vpCameraParameters cam; - cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); - double tagSize = 0.053; - - bool status = detector.detect(I, tagSize, cam, cMo); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Tag number " << i << ":" << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - std::cout << " Pose: " << vpPoseVector(cMo[i]).t() << std::endl; - std::size_t tag_id_pos = detector.getMessage(i).find("id: "); - if (tag_id_pos != std::string::npos) { - std::string tag_id = detector.getMessage(i).substr(tag_id_pos + 4); - std::cout << " Tag Id: " << tag_id << std::endl; - } - } - } -#endif -} - \endcode - The previous example may produce results like: - \code -Tag number 0: - Message: "36h11 id: 0" - Pose: 0.1015061088 -0.05239057228 0.3549037285 1.991474322 2.04143538 -0.9412360063 - Tag Id: 0 -Tag number 1: - Message: "36h11 id: 1" - Pose: 0.08951250829 0.02243780207 0.306540622 1.998073197 2.061488008 -0.8699567948 - Tag Id: 1 -\endcode - - In this other example we estimate the 3D pose of 36h11 AprilTag - patterns considering that tag 36h11 with id 0 (in that case the tag message is "36h11 id: 0") - has a size of 0.040 m, while all the others have a size of 0.053m. 
-\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_APRILTAG - vpImage I; - vpImageIo::read(I, "image-tag36h11.pgm"); - - vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); - vpHomogeneousMatrix cMo; - vpCameraParameters cam; - cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); - double tagSize_id_0 = 0.04; - double tagSize_id_others = 0.053; - - bool status = detector.detect(I); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Tag code " << i << ":" << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - if (detector.getMessage(i) == std::string("36h11 id: 0")) { - if (! detector.getPose(i, tagSize_id_0, cam, cMo)) { - std::cout << "Unable to get tag index " << i << " pose!" << std::endl; - } - } - else { - if (! detector.getPose(i, tagSize_id_others, cam, cMo)) { - std::cout << "Unable to get tag index " << i << " pose!" << std::endl; - } - } - std::cout << " Pose: " << vpPoseVector(cMo).t() << std::endl; - } - } -#endif -} -\endcode - With respect to the previous example, this example may now produce a different pose for tag with id 0: - \code -Tag code 0: - Message: "36h11 id: 0" - Pose: 0.07660838403 -0.03954005455 0.2678518706 1.991474322 2.04143538 -0.9412360063 -Tag code 1: - Message: "36h11 id: 1" - Pose: 0.08951250829 0.02243780207 0.306540622 1.998073197 2.061488008 -0.8699567948 -\endcode - - Other examples are also provided in tutorial-apriltag-detector.cpp and - tutorial-apriltag-detector-live.cpp -*/ + * \class vpDetectorAprilTag + * \ingroup group_detection_tag + * Base class for AprilTag detector. This class is a wrapper over AprilTag. There + * is no need to download and install AprilTag from source code or existing + * pre-built packages since the source code is embedded in ViSP. Reference papers + * are AprilTag: A robust and flexible visual fiducial system + * (\cite olson2011tags), AprilTag 2: Efficient and robust fiducial + * detection (\cite wang2016iros) and Flexible Layouts for Fiducial Tags + * (Under Review) (\cite krogius2019iros). + * + * The detect() function allows to detect multiple tags in an image. Once + * detected, for each tag it is possible to retrieve the location of the corners + * using getPolygon(), the encoded message using getMessage(), the bounding box + * using getBBox() and the center of gravity using getCog(). + * + * If camera parameters and the size of the tag are provided, you can also estimate + * the 3D pose of the tag in terms of position and orientation wrt the camera considering 2 cases: + * - 1. If all the tags have the same size use + * detect(const vpImage &, const double, const vpCameraParameters &, std::vector &) + * - 2. If tag sizes differ, use rather getPose() + * + * The following sample code shows how to use this class to detect the location + * of 36h11 AprilTag patterns in an image. 
+ * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_APRILTAG + * vpImage I; + * vpImageIo::read(I, "image-tag36h11.pgm"); + * + * vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); + * + * bool status = detector.detect(I); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Tag code " << i << ":" << std::endl; + * std::vector p = detector.getPolygon(i); + * for(size_t j=0; j < p.size(); j++) + * std::cout << " Point " << j << ": " << p[j] << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * } + * } + * #endif + * } + * \endcode + * + * The previous example may produce results like: + * \code + * Tag code 0: + * Point 0: 124.008, 442.226 + * Point 1: 194.614, 441.237 + * Point 2: 184.833, 540.386 + * Point 3: 111.948, 533.634 + * Message: "36h11 id: 0" + * Tag code 1: + * Point 0: 245.327, 438.801 + * Point 1: 338.116, 437.221 + * Point 2: 339.341, 553.539 + * Point 3: 238.954, 543.855 + * Message: "36h11 id: 1" + * \endcode + * + * This other example shows how to estimate the 3D pose of 36h11 AprilTag + * patterns considering that all the tags have the same size (in our example 0.053 m). + * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_APRILTAG + * vpImage I; + * vpImageIo::read(I, "image-tag36h11.pgm"); + * + * vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); + * std::vector cMo; + * vpCameraParameters cam; + * cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); + * double tagSize = 0.053; + * + * bool status = detector.detect(I, tagSize, cam, cMo); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Tag number " << i << ":" << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * std::cout << " Pose: " << vpPoseVector(cMo[i]).t() << std::endl; + * std::size_t tag_id_pos = detector.getMessage(i).find("id: "); + * if (tag_id_pos != std::string::npos) { + * std::string tag_id = detector.getMessage(i).substr(tag_id_pos + 4); + * std::cout << " Tag Id: " << tag_id << std::endl; + * } + * } + * } + * #endif + * } + * \endcode + * The previous example may produce results like: + * \code + * Tag number 0: + * Message: "36h11 id: 0" + * Pose: 0.1015061088 -0.05239057228 0.3549037285 1.991474322 2.04143538 -0.9412360063 + * Tag Id: 0 + * Tag number 1: + * Message: "36h11 id: 1" + * Pose: 0.08951250829 0.02243780207 0.306540622 1.998073197 2.061488008 -0.8699567948 + * Tag Id: 1 + * \endcode + * + * In this other example we estimate the 3D pose of 36h11 AprilTag + * patterns considering that tag 36h11 with id 0 (in that case the tag message is "36h11 id: 0") + * has a size of 0.040 m, while all the others have a size of 0.053m. 
+ * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_APRILTAG + * vpImage I; + * vpImageIo::read(I, "image-tag36h11.pgm"); + * + * vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); + * vpHomogeneousMatrix cMo; + * vpCameraParameters cam; + * cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); + * double tagSize_id_0 = 0.04; + * double tagSize_id_others = 0.053; + * + * bool status = detector.detect(I); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Tag code " << i << ":" << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * if (detector.getMessage(i) == std::string("36h11 id: 0")) { + * if (! detector.getPose(i, tagSize_id_0, cam, cMo)) { + * std::cout << "Unable to get tag index " << i << " pose!" << std::endl; + * } + * } + * else { + * if (! detector.getPose(i, tagSize_id_others, cam, cMo)) { + * std::cout << "Unable to get tag index " << i << " pose!" << std::endl; + * } + * } + * std::cout << " Pose: " << vpPoseVector(cMo).t() << std::endl; + * } + * } + * #endif + * } + * \endcode + * With respect to the previous example, this example may now produce a different pose for tag with id 0: + * \code + * Tag code 0: + * Message: "36h11 id: 0" + * Pose: 0.07660838403 -0.03954005455 0.2678518706 1.991474322 2.04143538 -0.9412360063 + * Tag code 1: + * Message: "36h11 id: 1" + * Pose: 0.08951250829 0.02243780207 0.306540622 1.998073197 2.061488008 -0.8699567948 + * \endcode + * + * Other examples are also provided in tutorial-apriltag-detector.cpp and + * tutorial-apriltag-detector-live.cpp + */ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase { public: - enum vpAprilTagFamily { + enum vpAprilTagFamily + { TAG_36h11, ///< AprilTag 36h11 pattern (recommended) TAG_36h10, ///< DEPRECATED TAG_36ARTOOLKIT, ///< DEPRECATED AND WILL NOT DETECT ARTOOLKIT TAGS @@ -230,7 +231,8 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase TAG_STANDARD52h13 ///< AprilTag Standard52h13 pattern }; - enum vpPoseEstimationMethod { + enum vpPoseEstimationMethod + { HOMOGRAPHY, /*!< Pose from homography */ HOMOGRAPHY_VIRTUAL_VS, /*!< Non linear virtual visual servoing approach initialized by the homography approach */ @@ -248,9 +250,9 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase const vpPoseEstimationMethod &poseEstimationMethod = HOMOGRAPHY_VIRTUAL_VS); vpDetectorAprilTag(const vpDetectorAprilTag &o); vpDetectorAprilTag &operator=(vpDetectorAprilTag o); - virtual ~vpDetectorAprilTag(); + virtual ~vpDetectorAprilTag() override; - bool detect(const vpImage &I); + bool detect(const vpImage &I) override; bool detect(const vpImage &I, double tagSize, const vpCameraParameters &cam, std::vector &cMo_vec, std::vector *cMo_vec2 = NULL, std::vector *projErrors = NULL, std::vector *projErrors2 = NULL); @@ -269,8 +271,8 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase vpHomogeneousMatrix *cMo2 = NULL, double *projError = NULL, double *projError2 = NULL); /*! - Return the pose estimation method. - */ + * Return the pose estimation method. + */ inline vpPoseEstimationMethod getPoseEstimationMethod() const { return m_poseEstimationMethod; } std::vector > getTagsCorners() const; @@ -288,8 +290,10 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase void setAprilTagRefineEdges(bool refineEdges); void setAprilTagRefinePose(bool refinePose); - /*! 
Allow to enable the display of overlay tag information in the windows - * (vpDisplay) associated to the input image. */ + /*! + * Allow to enable the display of overlay tag information in the windows + * (vpDisplay) associated to the input image. + */ inline void setDisplayTag(bool display, const vpColor &color = vpColor::none, unsigned int thickness = 2) { m_displayTag = display; diff --git a/modules/detection/include/visp3/detection/vpDetectorBase.h b/modules/detection/include/visp3/detection/vpDetectorBase.h index 3f9b4e46c9..ceb1496a7c 100644 --- a/modules/detection/include/visp3/detection/vpDetectorBase.h +++ b/modules/detection/include/visp3/detection/vpDetectorBase.h @@ -46,17 +46,18 @@ #include /*! - \class vpDetectorBase - \ingroup group_detection_barcode group_detection_face - Base class for object detection. - - This class is a generic class that can be used to detect: - - bar codes like QRcodes of Data matrices. The example given in - tutorial-barcode-detector.cpp shows how to detect one or more bar codes in - an image. In tutorial-barcode-detector-live.cpp you will find an other - example that shows how to use this class to detect bar codes in images - acquired by a camera. - - faces. An example is provided in tutorial-face-detector-live.cpp. + * \class vpDetectorBase + * \ingroup group_detection_barcode group_detection_face + * Base class for object detection. + * + * This class is a generic class that can be used to detect: + * - bar codes like QRcodes of Data matrices using vpDetectorQRCode and vpDetectorDataMatrixCode classes respectively. + * The example given in tutorial-barcode-detector.cpp shows how to detect one or more bar codes in + * an image. In tutorial-barcode-detector-live.cpp you will find an other + * example that shows how to use this class to detect bar codes in images + * acquired by a camera. + * - AprilTags using vpDetectorAprilTag class + * - faces using vpDetectorFace. An example is provided in tutorial-face-detector-live.cpp. */ class VISP_EXPORT vpDetectorBase { @@ -68,19 +69,19 @@ class VISP_EXPORT vpDetectorBase public: /*! - Default constructor. + * Default constructor. */ vpDetectorBase(); vpDetectorBase(const vpDetectorBase &o); /*! - Default destructor. - */ - virtual ~vpDetectorBase() {} + * Default destructor. + */ + virtual ~vpDetectorBase() { } /*! - Detect objects in an image. - \param I : Image where to detect objects. - \return true if one or multiple objects are detected, false otherwise. + * Detect objects in an image. + * \param I : Image where to detect objects. + * \return true if one or multiple objects are detected, false otherwise. */ virtual bool detect(const vpImage &I) = 0; @@ -88,41 +89,43 @@ class VISP_EXPORT vpDetectorBase //@{ /*! - Return the bounding box of the ith object. + * Return the bounding box of the ith object. */ vpRect getBBox(size_t i) const; /*! - Return the center of gravity location of the ith object. + * Return the center of gravity location of the ith object. */ vpImagePoint getCog(size_t i) const; /*! - Returns the contained message of the ith object if there is one. + * Returns the contained message of the ith object if there is one. */ std::vector &getMessage() { return m_message; } /*! - Returns the contained message of the ith object if there is one. + * Returns the contained message of the ith object if there is one. */ std::string &getMessage(size_t i); /*! - Return the number of objects that are detected. - */ + * Return the number of objects that are detected. 
+ */ size_t getNbObjects() const { return m_nb_objects; } /*! - Returns object container box as a vector of points. + * Returns object container box as a vector of points. */ std::vector > &getPolygon() { return m_polygon; } /*! - Returns ith object container box as a vector of points. + * Returns ith object container box as a vector of points. */ std::vector &getPolygon(size_t i); - /*! Set detector timeout in milli-seconds. When set to 0, there is no timeout. */ + /*! + * Set detector timeout in milli-seconds. When set to 0, there is no timeout. + */ inline void setTimeout(unsigned long timeout_ms) { m_timeout_ms = timeout_ms; } //@} diff --git a/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h b/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h index 09c8ef7603..4cdc12fb22 100644 --- a/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h +++ b/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * DNN object detection using OpenCV DNN module. - * -*****************************************************************************/ + */ #ifndef _vpDetectorDNN_h_ #define _vpDetectorDNN_h_ @@ -56,27 +54,27 @@ using json = nlohmann::json; //! json namespace shortcut #endif /*! - \class vpDetectorDNNOpenCV - \ingroup group_detection_dnn - This class is a wrapper over the - OpenCV DNN module and specialized to handle object detection task. - - This class supports the following networks dedicated to object detection: - - - Faster-RCNN, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_faster_rcnn network - - SSD MobileNet, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_mobilenet_ssd network - - ResNet 10, see usage for \ref dnn_usecase_face_detection - - Yolo v3, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov3 network - - Yolo v4, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov4 network - - Yolo v5, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov5 network - - Yolo v7, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov7 network - - Yolo v8, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov8 network - - This class can be initialized from a JSON file if ViSP has been compiled with NLOHMANN JSON (see \ref soft_tool_json to see how to do it). - Examples of such JSON files can be found in the tutorial folder. - - \sa \ref tutorial-detection-dnn -*/ + * \class vpDetectorDNNOpenCV + * \ingroup group_detection_dnn + * This class is a wrapper over the + * OpenCV DNN module and specialized to handle object detection task. 
+ * + * This class supports the following networks dedicated to object detection: + * + * - Faster-RCNN, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_faster_rcnn network + * - SSD MobileNet, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_mobilenet_ssd network + * - ResNet 10, see usage for \ref dnn_usecase_face_detection + * - Yolo v3, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov3 network + * - Yolo v4, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov4 network + * - Yolo v5, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov5 network + * - Yolo v7, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov7 network + * - Yolo v8, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov8 network + * + * This class can be initialized from a JSON file if ViSP has been compiled with NLOHMANN JSON (see \ref soft_tool_json to see how to do it). + * Examples of such JSON files can be found in the tutorial folder. + * + * \sa \ref tutorial-detection-dnn + */ class VISP_EXPORT vpDetectorDNNOpenCV { public: diff --git a/modules/detection/include/visp3/detection/vpDetectorDataMatrixCode.h b/modules/detection/include/visp3/detection/vpDetectorDataMatrixCode.h index ef8993ffff..a7005566e3 100644 --- a/modules/detection/include/visp3/detection/vpDetectorDataMatrixCode.h +++ b/modules/detection/include/visp3/detection/vpDetectorDataMatrixCode.h @@ -48,71 +48,69 @@ #include /*! - \class vpDetectorDataMatrixCode - \ingroup group_detection_barcode - Base class for bar code detector. This class is a wrapper over libdmtx - available from http://www.libdmtx.org. Installation instructions are - provided here https://visp.inria.fr/3rd_dmtx. - - The detect() function allows to detect multiple QR codes in an image. Once - detected, for each QR code it is possible to retrieve the location of the - corners using getPolygon(), the encoded message using getMessage(), the - bounding box using getBBox() and the center of gravity using getCog(). - - The following sample code shows how to use this class to detect QR codes in - an image. -\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_DMTX - vpImage I; - vpImageIo::read(I, "bar-code.pgm"); - - vpDetectorDataMatrixCode detector; - - bool status = detector.detect(I); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Bar code " << i << ":" << std::endl; - std::vector p = detector.getPolygon(i); - for(size_t j=0; j < p.size(); j++) - std::cout << " Point " << j << ": " << p[j] << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - } - } -#endif -} - \endcode - - The previous example may produce results like: - \code -Bar code 0: - Point 0: 273.21, 78.9799 - Point 1: 390.016, 85.1014 - Point 2: 388.024, 199.185 - Point 3: 269.23, 192.96 - Message: "datamatrix 1" -Bar code 1: - Point 0: 262.23, 396.404 - Point 1: 381.041, 402.631 - Point 2: 378.92, 524.188 - Point 3: 257.916, 519.962 - Message: "datamatrix 2" - \endcode - - Other examples are also provided in tutorial-barcode-detector.cpp and - tutorial-barcode-detector-live.cpp - + * \class vpDetectorDataMatrixCode + * \ingroup group_detection_barcode + * Base class for bar code detector. This class is a wrapper over libdmtx + * available from http://www.libdmtx.org. 
Installation instructions are + * provided here https://visp.inria.fr/3rd_dmtx. + * + * The detect() function allows to detect multiple QR codes in an image. Once + * detected, for each QR code it is possible to retrieve the location of the + * corners using getPolygon(), the encoded message using getMessage(), the + * bounding box using getBBox() and the center of gravity using getCog(). + * + * The following sample code shows how to use this class to detect QR codes in + * an image. + * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_DMTX + * vpImage I; + * vpImageIo::read(I, "bar-code.pgm"); + * + * vpDetectorDataMatrixCode detector; + * + * bool status = detector.detect(I); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Bar code " << i << ":" << std::endl; + * std::vector p = detector.getPolygon(i); + * for(size_t j=0; j < p.size(); j++) + * std::cout << " Point " << j << ": " << p[j] << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * } + * } + * #endif + * } + * \endcode + * + * The previous example may produce results like: + * \code + * Bar code 0: + * Point 0: 273.21, 78.9799 + * Point 1: 390.016, 85.1014 + * Point 2: 388.024, 199.185 + * Point 3: 269.23, 192.96 + * Message: "datamatrix 1" + * Bar code 1: + * Point 0: 262.23, 396.404 + * Point 1: 381.041, 402.631 + * Point 2: 378.92, 524.188 + * Point 3: 257.916, 519.962 + * Message: "datamatrix 2" + * \endcode + * + * Other examples are also provided in tutorial-barcode-detector.cpp and + * tutorial-barcode-detector-live.cpp */ class VISP_EXPORT vpDetectorDataMatrixCode : public vpDetectorBase { public: vpDetectorDataMatrixCode(); - virtual ~vpDetectorDataMatrixCode(){}; - bool detect(const vpImage &I); + bool detect(const vpImage &I) override; }; #endif diff --git a/modules/detection/include/visp3/detection/vpDetectorFace.h b/modules/detection/include/visp3/detection/vpDetectorFace.h index 2a2fae5b45..313aab4d9a 100644 --- a/modules/detection/include/visp3/detection/vpDetectorFace.h +++ b/modules/detection/include/visp3/detection/vpDetectorFace.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Detect faces. - * -*****************************************************************************/ + */ #ifndef _vpDetectorFace_h_ #define _vpDetectorFace_h_ @@ -49,40 +47,40 @@ #include /*! - \class vpDetectorFace - \ingroup group_detection_face - The vpDetectorFace class is a wrapper over OpenCV Haar cascade face - detection capabilities. To use this class ViSP should be build against - OpenCV 2.2.0 or a more recent version. Installation instructions are provided - here https://visp.inria.fr/3rd_opencv. - - The following sample code shows how to use this class to detect the largest - face in the image. The cascade classifier file - "haarcascade_frontalface_alt.xml" can be found in ViSP source code or in - OpenCV. 
-\code -#include - -int main() -{ -#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_OBJDETECT) - vpImage I; - vpDetectorFace face_detector; - face_detector.setCascadeClassifierFile("haarcascade_frontalface_alt.xml"); - - while(1) { - // Acquire a new image in I - bool face_found = face_detector.detect(I); - if (face_found) { - vpRect face_bbox = face_detector.getBBox(0); // largest face has index 0 - } - } -#endif -} - \endcode - - A more complete example that works with images acquired from a camera is - provided in tutorial-face-detector-live.cpp. + * \class vpDetectorFace + * \ingroup group_detection_face + * The vpDetectorFace class is a wrapper over OpenCV Haar cascade face + * detection capabilities. To use this class ViSP should be build against + * OpenCV 2.2.0 or a more recent version. Installation instructions are provided + * here https://visp.inria.fr/3rd_opencv. + * + * The following sample code shows how to use this class to detect the largest + * face in the image. The cascade classifier file + * "haarcascade_frontalface_alt.xml" can be found in ViSP source code or in + * OpenCV. + * \code + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_OBJDETECT) + * vpImage I; + * vpDetectorFace face_detector; + * face_detector.setCascadeClassifierFile("haarcascade_frontalface_alt.xml"); + * + * while(1) { + * // Acquire a new image in I + * bool face_found = face_detector.detect(I); + * if (face_found) { + * vpRect face_bbox = face_detector.getBBox(0); // largest face has index 0 + * } + * } + * #endif + * } + * \endcode + * + * A more complete example that works with images acquired from a camera is + * provided in tutorial-face-detector-live.cpp. */ class VISP_EXPORT vpDetectorFace : public vpDetectorBase { @@ -93,12 +91,8 @@ class VISP_EXPORT vpDetectorFace : public vpDetectorBase public: vpDetectorFace(); - /*! - Default destructor. - */ - virtual ~vpDetectorFace() { } - bool detect(const vpImage &I); + bool detect(const vpImage &I) override; bool detect(const cv::Mat &frame_gray); void setCascadeClassifierFile(const std::string &filename); }; diff --git a/modules/detection/include/visp3/detection/vpDetectorQRCode.h b/modules/detection/include/visp3/detection/vpDetectorQRCode.h index 9650c0ff83..a2942cee5a 100644 --- a/modules/detection/include/visp3/detection/vpDetectorQRCode.h +++ b/modules/detection/include/visp3/detection/vpDetectorQRCode.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Base class for bar code detection. - * -*****************************************************************************/ + */ #ifndef _vpDetectorQRCode_h_ #define _vpDetectorQRCode_h_ @@ -50,63 +48,63 @@ #include /*! - \class vpDetectorQRCode - \ingroup group_detection_barcode - Base class for bar code detector. This class is a wrapper over libzbar - available from http://zbar.sourceforge.net. Installation instructions are -provided here https://visp.inria.fr/3rd_zbar. - - The detect() function allows to detect multiple QR codes in an image. Once -detected, for each QR code it is possible to retrieve the location of the -corners using getPolygon(), the encoded message using getMessage(), the -bounding box using getBBox() and the center of gravity using getCog(). 
- - The following sample code shows how to use this class to detect QR codes in -an image. -\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_ZBAR - vpImage I; - vpImageIo::read(I, "bar-code.pgm"); - - vpDetectorQRCode detector; - - bool status = detector.detect(I); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Bar code " << i << ":" << std::endl; - std::vector p = detector.getPolygon(i); - for(size_t j=0; j < p.size(); j++) - std::cout << " Point " << j << ": " << p[j] << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - } - } -#endif -} - \endcode - - The previous example may produce results like: - \code -Bar code 0: - Point 0: 48, 212 - Point 1: 57, 84 - Point 2: 188, 92 - Point 3: 183, 220 - Message: "qrcode 2" -Bar code 1: - Point 0: 26, 550 - Point 1: 35, 409 - Point 2: 174, 414 - Point 3: 167, 555 - Message: "qrcode 1" - \endcode - - Other examples are also provided in tutorial-barcode-detector.cpp and - tutorial-barcode-detector-live.cpp + * \class vpDetectorQRCode + * \ingroup group_detection_barcode + * Base class for bar code detector. This class is a wrapper over libzbar + * available from http://zbar.sourceforge.net. Installation instructions are + * provided here https://visp.inria.fr/3rd_zbar. + * + * The detect() function allows to detect multiple QR codes in an image. Once + * detected, for each QR code it is possible to retrieve the location of the + * corners using getPolygon(), the encoded message using getMessage(), the + * bounding box using getBBox() and the center of gravity using getCog(). + * + * The following sample code shows how to use this class to detect QR codes in + * an image. + * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_ZBAR + * vpImage I; + * vpImageIo::read(I, "bar-code.pgm"); + * + * vpDetectorQRCode detector; + * + * bool status = detector.detect(I); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Bar code " << i << ":" << std::endl; + * std::vector p = detector.getPolygon(i); + * for(size_t j=0; j < p.size(); j++) + * std::cout << " Point " << j << ": " << p[j] << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * } + * } + * #endif + * } + * \endcode + * + * The previous example may produce results like: + * \code + * Bar code 0: + * Point 0: 48, 212 + * Point 1: 57, 84 + * Point 2: 188, 92 + * Point 3: 183, 220 + * Message: "qrcode 2" + * Bar code 1: + * Point 0: 26, 550 + * Point 1: 35, 409 + * Point 2: 174, 414 + * Point 3: 167, 555 + * Message: "qrcode 1" + * \endcode + * + * Other examples are also provided in tutorial-barcode-detector.cpp and + * tutorial-barcode-detector-live.cpp */ class VISP_EXPORT vpDetectorQRCode : public vpDetectorBase { @@ -115,8 +113,7 @@ class VISP_EXPORT vpDetectorQRCode : public vpDetectorBase public: vpDetectorQRCode(); - virtual ~vpDetectorQRCode() {} - bool detect(const vpImage &I); + bool detect(const vpImage &I) override; }; #endif diff --git a/modules/gui/include/visp3/gui/vpD3DRenderer.h b/modules/gui/include/visp3/gui/vpD3DRenderer.h index 6826dc71d3..06dcb884fc 100644 --- a/modules/gui/include/visp3/gui/vpD3DRenderer.h +++ b/modules/gui/include/visp3/gui/vpD3DRenderer.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. 
* @@ -30,11 +29,7 @@ * * Description: * D3D renderer for windows 32 display - * - * Authors: - * Bruno Renier - * -*****************************************************************************/ + */ #ifndef DOXYGEN_SHOULD_SKIP_THIS @@ -56,15 +51,13 @@ #include /*! - \class vpD3DRenderer.h - - \brief Display under windows using Direct3D9. - Is used by vpDisplayD3D to do the drawing. - -*/ + * \class vpD3DRenderer.h + * + * \brief Display under windows using Direct3D9. + * Is used by vpDisplayD3D to do the drawing. + */ class VISP_EXPORT vpD3DRenderer : public vpWin32Renderer { - IDirect3D9 *pD3D; // The d3d device we will be working with. @@ -87,7 +80,7 @@ class VISP_EXPORT vpD3DRenderer : public vpWin32Renderer // The window's handle. HWND hWnd; - // Colors for overlay drawn with d3d directly. + // Colors for overlay drawn with d3d directly. unsigned long colors[vpColor::id_unknown]; // Colors for overlay drawn with GDI. @@ -101,7 +94,7 @@ class VISP_EXPORT vpD3DRenderer : public vpWin32Renderer bool render(); vpD3DRenderer(); - virtual ~vpD3DRenderer(); + virtual ~vpD3DRenderer() override; void setImg(const vpImage &im); void setImg(const vpImage &im); @@ -128,7 +121,7 @@ class VISP_EXPORT vpD3DRenderer : public vpWin32Renderer void drawArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int w, unsigned int h, unsigned int thickness = 1); - void getImage(vpImage &I); + void getImage(vpImage &I) override; private: void initView(float, float); diff --git a/modules/gui/include/visp3/gui/vpDisplayD3D.h b/modules/gui/include/visp3/gui/vpDisplayD3D.h index 169a3c945c..b2cde98f8b 100644 --- a/modules/gui/include/visp3/gui/vpDisplayD3D.h +++ b/modules/gui/include/visp3/gui/vpDisplayD3D.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Windows 32 display using D3D - * - * Authors: - * Bruno Renier - * -*****************************************************************************/ + */ #include #include @@ -46,62 +41,62 @@ #include /*! - \class vpDisplayD3D - - \ingroup group_gui_display - - \brief Display for windows using Direct3D 3rd party. Thus to enable this -class Direct3D should be installed. Installation instructions are provided -here https://visp.inria.fr/3rd_d3d/ - - Direct3D is part of the DirectX API available under Windows - operating systems. - - \warning Requires DirectX9 SDK to compile and DirectX9 DLLs to run. - - The example below shows how to display an image with this video device. - \code -#include -#include - -int main() -{ -#if defined(VISP_HAVE_D3D9) - vpImage I; // Grey level image - - // Read an image in PGM P5 format - vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); - - vpDisplayD3D d; - - // Initialize the display with the image I. Display and image are - // now link together. 
- d.init(I); - - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My Direct 3D display"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(10); - topLeftCorner.set_j(20); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::red, true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Wait for a click in the display window - vpDisplay::getClick(I); -#endif -} - \endcode -*/ + * \class vpDisplayD3D + * + * \ingroup group_gui_display + * + * \brief Display for windows using Direct3D 3rd party. Thus to enable this + * class Direct3D should be installed. Installation instructions are provided + * here https://visp.inria.fr/3rd_d3d/ + * + * Direct3D is part of the DirectX API available under Windows + * operating systems. + * + * \warning Requires DirectX9 SDK to compile and DirectX9 DLLs to run. + * + * The example below shows how to display an image with this video device. + * \code + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_D3D9) + * vpImage I; // Grey level image + * + * // Read an image in PGM P5 format + * vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); + * + * vpDisplayD3D d; + * + * // Initialize the display with the image I. Display and image are + * // now link together. + * d.init(I); + * + * // Specify the window location + * vpDisplay::setWindowPosition(I, 400, 100); + * + * // Set the display window title + * vpDisplay::setTitle(I, "My Direct 3D display"); + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpImagePoint topLeftCorner; + * topLeftCorner.set_i(10); + * topLeftCorner.set_j(20); + * vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::red, true); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // Wait for a click in the display window + * vpDisplay::getClick(I); + * #endif + * } + * \endcode + */ class VISP_EXPORT vpDisplayD3D : public vpDisplayWin32 { public: @@ -114,7 +109,6 @@ class VISP_EXPORT vpDisplayD3D : public vpDisplayWin32 vpDisplayD3D(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "", vpScaleType type = SCALE_DEFAULT); - virtual ~vpDisplayD3D(); }; #endif #endif diff --git a/modules/gui/include/visp3/gui/vpDisplayGDI.h b/modules/gui/include/visp3/gui/vpDisplayGDI.h index 883ef2ff1d..597b893753 100644 --- a/modules/gui/include/visp3/gui/vpDisplayGDI.h +++ b/modules/gui/include/visp3/gui/vpDisplayGDI.h @@ -46,84 +46,84 @@ #include /*! - \class vpDisplayGDI - - \ingroup group_gui_display - - \brief Display for windows using GDI (available on any windows 32 platform). - - GDI stands for Graphics Device Interface and is a core component of -Microsoft Windows operating systems used for displaying graphics in a window. - - The example below shows how to display an image with this video device. - \code -#include -#include - -int main() -{ -#if defined(VISP_HAVE_GDI) - vpImage I; // Grey level image - - // Read an image in PGM P5 format -#ifdef _WIN32 - vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); -#else - vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); -#endif - - vpDisplayGDI d; - - // Initialize the display with the image I. 
Display and image are - // now link together. - d.init(I); - - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My GDI display"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(50); - topLeftCorner.set_j(10); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Get non blocking keyboard events - std::cout << "Check keyboard events..." << std::endl; - char key[10]; - bool ret; - for (int i=0; i< 200; i++) { - bool ret = vpDisplay::getKeyboardEvent(I, key, false); - if (ret) - std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; - vpTime::wait(40); - } - - // Get a blocking keyboard event - std::cout << "Wait for a keyboard event..." << std::endl; - ret = vpDisplay::getKeyboardEvent(I, key, true); - std::cout << "keyboard event: " << ret << std::endl; - if (ret) - std::cout << "key: " << "\"" << key << "\"" << std::endl; - - // Wait for a click in the display window - std::cout << "Wait for a button click..." << std::endl; - vpDisplay::getClick(I); -#endif -} - \endcode -*/ + * \class vpDisplayGDI + * + * \ingroup group_gui_display + * + * \brief Display for windows using GDI (available on any windows 32 platform). + * + * GDI stands for Graphics Device Interface and is a core component of + * Microsoft Windows operating systems used for displaying graphics in a window. + * + * The example below shows how to display an image with this video device. + * \code + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_GDI) + * vpImage I; // Grey level image + * + * // Read an image in PGM P5 format + * #ifdef _WIN32 + * vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); + * #else + * vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); + * #endif + * + * vpDisplayGDI d; + * + * // Initialize the display with the image I. Display and image are + * // now link together. + * d.init(I); + * + * // Specify the window location + * vpDisplay::setWindowPosition(I, 400, 100); + * + * // Set the display window title + * vpDisplay::setTitle(I, "My GDI display"); + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpImagePoint topLeftCorner; + * topLeftCorner.set_i(50); + * topLeftCorner.set_j(10); + * vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // Get non blocking keyboard events + * std::cout << "Check keyboard events..." << std::endl; + * char key[10]; + * bool ret; + * for (int i=0; i< 200; i++) { + * bool ret = vpDisplay::getKeyboardEvent(I, key, false); + * if (ret) + * std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; + * vpTime::wait(40); + * } + * + * // Get a blocking keyboard event + * std::cout << "Wait for a keyboard event..." 
<< std::endl; + * ret = vpDisplay::getKeyboardEvent(I, key, true); + * std::cout << "keyboard event: " << ret << std::endl; + * if (ret) + * std::cout << "key: " << "\"" << key << "\"" << std::endl; + * + * // Wait for a click in the display window + * std::cout << "Wait for a button click..." << std::endl; + * vpDisplay::getClick(I); + * #endif + * } + * \endcode + */ class VISP_EXPORT vpDisplayGDI : public vpDisplayWin32 { public: @@ -135,8 +135,6 @@ class VISP_EXPORT vpDisplayGDI : public vpDisplayWin32 vpDisplayGDI(vpImage &I, vpScaleType type); vpDisplayGDI(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "", vpScaleType type = SCALE_DEFAULT); - - virtual ~vpDisplayGDI(); }; #endif diff --git a/modules/gui/include/visp3/gui/vpDisplayGTK.h b/modules/gui/include/visp3/gui/vpDisplayGTK.h index b0fcda5406..cef6d6611a 100644 --- a/modules/gui/include/visp3/gui/vpDisplayGTK.h +++ b/modules/gui/include/visp3/gui/vpDisplayGTK.h @@ -127,7 +127,8 @@ class VISP_EXPORT vpDisplayGTK : public vpDisplay { private: - typedef enum { + typedef enum + { id_black = 0, id_white, id_lightGray, @@ -159,66 +160,66 @@ class VISP_EXPORT vpDisplayGTK : public vpDisplay vpDisplayGTK(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "", vpScaleType type = SCALE_DEFAULT); - virtual ~vpDisplayGTK(); + virtual ~vpDisplayGTK() override; - void getImage(vpImage &I); + void getImage(vpImage &I) override; unsigned int getScreenDepth(); - unsigned int getScreenHeight(); - void getScreenSize(unsigned int &screen_width, unsigned int &screen_height); - unsigned int getScreenWidth(); + unsigned int getScreenHeight() override; + void getScreenSize(unsigned int &screen_width, unsigned int &screen_height) override; + unsigned int getScreenWidth() override; - void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = ""); - void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = ""); + void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "") override; + void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "") override; void init(unsigned int win_width, unsigned int win_height, int win_x = -1, int win_y = -1, - const std::string &win_title = ""); + const std::string &win_title = "") override; protected: - void setFont(const std::string &fontname); - void setTitle(const std::string &win_title); - void setWindowPosition(int win_x, int win_y); + void setFont(const std::string &fontname) override; + void setTitle(const std::string &win_title) override; + void setWindowPosition(int win_x, int win_y) override; - void clearDisplay(const vpColor &color = vpColor::white); + void clearDisplay(const vpColor &color = vpColor::white) override; - void closeDisplay(); + void closeDisplay() override; void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, - unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1); - void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green); + unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) override; + void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) override; void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, - unsigned int thickness = 1); - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor 
&color, unsigned int thickness = 1); + unsigned int thickness = 1) override; + void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1); + unsigned int thickness = 1) override; - void displayImage(const vpImage &I); - void displayImage(const vpImage &I); + void displayImage(const vpImage &I) override; + void displayImage(const vpImage &I) override; void displayImage(const unsigned char *I); void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, - unsigned int height); - void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height); + unsigned int height) override; + void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1); + void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override; - void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1); + void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, - bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, - bool fill = false, unsigned int thickness = 1); - void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1); - - void flushDisplay(); - void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height); - - bool getClick(bool blocking = true); - bool getClick(vpImagePoint &ip, bool blocking = true); - bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - bool getKeyboardEvent(bool blocking = true); - bool getKeyboardEvent(std::string &key, bool blocking = true); - bool getPointerMotionEvent(vpImagePoint &ip); - bool getPointerPosition(vpImagePoint &ip); + bool fill = false, unsigned int thickness = 1) override; + void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) override; + + void flushDisplay() override; + void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) override; + + bool getClick(bool blocking = true) override; + bool getClick(vpImagePoint &ip, bool blocking = true) override; + bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + bool getKeyboardEvent(bool blocking = true) override; + bool getKeyboardEvent(std::string &key, bool blocking = true) override; + bool getPointerMotionEvent(vpImagePoint &ip) override; + bool getPointerPosition(vpImagePoint &ip) override; private: // Implementation diff --git a/modules/gui/include/visp3/gui/vpDisplayOpenCV.h b/modules/gui/include/visp3/gui/vpDisplayOpenCV.h index 
538cf5a176..1fdff217ee 100644 --- a/modules/gui/include/visp3/gui/vpDisplayOpenCV.h +++ b/modules/gui/include/visp3/gui/vpDisplayOpenCV.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Image display. - * -*****************************************************************************/ + */ #ifndef _vpDisplayOpenCV_h_ #define _vpDisplayOpenCV_h_ @@ -52,92 +50,90 @@ #include /*! - \file vpDisplayOpenCV.h - \brief Define the OpenCV console to display images. -*/ + * \file vpDisplayOpenCV.h + * \brief Define the OpenCV console to display images. + */ /*! - - \class vpDisplayOpenCV - - \ingroup group_gui_display - - \brief The vpDisplayOpenCV allows to display image using the OpenCV library. - Thus to enable this class OpenCV should be installed. Installation - instructions are provided here https://visp.inria.fr/3rd_opencv. - - \warning Since ViSP 3.3.1 or higher we introduce the alpha channel support for color - transparency. This new feature is only supported yet using vpDisplayOpenCV. See vpColor - header documentation and displayOpenCV.cpp example for usage displaying filled - transparent circles and rectangles. - - The example below shows how to display an image with this video device. - \code -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_OPENCV) - vpImage I; // Grey level image - - // Read an image in PGM P5 format - vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); - - vpDisplayOpenCV d; - - // Initialize the display with the image I. Display and image are - // now link together. - d.init(I); - - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My OpenCV display"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(10); - topLeftCorner.set_j(50); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Get non blocking keyboard events - std::cout << "Check keyboard events..." << std::endl; - char key[10]; - bool ret; - for (int i=0; i< 200; i++) { - bool ret = vpDisplay::getKeyboardEvent(I, key, false); - if (ret) - std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; - vpTime::wait(40); - } - - // Get a blocking keyboard event - std::cout << "Wait for a keyboard event..." << std::endl; - ret = vpDisplay::getKeyboardEvent(I, key, true); - std::cout << "keyboard event: " << ret << std::endl; - if (ret) - std::cout << "key: " << "\"" << key << "\"" << std::endl; - - // Wait for a click in the display window - std::cout << "Wait for a button click..." << std::endl; - vpDisplay::getClick(I); -#endif -} - \endcode -*/ - + * \class vpDisplayOpenCV + * + * \ingroup group_gui_display + * + * \brief The vpDisplayOpenCV allows to display image using the OpenCV library. + * Thus to enable this class OpenCV should be installed. Installation + * instructions are provided here https://visp.inria.fr/3rd_opencv. 
+ * + * \warning Since ViSP 3.3.1 or higher we introduce the alpha channel support for color + * transparency. This new feature is only supported yet using vpDisplayOpenCV. See vpColor + * header documentation and displayOpenCV.cpp example for usage displaying filled + * transparent circles and rectangles. + * + * The example below shows how to display an image with this video device. + * \code + * #include + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_OPENCV) + * vpImage I; // Grey level image + * + * // Read an image in PGM P5 format + * vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); + * + * vpDisplayOpenCV d; + * + * // Initialize the display with the image I. Display and image are + * // now link together. + * d.init(I); + * + * // Specify the window location + * vpDisplay::setWindowPosition(I, 400, 100); + * + * // Set the display window title + * vpDisplay::setTitle(I, "My OpenCV display"); + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpImagePoint topLeftCorner; + * topLeftCorner.set_i(10); + * topLeftCorner.set_j(50); + * vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // Get non blocking keyboard events + * std::cout << "Check keyboard events..." << std::endl; + * char key[10]; + * bool ret; + * for (int i=0; i< 200; i++) { + * bool ret = vpDisplay::getKeyboardEvent(I, key, false); + * if (ret) + * std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; + * vpTime::wait(40); + * } + * + * // Get a blocking keyboard event + * std::cout << "Wait for a keyboard event..." << std::endl; + * ret = vpDisplay::getKeyboardEvent(I, key, true); + * std::cout << "keyboard event: " << ret << std::endl; + * if (ret) + * std::cout << "key: " << "\"" << key << "\"" << std::endl; + * + * // Wait for a click in the display window + * std::cout << "Wait for a button click..." 
<< std::endl; + * vpDisplay::getClick(I); + * #endif + * } + * \endcode + */ class VISP_EXPORT vpDisplayOpenCV : public vpDisplay { private: @@ -208,66 +204,66 @@ class VISP_EXPORT vpDisplayOpenCV : public vpDisplay vpDisplayOpenCV(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "", vpScaleType type = SCALE_DEFAULT); - virtual ~vpDisplayOpenCV(); + virtual ~vpDisplayOpenCV() override; - void getImage(vpImage &I); - unsigned int getScreenHeight(); - void getScreenSize(unsigned int &width, unsigned int &height); - unsigned int getScreenWidth(); + void getImage(vpImage &I) override; + unsigned int getScreenHeight() override; + void getScreenSize(unsigned int &width, unsigned int &height) override; + unsigned int getScreenWidth() override; - void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - void init(unsigned int width, unsigned int height, int winx = -1, int winy = -1, const std::string &title = ""); + void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "") override; + void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "") override; + void init(unsigned int width, unsigned int height, int winx = -1, int winy = -1, const std::string &title = "") override; protected: - void setFont(const std::string &font); - void setTitle(const std::string &title); - void setWindowPosition(int winx, int winy); + void setFont(const std::string &font) override; + void setTitle(const std::string &title) override; + void setWindowPosition(int winx, int winy) override; - void clearDisplay(const vpColor &color = vpColor::white); + void clearDisplay(const vpColor &color = vpColor::white) override; - void closeDisplay(); + void closeDisplay() override; void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, - unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1); + unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) override; - void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green); + void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) override; void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, - unsigned int thickness = 1); - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1); + unsigned int thickness = 1) override; + void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1); + unsigned int thickness = 1) override; - void displayImage(const vpImage &I); - void displayImage(const vpImage &I); + void displayImage(const vpImage &I) override; + void displayImage(const vpImage &I) override; void displayImage(const unsigned char *I); void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, - unsigned int height); - void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height); + unsigned int height) override; + void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void displayLine(const vpImagePoint &ip1, const vpImagePoint 
&ip2, const vpColor &color, unsigned int thickness = 1);
-  void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1);
+  void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override;
+  void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) override;
 
   void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color,
-                        bool fill = false, unsigned int thickness = 1);
+                        bool fill = false, unsigned int thickness = 1) override;
   void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color,
-                        bool fill = false, unsigned int thickness = 1);
-  void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1);
+                        bool fill = false, unsigned int thickness = 1) override;
+  void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) override;
 
-  void flushDisplay();
-  void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height);
+  void flushDisplay() override;
+  void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) override;
 
-  bool getClick(bool blocking = true);
-  bool getClick(vpImagePoint &ip, bool blocking = true);
-  bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true);
-  bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true);
+  bool getClick(bool blocking = true) override;
+  bool getClick(vpImagePoint &ip, bool blocking = true) override;
+  bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override;
+  bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override;
 
-  bool getKeyboardEvent(bool blocking = true);
-  bool getKeyboardEvent(std::string &key, bool blocking = true);
-  bool getPointerMotionEvent(vpImagePoint &ip);
-  bool getPointerPosition(vpImagePoint &ip);
+  bool getKeyboardEvent(bool blocking = true) override;
+  bool getKeyboardEvent(std::string &key, bool blocking = true) override;
+  bool getPointerMotionEvent(vpImagePoint &ip) override;
+  bool getPointerPosition(vpImagePoint &ip) override;
 
   static void on_mouse(int event, int x, int y, int flags, void *param);
 
diff --git a/modules/gui/include/visp3/gui/vpDisplayWin32.h b/modules/gui/include/visp3/gui/vpDisplayWin32.h
index 92a1689f71..3d3c31f0fc 100644
--- a/modules/gui/include/visp3/gui/vpDisplayWin32.h
+++ b/modules/gui/include/visp3/gui/vpDisplayWin32.h
@@ -58,10 +58,11 @@
 #ifndef DOXYGEN_SHOULD_SKIP_THIS
 
 /*!
-  Used to pass parameters to the window's thread.
-*/
-struct threadParam {
-  //! Pointer to the display associated with the window.
+ * Used to pass parameters to the window's thread.
+ */
+struct threadParam
+{
+  //! Pointer to the display associated with the window.
   vpDisplayWin32 *vpDisp;
 
   //! X position of the window.
@@ -82,17 +83,15 @@ struct threadParam {
 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
 
 /*!
-  \class vpDisplayWin32
-
-  \brief Base abstract class for Windows 32 displays.
-  Implements the window creation in a separate thread
-  and the associated event handling functions for
-  Windows 32 displays.
-  Uses calls to a renderer to do some display.
-  (i.e. 
all display methods are implemented in the renderer) - - \author Bruno Renier -*/ + * \class vpDisplayWin32 + * + * \brief Base abstract class for Windows 32 displays. + * Implements the window creation in a separate thread + * and the associated event handling functions for + * Windows 32 displays. + * Uses calls to a renderer to do some display. + * (i.e. all display methods are implemented in the renderer) + */ class VISP_EXPORT vpDisplayWin32 : public vpDisplay { protected: @@ -124,74 +123,72 @@ class VISP_EXPORT vpDisplayWin32 : public vpDisplay vpDisplayWin32(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - virtual ~vpDisplayWin32(); + virtual ~vpDisplayWin32() override; - void clearDisplay(const vpColor &color = vpColor::white); - void closeDisplay(); - void displayImage(const vpImage &I); - void displayImage(const vpImage &I); + void clearDisplay(const vpColor &color = vpColor::white) override; + void closeDisplay() override; + void displayImage(const vpImage &I) override; + void displayImage(const vpImage &I) override; void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, - unsigned int height); - void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height); + unsigned int height) override; + void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void flushDisplay(); - void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height); + void flushDisplay() override; + void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void getImage(vpImage &I); - unsigned int getScreenHeight(); - void getScreenSize(unsigned int &width, unsigned int &height); - unsigned int getScreenWidth(); + void getImage(vpImage &I) override; + unsigned int getScreenHeight() override; + void getScreenSize(unsigned int &width, unsigned int &height) override; + unsigned int getScreenWidth() override; - void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - void init(unsigned int width, unsigned int height, int winx = -1, int winy = -1, const std::string &title = ""); + void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "") override; + void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "") override; + void init(unsigned int width, unsigned int height, int winx = -1, int winy = -1, const std::string &title = "") override; - void setFont(const std::string &fontname); + void setFont(const std::string &fontname) override; void setDownScalingFactor(unsigned int scale) { window.setScale(scale); m_scale = scale; } void setDownScalingFactor(vpScaleType scaleType) { m_scaleType = scaleType; } - void setTitle(const std::string &windowtitle); - void setWindowPosition(int winx, int winy); + void setTitle(const std::string &windowtitle) override; + void setWindowPosition(int winx, int winy) override; protected: void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, - unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1); + unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) override; - void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green); + void displayCharString(const vpImagePoint &ip, const char *text, 
const vpColor &color = vpColor::green) override;

   void displayCircle(const vpImagePoint &center, unsigned int radius, const vpColor &color, bool fill = false,
-                     unsigned int thickness = 1);
+                     unsigned int thickness = 1) override;

-  void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1);
+  void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override;

-  void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color,
-                      unsigned int thickness = 1);
+  void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color,
+                      unsigned int thickness = 1) override;

-  void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1);
+  void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override;

-  void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1);
+  void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) override;

   void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color,
-                        bool fill = false, unsigned int thickness = 1);
+                        bool fill = false, unsigned int thickness = 1) override;
   void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color,
-                        bool fill = false, unsigned int thickness = 1);
-  void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1);
-
-  bool getClick(bool blocking = true);
-
-  bool getClick(vpImagePoint &ip, bool blocking = true);
-
-  bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true);
-
-  bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true);
-  bool getKeyboardEvent(bool blocking = true);
-  bool getKeyboardEvent(std::string &key, bool blocking);
-  bool getPointerMotionEvent(vpImagePoint &ip);
-  bool getPointerPosition(vpImagePoint &ip);
+                        bool fill = false, unsigned int thickness = 1) override;
+  void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) override;
+
+  bool getClick(bool blocking = true) override;
+  bool getClick(vpImagePoint &ip, bool blocking = true) override;
+  bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override;
+  bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override;
+
+  bool getKeyboardEvent(bool blocking = true) override;
+  bool getKeyboardEvent(std::string &key, bool blocking) override;
+  bool getPointerMotionEvent(vpImagePoint &ip) override;
+  bool getPointerPosition(vpImagePoint &ip) override;

   void waitForInit();
 };
diff --git a/modules/gui/include/visp3/gui/vpDisplayX.h b/modules/gui/include/visp3/gui/vpDisplayX.h
index 554a93a550..feaadfc8bb 100644
--- a/modules/gui/include/visp3/gui/vpDisplayX.h
+++ b/modules/gui/include/visp3/gui/vpDisplayX.h
@@ -1,5 +1,4 @@
-/****************************************************************************
- *
+/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
@@ -30,8 +29,7 @@
 *
 * Description:
 * Image display.
- * -*****************************************************************************/ + */ #ifndef vpDisplayX_h #define vpDisplayX_h @@ -156,69 +154,69 @@ class VISP_EXPORT vpDisplayX : public vpDisplay vpDisplayX(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "", vpScaleType type = SCALE_DEFAULT); - virtual ~vpDisplayX(); + virtual ~vpDisplayX() override; - void getImage(vpImage &I); + void getImage(vpImage &I) override; unsigned int getScreenDepth(); - unsigned int getScreenHeight(); - void getScreenSize(unsigned int &width, unsigned int &height); - unsigned int getScreenWidth(); + unsigned int getScreenHeight() override; + void getScreenSize(unsigned int &width, unsigned int &height) override; + unsigned int getScreenWidth() override; - void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = ""); - void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = ""); + void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "") override; + void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "") override; void init(unsigned int win_width, unsigned int win_height, int win_x = -1, int win_y = -1, - const std::string &win_title = ""); + const std::string &win_title = "") override; protected: - void clearDisplay(const vpColor &color = vpColor::white); + void clearDisplay(const vpColor &color = vpColor::white) override; - void closeDisplay(); + void closeDisplay() override; void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, - unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1); + unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) override; - void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green); + void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) override; void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, - unsigned int thickness = 1); - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1); + unsigned int thickness = 1) override; + void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1); + unsigned int thickness = 1) override; - void displayImage(const vpImage &I); - void displayImage(const vpImage &I); + void displayImage(const vpImage &I) override; + void displayImage(const vpImage &I) override; void displayImage(const unsigned char *I); void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, - unsigned int height); - void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height); + unsigned int height) override; + void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1); - void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1); + void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override; + void displayPoint(const vpImagePoint &ip, 
const vpColor &color, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, - bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, - bool fill = false, unsigned int thickness = 1); - void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; + void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) override; - void flushDisplay(); - void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height); + void flushDisplay() override; + void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) override; - bool getClick(bool blocking = true); - bool getClick(vpImagePoint &ip, bool blocking = true); - bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); + bool getClick(bool blocking = true) override; + bool getClick(vpImagePoint &ip, bool blocking = true) override; + bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; - bool getKeyboardEvent(bool blocking = true); - bool getKeyboardEvent(std::string &key, bool blocking = true); + bool getKeyboardEvent(bool blocking = true) override; + bool getKeyboardEvent(std::string &key, bool blocking = true) override; - bool getPointerMotionEvent(vpImagePoint &ip); - bool getPointerPosition(vpImagePoint &ip); + bool getPointerMotionEvent(vpImagePoint &ip) override; + bool getPointerPosition(vpImagePoint &ip) override; - void setFont(const std::string &font); - void setTitle(const std::string &title); - void setWindowPosition(int win_x, int win_y); + void setFont(const std::string &font) override; + void setTitle(const std::string &title) override; + void setWindowPosition(int win_x, int win_y) override; private: // Implementation diff --git a/modules/gui/include/visp3/gui/vpGDIRenderer.h b/modules/gui/include/visp3/gui/vpGDIRenderer.h index 6b82fdd76e..203cc6d065 100644 --- a/modules/gui/include/visp3/gui/vpGDIRenderer.h +++ b/modules/gui/include/visp3/gui/vpGDIRenderer.h @@ -110,7 +110,7 @@ class VISP_EXPORT vpGDIRenderer : public vpWin32Renderer void drawArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int w, unsigned int h, unsigned int thickness = 1); - void getImage(vpImage &I); + void getImage(vpImage &I) override; private: // updates the renderer hbitmaps. diff --git a/modules/gui/include/visp3/gui/vpPlot.h b/modules/gui/include/visp3/gui/vpPlot.h index ac3e57e1ee..0ec40b738f 100644 --- a/modules/gui/include/visp3/gui/vpPlot.h +++ b/modules/gui/include/visp3/gui/vpPlot.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Plot curves. - * -*****************************************************************************/ + */ /*! 
\file vpPlot.h @@ -45,70 +43,68 @@ #include #include -/*! - \class vpPlot - \ingroup group_gui_plotter - - \brief This class enables real time drawing of 2D or 3D graphics. An - instance of the class open a window which contains between 1 and 4 - graphics. Each one contains a desired number of curves. - - \warning This class is only available if one of the display functionalities - (X11, GDI, GTK, OpenCV or Direct3D) is available. In visp3/core/vpConfig.h - header file, you should have VISP_HAVE_DISPLAY define. - - The example below shows how to use the vpPlot class. An other example - provided in tutorial-ibvs-plotter.cpp and described in \ref tutorial-plotter - shows how to use this class to plot in real-time some curves during an - image-based visual servo. - - \code -#include - -int main () -{ #if defined(VISP_HAVE_DISPLAY) - // Create a window (700 by 700) at position (100, 200) with two graphics - vpPlot A(2, 700, 700, 100, 200, "Curves..."); - - // The first graphic contains 1 curve and the second graphic contains 2 curves - A.initGraph(0,1); A.initGraph(1,2); - - // The color of the curve in the first graphic is red - A.setColor(0,0,vpColor::red); - // The first curve in the second graphic is green - A.setColor(1,0,vpColor::green); - // The second curve in the second graphic is blue - A.setColor(1,1,vpColor::blue); - - // Add the point (0,0) in the first graphic - A.plot(0,0,0,0); - - // Add the point (0,1) to the first curve of the second graphic - A.plot(1,0,0,1); - - // Add the point (0,2) to the second curve of the second graphic - A.plot(1,1,0,2); - - for (int i = 0; i < 50; i++) { - // Add the point (i,sin(i*pi/10) in the first graphic - A.plot(0,0,i,sin(i*M_PI/10)); - - // Add the point (i,1) to the first curve of the second graphic - A.plot(1,0,i,1); - - // Add the point (i,2) to the second curve of the second graphic - A.plot(1,1,i,2); - } - - return 0; -#endif -} - \endcode -*/ - -#if defined(VISP_HAVE_DISPLAY) - +/*! + * \class vpPlot + * \ingroup group_gui_plotter + * + * \brief This class enables real time drawing of 2D or 3D graphics. An + * instance of the class open a window which contains between 1 and 4 + * graphics. Each one contains a desired number of curves. + * + * \warning This class is only available if one of the display functionalities + * (X11, GDI, GTK, OpenCV or Direct3D) is available. In visp3/core/vpConfig.h + * header file, you should have VISP_HAVE_DISPLAY define. + * + * The example below shows how to use the vpPlot class. An other example + * provided in tutorial-ibvs-plotter.cpp and described in \ref tutorial-plotter + * shows how to use this class to plot in real-time some curves during an + * image-based visual servo. 
+ *
+ * \code
+ * #include <visp3/gui/vpPlot.h>
+ *
+ * int main ()
+ * {
+ * #if defined(VISP_HAVE_DISPLAY)
+ *   // Create a window (700 by 700) at position (100, 200) with two graphics
+ *   vpPlot A(2, 700, 700, 100, 200, "Curves...");
+ *
+ *   // The first graphic contains 1 curve and the second graphic contains 2 curves
+ *   A.initGraph(0,1); A.initGraph(1,2);
+ *
+ *   // The color of the curve in the first graphic is red
+ *   A.setColor(0,0,vpColor::red);
+ *   // The first curve in the second graphic is green
+ *   A.setColor(1,0,vpColor::green);
+ *   // The second curve in the second graphic is blue
+ *   A.setColor(1,1,vpColor::blue);
+ *
+ *   // Add the point (0,0) in the first graphic
+ *   A.plot(0,0,0,0);
+ *
+ *   // Add the point (0,1) to the first curve of the second graphic
+ *   A.plot(1,0,0,1);
+ *
+ *   // Add the point (0,2) to the second curve of the second graphic
+ *   A.plot(1,1,0,2);
+ *
+ *   for (int i = 0; i < 50; i++) {
+ *     // Add the point (i, sin(i*pi/10)) in the first graphic
+ *     A.plot(0,0,i,sin(i*M_PI/10));
+ *
+ *     // Add the point (i,1) to the first curve of the second graphic
+ *     A.plot(1,0,i,1);
+ *
+ *     // Add the point (i,2) to the second curve of the second graphic
+ *     A.plot(1,1,i,2);
+ *   }
+ *
+ *   return 0;
+ * #endif
+ * }
+ * \endcode
+ */
 class VISP_EXPORT vpPlot
 {
 public:
@@ -173,12 +169,13 @@ class VISP_EXPORT vpPlot
   void setColor(unsigned int graphNum, unsigned int curveNum, vpColor color);
   void setGraphThickness(unsigned int graphNum, unsigned int thickness);
   void setGridThickness(unsigned int graphNum, unsigned int thickness);
-  /*!
-    Set the font of the characters. The display should be initialized before.
-    To know which font are available, on Unix you can use xfontsel or xlsfonts
-    utilities.
-  */
+  /*!
+   * Set the font of the characters. The display should be initialized before.
+   *
+   * To know which font are available, on Unix you can use xfontsel or xlsfonts
+   * utilities.
+ */ void setFont(const std::string &font) { if (display->isInitialised()) diff --git a/modules/gui/src/display/vpDisplayGTK.cpp b/modules/gui/src/display/vpDisplayGTK.cpp index a920903096..e9a849c8d7 100644 --- a/modules/gui/src/display/vpDisplayGTK.cpp +++ b/modules/gui/src/display/vpDisplayGTK.cpp @@ -73,13 +73,12 @@ class vpDisplayGTK::Impl public: Impl() : m_widget(NULL), m_background(NULL), m_gc(NULL), m_blue(), m_red(), m_yellow(), m_green(), m_cyan(), m_orange(), - m_white(), m_black(), m_gdkcolor(), m_lightBlue(), m_darkBlue(), m_lightRed(), m_darkRed(), m_lightGreen(), - m_darkGreen(), m_purple(), m_lightGray(), m_gray(), m_darkGray(), m_colormap(NULL), m_font(NULL), m_vectgtk(NULL), - m_col(NULL) - { - } + m_white(), m_black(), m_gdkcolor(), m_lightBlue(), m_darkBlue(), m_lightRed(), m_darkRed(), m_lightGreen(), + m_darkGreen(), m_purple(), m_lightGray(), m_gray(), m_darkGray(), m_colormap(NULL), m_font(NULL), m_vectgtk(NULL), + m_col(NULL) + { } - ~Impl() {} + ~Impl() { } void init(unsigned int win_width, unsigned int win_height, int win_x, int win_y, const std::string &title) { @@ -211,7 +210,8 @@ class vpDisplayGTK::Impl if (scale == 1) { /* Copie de l'image dans le pixmap fond */ gdk_draw_gray_image(m_background, m_gc, 0, 0, width, height, GDK_RGB_DITHER_NONE, I.bitmap, width); - } else { + } + else { vpImage sampled; I.subsample(scale, scale, sampled); gdk_draw_gray_image(m_background, m_gc, 0, 0, width, height, GDK_RGB_DITHER_NONE, sampled.bitmap, width); @@ -227,7 +227,8 @@ class vpDisplayGTK::Impl /* Copie de l'image dans le pixmap fond */ gdk_draw_rgb_32_image(m_background, m_gc, 0, 0, width, height, GDK_RGB_DITHER_NONE, (unsigned char *)I.bitmap, 4 * width); - } else { + } + else { vpImage sampled; I.subsample(scale, scale, sampled); gdk_draw_rgb_32_image(m_background, m_gc, 0, 0, width, height, GDK_RGB_DITHER_NONE, @@ -369,7 +370,8 @@ class vpDisplayGTK::Impl if (thickness == 1) { gdk_draw_point(m_background, m_gc, vpMath::round(ip.get_u() / scale), vpMath::round(ip.get_v() / scale)); - } else { + } + else { gdk_draw_rectangle(m_background, m_gc, TRUE, vpMath::round(ip.get_u() / scale), vpMath::round(ip.get_v() / scale), static_cast(thickness), static_cast(thickness)); } @@ -531,7 +533,8 @@ class vpDisplayGTK::Impl w = static_cast(gdk_screen_get_width(screen_)); h = static_cast(gdk_screen_get_height(screen_)); gtk_widget_destroy(widget_); - } else { + } + else { GdkScreen *screen_ = gdk_window_get_screen(m_widget->window); w = static_cast(gdk_screen_get_width(screen_)); h = static_cast(gdk_screen_get_height(screen_)); @@ -714,7 +717,7 @@ int main() } \endcode */ -vpDisplayGTK::vpDisplayGTK() : vpDisplay(), m_impl(new Impl()) {} +vpDisplayGTK::vpDisplayGTK() : vpDisplay(), m_impl(new Impl()) { } /*! Destructor. 
@@ -837,7 +840,8 @@ void vpDisplayGTK::setTitle(const std::string &title) if (!title.empty()) { m_impl->setTitle(title); } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -855,7 +859,8 @@ void vpDisplayGTK::setWindowPosition(int win_x, int win_y) { if (m_displayHasBeenInitialized) { m_impl->setWindowPosition(win_x, win_y); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -875,7 +880,8 @@ void vpDisplayGTK::displayImage(const vpImage &I) { if (m_displayHasBeenInitialized) { m_impl->displayImage(I, m_scale, static_cast(m_width), static_cast(m_height)); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -910,7 +916,8 @@ void vpDisplayGTK::displayImageROI(const vpImage &I, const vpImag m_impl->displayImageROI(Itemp, static_cast(j_min), static_cast(i_min), static_cast(Itemp.getWidth()), static_cast(Itemp.getHeight())); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -930,7 +937,8 @@ void vpDisplayGTK::displayImage(const vpImage &I) { if (m_displayHasBeenInitialized) { m_impl->displayImage(I, m_scale, static_cast(m_width), static_cast(m_height)); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -964,7 +972,8 @@ void vpDisplayGTK::displayImageROI(const vpImage &I, const vpImagePoint m_impl->displayImageROI(Itemp, static_cast(j_min), static_cast(i_min), static_cast(Itemp.getWidth()), static_cast(Itemp.getHeight())); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -998,7 +1007,8 @@ void vpDisplayGTK::flushDisplay() { if (m_displayHasBeenInitialized) { m_impl->flushDisplay(); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1012,7 +1022,8 @@ void vpDisplayGTK::flushDisplayROI(const vpImagePoint & /*iP*/, const unsigned i { if (m_displayHasBeenInitialized) { m_impl->flushDisplay(); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1061,7 +1072,8 @@ void vpDisplayGTK::displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2 displayLine(ip1, ip2, color, thickness); } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1081,7 +1093,8 @@ void vpDisplayGTK::displayCharString(const vpImagePoint &ip, const char *text, c { if (m_displayHasBeenInitialized) { m_impl->displayCharString(ip, text, color, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1102,7 +1115,8 @@ void vpDisplayGTK::displayCircle(const vpImagePoint ¢er, unsigned int radius thickness = 0; m_impl->displayCircle(center, radius, color, fill, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1153,7 +1167,8 @@ void vpDisplayGTK::displayDotLine(const vpImagePoint &ip1, const vpImagePoint &i thickness = 0; m_impl->displayDotLine(ip1, ip2, color, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1172,7 +1187,8 @@ void 
vpDisplayGTK::displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, thickness = 0; m_impl->displayLine(ip1, ip2, color, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1187,7 +1203,8 @@ void vpDisplayGTK::displayPoint(const vpImagePoint &ip, const vpColor &color, un { if (m_displayHasBeenInitialized) { m_impl->displayPoint(ip, color, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1213,7 +1230,8 @@ void vpDisplayGTK::displayRectangle(const vpImagePoint &topLeft, unsigned int w, thickness = 0; m_impl->displayRectangle(topLeft, w, h, color, fill, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1241,7 +1259,8 @@ void vpDisplayGTK::displayRectangle(const vpImagePoint &topLeft, const vpImagePo unsigned int h = static_cast(vpMath::round(bottomRight.get_v() - topLeft.get_v())); m_impl->displayRectangle(topLeft, w, h, color, fill, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1268,7 +1287,8 @@ void vpDisplayGTK::displayRectangle(const vpRect &rectangle, const vpColor &colo unsigned int w = static_cast(vpMath::round(rectangle.getWidth())); unsigned int h = static_cast(vpMath::round(rectangle.getRight())); m_impl->displayRectangle(topLeft, w, h, color, fill, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1296,7 +1316,8 @@ bool vpDisplayGTK::getClick(bool blocking) vpImagePoint ip; vpMouseButton::vpMouseButtonType button; ret = m_impl->getClick(ip, button, blocking, m_scale, GDK_BUTTON_PRESS); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1325,7 +1346,8 @@ bool vpDisplayGTK::getClick(vpImagePoint &ip, bool blocking) if (m_displayHasBeenInitialized) { vpMouseButton::vpMouseButtonType button; ret = m_impl->getClick(ip, button, blocking, m_scale, GDK_BUTTON_PRESS); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1356,7 +1378,8 @@ bool vpDisplayGTK::getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType & if (m_displayHasBeenInitialized) { ret = m_impl->getClick(ip, button, blocking, m_scale, GDK_BUTTON_PRESS); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1391,7 +1414,8 @@ bool vpDisplayGTK::getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType if (m_displayHasBeenInitialized) { ret = m_impl->getClick(ip, button, blocking, m_scale, GDK_BUTTON_RELEASE); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1406,7 +1430,8 @@ void vpDisplayGTK::getImage(vpImage &I) // should certainly be optimized. 
if (m_displayHasBeenInitialized) { m_impl->getImage(I, static_cast(m_width), static_cast(m_height)); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1445,7 +1470,8 @@ bool vpDisplayGTK::getKeyboardEvent(bool blocking) if (m_displayHasBeenInitialized) { std::string key; ret = m_impl->getKeyboardEvent(key, blocking); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1476,7 +1502,8 @@ bool vpDisplayGTK::getKeyboardEvent(std::string &key, bool blocking) if (m_displayHasBeenInitialized) { ret = m_impl->getKeyboardEvent(key, blocking); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1500,7 +1527,8 @@ bool vpDisplayGTK::getPointerMotionEvent(vpImagePoint &ip) if (m_displayHasBeenInitialized) { ret = m_impl->getPointerMotionEvent(ip, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1520,7 +1548,8 @@ bool vpDisplayGTK::getPointerPosition(vpImagePoint &ip) { if (m_displayHasBeenInitialized) { m_impl->getPointerPosition(ip, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } @@ -1561,5 +1590,5 @@ unsigned int vpDisplayGTK::getScreenHeight() #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpDisplayGTK.cpp.o) has no // symbols -void dummy_vpDisplayGTK(){}; +void dummy_vpDisplayGTK() { }; #endif diff --git a/modules/gui/src/display/windows/vpDisplayD3D.cpp b/modules/gui/src/display/windows/vpDisplayD3D.cpp index ce84122dc5..a00ac23d59 100644 --- a/modules/gui/src/display/windows/vpDisplayD3D.cpp +++ b/modules/gui/src/display/windows/vpDisplayD3D.cpp @@ -50,7 +50,7 @@ /*! \brief Basic constructor. */ -vpDisplayD3D::vpDisplayD3D() : vpDisplayWin32(new vpD3DRenderer()) {} +vpDisplayD3D::vpDisplayD3D() : vpDisplayWin32(new vpD3DRenderer()) { } /*! @@ -188,13 +188,8 @@ vpDisplayD3D::vpDisplayD3D(vpImage &I, int winx, int winy, const init(I, winx, winy, title); } -/*! - \brief Basic destructor. -*/ -vpDisplayD3D::~vpDisplayD3D() {} - #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpDisplayD3D.cpp.o) has no // symbols -void dummy_vpDisplayD3D(){}; +void dummy_vpDisplayD3D() { }; #endif diff --git a/modules/gui/src/display/windows/vpDisplayGDI.cpp b/modules/gui/src/display/windows/vpDisplayGDI.cpp index 5c1a6a45d4..5140f60cbc 100644 --- a/modules/gui/src/display/windows/vpDisplayGDI.cpp +++ b/modules/gui/src/display/windows/vpDisplayGDI.cpp @@ -52,7 +52,7 @@ /*! \brief Basic constructor. */ -vpDisplayGDI::vpDisplayGDI() : vpDisplayWin32(new vpGDIRenderer()) {} +vpDisplayGDI::vpDisplayGDI() : vpDisplayWin32(new vpGDIRenderer()) { } /*! @@ -188,13 +188,8 @@ vpDisplayGDI::vpDisplayGDI(vpImage &I, int winx, int winy, const init(I, winx, winy, title); } -/*! - \brief Basic destructor. 
-*/ -vpDisplayGDI::~vpDisplayGDI() {} - #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpDisplayGDI.cpp.o) has no // symbols -void dummy_vpDisplayGDI(){}; +void dummy_vpDisplayGDI() { }; #endif diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index dadfff341e..c39f9aacb6 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -483,7 +483,7 @@ vpCircleHoughTransform::computeCircleCandidates() float scalProd = rx * gx + ry * gy; float scalProd2 = scalProd * scalProd; if (scalProd2 >= circlePerfectness2 * r2 * grad2) { - // Look for the Radius Candidate Bin RCB_k to which d_ij is "the closest" will have an additionnal vote + // Look for the Radius Candidate Bin RCB_k to which d_ij is "the closest" will have an additional vote float r = static_cast(std::sqrt(r2)); unsigned int r_bin = static_cast(std::ceil((r - m_algoParams.m_minRadius)/ m_algoParams.m_centerMinDist)); r_bin = std::min(r_bin, nbBins - 1); diff --git a/modules/io/include/visp3/io/vpDiskGrabber.h b/modules/io/include/visp3/io/vpDiskGrabber.h index 1548e92a5f..221dc48806 100644 --- a/modules/io/include/visp3/io/vpDiskGrabber.h +++ b/modules/io/include/visp3/io/vpDiskGrabber.h @@ -32,9 +32,9 @@ */ /*! - \file vpDiskGrabber.h - \brief Class to load image sequence from the disk. -*/ + * \file vpDiskGrabber.h + * \brief Class to load image sequence from the disk. + */ #ifndef vpDiskGrabber_hh #define vpDiskGrabber_hh @@ -46,60 +46,60 @@ #include /*! - \class vpDiskGrabber - - \ingroup group_io_video - - \brief Class to grab (ie. read) images from the disk. - - Defined a virtual video device. "Grab" the images from the disk. - Derived from the vpFrameGrabber class. - - \sa vpFrameGrabber - - Here an example of capture from the directory - "/local/soft/ViSP/ViSP-images/cube". We want to acquire 10 images - from the first named "image.0001.pgm" by steps of 2. - - \code -#include -#include - -int main(){ - vpImage I; // Grey level image - - // Declare a framegrabber able to read a sequence of successive - // images from the disk - vpDiskGrabber g; - - // Set the path to the directory containing the sequence - g.setDirectory("/local/soft/ViSP/ViSP-images/cube"); - // Set the image base name. The directory and the base name constitute - // the constant part of the full filename - g.setBaseName("image."); - // Set the step between two images of the sequence - g.setStep(2); - // Set the number of digits to build the image number - g.setNumberOfZero(4); - // Set the first frame number of the sequence - g.setImageNumber(1); - // Set the image file extension - g.setExtension("pgm"); - - // Open the framegrabber by loading the first image of the sequence - g.open(I) ; - - unsigned int cpt = 1; - // this is the loop over the image sequence - while(cpt ++ < 10) - { - // read the image and then increment the image counter so that the next - // call to acquire(I) will get the next image - g.acquire(I) ; - } -} - \endcode -*/ + * \class vpDiskGrabber + * + * \ingroup group_io_video + * + * \brief Class to grab (ie. read) images from the disk. + * + * Defined a virtual video device. "Grab" the images from the disk. + * Derived from the vpFrameGrabber class. + * + * \sa vpFrameGrabber + * + * Here an example of capture from the directory + * "/local/soft/ViSP/ViSP-images/cube". We want to acquire 10 images + * from the first named "image.0001.pgm" by steps of 2. 
+ * + * \code + * #include + * #include + * + * int main(){ + * vpImage I; // Grey level image + * + * // Declare a framegrabber able to read a sequence of successive + * // images from the disk + * vpDiskGrabber g; + * + * // Set the path to the directory containing the sequence + * g.setDirectory("/local/soft/ViSP/ViSP-images/cube"); + * // Set the image base name. The directory and the base name constitute + * // the constant part of the full filename + * g.setBaseName("image."); + * // Set the step between two images of the sequence + * g.setStep(2); + * // Set the number of digits to build the image number + * g.setNumberOfZero(4); + * // Set the first frame number of the sequence + * g.setImageNumber(1); + * // Set the image file extension + * g.setExtension("pgm"); + * + * // Open the framegrabber by loading the first image of the sequence + * g.open(I) ; + * + * unsigned int cpt = 1; + * // this is the loop over the image sequence + * while(cpt ++ < 10) + * { + * // read the image and then increment the image counter so that the next + * // call to acquire(I) will get the next image + * g.acquire(I) ; + * } + * } + * \endcode + */ class VISP_EXPORT vpDiskGrabber : public vpFrameGrabber { private: @@ -128,6 +128,12 @@ class VISP_EXPORT vpDiskGrabber : public vpFrameGrabber */ explicit vpDiskGrabber(const std::string &genericName); + /*! + * Destructor. + * In fact nothing to destroy... + */ + virtual ~vpDiskGrabber() { }; + /*! * Constructor. * @@ -141,12 +147,6 @@ class VISP_EXPORT vpDiskGrabber : public vpFrameGrabber explicit vpDiskGrabber(const std::string &dir, const std::string &basename, long number, int step, unsigned int noz, const std::string &ext); - /*! - * Destructor. - * In fact nothing to destroy... - */ - virtual ~vpDiskGrabber(); - /*! * Acquire an image reading the next image from the disk. * After this call, the image number is incremented considering the step. diff --git a/modules/io/include/visp3/io/vpVideoReader.h b/modules/io/include/visp3/io/vpVideoReader.h index ba4a294df0..a6ea82161e 100644 --- a/modules/io/include/visp3/io/vpVideoReader.h +++ b/modules/io/include/visp3/io/vpVideoReader.h @@ -49,117 +49,117 @@ #endif /*! - \class vpVideoReader - - \ingroup group_io_video - - \brief Class that enables to manipulate easily a video file or a sequence of - images. As it inherits from the vpFrameGrabber Class, it can be used like an - other frame grabber class. - - This class has its own implementation to read a sequence of PGM and PPM - images. - - This class may benefit from optional 3rd parties: - - libpng: If installed this optional 3rd party is used to read a sequence of - PNG images. Installation instructions are provided here - https://visp.inria.fr/3rd_png. - - libjpeg: If installed this optional 3rd party is used to read a sequence - of JPEG images. Installation instructions are provided here - https://visp.inria.fr/3rd_jpeg. - - OpenCV: If installed this optional 3rd party is used to read a sequence of - images where images could be in TIFF, BMP, DIB, PBM, RASTER, JPEG2000 format. - If libpng or libjpeg is not installed, OpenCV is also used to consider these - image formats. OpenCV allows also to consider AVI, MPEG, MPEG4, MOV, OGV, WMV, - FLV, MKV video formats. Installation instructions are provided here - https://visp.inria.fr/3rd_opencv. - - The following example available in tutorial-video-reader.cpp shows how this - class is really easy to use. It enables to read a video file named - video.mpeg. 
- \include tutorial-video-reader.cpp - - As shown in the next example, this class allows also to access to a specific - frame. But be careful, for video files, the getFrame() method is not precise - and returns the nearest intra key frame from the expected frame. You can use - the getFrame() method to position the reader in the video and then use the - acquire() method to get the following frames one by one. - \code -#include - -int main() -{ -#ifdef VISP_HAVE_OPENCV - vpImage I; - - vpVideoReader reader; - - // Initialize the reader. - reader.setFileName("video.mpeg"); - reader.open(I); - - // Read the nearest key frame from the 3th frame - reader.getFrame(I, 2); - - // After positioning the video reader use acquire to read the video frame by frame - reader.acquire(I); - - return 0; -#endif -} - \endcode - - The other following example explains how to use the class to read a - sequence of images. The images are stored in the folder "./image" and are - named "image0000.jpeg", "image0001.jpeg", "image0002.jpeg", ... As explained - in setFirstFrameIndex() and setLastFrameIndex() it is also possible to set - the first and last image numbers to read a portion of the sequence. If these - two functions are not used, first and last image numbers are set automatically - to match the first and image images of the sequence. - - \code -#include - -int main() -{ - vpImage I; - - vpVideoReader reader; - - // Initialize the reader. - reader.setFileName("./image/image%04d.jpeg"); - reader.setFirstFrameIndex(10); - reader.setLastFrameIndex(20); - reader.open(I); - - while (! reader.end() ) - reader.acquire(I); - - return 0; -} - \endcode - - Note that it is also possible to access to a specific frame using getFrame(). -\code -#include - -int main() -{ - vpImage I; - - vpVideoReader reader; - - // Initialize the reader. - reader.setFileName("./image/image%04d.jpeg"); - reader.open(I); - - // Read the 3th frame - reader.getFrame(I,2); - - return 0; -} - \endcode -*/ + * \class vpVideoReader + * + * \ingroup group_io_video + * + * \brief Class that enables to manipulate easily a video file or a sequence of + * images. As it inherits from the vpFrameGrabber Class, it can be used like an + * other frame grabber class. + * + * This class has its own implementation to read a sequence of PGM and PPM + * images. + * + * This class may benefit from optional 3rd parties: + * - libpng: If installed this optional 3rd party is used to read a sequence of + * PNG images. Installation instructions are provided here + * https://visp.inria.fr/3rd_png. + * - libjpeg: If installed this optional 3rd party is used to read a sequence + * of JPEG images. Installation instructions are provided here + * https://visp.inria.fr/3rd_jpeg. + * - OpenCV: If installed this optional 3rd party is used to read a sequence of + * images where images could be in TIFF, BMP, DIB, PBM, RASTER, JPEG2000 format. + * If libpng or libjpeg is not installed, OpenCV is also used to consider these + * image formats. OpenCV allows also to consider AVI, MPEG, MPEG4, MOV, OGV, WMV, + * FLV, MKV video formats. Installation instructions are provided here + * https://visp.inria.fr/3rd_opencv. + * + * The following example available in tutorial-video-reader.cpp shows how this + * class is really easy to use. It enables to read a video file named + * video.mpeg. + * \include tutorial-video-reader.cpp + * + * As shown in the next example, this class allows also to access to a specific + * frame. 
But be careful, for video files, the getFrame() method is not precise + * and returns the nearest intra key frame from the expected frame. You can use + * the getFrame() method to position the reader in the video and then use the + * acquire() method to get the following frames one by one. + * \code + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_OPENCV + * vpImage I; + * + * vpVideoReader reader; + * + * // Initialize the reader. + * reader.setFileName("video.mpeg"); + * reader.open(I); + * + * // Read the nearest key frame from the 3th frame + * reader.getFrame(I, 2); + * + * // After positioning the video reader use acquire to read the video frame by frame + * reader.acquire(I); + * + * return 0; + * #endif + * } + * \endcode + * + * The other following example explains how to use the class to read a + * sequence of images. The images are stored in the folder "./image" and are + * named "image0000.jpeg", "image0001.jpeg", "image0002.jpeg", ... As explained + * in setFirstFrameIndex() and setLastFrameIndex() it is also possible to set + * the first and last image numbers to read a portion of the sequence. If these + * two functions are not used, first and last image numbers are set automatically + * to match the first and image images of the sequence. + * + * \code + * #include + * + * int main() + * { + * vpImage I; + * + * vpVideoReader reader; + * + * // Initialize the reader. + * reader.setFileName("./image/image%04d.jpeg"); + * reader.setFirstFrameIndex(10); + * reader.setLastFrameIndex(20); + * reader.open(I); + * + * while (! reader.end() ) + * reader.acquire(I); + * + * return 0; + * } + * \endcode + * + * Note that it is also possible to access to a specific frame using getFrame(). + * \code + * #include + * + * int main() + * { + * vpImage I; + * + * vpVideoReader reader; + * + * // Initialize the reader. + * reader.setFileName("./image/image%04d.jpeg"); + * reader.open(I); + * + * // Read the 3th frame + * reader.getFrame(I,2); + * + * return 0; + * } + * \endcode + */ class VISP_EXPORT vpVideoReader : public vpFrameGrabber { @@ -173,7 +173,8 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber bool m_lastframe_unknown; #endif //! Types of available formats - typedef enum { + typedef enum + { FORMAT_PGM, FORMAT_PPM, FORMAT_JPEG, @@ -229,14 +230,15 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber void close() { ; } /*! - \return true if the end of the sequence is reached. - */ + * \return true if the end of the sequence is reached. + */ inline bool end() { if (m_frameStep > 0) { if (m_frameCount + m_frameStep > m_lastFrame) return true; - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep < m_firstFrame) return true; } @@ -244,11 +246,12 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } bool getFrame(vpImage &I, long frame); bool getFrame(vpImage &I, long frame); - /*! - Return the frame rate in Hz used to encode the video stream. - If the video is a sequence of images, return -1. - */ + /*! + * Return the frame rate in Hz used to encode the video stream. + * + * If the video is a sequence of images, return -1. + */ double getFramerate() { if (!m_isOpen) { @@ -258,26 +261,26 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } /*! - Get the frame index of the current image. This index is updated at each - call of the acquire method. It can be used to detect the end of a file - (comparison with getLastFrameIndex()). - - \return Returns the current frame index. 
- - \sa end() - */ + * Get the frame index of the current image. This index is updated at each + * call of the acquire method. It can be used to detect the end of a file + * (comparison with getLastFrameIndex()). + * + * \return Returns the current frame index. + * + * \sa end() + */ inline long getFrameIndex() const { return m_frameCount; } /*! - Return the name of the file in which the last frame was read. - */ + * Return the name of the file in which the last frame was read. + */ inline std::string getFrameName() const { return m_frameName; } /*! - Gets the first frame index. - - \return Returns the first frame index. - */ + * Gets the first frame index. + * + * \return Returns the first frame index. + */ inline long getFirstFrameIndex() { if (!m_isOpen) { @@ -285,11 +288,12 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } return m_firstFrame; } - /*! - Gets the last frame index. - \return Returns the last frame index. - */ + /*! + * Gets the last frame index. + * + * \return Returns the last frame index. + */ inline long getLastFrameIndex() { if (!m_isOpen) { @@ -297,11 +301,12 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } return m_lastFrame; } - /*! - Gets the frame step. - \return Returns the frame step value. - */ + /*! + * Gets the frame step. + * + * \return Returns the frame step value. + */ inline long getFrameStep() const { return m_frameStep; } bool isVideoFormat() const; @@ -312,35 +317,37 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber vpVideoReader &operator>>(vpImage &I); /*! - Reset the frame counter and sets it to the first image index. - - By default the first frame index is set to 0. - - This method is useful if you use the class like a frame grabber (ie with - the acquire method). - */ + * Reset the frame counter and sets it to the first image index. + * + * By default the first frame index is set to 0. + * + * This method is useful if you use the class like a frame grabber (ie with + * the acquire method). + */ inline void resetFrameCounter() { m_frameCount = m_firstFrame; } void setFileName(const std::string &filename); - /*! - Enables to set the first frame index if you want to use the class like a - grabber (ie with the acquire method). - - \param first_frame : The first frame index. - \sa setLastFrameIndex() - */ + /*! + * Enables to set the first frame index if you want to use the class like a + * grabber (ie with the acquire method). + * + * \param first_frame : The first frame index. + * + * \sa setLastFrameIndex() + */ inline void setFirstFrameIndex(const long first_frame) { m_firstFrameIndexIsSet = true; m_firstFrame = first_frame; } - /*! - Enables to set the last frame index. - \param last_frame : The last frame index. - - \sa setFirstFrameIndex() - */ + /*! + * Enables to set the last frame index. + * + * \param last_frame : The last frame index. + * + * \sa setFirstFrameIndex() + */ inline void setLastFrameIndex(const long last_frame) { this->m_lastFrameIndexIsSet = true; @@ -348,13 +355,13 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } /*! - Sets the frame step index. - The default frame step is 1 - - \param frame_step : The frame index step. - - \sa setFrameStep() - */ + * Sets the frame step index. + * The default frame step is 1 + * + * \param frame_step : The frame index step. 
+ * + * \sa setFrameStep() + */ inline void setFrameStep(const long frame_step) { m_frameStep = frame_step; } private: diff --git a/modules/io/src/video/vpDiskGrabber.cpp b/modules/io/src/video/vpDiskGrabber.cpp index 474809e416..f8c923d542 100644 --- a/modules/io/src/video/vpDiskGrabber.cpp +++ b/modules/io/src/video/vpDiskGrabber.cpp @@ -236,8 +236,6 @@ void vpDiskGrabber::acquire(vpImage &I, long image_number) height = I.getHeight(); } -vpDiskGrabber::~vpDiskGrabber() { } - void vpDiskGrabber::setImageNumber(long number) { m_image_number = number; diff --git a/modules/io/src/video/vpVideoReader.cpp b/modules/io/src/video/vpVideoReader.cpp index b687c48204..134db3a30a 100644 --- a/modules/io/src/video/vpVideoReader.cpp +++ b/modules/io/src/video/vpVideoReader.cpp @@ -32,9 +32,9 @@ */ /*! -\file vpVideoReader.cpp -\brief Read videos and image sequences -*/ + * \file vpVideoReader.cpp + * \brief Read videos and image sequences + */ #include #include @@ -51,13 +51,12 @@ vpVideoReader::vpVideoReader() : vpFrameGrabber(), m_imSequence(NULL), #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) - m_capture(), m_frame(), m_lastframe_unknown(false), + m_capture(), m_frame(), m_lastframe_unknown(false), #endif - m_formatType(FORMAT_UNKNOWN), m_videoName(), m_frameName(), m_initFileName(false), m_isOpen(false), m_frameCount(0), - m_firstFrame(0), m_lastFrame(0), m_firstFrameIndexIsSet(false), m_lastFrameIndexIsSet(false), m_frameStep(1), - m_frameRate(0.) -{ -} + m_formatType(FORMAT_UNKNOWN), m_videoName(), m_frameName(), m_initFileName(false), m_isOpen(false), m_frameCount(0), + m_firstFrame(0), m_lastFrame(0), m_firstFrameIndexIsSet(false), m_lastFrameIndexIsSet(false), m_frameStep(1), + m_frameRate(0.) +{ } /*! Basic destructor. @@ -131,7 +130,8 @@ void vpVideoReader::getProperties() m_imSequence->setImageNumber(m_firstFrame); } m_frameRate = -1.; - } else if (isVideoExtensionSupported()) { + } + else if (isVideoExtensionSupported()) { #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) m_capture.open(m_videoName.c_str()); @@ -150,14 +150,15 @@ void vpVideoReader::getProperties() #else throw(vpException(vpException::fatalError, "To read video files ViSP should be build with opencv " - "3rd >= 2.1.0 party libraries.")); + "3rd >= 2.1.0 party libraries.")); #endif - } else if (m_formatType == FORMAT_UNKNOWN) { - // vpERROR_TRACE("The format of the file does not correspond to a readable - // format."); + } + else if (m_formatType == FORMAT_UNKNOWN) { + // vpERROR_TRACE("The format of the file does not correspond to a readable + // format."); throw(vpException(vpException::fatalError, "The format of the file does " - "not correspond to a readable " - "format supported by ViSP.")); + "not correspond to a readable " + "format supported by ViSP.")); } findFirstFrameIndex(); @@ -253,7 +254,8 @@ void vpVideoReader::acquire(vpImage &I) try { m_imSequence->acquire(I); skip_frame = false; - } catch (...) { + } + catch (...) 
{ skip_frame = true; } } while (skip_frame && m_imSequence->getImageNumber() < m_lastFrame); @@ -261,7 +263,8 @@ void vpVideoReader::acquire(vpImage &I) m_frameName = m_imSequence->getImageName(); if (m_frameCount + m_frameStep > m_lastFrame) { m_imSequence->setImageNumber(m_frameCount); - } else if (m_frameCount + m_frameStep < m_firstFrame) { + } + else if (m_frameCount + m_frameStep < m_firstFrame) { m_imSequence->setImageNumber(m_frameCount); } } @@ -270,19 +273,23 @@ void vpVideoReader::acquire(vpImage &I) m_capture >> m_frame; if (m_frameStep == 1) { m_frameCount++; - } else { + } + else { #if VISP_HAVE_OPENCV_VERSION >= 0x030000 m_frameCount = (long)m_capture.get(cv::CAP_PROP_POS_FRAMES); if (m_frameStep > 0) { if (m_frameCount + m_frameStep <= m_lastFrame) { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount - 1); } - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep >= m_firstFrame) { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_firstFrame - 1); } } @@ -291,13 +298,16 @@ void vpVideoReader::acquire(vpImage &I) if (m_frameStep > 0) { if (m_frameCount + m_frameStep <= m_lastFrame) { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount - 1); } - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep >= m_firstFrame) { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_firstFrame - 1); } } @@ -310,7 +320,8 @@ void vpVideoReader::acquire(vpImage &I) // Set last frame to this image index setLastFrameIndex(m_frameCount - m_frameStep); } - } else { + } + else { vpImageConvert::convert(m_frame, I); } } @@ -338,7 +349,8 @@ void vpVideoReader::acquire(vpImage &I) try { m_imSequence->acquire(I); skip_frame = false; - } catch (...) { + } + catch (...) 
{ skip_frame = true; } } while (skip_frame && m_imSequence->getImageNumber() < m_lastFrame); @@ -346,7 +358,8 @@ void vpVideoReader::acquire(vpImage &I) m_frameName = m_imSequence->getImageName(); if (m_frameCount + m_frameStep > m_lastFrame) { m_imSequence->setImageNumber(m_frameCount); - } else if (m_frameCount + m_frameStep < m_firstFrame) { + } + else if (m_frameCount + m_frameStep < m_firstFrame) { m_imSequence->setImageNumber(m_frameCount); } } @@ -355,19 +368,23 @@ void vpVideoReader::acquire(vpImage &I) m_capture >> m_frame; if (m_frameStep == 1) { m_frameCount++; - } else { + } + else { #if VISP_HAVE_OPENCV_VERSION >= 0x030000 m_frameCount = (long)m_capture.get(cv::CAP_PROP_POS_FRAMES); if (m_frameStep > 0) { if (m_frameCount + m_frameStep <= m_lastFrame) { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount - 1); } - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep >= m_firstFrame) { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_firstFrame - 1); } } @@ -376,13 +393,16 @@ void vpVideoReader::acquire(vpImage &I) if (m_frameStep > 0) { if (m_frameCount + m_frameStep <= m_lastFrame) { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount - 1); } - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep >= m_firstFrame) { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_firstFrame - 1); } } @@ -391,7 +411,8 @@ void vpVideoReader::acquire(vpImage &I) if (m_frame.empty()) { std::cout << "Warning: Unable to decode image " << m_frameCount - m_frameStep << std::endl; - } else { + } + else { vpImageConvert::convert(m_frame, I); } } @@ -422,14 +443,17 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) m_imSequence->setImageNumber(m_frameCount); // to not increment vpDiskGrabber next image if (m_frameCount + m_frameStep > m_lastFrame) { m_imSequence->setImageNumber(m_frameCount); - } else if (m_frameCount + m_frameStep < m_firstFrame) { + } + else if (m_frameCount + m_frameStep < m_firstFrame) { m_imSequence->setImageNumber(m_frameCount); } - } catch (...) { + } + catch (...) { vpERROR_TRACE("Couldn't find the %u th frame", frame_index); return false; } - } else { + } + else { #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) if (!m_capture.set(cv::CAP_PROP_POS_FRAMES, frame_index)) { @@ -446,10 +470,12 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) if (m_frame.empty()) { setLastFrameIndex(m_frameCount - m_frameStep); return false; - } else { + } + else { vpImageConvert::convert(m_frame, I); } - } else + } + else vpImageConvert::convert(m_frame, I); #else if (!m_capture.set(CV_CAP_PROP_POS_FRAMES, frame_index)) { @@ -494,14 +520,17 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) m_imSequence->setImageNumber(m_frameCount); // to not increment vpDiskGrabber next image if (m_frameCount + m_frameStep > m_lastFrame) { m_imSequence->setImageNumber(m_frameCount); - } else if (m_frameCount + m_frameStep < m_firstFrame) { + } + else if (m_frameCount + m_frameStep < m_firstFrame) { m_imSequence->setImageNumber(m_frameCount); } - } catch (...) { + } + catch (...) 
{ vpERROR_TRACE("Couldn't find the %u th frame", frame_index); return false; } - } else { + } + else { #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) #if VISP_HAVE_OPENCV_VERSION >= 0x030000 if (!m_capture.set(cv::CAP_PROP_POS_FRAMES, frame_index)) { @@ -515,10 +544,12 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) if (m_frame.empty()) { setLastFrameIndex(m_frameCount - m_frameStep); return false; - } else { + } + else { vpImageConvert::convert(m_frame, I); } - } else { + } + else { vpImageConvert::convert(m_frame, I); } #else @@ -532,7 +563,8 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) if (m_frameStep > 1) { m_frameCount += m_frameStep - 1; // next index m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount); - } else if (m_frameStep < -1) { + } + else if (m_frameStep < -1) { m_frameCount += m_frameStep - 1; // next index m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount); } diff --git a/modules/io/src/video/vpVideoWriter.cpp b/modules/io/src/video/vpVideoWriter.cpp index 189c9f356f..d21829e200 100644 --- a/modules/io/src/video/vpVideoWriter.cpp +++ b/modules/io/src/video/vpVideoWriter.cpp @@ -74,7 +74,7 @@ vpVideoWriter::~vpVideoWriter() {} which will be saved. If you want to write a sequence of images, `filename` corresponds to - the path followed by the image name template. For exemple, if you want to + the path followed by the image name template. For example, if you want to write different images named `image0001.jpeg`, `image0002.jpg`, ... and located in the folder `/local/image`, `filename` will be `/local/image/image%04d.jpg`. diff --git a/modules/robot/include/visp3/robot/vpAfma4.h b/modules/robot/include/visp3/robot/vpAfma4.h index b857c146b7..f203268fe7 100644 --- a/modules/robot/include/visp3/robot/vpAfma4.h +++ b/modules/robot/include/visp3/robot/vpAfma4.h @@ -40,7 +40,7 @@ \file vpAfma4.h - Modelisation of Irisa's cylindrical robot named Afma4. + Modelization of Irisa's cylindrical robot named Afma4. */ @@ -50,7 +50,7 @@ \ingroup group_robot_real_cylindrical - \brief Modelisation of Irisa's cylindrical robot named Afma4. + \brief Modelization of Irisa's cylindrical robot named Afma4. This robot has five degrees of freedom, but only four motorized joints (joint 3 is not motorized). Joint 2 and 3 are prismatic. The diff --git a/modules/robot/include/visp3/robot/vpAfma6.h b/modules/robot/include/visp3/robot/vpAfma6.h index d1e698cb98..f916f45d46 100644 --- a/modules/robot/include/visp3/robot/vpAfma6.h +++ b/modules/robot/include/visp3/robot/vpAfma6.h @@ -40,7 +40,7 @@ \file vpAfma6.h - Modelisation of Irisa's gantry robot named Afma6. + Modelization of Irisa's gantry robot named Afma6. */ @@ -50,7 +50,7 @@ \ingroup group_robot_real_gantry group_robot_simu_gantry - \brief Modelisation of Irisa's gantry robot named Afma6. + \brief Modelization of Irisa's gantry robot named Afma6. In this modelization, different frames have to be considered. diff --git a/modules/robot/include/visp3/robot/vpPioneer.h b/modules/robot/include/visp3/robot/vpPioneer.h index ee58018703..6c953d1581 100644 --- a/modules/robot/include/visp3/robot/vpPioneer.h +++ b/modules/robot/include/visp3/robot/vpPioneer.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Common features for Pioneer unicycle mobile robots. 
- * -*****************************************************************************/ + */ #ifndef VPPIONEER_H #define VPPIONEER_H @@ -40,73 +38,66 @@ #include /*! - - \class vpPioneer - - \ingroup group_robot_real_unicycle group_robot_simu_unicycle - - \brief Generic functions for Pioneer mobile robots. - - This class provides common features for Pioneer mobile robots. - This robot has two control velocities \f$(v_x, w_z)\f$, the translational - and rotational velocities of the mobile platform respectively. - - The figure below shows the position of the frames that are used to model the - robot. The end effector frame is here located at the middle point between - the two wheels. - - \image html pioneer.png - - The robot jacobian at the end effector frame, the point located at the - middle between the two wheels is given by: - - \f[ - {^e}{\bf J}_e = \left(\begin{array}{cc} - 1 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 1 \\ - \end{array} - \right) - \f] - - Considering \f$(v_x, w_z)\f$, it is possible to compute \f$\bf v\f$ the six - dimention velocity skew expressed at the end effector frame by: - - \f[ - {\bf v} = {^e}{\bf J}_e \; - \left(\begin{array}{c} - v_x \\ - w_z \\ - \end{array} - \right) - \f]. - -*/ + * \class vpPioneer + * + * \ingroup group_robot_real_unicycle group_robot_simu_unicycle + * + * \brief Generic functions for Pioneer mobile robots. + * + * This class provides common features for Pioneer mobile robots. + * This robot has two control velocities \f$(v_x, w_z)\f$, the translational + * and rotational velocities of the mobile platform respectively. + * + * The figure below shows the position of the frames that are used to model the + * robot. The end effector frame is here located at the middle point between + * the two wheels. + * + * \image html pioneer.png + * + * The robot jacobian at the end effector frame, the point located at the + * middle between the two wheels is given by: + * + * \f[ + * {^e}{\bf J}_e = \left(\begin{array}{cc} + * 1 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 1 \\ + * \end{array} + * \right) + * \f] + * + * Considering \f$(v_x, w_z)\f$, it is possible to compute \f$\bf v\f$ the six + * dimension velocity skew expressed at the end effector frame by: + * + * \f[ + * {\bf v} = {^e}{\bf J}_e \; + * \left(\begin{array}{c} + * v_x \\ + * w_z \\ + * \end{array} + * \right) + * \f]. + */ class VISP_EXPORT vpPioneer : public vpUnicycle { public: /*! - Create a default Pioneer robot. - */ + * Create a default Pioneer robot. + */ vpPioneer() : vpUnicycle() { set_cMe(); set_eJe(); } - /*! - Destructor that does nothing. - */ - virtual ~vpPioneer(){}; - private: /*! - Set the transformation between the camera frame and the mobile platform - end effector frame. - */ + * Set the transformation between the camera frame and the mobile platform + * end effector frame. + */ void set_cMe() { // Position of mobile platform end effector frame in the camera frame @@ -119,27 +110,26 @@ class VISP_EXPORT vpPioneer : public vpUnicycle } /*! - Set the robot jacobian at the end effector frame, the point located at the - middle between the two wheels. 
- - Considering \f${\bf v} = {^e}{\bf J}_e \; [v_x, w_z]\f$ with - \f$(v_x, w_z)\f$ respectively the translational and rotational control - velocities of the mobile robot and \f$\bf v\f$ the six dimention velocity - skew expressed at the end effector frame, the robot jacobian is given by: - - \f[ - {^e}{\bf J}_e = \left(\begin{array}{cc} - 1 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 1 \\ - \end{array} - \right) - \f] - - */ + * Set the robot jacobian at the end effector frame, the point located at the + * middle between the two wheels. + * + * Considering \f${\bf v} = {^e}{\bf J}_e \; [v_x, w_z]\f$ with + * \f$(v_x, w_z)\f$ respectively the translational and rotational control + * velocities of the mobile robot and \f$\bf v\f$ the six dimension velocity + * skew expressed at the end effector frame, the robot jacobian is given by: + * + * \f[ + * {^e}{\bf J}_e = \left(\begin{array}{cc} + * 1 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 1 \\ + * \end{array} + * \right) + * \f] + */ void set_eJe() { eJe_.resize(6, 2); // pioneer jacobian expressed at point M diff --git a/modules/robot/include/visp3/robot/vpPioneerPan.h b/modules/robot/include/visp3/robot/vpPioneerPan.h index 6377766b06..1fa48f110a 100644 --- a/modules/robot/include/visp3/robot/vpPioneerPan.h +++ b/modules/robot/include/visp3/robot/vpPioneerPan.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Common features for Pioneer unicycle mobile robots. - * -*****************************************************************************/ + */ #ifndef VPPIONEERPAN_H #define VPPIONEERPAN_H @@ -41,62 +39,60 @@ #include /*! - - \class vpPioneerPan - - \ingroup group_robot_real_unicycle group_robot_simu_unicycle - - \brief Generic functions for Pioneer mobile robots equiped with a pan head. - - This class provides common features for Pioneer mobile robots equiped with a - pan head. - - This robot has three control velocities \f$(v_x, w_z, \dot{q_1})\f$, the - translational and rotational velocities of the mobile platform, the pan head - velocity respectively. - - The figure below shows the position of the frames that are used to model the - robot. The end effector frame is here located at the pan axis. - - \image html pioneer-pan.png - - Considering - \f[{\bf v} = {^e}{\bf J}_e \; - \left(\begin{array}{c} - v_x \\ - w_z \\ - \dot{q_1} \\ - \end{array} - \right) - \f] - with - \f$(v_x, w_z)\f$ respectively the translational and rotational control - velocities of the mobile platform, \f$\dot{q_1}\f$ the joint velocity of the - pan head and \f$\bf v\f$ the six dimention velocity skew expressed at point - E in frame E, the robot jacobian is given by: - - \f[ - {^e}{\bf J}_e = \left(\begin{array}{ccc} - c_1 & -c_1*p_y - s_1*p_x & 0 \\ - 0 & 0 & 0 \\ - s_1 & -s_1*p_y + c_1*p_x & 0 \\ - 0 & 0 & 0 \\ - 0 & -1 & 1 \\ - 0 & 0 & 0 \\ - \end{array} - \right) - \f] - - with \f$p_x, p_y\f$ the position of the head base frame in the mobile - platform frame located at the middle point between the two weels. - -*/ + * \class vpPioneerPan + * + * \ingroup group_robot_real_unicycle group_robot_simu_unicycle + * + * \brief Generic functions for Pioneer mobile robots equipped with a pan head. + * + * This class provides common features for Pioneer mobile robots equipped with a + * pan head. 
+ * + * This robot has three control velocities \f$(v_x, w_z, \dot{q_1})\f$, the + * translational and rotational velocities of the mobile platform, the pan head + * velocity respectively. + * + * The figure below shows the position of the frames that are used to model the + * robot. The end effector frame is here located at the pan axis. + * + * \image html pioneer-pan.png + * + * Considering + * \f[{\bf v} = {^e}{\bf J}_e \; + * \left(\begin{array}{c} + * v_x \\ + * w_z \\ + * \dot{q_1} \\ + * \end{array} + * \right) + * \f] + * with + * \f$(v_x, w_z)\f$ respectively the translational and rotational control + * velocities of the mobile platform, \f$\dot{q_1}\f$ the joint velocity of the + * pan head and \f$\bf v\f$ the six dimension velocity skew expressed at point + * E in frame E, the robot jacobian is given by: + * + * \f[ + * {^e}{\bf J}_e = \left(\begin{array}{ccc} + * c_1 & -c_1*p_y - s_1*p_x & 0 \\ + * 0 & 0 & 0 \\ + * s_1 & -s_1*p_y + c_1*p_x & 0 \\ + * 0 & 0 & 0 \\ + * 0 & -1 & 1 \\ + * 0 & 0 & 0 \\ + * \end{array} + * \right) + * \f] + * + * with \f$p_x, p_y\f$ the position of the head base frame in the mobile + * platform frame located at the middle point between the two wheels. + */ class VISP_EXPORT vpPioneerPan : public vpUnicycle { public: /*! - Create a pioneer mobile robot equiped with a pan head. - */ + * Create a pioneer mobile robot equipped with a pan head. + */ vpPioneerPan() : mMp_(), pMe_() { double q = 0; // Initial position of the pan axis @@ -106,40 +102,34 @@ class VISP_EXPORT vpPioneerPan : public vpUnicycle set_eJe(q); } - /*! - Destructor that does nothing. - */ - virtual ~vpPioneerPan(){}; - /** @name Inherited functionalities from vpPioneerPan */ //@{ /*! - Set the robot jacobian expressed at point E the end effector frame located - on the pan head. - - Considering \f${\bf v} = {^e}{\bf J}_e \; [v_x, w_z, \dot{q_1}]\f$ with - \f$(v_x, w_z)\f$ respectively the translational and rotational control - velocities of the mobile platform, \f$\dot{q_1}\f$ the joint velocity of - the pan head and \f$\bf v\f$ the six dimention velocity skew expressed at - point E in frame E, the robot jacobian is given by: - - \f[ - {^e}{\bf J}_e = \left(\begin{array}{ccc} - c_1 & -c_1*p_y - s_1*p_x & 0 \\ - 0 & 0 & 0 \\ - s_1 & -s_1*p_y + c_1*p_x & 0 \\ - 0 & 0 & 0 \\ - 0 & -1 & 1 \\ - 0 & 0 & 0 \\ - \end{array} - \right) - \f] - - with \f$p_x, p_y\f$ the position of the head base frame in the mobile - platform frame located at the middle point between the two weels. - - */ + * Set the robot jacobian expressed at point E the end effector frame located + * on the pan head. + * + * Considering \f${\bf v} = {^e}{\bf J}_e \; [v_x, w_z, \dot{q_1}]\f$ with + * \f$(v_x, w_z)\f$ respectively the translational and rotational control + * velocities of the mobile platform, \f$\dot{q_1}\f$ the joint velocity of + * the pan head and \f$\bf v\f$ the six dimension velocity skew expressed at + * point E in frame E, the robot jacobian is given by: + * + * \f[ + * {^e}{\bf J}_e = \left(\begin{array}{ccc} + * c_1 & -c_1*p_y - s_1*p_x & 0 \\ + * 0 & 0 & 0 \\ + * s_1 & -s_1*p_y + c_1*p_x & 0 \\ + * 0 & 0 & 0 \\ + * 0 & -1 & 1 \\ + * 0 & 0 & 0 \\ + * \end{array} + * \right) + * \f] + * + * with \f$p_x, p_y\f$ the position of the head base frame in the mobile + * platform frame located at the middle point between the two wheels. 
+ */ void set_eJe(double q_pan) { double px = mMp_[0][3]; @@ -166,9 +156,9 @@ class VISP_EXPORT vpPioneerPan : public vpUnicycle /** @name Protected Member Functions Inherited from vpPioneerPan */ //@{ /*! - Set the transformation between the camera frame and the pan head end - effector frame. - */ + * Set the transformation between the camera frame and the pan head end + * effector frame. + */ void set_cMe() { // Position of pan head end effector frame in the camera frame @@ -189,10 +179,10 @@ class VISP_EXPORT vpPioneerPan : public vpUnicycle } /*! - Set the transformation between the mobile platform frame - located at the middle point between the two weels and the base frame of - the pan head. - */ + * Set the transformation between the mobile platform frame + * located at the middle point between the two wheels and the base frame of + * the pan head. + */ void set_mMp() { // Position of the pan head in the mobile platform frame @@ -210,12 +200,11 @@ class VISP_EXPORT vpPioneerPan : public vpUnicycle } /*! - Set the transformation between the pan head reference frame and the - end-effector frame. - - \param q : Position in rad of the pan axis. - - */ + * Set the transformation between the pan head reference frame and the + * end-effector frame. + * + * \param q : Position in rad of the pan axis. + */ void set_pMe(const double q) { vpRotationMatrix pRe; diff --git a/modules/robot/include/visp3/robot/vpQbSoftHand.h b/modules/robot/include/visp3/robot/vpQbSoftHand.h index 002a20d6c4..d4dbd9995a 100644 --- a/modules/robot/include/visp3/robot/vpQbSoftHand.h +++ b/modules/robot/include/visp3/robot/vpQbSoftHand.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Interface for the qb robotics qbSoftHand device. - * -*****************************************************************************/ + */ #ifndef _vpQbSoftHand_h_ #define _vpQbSoftHand_h_ @@ -43,56 +41,53 @@ #include /*! - - \class vpQbSoftHand - - \ingroup group_robot_haptic - - Interface for qbSoftHand [device](https://qbrobotics.com/products/qb-softhand/). - - See https://qbrobotics.com/ for more details. - - \note Before using this class under Linux (Ubuntu, Debian, Fedora...) it is mandatory to add - user to the dialout group. To do so, you must execute: - \code - $ sudo adduser user_name dialout - \endcode - otherwise you will get an error: - \code - vpQbDevice fails while opening [/dev/ttyUSB0] and sets errno [Permission denied]. - \endcode - - The following example shows how to close and open the SoftHand with a given speed factor and stiffness used to stop -the command applied to the motors when the measured current is larger than the stiffness multiplied by the maximum -allowed current that can be applied to the motors. 
- - \code -#include - -int main() -{ - vpQbSoftHand qbsofthand; - - vpColVector q(1); - - double speed_factor = 0.5; // half speed - double stiffness = 0.7; // 70% of the max allowed current supported by the motors - std::cout << "** Close the hand with blocking positioning function" << std::endl; - q[0] = 1; - qbsofthand.setPosition(q, speed_factor, stiffness); - - std::cout << "** Open the hand with blocking positioning function" << std::endl; - q[0] = 0; - qbsofthand.setPosition(q, speed_factor, stiffness); -} - \endcode - + * \class vpQbSoftHand + * + * \ingroup group_robot_haptic + * + * Interface for qbSoftHand [device](https://qbrobotics.com/products/qb-softhand/). + * + * See https://qbrobotics.com/ for more details. + * + * \note Before using this class under Linux (Ubuntu, Debian, Fedora...) it is mandatory to add + * user to the dialout group. To do so, you must execute: + * \code + * $ sudo adduser user_name dialout + * \endcode + * otherwise you will get an error: + * \code + * vpQbDevice fails while opening [/dev/ttyUSB0] and sets errno [Permission denied]. + * \endcode + * + * The following example shows how to close and open the SoftHand with a given speed factor and stiffness used to stop + * the command applied to the motors when the measured current is larger than the stiffness multiplied by the maximum + * allowed current that can be applied to the motors. + * + * \code + * #include + * + * int main() + * { + * vpQbSoftHand qbsofthand; + * + * vpColVector q(1); + * + * double speed_factor = 0.5; // half speed + * double stiffness = 0.7; // 70% of the max allowed current supported by the motors + * std::cout << "** Close the hand with blocking positioning function" << std::endl; + * q[0] = 1; + * qbsofthand.setPosition(q, speed_factor, stiffness); + * + * std::cout << "** Open the hand with blocking positioning function" << std::endl; + * q[0] = 0; + * qbsofthand.setPosition(q, speed_factor, stiffness); + * } + * \endcode */ class VISP_EXPORT vpQbSoftHand : public vpQbDevice { public: vpQbSoftHand(); - virtual ~vpQbSoftHand(); void getCurrent(vpColVector ¤t, const int &id = 1); void getPosition(vpColVector &position, const int &id = 1); diff --git a/modules/robot/include/visp3/robot/vpRobotAfma4.h b/modules/robot/include/visp3/robot/vpRobotAfma4.h index 32c33c8044..eade24c243 100644 --- a/modules/robot/include/visp3/robot/vpRobotAfma4.h +++ b/modules/robot/include/visp3/robot/vpRobotAfma4.h @@ -219,7 +219,7 @@ class VISP_EXPORT vpRobotAfma4 : public vpAfma4, public vpRobot virtual ~vpRobotAfma4(void); void getDisplacement(vpRobot::vpControlFrameType frame, vpColVector &displacement); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position, double ×tamp); double getPositioningVelocity(void); @@ -236,8 +236,8 @@ class VISP_EXPORT vpRobotAfma4 : public vpAfma4, public vpRobot void get_cMe(vpHomogeneousMatrix &cMe) const; void get_cVe(vpVelocityTwistMatrix &cVe) const; void get_cVf(vpVelocityTwistMatrix &cVf) const; - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void init(void); @@ -250,7 +250,7 @@ class VISP_EXPORT vpRobotAfma4 : public vpAfma4, public vpRobot static bool savePosFile(const std::string &filename, const vpColVector &q); /* --- POSITIONNEMENT 
--------------------------------------------------- */ - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, const double q1, const double q2, const double q4, const double q5); void setPosition(const char *filename); @@ -262,7 +262,7 @@ class VISP_EXPORT vpRobotAfma4 : public vpAfma4, public vpRobot /* --- VITESSE ---------------------------------------------------------- */ - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); }; diff --git a/modules/robot/include/visp3/robot/vpRobotAfma6.h b/modules/robot/include/visp3/robot/vpRobotAfma6.h index 8918fb21ca..41f821b1c8 100644 --- a/modules/robot/include/visp3/robot/vpRobotAfma6.h +++ b/modules/robot/include/visp3/robot/vpRobotAfma6.h @@ -258,7 +258,7 @@ class VISP_EXPORT vpRobotAfma6 : public vpAfma6, public vpRobot void getDisplacement(vpRobot::vpControlFrameType frame, vpColVector &displacement); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position, double ×tamp); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position, double ×tamp); @@ -275,15 +275,15 @@ class VISP_EXPORT vpRobotAfma6 : public vpAfma6, public vpRobot void get_cMe(vpHomogeneousMatrix &_cMe) const; void get_cVe(vpVelocityTwistMatrix &_cVe) const; - void get_eJe(vpMatrix &_eJe); - void get_fJe(vpMatrix &_fJe); + void get_eJe(vpMatrix &_eJe) override; + void get_fJe(vpMatrix &_fJe) override; void init(void); void init(vpAfma6::vpAfma6ToolType tool, const vpHomogeneousMatrix &eMc); void init(vpAfma6::vpAfma6ToolType tool, const std::string &filename); void - init(vpAfma6::vpAfma6ToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpAfma6::vpAfma6ToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void move(const std::string &filename); void move(const std::string &filename, double velocity); @@ -298,7 +298,7 @@ class VISP_EXPORT vpRobotAfma6 : public vpAfma6, public vpRobot /* --- POSITIONNEMENT --------------------------------------------------- */ void setPosition(const vpRobot::vpControlFrameType frame, const vpPoseVector &pose); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const std::string &filename); @@ -311,7 +311,7 @@ class VISP_EXPORT vpRobotAfma6 : public vpAfma6, public vpRobot /* --- VITESSE ---------------------------------------------------------- */ - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); }; 
diff --git a/modules/robot/include/visp3/robot/vpRobotBiclops.h b/modules/robot/include/visp3/robot/vpRobotBiclops.h index 356e3c4c94..e082116672 100644 --- a/modules/robot/include/visp3/robot/vpRobotBiclops.h +++ b/modules/robot/include/visp3/robot/vpRobotBiclops.h @@ -168,7 +168,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \exception vpRobotException::constructionError If the config file cannot be * opened. */ - void init(); + void init() override; /*! * Get the homogeneous matrix corresponding to the transformation between the @@ -198,7 +198,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \param eJe : Jacobian between end effector frame and end effector frame (on * tilt axis). */ - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; /*! * Get the robot jacobian expressed in the robot reference frame @@ -206,7 +206,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \param fJe : Jacobian between reference frame (or fix frame) and end * effector frame (on tilt axis). */ - void get_fJe(vpMatrix &fJe); + void get_fJe(vpMatrix &fJe) override; /*! * Get the robot displacement since the last call of this method. @@ -228,7 +228,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \exception vpRobotException::wrongStateError If a not supported frame type * is given. */ - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &d); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &d) override; /*! * Return the position of each axis. @@ -243,7 +243,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \exception vpRobotException::wrongStateError : If a not supported frame type * is given. */ - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; /*! * Get the velocity in % used for a position control. @@ -315,7 +315,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \exception vpRobotException::wrongStateError : If a not supported frame * type is given. */ - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; /*! * Move the robot in position control. @@ -359,7 +359,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * Change the state of the robot either to stop them, or to set position or * speed control. */ - vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState); + vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState) override; /*! * Send a velocity on each axis. @@ -385,7 +385,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \warning Velocities could be saturated if one of them exceed the maximal * authorized speed (see vpRobot::maxRotationVelocity). */ - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &q_dot); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &q_dot) override; /*! * Halt all the axis. 
diff --git a/modules/robot/include/visp3/robot/vpRobotCamera.h b/modules/robot/include/visp3/robot/vpRobotCamera.h index 06615ffcbe..84be9b7b93 100644 --- a/modules/robot/include/visp3/robot/vpRobotCamera.h +++ b/modules/robot/include/visp3/robot/vpRobotCamera.h @@ -111,27 +111,26 @@ class VISP_EXPORT vpRobotCamera : public vpRobotSimulator public: vpRobotCamera(); - virtual ~vpRobotCamera(); /** @name Inherited functionalities from vpRobotCamera */ //@{ void get_cVe(vpVelocityTwistMatrix &cVe) const; - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; void getPosition(vpHomogeneousMatrix &cMw) const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; void setPosition(const vpHomogeneousMatrix &cMw); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &v); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &v) override; //@} private: - void init(); + void init() override; // Non implemented virtual pure functions - void get_fJe(vpMatrix & /*_fJe */){}; - void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */){}; - void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */){}; + void get_fJe(vpMatrix & /*_fJe */) override { }; + void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */) override { }; + void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */) override { }; }; #endif diff --git a/modules/robot/include/visp3/robot/vpRobotFlirPtu.h b/modules/robot/include/visp3/robot/vpRobotFlirPtu.h index bb9aad8d38..10a14bd979 100644 --- a/modules/robot/include/visp3/robot/vpRobotFlirPtu.h +++ b/modules/robot/include/visp3/robot/vpRobotFlirPtu.h @@ -99,9 +99,9 @@ class VISP_EXPORT vpRobotFlirPtu : public vpRobot void connect(const std::string &portname, int baudrate = 9600); void disconnect(); - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; vpMatrix get_eJe(); - void get_fJe(vpMatrix &fJe); + void get_fJe(vpMatrix &fJe) override; vpMatrix get_fJe(); vpMatrix get_fMe(); @@ -112,13 +112,13 @@ class VISP_EXPORT vpRobotFlirPtu : public vpRobot vpHomogeneousMatrix get_eMc() const { return m_eMc; } vpVelocityTwistMatrix get_cVe() const; - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q) override; std::string getNetworkIP(); std::string getNetworkGateway(); std::string getNetworkHostName(); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; vpColVector getPanPosLimits(); vpColVector getTiltPosLimits(); vpColVector getPanTiltVelMax(); @@ -130,13 +130,13 @@ class VISP_EXPORT vpRobotFlirPtu : public vpRobot If your tool is a camera, this transformation is obtained by hand-eye calibration. 
*/ void set_eMc(vpHomogeneousMatrix &eMc) { m_eMc = eMc; } - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; void setPanPosLimits(const vpColVector &pan_limits); void setTiltPosLimits(const vpColVector &tilt_limits); void setPositioningVelocity(double velocity); vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; void stopMotion(); static void emergencyStop(int signo); diff --git a/modules/robot/include/visp3/robot/vpRobotFranka.h b/modules/robot/include/visp3/robot/vpRobotFranka.h index e38b7ef5a8..8f1d1fd765 100644 --- a/modules/robot/include/visp3/robot/vpRobotFranka.h +++ b/modules/robot/include/visp3/robot/vpRobotFranka.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Interface for the Franka robot. - * -*****************************************************************************/ + */ #ifndef _vpRobotFranka_h_ #define _vpRobotFranka_h_ @@ -57,179 +55,180 @@ #include /*! - \class vpRobotFranka - - \ingroup group_robot_real_arm - - This class is a wrapper over the [libfranka](https://github.com/frankaemika/libfranka) - component part of the [Franka Control Interface](https://frankaemika.github.io/docs/) (FCI). - - Before using vpRobotFranka follow the - [installation instructions](https://frankaemika.github.io/docs/installation.html#) to install - libfranka. We suggest to - [build libfranka from source](https://frankaemika.github.io/docs/installation.html#building-libfranka) - if you are not using ROS. - - Moreover, you need also to setup a real-time kernel following these - [instructions](https://frankaemika.github.io/docs/installation.html#setting-up-the-real-time-kernel). - - Up to now, this class provides the following capabilities to: - - move to a given joint position using setPosition() that is blocking and that returns only when the robot - has reached the desired position. - \code - vpRobotFranka robot("192.168.1.1"); - - vpColVector q_d(7); - q_d[3] = -M_PI_2; - q_d[5] = M_PI_2; - q_d[6] = M_PI_4; - std::cout << "Move to joint position: " << q_d.t() << std::endl; - robot.setPosition(vpRobot::JOINT_STATE, q_d); - \endcode - - move applying a joint velocity using setVelocity(). This function is non-blocking. - \code - vpRobotFranka robot("192.168.1.1"); - - robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - - vpColVector dq_d(7, 0); - dq_d[4] = vpMath::rad(-20.); - dq_d[6] = vpMath::rad(20.); - while(1) { - robot.setVelocity(vpRobot::JOINT_STATE, dq_d); - ... - } - \endcode - - move applying a cartesian velocity to the end-effector using setVelocity(). This function is non-blocking. - \code - vpRobotFranka robot("192.168.1.1"); - - vpColVector ve_d(6); - ve_d[2] = 0.02; // vz = 2 cm/s goes down - - while(1) { - robot.setVelocity(vpRobot::END_EFFECTOR_FRAME, ve_d); - ... - } - \endcode - - move applying a cartesian velocity to the camera frame (or a given tool frame) using setVelocity(). - The camera frame (or a tool frame) location wrt the end-effector is set using set_eMc(). This function is - non-blocking. 
\code vpRobotFranka robot("192.168.1.1"); vpHomogeneousMatrix eMc; // Position of the camera wrt the - end-effector - // update eMc - robot.set_eMc(eMc); - - vpColVector vc_d(6); - vc_d[2] = 0.02; // vz = 2 cm/s is along the camera optical axis - - while(1) { - robot.setVelocity(vpRobot::CAMERA_FRAME, vc_d); - ... - } - \endcode - If the tool attached to the end-effector is not a camera, you can do exactly the same using: - \code - vpRobotFranka robot("192.168.1.1"); - vpHomogeneousMatrix eMt; - // update eMt, the position of the tool wrt the end-effector frame - robot.set_eMc(eMt); - - vpColVector vt_d(6); - vt_d[2] = 0.02; // vt = 2 cm/s is along tool z axis - - while(1) { - robot.setVelocity(vpRobot::TOOL_FRAME, vt_d); - ... - } - \endcode - - - get the joint position using getPosition() - \code - vpRobotFranka robot("192.168.1.1"); - - vpColVector q; - while(1) { - robot.getPosition(vpRobot::JOINT_STATE, q); - ... - } - \endcode - - get the cartesian end-effector position using getPosition(). This function is non-blocking. - \code - vpRobotFranka robot("192.168.1.1"); - - vpPoseVector wPe; - vpHomogeneousMatrix wMe; - while(1) { - robot.getPosition(vpRobot::END_EFFECTOR_FRAME, wPe); - wMe.buildFrom(wPe); - ... - } - \endcode - - get the cartesian camera (or tool) frame position using getPosition(). This function is non-blocking. - \code - vpRobotFranka robot("192.168.1.1"); - vpHomogeneousMatrix eMc; - // update eMc, the position of the camera wrt the end-effector frame - robot.set_eMc(eMc); - - vpPoseVector wPc; - vpHomogeneousMatrix wMc; - while(1) { - robot.getPosition(vpRobot::CAMERA_FRAME, wPc); - wMc.buildFrom(wPc); - ... - } - \endcode - If the tool attached to the end-effector is not a camera, you can do exactly the same using: - \code - vpRobotFranka robot("192.168.1.1"); - vpHomogeneousMatrix eMt; - // update eMt, the position of the tool wrt the end-effector frame - robot.set_eMc(eMt); - - vpPoseVector wPt; - vpHomogeneousMatrix wMt; - while(1) { - robot.getPosition(vpRobot::TOOL_FRAME, wPt); - wMt.buildFrom(wPt); - ... - } - \endcode - - What is not implemented is: - - move to a given cartesian end-effector position - - gripper controller - - force/torque feedback and control - - Known issues: - - sometimes the joint to joint trajectory generator provided by Franka complains about discontinuities. - - We provide also the getHandler() function that allows to access to the robot handler and call the native - [libfranka API](https://frankaemika.github.io/libfranka/index.html) functionalities: - \code - vpRobotFranka robot("192.168.1.1"); - - franka::Robot *handler = robot.getHandler(); - - // Get end-effector cartesian position - std::array pose = handler->readOnce().O_T_EE; - \endcode - - \sa \ref tutorial-franka-pbvs - \sa \ref tutorial-franka-ibvs - -*/ + * \class vpRobotFranka + * + * \ingroup group_robot_real_arm + * + * This class is a wrapper over the [libfranka](https://github.com/frankaemika/libfranka) + * component part of the [Franka Control Interface](https://frankaemika.github.io/docs/) (FCI). + * + * Before using vpRobotFranka follow the + * [installation instructions](https://frankaemika.github.io/docs/installation.html#) to install + * libfranka. We suggest to + * [build libfranka from source](https://frankaemika.github.io/docs/installation.html#building-libfranka) + * if you are not using ROS. 
+ * + * Moreover, you need also to setup a real-time kernel following these + * [instructions](https://frankaemika.github.io/docs/installation.html#setting-up-the-real-time-kernel). + * + * Up to now, this class provides the following capabilities to: + * - move to a given joint position using setPosition() that is blocking and that returns only when the robot + * has reached the desired position. + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * vpColVector q_d(7); + * q_d[3] = -M_PI_2; + * q_d[5] = M_PI_2; + * q_d[6] = M_PI_4; + * std::cout << "Move to joint position: " << q_d.t() << std::endl; + * robot.setPosition(vpRobot::JOINT_STATE, q_d); + * \endcode + * - move applying a joint velocity using setVelocity(). This function is non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); + * + * vpColVector dq_d(7, 0); + * dq_d[4] = vpMath::rad(-20.); + * dq_d[6] = vpMath::rad(20.); + * while(1) { + * robot.setVelocity(vpRobot::JOINT_STATE, dq_d); + * ... + * } + * \endcode + * - move applying a cartesian velocity to the end-effector using setVelocity(). This function is non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * vpColVector ve_d(6); + * ve_d[2] = 0.02; // vz = 2 cm/s goes down + * + * while(1) { + * robot.setVelocity(vpRobot::END_EFFECTOR_FRAME, ve_d); + * ... + * } + * \endcode + * - move applying a cartesian velocity to the camera frame (or a given tool frame) using setVelocity(). + * The camera frame (or a tool frame) location wrt the end-effector is set using set_eMc(). This function is + * non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * vpHomogeneousMatrix eMc; // Position of the camera wrt the end-effector + * // update eMc + * robot.set_eMc(eMc); + * + * vpColVector vc_d(6); + * vc_d[2] = 0.02; // vz = 2 cm/s is along the camera optical axis + * + * while(1) { + * robot.setVelocity(vpRobot::CAMERA_FRAME, vc_d); + * ... + * } + * \endcode + * If the tool attached to the end-effector is not a camera, you can do exactly the same using: + * \code + * vpRobotFranka robot("192.168.1.1"); + * vpHomogeneousMatrix eMt; + * // update eMt, the position of the tool wrt the end-effector frame + * robot.set_eMc(eMt); + * + * vpColVector vt_d(6); + * vt_d[2] = 0.02; // vt = 2 cm/s is along tool z axis + * + * while(1) { + * robot.setVelocity(vpRobot::TOOL_FRAME, vt_d); + * ... + * } + * \endcode + * + * - get the joint position using getPosition() + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * vpColVector q; + * while(1) { + * robot.getPosition(vpRobot::JOINT_STATE, q); + * ... + * } + * \endcode + * - get the cartesian end-effector position using getPosition(). This function is non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * vpPoseVector wPe; + * vpHomogeneousMatrix wMe; + * while(1) { + * robot.getPosition(vpRobot::END_EFFECTOR_FRAME, wPe); + * wMe.buildFrom(wPe); + * ... + * } + * \endcode + * - get the cartesian camera (or tool) frame position using getPosition(). This function is non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * vpHomogeneousMatrix eMc; + * // update eMc, the position of the camera wrt the end-effector frame + * robot.set_eMc(eMc); + * + * vpPoseVector wPc; + * vpHomogeneousMatrix wMc; + * while(1) { + * robot.getPosition(vpRobot::CAMERA_FRAME, wPc); + * wMc.buildFrom(wPc); + * ... 
+ * } + * \endcode + * If the tool attached to the end-effector is not a camera, you can do exactly the same using: + * \code + * vpRobotFranka robot("192.168.1.1"); + * vpHomogeneousMatrix eMt; + * // update eMt, the position of the tool wrt the end-effector frame + * robot.set_eMc(eMt); + * + * vpPoseVector wPt; + * vpHomogeneousMatrix wMt; + * while(1) { + * robot.getPosition(vpRobot::TOOL_FRAME, wPt); + * wMt.buildFrom(wPt); + * ... + * } + * \endcode + * + * What is not implemented is: + * - move to a given cartesian end-effector position + * - gripper controller + * - force/torque feedback and control + * + * Known issues: + * - sometimes the joint to joint trajectory generator provided by Franka complains about discontinuities. + * + * We provide also the getHandler() function that allows to access to the robot handler and call the native + * [libfranka API](https://frankaemika.github.io/libfranka/index.html) functionalities: + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * franka::Robot *handler = robot.getHandler(); + * + * // Get end-effector cartesian position + * std::array pose = handler->readOnce().O_T_EE; + * \endcode + * + * \sa \ref tutorial-franka-pbvs + * \sa \ref tutorial-franka-ibvs + */ class VISP_EXPORT vpRobotFranka : public vpRobot { private: /*! - Copy constructor not allowed. + * Copy constructor not allowed. */ vpRobotFranka(const vpRobotFranka &robot); /*! - This function is not implemented. + * This function is not implemented. */ - void getDisplacement(const vpRobot::vpControlFrameType, vpColVector &){}; + void getDisplacement(const vpRobot::vpControlFrameType, vpColVector &) override { }; void init(); @@ -279,9 +278,9 @@ class VISP_EXPORT vpRobotFranka : public vpRobot vpHomogeneousMatrix get_fMc(const vpColVector &q); vpHomogeneousMatrix get_eMc() const; - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; void get_eJe(const vpColVector &q, vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_fJe(vpMatrix &fJe) override; void get_fJe(const vpColVector &q, vpMatrix &fJe); void getCoriolis(vpColVector &coriolis); @@ -324,7 +323,7 @@ class VISP_EXPORT vpRobotFranka : public vpRobot void getMass(vpMatrix &mass); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &pose); void getVelocity(const vpRobot::vpControlFrameType frame, vpColVector &d_position); @@ -345,11 +344,11 @@ class VISP_EXPORT vpRobotFranka : public vpRobot void setForceTorque(const vpRobot::vpControlFrameType frame, const vpColVector &ft, const double &filter_gain = 0.1, const bool &activate_pi_controller = false); void setLogFolder(const std::string &folder); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPositioningVelocity(double velocity); vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; void stopMotion(); }; diff --git a/modules/robot/include/visp3/robot/vpRobotKinova.h b/modules/robot/include/visp3/robot/vpRobotKinova.h index 803e40c9e1..7d62a0b744 100644 --- a/modules/robot/include/visp3/robot/vpRobotKinova.h +++ 
b/modules/robot/include/visp3/robot/vpRobotKinova.h @@ -91,12 +91,12 @@ class VISP_EXPORT vpRobotKinova : public vpRobot typedef enum { CMD_LAYER_USB, CMD_LAYER_ETHERNET, CMD_LAYER_UNSET } CommandLayer; vpRobotKinova(); - virtual ~vpRobotKinova(); + virtual ~vpRobotKinova() override; int connect(); - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; /*! * Return constant transformation between end-effector and tool frame. @@ -106,8 +106,8 @@ class VISP_EXPORT vpRobotKinova : public vpRobot int getActiveDevice() const { return m_active_device; } int getNumDevices() const { return m_devices_count; } - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q) override; + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &pose); void homing(); @@ -124,7 +124,7 @@ class VISP_EXPORT vpRobotKinova : public vpRobot */ void setCommandLayer(CommandLayer command_layer) { m_command_layer = command_layer; } void setDoF(unsigned int dof); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; /*! * \param[in] plugin_location: Path to Jaco SDK plugins (ie. `Kinova.API.USBCommandLayerUbuntu.so` on * unix-like platform or `CommandLayerWindows.dll` on Windows platform). By default this location is empty, @@ -132,7 +132,7 @@ class VISP_EXPORT vpRobotKinova : public vpRobot * them. */ void setPluginLocation(const std::string &plugin_location) { m_plugin_location = plugin_location; } - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; /*! * Enable or disable verbose mode to print to stdout additional information. * \param[in] verbose : true to enable verbose, false to disable. By default verbose diff --git a/modules/robot/include/visp3/robot/vpRobotPioneer.h b/modules/robot/include/visp3/robot/vpRobotPioneer.h index c020288140..74d6272fa7 100644 --- a/modules/robot/include/visp3/robot/vpRobotPioneer.h +++ b/modules/robot/include/visp3/robot/vpRobotPioneer.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Interface for Pioneer mobile robots based on Aria 3rd party library. - * -*****************************************************************************/ + */ #ifndef VPROBOTPIONEER_H #define VPROBOTPIONEER_H @@ -51,56 +49,54 @@ #include /*! - - \class vpRobotPioneer - - \ingroup group_robot_real_unicycle - - \brief Interface for Pioneer mobile robots based on Aria 3rd party library. - - This class provides a position and speed control interface for Pioneer - mobile robots. It inherits from the Aria ArRobot class. For more information - about the model of the robot, see vpPioneer documentation. - -*/ + * \class vpRobotPioneer + * + * \ingroup group_robot_real_unicycle + * + * \brief Interface for Pioneer mobile robots based on Aria 3rd party library. 
+ * + * This class provides a position and speed control interface for Pioneer + * mobile robots. It inherits from the Aria ArRobot class. For more information + * about the model of the robot, see vpPioneer documentation. + */ class VISP_EXPORT vpRobotPioneer : public vpRobot, public vpPioneer, public ArRobot { private: /* Not allowed functions. */ /*! - Copy constructor not allowed. + * Copy constructor not allowed. */ vpRobotPioneer(const vpRobotPioneer &robot); public: vpRobotPioneer(); - virtual ~vpRobotPioneer(); + virtual ~vpRobotPioneer() override; /*! - Get the robot Jacobian expressed at point E, the point located at the - middle between the two wheels. - - \param eJe : Robot jacobian such as \f$(v_x, w_z) = {^e}{\bf J}e \; {\bf - v}\f$ with \f$(v_x, w_z)\f$ respectively the translational and rotational - control velocities of the mobile robot, \f$\bf v\f$ the six dimention - velocity skew, and where - - \sa get_eJe() - - */ - void get_eJe(vpMatrix &eJe) { eJe = vpUnicycle::get_eJe(); } + * Get the robot Jacobian expressed at point E, the point located at the + * middle between the two wheels. + * + * \param eJe : Robot jacobian such as \f$(v_x, w_z) = {^e}{\bf J}e \; {\bf + * v}\f$ with \f$(v_x, w_z)\f$ respectively the translational and rotational + * control velocities of the mobile robot, \f$\bf v\f$ the six dimension + * velocity skew, and where + * + * \sa get_eJe() + */ + void get_eJe(vpMatrix &eJe) override { eJe = vpUnicycle::get_eJe(); } private: // Set as private since not implemented /*! - Get the robot Jacobian expressed in the robot reference (or world) frame. - \warning Not implemented. - */ - void get_fJe(vpMatrix & /*fJe*/){}; + * Get the robot Jacobian expressed in the robot reference (or world) frame. + * \warning Not implemented. + */ + void get_fJe(vpMatrix & /*fJe*/) override { }; /*! - Get a displacement (frame as to ve specified) between two successive - position control. \warning Not implemented. - */ - void getDisplacement(const vpRobot::vpControlFrameType /*frame*/, vpColVector & /*q*/){}; + * Get a displacement (frame as to ve specified) between two successive + * position control. + * \warning Not implemented. + */ + void getDisplacement(const vpRobot::vpControlFrameType /*frame*/, vpColVector & /*q*/) { }; public: void getVelocity(const vpRobot::vpControlFrameType frame, vpColVector &velocity); @@ -108,27 +104,27 @@ class VISP_EXPORT vpRobotPioneer : public vpRobot, public vpPioneer, public ArRo private: // Set as private since not implemented /*! - Get the robot position (frame has to be specified). - \warning Not implemented. - */ - void getPosition(const vpRobot::vpControlFrameType /*frame*/, vpColVector & /*q*/){}; + * Get the robot position (frame has to be specified). + * \warning Not implemented. + */ + void getPosition(const vpRobot::vpControlFrameType /*frame*/, vpColVector & /*q*/) { }; public: void init(); private: // Set as private since not implemented /*! - Set a displacement (frame has to be specified) in position control. - \warning Not implemented. - */ - void setPosition(const vpRobot::vpControlFrameType /*frame*/, const vpColVector & /*q*/){}; + * Set a displacement (frame has to be specified) in position control. + * \warning Not implemented. 
+ */ + void setPosition(const vpRobot::vpControlFrameType /*frame*/, const vpColVector & /*q*/) override { }; public: - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; /*! - Enable or disable sonar device usage. - */ + * Enable or disable sonar device usage. + */ void useSonar(bool usage) { this->comInt(ArCommands::SONAR, usage); } protected: diff --git a/modules/robot/include/visp3/robot/vpRobotPtu46.h b/modules/robot/include/visp3/robot/vpRobotPtu46.h index 69bba53cc8..4a88e76575 100644 --- a/modules/robot/include/visp3/robot/vpRobotPtu46.h +++ b/modules/robot/include/visp3/robot/vpRobotPtu46.h @@ -99,11 +99,11 @@ class VISP_EXPORT vpRobotPtu46 : public vpPtu46, public vpRobot void get_cMe(vpHomogeneousMatrix &_cMe) const; void get_cVe(vpVelocityTwistMatrix &_cVe) const; - void get_eJe(vpMatrix &_eJe); - void get_fJe(vpMatrix &_fJe); + void get_eJe(vpMatrix &_eJe) override; + void get_fJe(vpMatrix &_fJe) override; void getDisplacement(vpRobot::vpControlFrameType frame, vpColVector &q); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; double getPositioningVelocity(void); void getVelocity(const vpRobot::vpControlFrameType frame, vpColVector &q_dot); vpColVector getVelocity(const vpRobot::vpControlFrameType frame); @@ -112,13 +112,13 @@ class VISP_EXPORT vpRobotPtu46 : public vpPtu46, public vpRobot bool readPositionFile(const std::string &filename, vpColVector &q); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; void setPosition(const vpRobot::vpControlFrameType frame, const double &q1, const double &q2); void setPosition(const char *filename); void setPositioningVelocity(double velocity); vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &q_dot); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &q_dot) override; void stopMotion(); }; diff --git a/modules/robot/include/visp3/robot/vpRobotSimulator.h b/modules/robot/include/visp3/robot/vpRobotSimulator.h index 4e79f6a928..b6d87cbe0d 100644 --- a/modules/robot/include/visp3/robot/vpRobotSimulator.h +++ b/modules/robot/include/visp3/robot/vpRobotSimulator.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Basic class used to make robot simulators. - * -*****************************************************************************/ + */ #ifndef vpRobotSimulator_HH #define vpRobotSimulator_HH /*! - \file vpRobotSimulator.h - \brief Basic class used to make robot simulators. -*/ + * \file vpRobotSimulator.h + * \brief Basic class used to make robot simulators. + */ #include #include @@ -47,15 +45,14 @@ #include /*! - \class vpRobotSimulator - - \ingroup group_robot_simu_gantry group_robot_simu_arm - group_robot_simu_unicycle \ingroup group_robot_simu_camera - - \brief This class aims to be a basis used to create all the - robot simulators. 
- -*/ + * \class vpRobotSimulator + * + * \ingroup group_robot_simu_gantry group_robot_simu_arm + * group_robot_simu_unicycle \ingroup group_robot_simu_camera + * + * \brief This class aims to be a basis used to create all the + * robot simulators. + */ class VISP_EXPORT vpRobotSimulator : public vpRobot { protected: @@ -63,28 +60,23 @@ class VISP_EXPORT vpRobotSimulator : public vpRobot public: vpRobotSimulator(); - /*! - Basic destructor - */ - virtual ~vpRobotSimulator(){}; /** @name Inherited functionalities from vpRobotSimulator */ //@{ /*! - Return the sampling time. - - \return Sampling time in second used to compute the robot displacement - from the velocity applied to the robot during this time. - */ + * Return the sampling time. + * + * \return Sampling time in second used to compute the robot displacement + * from the velocity applied to the robot during this time. + */ inline double getSamplingTime() const { return (this->delta_t_); } /*! - Set the sampling time. - - \param delta_t : Sampling time in second used to compute the robot - displacement from the velocity applied to the robot during this time. - - */ + * Set the sampling time. + * + * \param delta_t : Sampling time in second used to compute the robot + * displacement from the velocity applied to the robot during this time. + */ virtual inline void setSamplingTime(const double &delta_t) { this->delta_t_ = delta_t; } //@} }; diff --git a/modules/robot/include/visp3/robot/vpRobotTemplate.h b/modules/robot/include/visp3/robot/vpRobotTemplate.h index 777998f4fb..c158ad26f8 100644 --- a/modules/robot/include/visp3/robot/vpRobotTemplate.h +++ b/modules/robot/include/visp3/robot/vpRobotTemplate.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Defines a robot just to show which function you must implement. - * -*****************************************************************************/ + */ #ifndef vpRobotTemplate_h #define vpRobotTemplate_h /*! - \file vpRobotTemplate.h - Defines a robot just to show which function you must implement. -*/ + * \file vpRobotTemplate.h + * Defines a robot just to show which function you must implement. + */ #include @@ -47,40 +45,38 @@ #include /*! - - \class vpRobotTemplate - \ingroup group_robot_real_template - \brief Class that defines a robot just to show which function you must implement. - -*/ + * \class vpRobotTemplate + * \ingroup group_robot_real_template + * \brief Class that defines a robot just to show which function you must implement. + */ class VISP_EXPORT vpRobotTemplate : public vpRobot { public: vpRobotTemplate(); - virtual ~vpRobotTemplate(); + virtual ~vpRobotTemplate() override; - void get_eJe(vpMatrix &eJe_); - void get_fJe(vpMatrix &fJe_); + void get_eJe(vpMatrix &eJe_) override; + void get_fJe(vpMatrix &fJe_) override; /*! - Return constant transformation between end-effector and tool frame. - If your tool is a camera, this transformation is obtained by hand-eye calibration. + * Return constant transformation between end-effector and tool frame. + * If your tool is a camera, this transformation is obtained by hand-eye calibration. 
*/ vpHomogeneousMatrix get_eMc() const { return m_eMc; } - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q) override; + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; /*! - Set constant transformation between end-effector and tool frame. - If your tool is a camera, this transformation is obtained by hand-eye calibration. + * Set constant transformation between end-effector and tool frame. + * If your tool is a camera, this transformation is obtained by hand-eye calibration. */ void set_eMc(vpHomogeneousMatrix &eMc) { m_eMc = eMc; } - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; protected: - void init(); + void init() override; void getJointPosition(vpColVector &q); void setCartVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &v); void setJointVelocity(const vpColVector &qdot); diff --git a/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h b/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h index b1e2eb8716..30651d1c23 100644 --- a/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h +++ b/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h @@ -95,7 +95,7 @@ class VISP_EXPORT vpRobotUniversalRobots : public vpRobot void getForceTorque(const vpRobot::vpControlFrameType frame, vpColVector &force); std::string getPolyScopeVersion(); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &pose); int getRobotMode() const; std::string getRobotModel() const; @@ -105,12 +105,12 @@ class VISP_EXPORT vpRobotUniversalRobots : public vpRobot bool readPosFile(const std::string &filename, vpColVector &q); bool savePosFile(const std::string &filename, const vpColVector &q); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, const vpPoseVector &pose); void setPositioningVelocity(double velocity); vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; void set_eMc(const vpHomogeneousMatrix &eMc); @@ -118,9 +118,9 @@ class VISP_EXPORT vpRobotUniversalRobots : public vpRobot private: // Not implemented yet - void get_eJe(vpMatrix &_eJe){}; - void get_fJe(vpMatrix &_fJe){}; - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q){}; + void get_eJe(vpMatrix &_eJe) override { }; + void get_fJe(vpMatrix &_fJe) override { }; + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q) override { }; protected: void init(); diff --git a/modules/robot/include/visp3/robot/vpRobotViper650.h 
b/modules/robot/include/visp3/robot/vpRobotViper650.h index 0066eb88a0..cf51cf57bb 100644 --- a/modules/robot/include/visp3/robot/vpRobotViper650.h +++ b/modules/robot/include/visp3/robot/vpRobotViper650.h @@ -336,7 +336,8 @@ class VISP_EXPORT vpRobotViper650 : public vpViper650, public vpRobot public: /* Constantes */ /*! \enum vpControlModeType Control mode. */ - typedef enum { + typedef enum + { AUTO, //!< Automatic control mode (default). MANUAL, //!< Manual control mode activated when the dead man switch is in //!< use. @@ -421,13 +422,13 @@ class VISP_EXPORT vpRobotViper650 : public vpViper650, public vpRobot void get_cMe(vpHomogeneousMatrix &cMe) const; void get_cVe(vpVelocityTwistMatrix &cVe) const; - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void init(void); void - init(vpViper650::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper650::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void init(vpViper650::vpToolType tool, const std::string &filename); void init(vpViper650::vpToolType tool, const vpHomogeneousMatrix &eMc_); @@ -448,7 +449,7 @@ class VISP_EXPORT vpRobotViper650 : public vpViper650, public vpRobot void setMaxRotationVelocityJoint6(double w6_max); // Position control - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const std::string &filename); @@ -457,7 +458,7 @@ class VISP_EXPORT vpRobotViper650 : public vpViper650, public vpRobot // State vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); // Velocity control - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); diff --git a/modules/robot/include/visp3/robot/vpRobotViper850.h b/modules/robot/include/visp3/robot/vpRobotViper850.h index 267858d7f0..145d633729 100644 --- a/modules/robot/include/visp3/robot/vpRobotViper850.h +++ b/modules/robot/include/visp3/robot/vpRobotViper850.h @@ -342,7 +342,8 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot public: /* Constantes */ /*! \enum vpControlModeType Control mode. */ - typedef enum { + typedef enum + { AUTO, //!< Automatic control mode (default). MANUAL, //!< Manual control mode activated when the dead man switch is in //!< use. 
@@ -414,7 +415,7 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot vpColVector getForceTorque() const; double getMaxRotationVelocityJoint6() const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position, double &timestamp); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position, double &timestamp); @@ -432,13 +433,13 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot void get_cMe(vpHomogeneousMatrix &cMe) const; void get_cVe(vpVelocityTwistMatrix &cVe) const; - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void init(void); void - init(vpViper850::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper850::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void init(vpViper850::vpToolType tool, const std::string &filename); void init(vpViper850::vpToolType tool, const vpHomogeneousMatrix &eMc_); @@ -459,7 +460,7 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot void setMaxRotationVelocityJoint6(double w6_max); // Position control - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const std::string &filename); @@ -469,7 +470,7 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); // Velocity control - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); void unbiasForceTorqueSensor(); diff --git a/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h b/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h index 19127ce6e4..f2fac6d886 100644 --- a/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h +++ b/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Basic class used to make robot simulators. - * -*****************************************************************************/ + */ #ifndef vpRobotWireFrameSimulator_HH #define vpRobotWireFrameSimulator_HH /*! - \file vpRobotWireFrameSimulator.h - \brief Basic class used to make robot simulators. -*/ + * \file vpRobotWireFrameSimulator.h + * \brief Basic class used to make robot simulators. + */ #include @@ -67,22 +65,22 @@ #include /*!
- \class vpRobotWireFrameSimulator - - \ingroup group_robot_simu_gantry group_robot_simu_arm - - \brief This class aims to be a basis used to create all the - simulators of robots. - - Thus in this class you will find all the parameters and methods - which are necessary to create a simulator. Several methods are pure - virtual. In this case it means that they are specific to the each - robot, for example the computation of the geometrical model. - - \warning This class uses threading capabilities. Thus on Unix-like - platforms, the libpthread third-party library need to be - installed. On Windows, we use the native threading capabilities. -*/ + * \class vpRobotWireFrameSimulator + * + * \ingroup group_robot_simu_gantry group_robot_simu_arm + * + * \brief This class aims to be a basis used to create all the + * simulators of robots. + * + * Thus in this class you will find all the parameters and methods + * which are necessary to create a simulator. Several methods are pure + * virtual. In this case it means that they are specific to each + * robot, for example the computation of the geometrical model. + * + * \warning This class uses threading capabilities. Thus on Unix-like + * platforms, the libpthread third-party library needs to be + * installed. On Windows, we use the native threading capabilities. + */ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, public vpRobotSimulator { public: @@ -178,15 +176,14 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu public: vpRobotWireFrameSimulator(); explicit vpRobotWireFrameSimulator(bool display); - virtual ~vpRobotWireFrameSimulator(); /** @name Inherited functionalities from vpRobotWireFrameSimulator */ //@{ /*! - Get the parameters of the virtual external camera. - - \return It returns the camera parameters. - */ + * Get the parameters of the virtual external camera. + * + * \return It returns the camera parameters. + */ vpCameraParameters getExternalCameraParameters() const { // if(px_ext != 1 && py_ext != 1) @@ -200,12 +197,12 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu } } /*! - Get the external camera's position relative to the the world reference - frame. - - \return the main external camera position relative to the the world - reference frame. - */ + * Get the external camera's position relative to the world reference + * frame. + * + * \return the main external camera position relative to the world + * reference frame. + */ vpHomogeneousMatrix getExternalCameraPosition() const { return this->vpWireFrameSimulator::getExternalCameraPosition(); @@ -216,10 +213,10 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu vpHomogeneousMatrix get_cMo(); /*! - Get the pose between the object and the fixed world frame. - - \return The pose between the object and the fixed world frame. - */ + * Get the pose between the object and the fixed world frame. + * + * \return The pose between the object and the fixed world frame. + */ vpHomogeneousMatrix get_fMo() const { return fMo; } /* Display functions */ @@ -229,106 +226,108 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu void initScene(const char *obj); /*! - Set the color used to display the camera in the external view. - - \param col : The desired color. - */ + * Set the color used to display the camera in the external view. + * + * \param col : The desired color.
+ */ void setCameraColor(const vpColor &col) { camColor = col; } /*! - Set the flag used to force the sampling time in the thread computing the - robot's displacement to a constant value; see setSamplingTime(). It may be - useful if the main thread (computing the features) is very time consuming. - False by default. - - \param _constantSamplingTimeMode : The new value of the - constantSamplingTimeMode flag. - */ + * Set the flag used to force the sampling time in the thread computing the + * robot's displacement to a constant value; see setSamplingTime(). It may be + * useful if the main thread (computing the features) is very time consuming. + * False by default. + * + * \param _constantSamplingTimeMode : The new value of the + * constantSamplingTimeMode flag. + */ inline void setConstantSamplingTimeMode(const bool _constantSamplingTimeMode) { constantSamplingTimeMode = _constantSamplingTimeMode; } /*! - Set the color used to display the object at the current position in the - robot's camera view. - - \param col : The desired color. - */ + * Set the color used to display the object at the current position in the + * robot's camera view. + * + * \param col : The desired color. + */ void setCurrentViewColor(const vpColor &col) { curColor = col; } /*! - Set the color used to display the object at the desired position in the - robot's camera view. - - \param col : The desired color. - */ + * Set the color used to display the object at the desired position in the + * robot's camera view. + * + * \param col : The desired color. + */ void setDesiredViewColor(const vpColor &col) { desColor = col; } /*! - Set the desired position of the robot's camera relative to the object. - - \param cdMo_ : The desired pose of the camera. - */ + * Set the desired position of the robot's camera relative to the object. + * + * \param cdMo_ : The desired pose of the camera. + */ void setDesiredCameraPosition(const vpHomogeneousMatrix &cdMo_) { this->vpWireFrameSimulator::setDesiredCameraPosition(cdMo_); } /*! - Set the way to draw the robot. Depending on what you choose you can - display a 3D wireframe model or a set of lines linking the frames used to - compute the geometrical model. - - \param dispType : Type of display. Can be MODEL_3D or MODEL_DH. - */ + * Set the way to draw the robot. Depending on what you choose you can + * display a 3D wireframe model or a set of lines linking the frames used to + * compute the geometrical model. + * + * \param dispType : Type of display. Can be MODEL_3D or MODEL_DH. + */ inline void setDisplayRobotType(const vpDisplayRobotType dispType) { displayType = dispType; } /*! - Set the external camera point of view. - - \param camMf_ : The pose of the external camera relative to the world - reference frame. - */ + * Set the external camera point of view. + * + * \param camMf_ : The pose of the external camera relative to the world + * reference frame. + */ void setExternalCameraPosition(const vpHomogeneousMatrix &camMf_) { this->vpWireFrameSimulator::setExternalCameraPosition(camMf_); } /*! - Specify the thickness of the graphics drawings. - */ + * Specify the thickness of the graphics drawings. + */ void setGraphicsThickness(unsigned int thickness) { this->thickness_ = thickness; } /*! - Set the sampling time. - - \param delta_t : Sampling time in second used to compute the robot - displacement from the velocity applied to the robot during this time. - - Since the wireframe simulator is threaded, the sampling time is set to - vpTime::getMinTimeForUsleepCall() / 1000 seconds. 
- - */ + * Set the sampling time. + * + * \param delta_t : Sampling time in second used to compute the robot + * displacement from the velocity applied to the robot during this time. + * + * Since the wireframe simulator is threaded, the sampling time is set to + * vpTime::getMinTimeForUsleepCall() / 1000 seconds. + */ inline void setSamplingTime(const double &delta_t) { if (delta_t < static_cast(vpTime::getMinTimeForUsleepCall() * 1e-3)) { this->delta_t_ = static_cast(vpTime::getMinTimeForUsleepCall() * 1e-3); - } else { + } + else { this->delta_t_ = delta_t; } } - /*! Set the parameter which enable or disable the singularity mangement */ + /*! + * Set the parameter which enable or disable the singularity management. + */ void setSingularityManagement(bool sm) { singularityManagement = sm; } /*! - Activates extra printings when the robot reaches joint limits... - */ + * Activates extra printings when the robot reaches joint limits... + */ void setVerbose(bool verbose) { this->verbose_ = verbose; } /*! - Set the pose between the object and the fixed world frame. - - \param fMo_ : The pose between the object and the fixed world frame. - */ + * Set the pose between the object and the fixed world frame. + * + * \param fMo_ : The pose between the object and the fixed world frame. + */ void set_fMo(const vpHomogeneousMatrix &fMo_) { this->fMo = fMo_; } //@} @@ -337,8 +336,8 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu */ //@{ /*! - Function used to launch the thread which moves the robot. -*/ + * Function used to launch the thread which moves the robot. + */ #if defined(_WIN32) static DWORD WINAPI launcher(LPVOID lpParam) { @@ -354,8 +353,6 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu } #endif - /* Robot functions */ - void init() { ; } /*! Method lauched by the thread to compute the position of the robot in the * articular frame. */ virtual void updateArticularPosition() = 0; diff --git a/modules/robot/include/visp3/robot/vpSimulatorAfma6.h b/modules/robot/include/visp3/robot/vpSimulatorAfma6.h index 739b76911e..3a38780376 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorAfma6.h +++ b/modules/robot/include/visp3/robot/vpSimulatorAfma6.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Class which provides a simulator for the robot Afma6. - * -*****************************************************************************/ + */ #ifndef vpSimulatorAfma6_HH #define vpSimulatorAfma6_HH /*! - \file vpSimulatorAfma6.h - \brief Class which provides a simulator for the robot Afma6. -*/ + * \file vpSimulatorAfma6.h + * \brief Class which provides a simulator for the robot Afma6. + */ #include #include @@ -49,128 +47,126 @@ #if defined(VISP_HAVE_MODULE_GUI) && ((defined(_WIN32) && !defined(WINRT_8_0)) || defined(VISP_HAVE_PTHREAD)) /*! - \class vpSimulatorAfma6 - - \ingroup group_robot_simu_gantry - - \brief Simulator of Irisa's gantry robot named Afma6. - - Implementation of the vpRobotWireFrameSimulator class in order to simulate - Irisa's Afma6 robot. This robot is a gantry robot with six degrees of freedom - manufactured in 1992 by the french Afma-Robots company. - - \warning This class uses threading capabilities. Thus on Unix-like - platforms, the libpthread third-party library need to be - installed. 
On Windows, we use the native threading capabilities. - - This class allows to control the Afma6 gantry robot in position - and velocity: - - in the joint space (vpRobot::ARTICULAR_FRAME), - - in the fixed reference frame (vpRobot::REFERENCE_FRAME), - - in the camera frame (vpRobot::CAMERA_FRAME), - - or in a mixed frame (vpRobot::MIXT_FRAME) where translations are expressed - in the reference frame and rotations in the camera frame. - - End-effector frame (vpRobot::END_EFFECTOR_FRAME) is not implemented. - - All the translations are expressed in meters for positions and m/s - for the velocities. Rotations are expressed in radians for the - positions, and rad/s for the rotation velocities. - - The direct and inverse kinematics models are implemented in the - vpAfma6 class. - - To control the robot in position, you may set the controller - to position control and then send the position to reach in a specific - frame like here in the joint space: - - \code -#include -#include -#include -#include - -int main() -{ - vpSimulatorAfma6 robot; - - robot.init(vpAfma6::TOOL_CCMOP, vpCameraParameters::perspectiveProjWithoutDistortion); - - vpColVector q(6); - // Set a joint position - q[0] = 0.1; // Joint 1 position, in meter - q[1] = 0.2; // Joint 2 position, in meter - q[2] = 0.3; // Joint 3 position, in meter - q[3] = M_PI/8; // Joint 4 position, in rad - q[4] = M_PI/4; // Joint 5 position, in rad - q[5] = M_PI; // Joint 6 position, in rad - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); - - // Moves the robot in the joint space - robot.setPosition(vpRobot::ARTICULAR_FRAME, q); - - return 0; -} - \endcode - - To control the robot in velocity, you may set the controller to - velocity control and then send the velocities. To end the velocity - control and stop the robot you have to set the controller to the - stop state. Here is an example of a velocity control in the joint - space: - - \code -#include -#include -#include - -int main() -{ - vpSimulatorAfma6 robot; - - robot.init(vpAfma6::TOOL_GRIPPER, vpCameraParameters::perspectiveProjWithoutDistortion); - - vpColVector qvel(6); - // Set a joint velocity - qvel[0] = 0.1; // Joint 1 velocity in m/s - qvel[1] = 0.1; // Joint 2 velocity in m/s - qvel[2] = 0.1; // Joint 3 velocity in m/s - qvel[3] = M_PI/8; // Joint 4 velocity in rad/s - qvel[4] = 0; // Joint 5 velocity in rad/s - qvel[5] = 0; // Joint 6 velocity in rad/s - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - - for ( ; ; ) { - // Apply a velocity in the joint space - robot.setVelocity(vpRobot::ARTICULAR_FRAME, qvel); - - // Compute new velocities qvel... - } - - // Stop the robot - robot.setRobotState(vpRobot::STATE_STOP); - - return 0; -} - \endcode - - It is also possible to measure the robot current position with - getPosition() method and the robot current velocities with the getVelocity() - method. - - For convenience, there is also the ability to read/write joint - positions from a position file with readPosFile() and savePosFile() - methods. - - To know how this class can be used to achieve a visual servoing simulation, - you can follow the \ref tutorial-ibvs. - -*/ - + * \class vpSimulatorAfma6 + * + * \ingroup group_robot_simu_gantry + * + * \brief Simulator of Irisa's gantry robot named Afma6. + * + * Implementation of the vpRobotWireFrameSimulator class in order to simulate + * Irisa's Afma6 robot. 
This robot is a gantry robot with six degrees of freedom + * manufactured in 1992 by the French Afma-Robots company. + * + * \warning This class uses threading capabilities. Thus on Unix-like + * platforms, the libpthread third-party library needs to be + * installed. On Windows, we use the native threading capabilities. + * + * This class allows to control the Afma6 gantry robot in position + * and velocity: + * - in the joint space (vpRobot::ARTICULAR_FRAME), + * - in the fixed reference frame (vpRobot::REFERENCE_FRAME), + * - in the camera frame (vpRobot::CAMERA_FRAME), + * - or in a mixed frame (vpRobot::MIXT_FRAME) where translations are expressed + * in the reference frame and rotations in the camera frame. + * + * End-effector frame (vpRobot::END_EFFECTOR_FRAME) is not implemented. + * + * All the translations are expressed in meters for positions and m/s + * for the velocities. Rotations are expressed in radians for the + * positions, and rad/s for the rotation velocities. + * + * The direct and inverse kinematics models are implemented in the + * vpAfma6 class. + * + * To control the robot in position, you may set the controller + * to position control and then send the position to reach in a specific + * frame like here in the joint space: + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorAfma6 robot; + * + * robot.init(vpAfma6::TOOL_CCMOP, vpCameraParameters::perspectiveProjWithoutDistortion); + * + * vpColVector q(6); + * // Set a joint position + * q[0] = 0.1; // Joint 1 position, in meter + * q[1] = 0.2; // Joint 2 position, in meter + * q[2] = 0.3; // Joint 3 position, in meter + * q[3] = M_PI/8; // Joint 4 position, in rad + * q[4] = M_PI/4; // Joint 5 position, in rad + * q[5] = M_PI; // Joint 6 position, in rad + * + * // Initialize the controller to position control + * robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); + * + * // Moves the robot in the joint space + * robot.setPosition(vpRobot::ARTICULAR_FRAME, q); + * + * return 0; + * } + * \endcode + * + * To control the robot in velocity, you may set the controller to + * velocity control and then send the velocities. To end the velocity + * control and stop the robot you have to set the controller to the + * stop state. Here is an example of a velocity control in the joint + * space: + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorAfma6 robot; + * + * robot.init(vpAfma6::TOOL_GRIPPER, vpCameraParameters::perspectiveProjWithoutDistortion); + * + * vpColVector qvel(6); + * // Set a joint velocity + * qvel[0] = 0.1; // Joint 1 velocity in m/s + * qvel[1] = 0.1; // Joint 2 velocity in m/s + * qvel[2] = 0.1; // Joint 3 velocity in m/s + * qvel[3] = M_PI/8; // Joint 4 velocity in rad/s + * qvel[4] = 0; // Joint 5 velocity in rad/s + * qvel[5] = 0; // Joint 6 velocity in rad/s + * + * // Initialize the controller to velocity control + * robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); + * + * for ( ; ; ) { + * // Apply a velocity in the joint space + * robot.setVelocity(vpRobot::ARTICULAR_FRAME, qvel); + * + * // Compute new velocities qvel... + * } + * + * // Stop the robot + * robot.setRobotState(vpRobot::STATE_STOP); + * + * return 0; + * } + * \endcode + * + * It is also possible to measure the robot current position with + * the getPosition() method and the robot current velocities with the getVelocity() + * method. 
+ * + * For convenience, there is also the ability to read/write joint + * positions from a position file with readPosFile() and savePosFile() + * methods. + * + * To know how this class can be used to achieve a visual servoing simulation, + * you can follow the \ref tutorial-ibvs. + */ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vpAfma6 { public: @@ -191,13 +187,13 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp public: vpSimulatorAfma6(); explicit vpSimulatorAfma6(bool display); - virtual ~vpSimulatorAfma6(); + virtual ~vpSimulatorAfma6() override; void getCameraParameters(vpCameraParameters &cam, const unsigned int &image_width, const unsigned int &image_height); void getCameraParameters(vpCameraParameters &cam, const vpImage &I); void getCameraParameters(vpCameraParameters &cam, const vpImage &I); - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &displacement); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &displacement) override; + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q, double &timestamp); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position, double &timestamp); @@ -209,12 +205,12 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp void get_cMe(vpHomogeneousMatrix &cMe); void get_cVe(vpVelocityTwistMatrix &cVe); - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void - init(vpAfma6::vpAfma6ToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpAfma6::vpAfma6ToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); bool initialiseCameraRelativeToObject(const vpHomogeneousMatrix &cMo); void initialiseObjectRelativeToCamera(const vpHomogeneousMatrix &cMo); @@ -225,26 +221,26 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp void setCameraParameters(const vpCameraParameters &cam); void setJointLimit(const vpColVector &limitMin, const vpColVector &limitMax); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const char *filename); void setPositioningVelocity(double vel) { positioningVelocity = vel; } bool setPosition(const vpHomogeneousMatrix &cdMo, vpImage *Iint = NULL, const double &errMax = 0.001); - vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState); + vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState) override; - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); protected: /** @name Protected Member Functions Inherited from vpSimulatorAfma6 */ //@{ - void
computeArticularVelocity(); + void computeArticularVelocity() override; void compute_fMi(); void findHighestPositioningSpeed(vpColVector &q); void getExternalImage(vpImage &I); - inline void get_fMi(vpHomogeneousMatrix *fMit) + inline void get_fMi(vpHomogeneousMatrix *fMit) override { m_mutex_fMi.lock(); for (int i = 0; i < 8; i++) { @@ -253,12 +249,12 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp m_mutex_fMi.unlock(); } - void init(); - void initArms(); + void init() override; + void initArms() override; void initDisplay(); - int isInJointLimit(void); + int isInJointLimit() override; bool singularityTest(const vpColVector &q, vpMatrix &J); - void updateArticularPosition(); + void updateArticularPosition() override; //@} }; diff --git a/modules/robot/include/visp3/robot/vpSimulatorCamera.h b/modules/robot/include/visp3/robot/vpSimulatorCamera.h index 451c5c2289..a3f474f4e3 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorCamera.h +++ b/modules/robot/include/visp3/robot/vpSimulatorCamera.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Defines the simplest robot : a free flying camera. - * -*****************************************************************************/ + */ #ifndef vpSimulatorCamera_H #define vpSimulatorCamera_H /*! - \file vpSimulatorCamera.h - \brief class that defines the simplest robot : a free flying camera -*/ + * \file vpSimulatorCamera.h + * \brief class that defines the simplest robot : a free flying camera + */ #include #include @@ -48,59 +46,59 @@ #include /*! - \class vpSimulatorCamera - - \ingroup group_robot_simu_Camera - - \brief Class that defines the simplest robot: a free flying camera. - - This free flying camera has 6 dof; 3 in translation and 3 in rotation. - It evolves as a gentry robot with respect to a world frame. This class - is similar to vpRobotCamera class except that here the position of the robot - is provided as the transformation from world frame to camera frame; wMc. -This representation is more intuitive than the one implemented in -vpRobotCamera where the transformation from camera to world frame is -considered; cMw. - - For this particular simulated robot, the end-effector and camera frame are -confused. That means that the cMe transformation is equal to identity. - - The robot jacobian expressed in the end-effector frame - \f$ {^e}{\bf J}_e \f$ is also set to identity (see get_eJe()). - - The following code shows how to control this robot in position and velocity. 
- \code -#include - -int main() -{ - vpHomogeneousMatrix wMc; - vpSimulatorCamera robot; - - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; - - wMc[2][3] = 1.; // Camera frame is 1 meter along z axis in front of the world frame - robot.setPosition(wMc); // Set the new position of the camera in the world frame - std::cout << "New position of the camera in the world frame wMc:\n" << wMc << std::endl; - - robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second - robot.setMaxTranslationVelocity(1.); // vx, vy and vz max set to 1 m/s - robot.setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s - - vpColVector v(6); - v = 0; - v[2] = 1.; // set v_z to 1 m/s - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - // The robot has moved from 0.1 meters along the z axis - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "New position of the camera wMc:\n" << wMc << std::endl; -} - \endcode - - To know how this class can be used to achieve a visual servoing simulation, - you can follow the \ref tutorial-ibvs. -*/ + * \class vpSimulatorCamera + * + * \ingroup group_robot_simu_Camera + * + * \brief Class that defines the simplest robot: a free flying camera. + * + * This free flying camera has 6 dof; 3 in translation and 3 in rotation. + * It evolves as a gantry robot with respect to a world frame. This class + * is similar to vpRobotCamera class except that here the position of the robot + * is provided as the transformation from world frame to camera frame; wMc. + * This representation is more intuitive than the one implemented in + * vpRobotCamera where the transformation from camera to world frame is + * considered; cMw. + * + * For this particular simulated robot, the end-effector and camera frames + * coincide. That means that the cMe transformation is equal to identity. + * + * The robot jacobian expressed in the end-effector frame + * \f$ {^e}{\bf J}_e \f$ is also set to identity (see get_eJe()). + * + * The following code shows how to control this robot in position and velocity. + * \code + * #include + * + * int main() + * { + * vpHomogeneousMatrix wMc; + * vpSimulatorCamera robot; + * + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; + * + * wMc[2][3] = 1.; // Camera frame is 1 meter along z axis in front of the world frame + * robot.setPosition(wMc); // Set the new position of the camera in the world frame + * std::cout << "New position of the camera in the world frame wMc:\n" << wMc << std::endl; + * + * robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second + * robot.setMaxTranslationVelocity(1.); // vx, vy and vz max set to 1 m/s + * robot.setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s + * + * vpColVector v(6); + * v = 0; + * v[2] = 1.; // set v_z to 1 m/s + * robot.setVelocity(vpRobot::CAMERA_FRAME, v); + * // The robot has moved by 0.1 meters along the z axis + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "New position of the camera wMc:\n" << wMc << std::endl; + * } + * \endcode + * + * To know how this class can be used to achieve a visual servoing simulation, + * you can follow the \ref tutorial-ibvs.
+ */ class VISP_EXPORT vpSimulatorCamera : public vpRobotSimulator { protected: @@ -108,28 +106,27 @@ class VISP_EXPORT vpSimulatorCamera : public vpRobotSimulator public: vpSimulatorCamera(); - virtual ~vpSimulatorCamera(); public: /** @name Inherited functionalities from vpSimulatorCamera */ //@{ void get_cVe(vpVelocityTwistMatrix &cVe) const; - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; vpHomogeneousMatrix getPosition() const; void getPosition(vpHomogeneousMatrix &wMc) const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; void setPosition(const vpHomogeneousMatrix &wMc); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; //@} private: - void init(); + void init() override; // Non implemented virtual pure functions - void get_fJe(vpMatrix & /*_fJe */){}; - void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */){}; - void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */){}; + void get_fJe(vpMatrix & /*_fJe */) override { }; + void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */) override { }; + void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */) override { }; }; #endif diff --git a/modules/robot/include/visp3/robot/vpSimulatorPioneer.h b/modules/robot/include/visp3/robot/vpSimulatorPioneer.h index 947fcb29be..e58064e504 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorPioneer.h +++ b/modules/robot/include/visp3/robot/vpSimulatorPioneer.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,16 @@ * * Description: * Pioneer mobile robot simulator without display. - * -*****************************************************************************/ + */ #ifndef vpSimulatorPioneer_H #define vpSimulatorPioneer_H /*! - \file vpSimulatorPioneer.h - \brief class that defines the Pioneer mobile robot simulator equipped with a - static camera. -*/ + * \file vpSimulatorPioneer.h + * \brief class that defines the Pioneer mobile robot simulator equipped with a + * static camera. + */ #include #include @@ -50,53 +48,52 @@ #include /*! - \class vpSimulatorPioneer - - \ingroup group_robot_simu_unicycle - - \brief Class that defines the Pioneer mobile robot simulator equipped with a -static camera. - - It intends to simulate the mobile robot described in vpPioneer class. - This robot has 2 dof: \f$(v_x, w_z)\f$, the translational and - rotational velocities that are applied at point E. - - The robot position evolves with respect to a world frame; wMc. When a new -joint velocity is applied to the robot using setVelocity(), the position of -the camera wrt the world frame is updated. - - \image html pioneer.png - - The following code shows how to control this robot in position and velocity. 
- \code -#include - -int main() -{ - vpHomogeneousMatrix wMc; - vpSimulatorPioneer robot; - - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; - - robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second - robot.setMaxTranslationVelocity(1.); // vx max set to 1 m/s - robot.setMaxRotationVelocity(vpMath::rad(90)); // wz max set to 90 deg/s - - vpColVector v(2); // we control vx and wz dof - v = 0; - v[0] = 1.; // set vx to 1 m/s - robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); - // The robot has moved from 0.1 meters along the z axis - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "New position of the camera wMc:\n" << wMc << std::endl; -} - \endcode - - The usage of this class is also highlighted in \ref -tutorial-simu-robot-pioneer. - -*/ + * \class vpSimulatorPioneer + * + * \ingroup group_robot_simu_unicycle + * + * \brief Class that defines the Pioneer mobile robot simulator equipped with a + * static camera. + * + * It intends to simulate the mobile robot described in vpPioneer class. + * This robot has 2 dof: \f$(v_x, w_z)\f$, the translational and + * rotational velocities that are applied at point E. + * + * The robot position evolves with respect to a world frame; wMc. When a new + * joint velocity is applied to the robot using setVelocity(), the position of + * the camera wrt the world frame is updated. + * + * \image html pioneer.png + * + * The following code shows how to control this robot in position and velocity. + * \code + * #include + * + * int main() + * { + * vpHomogeneousMatrix wMc; + * vpSimulatorPioneer robot; + * + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; + * + * robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second + * robot.setMaxTranslationVelocity(1.); // vx max set to 1 m/s + * robot.setMaxRotationVelocity(vpMath::rad(90)); // wz max set to 90 deg/s + * + * vpColVector v(2); // we control vx and wz dof + * v = 0; + * v[0] = 1.; // set vx to 1 m/s + * robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); + * // The robot has moved from 0.1 meters along the z axis + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "New position of the camera wMc:\n" << wMc << std::endl; + * } + * \endcode + * + * The usage of this class is also highlighted in \ref + * tutorial-simu-robot-pioneer. 
+ */ class VISP_EXPORT vpSimulatorPioneer : public vpPioneer, public vpRobotSimulator { @@ -114,25 +111,24 @@ class VISP_EXPORT vpSimulatorPioneer : public vpPioneer, public vpRobotSimulator public: vpSimulatorPioneer(); - virtual ~vpSimulatorPioneer(); public: /** @name Inherited functionalities from vpSimulatorPioneer */ //@{ - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; void getPosition(vpHomogeneousMatrix &wMc) const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; //@} private: - void init(); + void init() override; // Non implemented virtual pure functions - void get_fJe(vpMatrix & /*_fJe */){}; - void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */){}; - void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */){}; + void get_fJe(vpMatrix & /*_fJe */) override { }; + void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */) override { }; + void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */) override { }; }; #endif diff --git a/modules/robot/include/visp3/robot/vpSimulatorPioneerPan.h b/modules/robot/include/visp3/robot/vpSimulatorPioneerPan.h index 21cd005b00..25c8e2ac59 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorPioneerPan.h +++ b/modules/robot/include/visp3/robot/vpSimulatorPioneerPan.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,16 @@ * * Description: * Pioneer mobile robot equipped with a pan head simulator without display. - * -*****************************************************************************/ + */ #ifndef vpSimulatorPioneerPan_H #define vpSimulatorPioneerPan_H /*! - \file vpSimulatorPioneerPan.h - \brief class that defines the Pioneer mobile robot simulator equipped - with a camera able to move in pan. -*/ + * \file vpSimulatorPioneerPan.h + * \brief class that defines the Pioneer mobile robot simulator equipped + * with a camera able to move in pan. + */ #include #include @@ -50,61 +48,59 @@ #include /*! - \class vpSimulatorPioneerPan - - \ingroup group_robot_simu_unicycle - - \brief Class that defines the Pioneer mobile robot simulator equipped - with a camera able to move in pan. - - It intends to simulate the mobile robot described in vpPioneerPan class. - This robot has 3 dof: \f$(v_x, w_z, \dot{q_1})\f$, the translational and - rotational velocities of the mobile platform, the pan head velocity -respectively. - - The robot position evolves with respect to a world frame; wMc. When a new -joint velocity is applied to the robot using setVelocity(), the position of -the camera wrt the world frame is updated. - - \image html pioneer-pan.png - - The following code shows how to control this robot in position and velocity. 
- \code -#include - -int main() -{ - vpHomogeneousMatrix wMc; - vpSimulatorPioneerPan robot; - - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; - - robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second - robot.setMaxTranslationVelocity(1.); // vx max set to 1 m/s - robot.setMaxRotationVelocity(vpMath::rad(90)); // wz max set to 90 deg/s - - vpColVector v(3); // we control vx, wz and q_pan - v = 0; - v[0] = 1.; // set vx to 1 m/s - robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); - // The robot has moved from 0.1 meters along the z axis - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "New position of the camera wMc:\n" << wMc << std::endl; -} - \endcode - - The usage of this class is also highlighted in \ref -tutorial-simu-robot-pioneer. - -*/ + * \class vpSimulatorPioneerPan + * + * \ingroup group_robot_simu_unicycle + * + * \brief Class that defines the Pioneer mobile robot simulator equipped + * with a camera able to move in pan. + * + * It intends to simulate the mobile robot described in vpPioneerPan class. + * This robot has 3 dof: \f$(v_x, w_z, \dot{q_1})\f$, the translational and + * rotational velocities of the mobile platform, the pan head velocity + * respectively. + * + * The robot position evolves with respect to a world frame; wMc. When a new + * joint velocity is applied to the robot using setVelocity(), the position of + * the camera wrt the world frame is updated. + * + * \image html pioneer-pan.png + * + * The following code shows how to control this robot in position and velocity. + * \code + * #include + * + * int main() + * { + * vpHomogeneousMatrix wMc; + * vpSimulatorPioneerPan robot; + * + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; + * + * robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second + * robot.setMaxTranslationVelocity(1.); // vx max set to 1 m/s + * robot.setMaxRotationVelocity(vpMath::rad(90)); // wz max set to 90 deg/s + * + * vpColVector v(3); // we control vx, wz and q_pan + * v = 0; + * v[0] = 1.; // set vx to 1 m/s + * robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); + * // The robot has moved from 0.1 meters along the z axis + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "New position of the camera wMc:\n" << wMc << std::endl; + * } + * \endcode + * + * The usage of this class is also highlighted in \ref tutorial-simu-robot-pioneer. + */ class VISP_EXPORT vpSimulatorPioneerPan : public vpPioneerPan, public vpRobotSimulator { protected: //! 
robot / camera location in the world frame vpHomogeneousMatrix wMc_; // world to camera - vpHomogeneousMatrix wMm_; // world to mobile robot frame located between the two weels + vpHomogeneousMatrix wMm_; // world to mobile robot frame located between the two wheels // mMp_ mobile robot to pan frame is a protected member of vpPioneerPan // pMe_ pan head to end effector frame is a protected member of vpPioneerPan // cMe_ is a protected member of vpUnicycle @@ -116,25 +112,24 @@ class VISP_EXPORT vpSimulatorPioneerPan : public vpPioneerPan, public vpRobotSim public: vpSimulatorPioneerPan(); - virtual ~vpSimulatorPioneerPan(); public: /** @name Inherited functionalities from vpSimulatorPioneerPan */ //@{ - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; void getPosition(vpHomogeneousMatrix &wMc) const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; //@} private: - void init(); + void init() override; // Non implemented virtual pure functions - void get_fJe(vpMatrix & /*_fJe */){}; - void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */){}; - void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */){}; + void get_fJe(vpMatrix & /*_fJe */) override { }; + void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */) override { }; + void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */) override { }; }; #endif diff --git a/modules/robot/include/visp3/robot/vpSimulatorViper850.h b/modules/robot/include/visp3/robot/vpSimulatorViper850.h index 95aec1dbfa..627b4dd7ab 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorViper850.h +++ b/modules/robot/include/visp3/robot/vpSimulatorViper850.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Class which provides a simulator for the robot Viper850. - * -*****************************************************************************/ + */ #ifndef vpSimulatorViper850_HH #define vpSimulatorViper850_HH /*! - \file vpSimulatorViper850.h - \brief Class which provides a simulator for the robot Viper850.. -*/ + * \file vpSimulatorViper850.h + * \brief Class which provides a simulator for the robot Viper850.. + */ #include #if defined(VISP_HAVE_MODULE_GUI) && ((defined(_WIN32) && !defined(WINRT_8_0)) || defined(VISP_HAVE_PTHREAD)) @@ -49,153 +47,151 @@ #include /*! - \class vpSimulatorViper850 - - \ingroup group_robot_simu_arm - - \brief Simulator of Irisa's Viper S850 robot named Viper850. - - Implementation of the vpRobotWireFrameSimulator class in order to simulate - Irisa's Viper850 robot. This robot is an ADEPT six degrees of freedom arm. - - \warning This class uses threading capabilities. Thus on Unix-like - platforms, the libpthread third-party library need to be - installed. On Windows, we use the native threading capabilities. 
- - This class allows to control the Viper850 arm robot in position - and velocity: - - in the joint space (vpRobot::ARTICULAR_FRAME), - - in the fixed reference frame (vpRobot::REFERENCE_FRAME), - - in the camera frame (vpRobot::CAMERA_FRAME), - - or in a mixed frame (vpRobot::MIXT_FRAME) where translations are expressed - in the reference frame and rotations in the camera frame. - - End-effector frame (vpRobot::END_EFFECTOR_FRAME) is not implemented. - - All the translations are expressed in meters for positions and m/s - for the velocities. Rotations are expressed in radians for the - positions, and rad/s for the rotation velocities. - - The direct and inverse kinematics models are implemented in the - vpViper850 class. - - To control the robot in position, you may set the controller - to position control and then send the position to reach in a specific - frame like here in the joint space: - - \code -#include -#include -#include -#include - -int main() -{ - vpSimulatorViper850 robot; - - vpColVector q(6); - // Set a joint position - q[0] = vpMath::rad(10); // Joint 1 position, in rad - q[1] = 0.2; // Joint 2 position, in rad - q[2] = 0.3; // Joint 3 position, in rad - q[3] = M_PI/8; // Joint 4 position, in rad - q[4] = M_PI/4; // Joint 5 position, in rad - q[5] = M_PI; // Joint 6 position, in rad - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); - - // Moves the robot in the joint space - robot.setPosition(vpRobot::ARTICULAR_FRAME, q); - - return 0; -} - \endcode - - The robot moves to the specified position with the default - positioning velocity vpRobotViper850::defaultPositioningVelocity. The - setPositioningVelocity() method allows to change the maximal - velocity used to reach the desired position. - - \code -#include -#include -#include - -int main() -{ - vpSimulatorViper850 robot; - - vpColVector q(6); - // Set q[i] with i in [0:5] - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); - - // Set the max velocity to 40% - robot.setPositioningVelocity(40); - - // Moves the robot in the joint space - robot.setPosition(vpRobot::ARTICULAR_FRAME, q); - - return 0; -} - \endcode - - To control the robot in velocity, you may set the controller to - velocity control and then send the velocities. To end the velocity - control and stop the robot you have to set the controller to the - stop state. Here is an example of a velocity control in the joint - space: - - \code -#include -#include -#include - -int main() -{ - vpSimulatorViper850 robot; - - vpColVector qvel(6); - // Set a joint velocity - qvel[0] = 0.1; // Joint 1 velocity in rad/s - qvel[1] = vpMath::rad(15); // Joint 2 velocity in rad/s - qvel[2] = 0; // Joint 3 velocity in rad/s - qvel[3] = M_PI/8; // Joint 4 velocity in rad/s - qvel[4] = 0; // Joint 5 velocity in rad/s - qvel[5] = 0; // Joint 6 velocity in rad/s - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - - for ( ; ; ) { - // Apply a velocity in the joint space - robot.setVelocity(vpRobot::ARTICULAR_FRAME, qvel); - - // Compute new velocities qvel... - } - - // Stop the robot - robot.setRobotState(vpRobot::STATE_STOP); - - return 0; -} - \endcode - - It is also possible to measure the robot current position with - getPosition() method and the robot current velocities with the getVelocity() - method. 
- - For convenience, there is also the ability to read/write joint - positions from a position file with readPosFile() and savePosFile() - methods. - - To know how this class can be used to achieve a visual servoing simulation, - you can follow the \ref tutorial-ibvs. - -*/ - + * \class vpSimulatorViper850 + * + * \ingroup group_robot_simu_arm + * + * \brief Simulator of Irisa's Viper S850 robot named Viper850. + * + * Implementation of the vpRobotWireFrameSimulator class in order to simulate + * Irisa's Viper850 robot. This robot is an ADEPT six degrees of freedom arm. + * + * \warning This class uses threading capabilities. Thus on Unix-like + * platforms, the libpthread third-party library need to be + * installed. On Windows, we use the native threading capabilities. + * + * This class allows to control the Viper850 arm robot in position + * and velocity: + * - in the joint space (vpRobot::ARTICULAR_FRAME), + * - in the fixed reference frame (vpRobot::REFERENCE_FRAME), + * - in the camera frame (vpRobot::CAMERA_FRAME), + * - or in a mixed frame (vpRobot::MIXT_FRAME) where translations are expressed + * in the reference frame and rotations in the camera frame. + * + * End-effector frame (vpRobot::END_EFFECTOR_FRAME) is not implemented. + * + * All the translations are expressed in meters for positions and m/s + * for the velocities. Rotations are expressed in radians for the + * positions, and rad/s for the rotation velocities. + * + * The direct and inverse kinematics models are implemented in the + * vpViper850 class. + * + * To control the robot in position, you may set the controller + * to position control and then send the position to reach in a specific + * frame like here in the joint space: + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorViper850 robot; + * + * vpColVector q(6); + * // Set a joint position + * q[0] = vpMath::rad(10); // Joint 1 position, in rad + * q[1] = 0.2; // Joint 2 position, in rad + * q[2] = 0.3; // Joint 3 position, in rad + * q[3] = M_PI/8; // Joint 4 position, in rad + * q[4] = M_PI/4; // Joint 5 position, in rad + * q[5] = M_PI; // Joint 6 position, in rad + * + * // Initialize the controller to position control + * robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); + * + * // Moves the robot in the joint space + * robot.setPosition(vpRobot::ARTICULAR_FRAME, q); + * + * return 0; + * } + * \endcode + * + * The robot moves to the specified position with the default + * positioning velocity vpRobotViper850::defaultPositioningVelocity. The + * setPositioningVelocity() method allows to change the maximal + * velocity used to reach the desired position. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorViper850 robot; + * + * vpColVector q(6); + * // Set q[i] with i in [0:5] + * + * // Initialize the controller to position control + * robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); + * + * // Set the max velocity to 40% + * robot.setPositioningVelocity(40); + * + * // Moves the robot in the joint space + * robot.setPosition(vpRobot::ARTICULAR_FRAME, q); + * + * return 0; + * } + * \endcode + * + * To control the robot in velocity, you may set the controller to + * velocity control and then send the velocities. To end the velocity + * control and stop the robot you have to set the controller to the + * stop state. 
Here is an example of a velocity control in the joint + * space: + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorViper850 robot; + * + * vpColVector qvel(6); + * // Set a joint velocity + * qvel[0] = 0.1; // Joint 1 velocity in rad/s + * qvel[1] = vpMath::rad(15); // Joint 2 velocity in rad/s + * qvel[2] = 0; // Joint 3 velocity in rad/s + * qvel[3] = M_PI/8; // Joint 4 velocity in rad/s + * qvel[4] = 0; // Joint 5 velocity in rad/s + * qvel[5] = 0; // Joint 6 velocity in rad/s + * + * // Initialize the controller to velocity control + * robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); + * + * for ( ; ; ) { + * // Apply a velocity in the joint space + * robot.setVelocity(vpRobot::ARTICULAR_FRAME, qvel); + * + * // Compute new velocities qvel... + * } + * + * // Stop the robot + * robot.setRobotState(vpRobot::STATE_STOP); + * + * return 0; + * } + * \endcode + * + * It is also possible to measure the robot current position with + * the getPosition() method and the robot current velocities with the getVelocity() + * method. + * + * For convenience, there is also the ability to read/write joint + * positions from a position file with readPosFile() and savePosFile() + * methods. + * + * To know how this class can be used to achieve a visual servoing simulation, + * you can follow the \ref tutorial-ibvs. + */ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public vpViper850 { public: @@ -216,15 +212,15 @@ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public public: vpSimulatorViper850(); explicit vpSimulatorViper850(bool display); - virtual ~vpSimulatorViper850(); + virtual ~vpSimulatorViper850() override; void getCameraParameters(vpCameraParameters &cam, const unsigned int &image_width, const unsigned int &image_height); void getCameraParameters(vpCameraParameters &cam, const vpImage &I); void getCameraParameters(vpCameraParameters &cam, const vpImage &I); - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &displacement); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &displacement) override; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q, double &timestamp); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position, double &timestamp); @@ -237,12 +233,12 @@ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public void get_cMe(vpHomogeneousMatrix &cMe); void get_cVe(vpVelocityTwistMatrix &cVe); - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void - init(vpViper850::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper850::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); bool initialiseCameraRelativeToObject(const vpHomogeneousMatrix &cMo); void initialiseObjectRelativeToCamera(const vpHomogeneousMatrix &cMo); @@ -253,26 +249,26 @@ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public void setCameraParameters(const vpCameraParameters &cam); void
setJointLimit(const vpColVector &limitMin, const vpColVector &limitMax); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const char *filename); void setPositioningVelocity(double vel) { positioningVelocity = vel; } - vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState); + vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState) override; - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); protected: /** @name Protected Member Functions Inherited from vpSimulatorViper850 */ //@{ - void computeArticularVelocity(); + void computeArticularVelocity() override; void compute_fMi(); void findHighestPositioningSpeed(vpColVector &q); void getExternalImage(vpImage &I); - inline void get_fMi(vpHomogeneousMatrix *fMit) + inline void get_fMi(vpHomogeneousMatrix *fMit) override { m_mutex_fMi.lock(); for (int i = 0; i < 8; i++) { @@ -280,12 +276,12 @@ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public } m_mutex_fMi.unlock(); } - void init(); - void initArms(); + void init() override; + void initArms() override; void initDisplay(); - int isInJointLimit(void); + int isInJointLimit() override; bool singularityTest(const vpColVector &q, vpMatrix &J); - void updateArticularPosition(); + void updateArticularPosition() override; //@} }; diff --git a/modules/robot/include/visp3/robot/vpUnicycle.h b/modules/robot/include/visp3/robot/vpUnicycle.h index 9fa31000e1..840e00a8a3 100644 --- a/modules/robot/include/visp3/robot/vpUnicycle.h +++ b/modules/robot/include/visp3/robot/vpUnicycle.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Common features for unicycle mobile robots. - * -*****************************************************************************/ + */ #ifndef VPUNICYCLE_H #define VPUNICYCLE_H @@ -40,42 +38,35 @@ #include /*! - - \class vpUnicycle - - \ingroup group_robot_simu_unicycle - - \brief Generic functions for unicycle mobile robots. - - This class provides common features for unicycle mobile robots. - -*/ + * \class vpUnicycle + * + * \ingroup group_robot_simu_unicycle + * + * \brief Generic functions for unicycle mobile robots. + * + * This class provides common features for unicycle mobile robots. + */ class VISP_EXPORT vpUnicycle { public: /*! - Default constructor that does nothing. - */ - vpUnicycle() : cMe_(), eJe_(){}; - /*! - Destructor that does nothing. - */ - virtual ~vpUnicycle(){}; + * Default constructor that does nothing. + */ + vpUnicycle() : cMe_(), eJe_() { }; /** @name Inherited functionalities from vpUnicycle */ //@{ /*! - Return the transformation \f${^c}{\bf M}_e\f$ between the camera frame - and the mobile robot end effector frame. - */ + * Return the transformation \f${^c}{\bf M}_e\f$ between the camera frame + * and the mobile robot end effector frame. + */ vpHomogeneousMatrix get_cMe() const { return cMe_; } /*! 
- - Return the twist transformation from camera frame to the mobile robot - end effector frame. This transformation allows to compute a velocity - expressed in the end effector frame into the camera frame. - */ + * Return the twist transformation from camera frame to the mobile robot + * end effector frame. This transformation allows to compute a velocity + * expressed in the end effector frame into the camera frame. + */ vpVelocityTwistMatrix get_cVe() const { vpVelocityTwistMatrix cVe; @@ -84,39 +75,38 @@ class VISP_EXPORT vpUnicycle } /*! - - Return the twist transformation from camera frame to the mobile robot - end effector frame. This transformation allows to compute a velocity - expressed in the end effector frame into the camera frame. - - \sa get_cVe() - */ + * Return the twist transformation from camera frame to the mobile robot + * end effector frame. This transformation allows to compute a velocity + * expressed in the end effector frame into the camera frame. + * + * \sa get_cVe() + */ void get_cVe(vpVelocityTwistMatrix &cVe) const { cVe = get_cVe(); } /*! - Return the robot jacobian \f${^e}{\bf J}_e\f$ expressed in the end - effector frame. - - \return The robot jacobian such as \f${\bf v} = {^e}{\bf J}_e \; \dot{\bf - q}\f$ with \f$\dot{\bf q} = (v_x, w_z)\f$ the robot control velocities and - \f$\bf v\f$ the six dimention velocity skew. - */ + * Return the robot jacobian \f${^e}{\bf J}_e\f$ expressed in the end + * effector frame. + * + * \return The robot jacobian such as \f${\bf v} = {^e}{\bf J}_e \; \dot{\bf + * q}\f$ with \f$\dot{\bf q} = (v_x, w_z)\f$ the robot control velocities and + * \f$\bf v\f$ the six dimension velocity skew. + */ vpMatrix get_eJe() const { return eJe_; } /*! - Set the transformation between the camera frame and the end effector - frame. - */ + * Set the transformation between the camera frame and the end effector + * frame. + */ void set_cMe(const vpHomogeneousMatrix &cMe) { cMe_ = cMe; } /*! - Set the robot jacobian \f${^e}{\bf J}_e\f$ expressed in the end effector - frame. - - \param eJe : The robot jacobian to set such as \f${\bf v} = {^e}{\bf J}_e - \; \dot{\bf q}\f$ with \f$\dot{\bf q} = (v_x, w_z)\f$ the robot control - velocities and \f$\bf v\f$ the six dimention velocity skew. - */ + * Set the robot jacobian \f${^e}{\bf J}_e\f$ expressed in the end effector + * frame. + * + * \param eJe : The robot jacobian to set such as \f${\bf v} = {^e}{\bf J}_e + * \; \dot{\bf q}\f$ with \f$\dot{\bf q} = (v_x, w_z)\f$ the robot control + * velocities and \f$\bf v\f$ the six dimension velocity skew. + */ void set_eJe(const vpMatrix &eJe) { eJe_ = eJe; } //@} diff --git a/modules/robot/include/visp3/robot/vpViper.h b/modules/robot/include/visp3/robot/vpViper.h index e47f5e615c..465f81500a 100644 --- a/modules/robot/include/visp3/robot/vpViper.h +++ b/modules/robot/include/visp3/robot/vpViper.h @@ -40,7 +40,7 @@ \file vpViper.h - Modelisation of the ADEPT Viper 650 or 850 robot. + Modelization of the ADEPT Viper 650 or 850 robot. */ @@ -57,7 +57,7 @@ \ingroup group_robot_real_arm group_robot_simu_arm - \brief Modelisation of the ADEPT Viper robot + \brief Modelization of the ADEPT Viper robot This robot has six degrees of freedom. The model of the robot is the following: \image html model-viper.png Model of the Viper 850 robot. 
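To make the vpUnicycle documentation above more concrete, here is a minimal sketch of how the cVe twist and the eJe Jacobian it describes are typically chained to map the unicycle control inputs (v_x, w_z) into a camera-frame velocity skew, i.e. v_c = cVe * eJe * (v_x, w_z)^T. The cMe transformation and the Jacobian entries below are illustrative assumptions; a concrete robot model such as vpPioneer fills them itself.

\code
#include <iostream>
#include <visp3/core/vpColVector.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpMatrix.h>
#include <visp3/core/vpVelocityTwistMatrix.h>
#include <visp3/robot/vpUnicycle.h>

int main()
{
  vpUnicycle robot;

  // Assumed camera-to-end-effector transformation (example values only)
  vpHomogeneousMatrix cMe(0, 0, 0.2, 0, 0, 0);
  robot.set_cMe(cMe);

  // Assumed Jacobian: v = eJe * qdot with qdot = (v_x, w_z);
  // the exact non-zero entries depend on the robot model (see vpPioneer)
  vpMatrix eJe(6, 2); // elements initialized to zero
  eJe[0][0] = 1.0;    // v_x drives the translation along x
  eJe[5][1] = 1.0;    // w_z drives the rotation around z
  robot.set_eJe(eJe);

  // Control inputs: v_x in m/s, w_z in rad/s
  vpColVector qdot(2);
  qdot[0] = 0.2;
  qdot[1] = vpMath::rad(5);

  // Velocity skew expressed in the camera frame
  vpColVector v_c = robot.get_cVe() * robot.get_eJe() * qdot;
  std::cout << "v_c: " << v_c.t() << std::endl;

  return 0;
}
\endcode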
diff --git a/modules/robot/include/visp3/robot/vpViper650.h b/modules/robot/include/visp3/robot/vpViper650.h index c4eedb66a3..ab2082ae04 100644 --- a/modules/robot/include/visp3/robot/vpViper650.h +++ b/modules/robot/include/visp3/robot/vpViper650.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,72 +29,66 @@ * * Description: * Interface for the ADEPT Viper 650 robot. - * -*****************************************************************************/ + */ #ifndef vpViper650_h #define vpViper650_h -/*! - - \file vpViper650.h - - Modelisation of the ADEPT Viper 650 robot. - -*/ +#include /*! + * \file vpViper650.h + * + * Modelization of the ADEPT Viper 650 robot. + */ - \class vpViper650 - - \ingroup group_robot_real_arm - - \brief Modelisation of the ADEPT Viper 650 robot. - - The model of the robot is the following: - \image html model-viper.png Model of the Viper 650 robot. - - The non modified Denavit-Hartenberg representation of the robot is - given in the table below, where \f$q_1^*, \ldots, q_6^*\f$ - are the variable joint positions. - - \f[ - \begin{tabular}{|c|c|c|c|c|} - \hline - Joint & $a_i$ & $d_i$ & $\alpha_i$ & $\theta_i$ \\ - \hline - 1 & $a_1$ & $d_1$ & $-\pi/2$ & $q_1^*$ \\ - 2 & $a_2$ & 0 & 0 & $q_2^*$ \\ - 3 & $a_3$ & 0 & $-\pi/2$ & $q_3^* - \pi$ \\ - 4 & 0 & $d_4$ & $\pi/2$ & $q_4^*$ \\ - 5 & 0 & 0 & $-\pi/2$ & $q_5^*$ \\ - 6 & 0 & 0 & 0 & $q_6^*-\pi$ \\ - 7 & 0 & $d_6$ & 0 & 0 \\ - \hline - \end{tabular} - \f] - - In this modelization, different frames have to be considered. - - - \f$ {\cal F}_f \f$: the reference frame, also called world frame - - - \f$ {\cal F}_w \f$: the wrist frame located at the intersection of - the last three rotations, with \f$ ^f{\bf M}_w = ^0{\bf M}_6 \f$ - - - \f$ {\cal F}_e \f$: the end-effector frame located at the interface of the - two tool changers, with \f$^f{\bf M}_e = 0{\bf M}_7 \f$ - - - \f$ {\cal F}_c \f$: the camera or tool frame, with \f$^f{\bf M}_c = ^f{\bf - M}_e \; ^e{\bf M}_c \f$ where \f$ ^e{\bf M}_c \f$ is the result of - a calibration stage. We can also consider a custom tool TOOL_CUSTOM and - set this tool during robot initialisation or using set_eMc(). - - - \f$ {\cal F}_s \f$: the force/torque sensor frame, with \f$d7=0.0666\f$. - -*/ - -#include - +/*! + * \class vpViper650 + * + * \ingroup group_robot_real_arm + * + * \brief Modelization of the ADEPT Viper 650 robot. + * + * The model of the robot is the following: + * \image html model-viper.png Model of the Viper 650 robot. + * + * The non modified Denavit-Hartenberg representation of the robot is + * given in the table below, where \f$q_1^*, \ldots, q_6^*\f$ + * are the variable joint positions. + * + * \f[ + * \begin{tabular}{|c|c|c|c|c|} + * \hline + * Joint & $a_i$ & $d_i$ & $\alpha_i$ & $\theta_i$ \\ + * \hline + * 1 & $a_1$ & $d_1$ & $-\pi/2$ & $q_1^*$ \\ + * 2 & $a_2$ & 0 & 0 & $q_2^*$ \\ + * 3 & $a_3$ & 0 & $-\pi/2$ & $q_3^* - \pi$ \\ + * 4 & 0 & $d_4$ & $\pi/2$ & $q_4^*$ \\ + * 5 & 0 & 0 & $-\pi/2$ & $q_5^*$ \\ + * 6 & 0 & 0 & 0 & $q_6^*-\pi$ \\ + * 7 & 0 & $d_6$ & 0 & 0 \\ + * \hline + * \end{tabular} + * \f] + * + * In this modelization, different frames have to be considered. 
+ * + * - \f$ {\cal F}_f \f$: the reference frame, also called world frame + * + * - \f$ {\cal F}_w \f$: the wrist frame located at the intersection of + * the last three rotations, with \f$ ^f{\bf M}_w = ^0{\bf M}_6 \f$ + * + * - \f$ {\cal F}_e \f$: the end-effector frame located at the interface of the + * two tool changers, with \f$^f{\bf M}_e = 0{\bf M}_7 \f$ + * + * - \f$ {\cal F}_c \f$: the camera or tool frame, with \f$^f{\bf M}_c = ^f{\bf + * M}_e \; ^e{\bf M}_c \f$ where \f$ ^e{\bf M}_c \f$ is the result of + * a calibration stage. We can also consider a custom tool TOOL_CUSTOM and + * set this tool during robot initialisation or using set_eMc(). + * + * - \f$ {\cal F}_s \f$: the force/torque sensor frame, with \f$d7=0.0666\f$. + */ class VISP_EXPORT vpViper650 : public vpViper { public: @@ -121,7 +114,8 @@ class VISP_EXPORT vpViper650 : public vpViper static const char *const CONST_GENERIC_CAMERA_NAME; //! List of possible tools that can be attached to the robot end-effector. - typedef enum { + typedef enum + { TOOL_MARLIN_F033C_CAMERA, /*!< Marlin F033C camera. */ TOOL_PTGREY_FLEA2_CAMERA, /*!< Point Grey Flea 2 camera. */ TOOL_SCHUNK_GRIPPER_CAMERA, /*!< Camera attached to the Schunk gripper. */ @@ -133,15 +127,14 @@ class VISP_EXPORT vpViper650 : public vpViper static const vpToolType defaultTool; vpViper650(); - virtual ~vpViper650(){}; /** @name Inherited functionalities from vpViper650 */ //@{ void init(void); void init(const std::string &camera_extrinsic_parameters); void - init(vpViper650::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper650::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void init(vpViper650::vpToolType tool, const std::string &filename); void init(vpViper650::vpToolType tool, const vpHomogeneousMatrix &eMc_); diff --git a/modules/robot/include/visp3/robot/vpViper850.h b/modules/robot/include/visp3/robot/vpViper850.h index 0c94fd44c9..2a04d80c5a 100644 --- a/modules/robot/include/visp3/robot/vpViper850.h +++ b/modules/robot/include/visp3/robot/vpViper850.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,73 +29,68 @@ * * Description: * Interface for the ADEPT Viper 850 robot. - * -*****************************************************************************/ + */ #ifndef vpViper850_h #define vpViper850_h -/*! - - \file vpViper850.h - - Modelisation of the ADEPT Viper 850 robot. - -*/ - -/*! - - \class vpViper850 - - \ingroup group_robot_real_arm group_robot_simu_arm - - \brief Modelisation of the ADEPT Viper 850 robot. - - The model of the robot is the following: - \image html model-viper.png Model of the Viper 850 robot. - - The non modified Denavit-Hartenberg representation of the robot is - given in the table below, where \f$q_1^*, \ldots, q_6^*\f$ - are the variable joint positions. 
- - \f[ - \begin{tabular}{|c|c|c|c|c|} - \hline - Joint & $a_i$ & $d_i$ & $\alpha_i$ & $\theta_i$ \\ - \hline - 1 & $a_1$ & $d_1$ & $-\pi/2$ & $q_1^*$ \\ - 2 & $a_2$ & 0 & 0 & $q_2^*$ \\ - 3 & $a_3$ & 0 & $-\pi/2$ & $q_3^* - \pi$ \\ - 4 & 0 & $d_4$ & $\pi/2$ & $q_4^*$ \\ - 5 & 0 & 0 & $-\pi/2$ & $q_5^*$ \\ - 6 & 0 & 0 & 0 & $q_6^*-\pi$ \\ - 7 & 0 & $d_6$ & 0 & 0 \\ - \hline - \end{tabular} - \f] - - In this modelization, different frames have to be considered. - - - \f$ {\cal F}_f \f$: the reference frame, also called world frame - - - \f$ {\cal F}_w \f$: the wrist frame located at the intersection of - the last three rotations, with \f$ ^f{\bf M}_w = ^0{\bf M}_6 \f$ - - - \f$ {\cal F}_e \f$: the end-effector frame located at the interface of the - two tool changers, with \f$^f{\bf M}_e = 0{\bf M}_7 \f$ - - - \f$ {\cal F}_c \f$: the camera or tool frame, with \f$^f{\bf M}_c = ^f{\bf - M}_e \; ^e{\bf M}_c \f$ where \f$ ^e{\bf M}_c \f$ is the result of - a calibration stage. We can also consider a custom tool TOOL_CUSTOM and - set this during robot initialisation or using set_eMc(). - - - \f$ {\cal F}_s \f$: the force/torque sensor frame, with \f$d7=0.0666\f$. - -*/ #include #include +/*! + * \file vpViper850.h + * + * Modelization of the ADEPT Viper 850 robot. + */ + +/*! + * \class vpViper850 + * + * \ingroup group_robot_real_arm group_robot_simu_arm + * + * \brief Modelization of the ADEPT Viper 850 robot. + * + * The model of the robot is the following: + * \image html model-viper.png Model of the Viper 850 robot. + * + * The non modified Denavit-Hartenberg representation of the robot is + * given in the table below, where \f$q_1^*, \ldots, q_6^*\f$ + * are the variable joint positions. + * + * \f[ + * \begin{tabular}{|c|c|c|c|c|} + * \hline + * Joint & $a_i$ & $d_i$ & $\alpha_i$ & $\theta_i$ \\ + * \hline + * 1 & $a_1$ & $d_1$ & $-\pi/2$ & $q_1^*$ \\ + * 2 & $a_2$ & 0 & 0 & $q_2^*$ \\ + * 3 & $a_3$ & 0 & $-\pi/2$ & $q_3^* - \pi$ \\ + * 4 & 0 & $d_4$ & $\pi/2$ & $q_4^*$ \\ + * 5 & 0 & 0 & $-\pi/2$ & $q_5^*$ \\ + * 6 & 0 & 0 & 0 & $q_6^*-\pi$ \\ + * 7 & 0 & $d_6$ & 0 & 0 \\ + * \hline + * \end{tabular} + * \f] + * + * In this modelization, different frames have to be considered. + * + * - \f$ {\cal F}_f \f$: the reference frame, also called world frame + * + * - \f$ {\cal F}_w \f$: the wrist frame located at the intersection of + * the last three rotations, with \f$ ^f{\bf M}_w = ^0{\bf M}_6 \f$ + * + * - \f$ {\cal F}_e \f$: the end-effector frame located at the interface of the + * two tool changers, with \f$^f{\bf M}_e = 0{\bf M}_7 \f$ + * + * - \f$ {\cal F}_c \f$: the camera or tool frame, with \f$^f{\bf M}_c = ^f{\bf + * M}_e \; ^e{\bf M}_c \f$ where \f$ ^e{\bf M}_c \f$ is the result of + * a calibration stage. We can also consider a custom tool TOOL_CUSTOM and + * set this during robot initialisation or using set_eMc(). + * + * - \f$ {\cal F}_s \f$: the force/torque sensor frame, with \f$d7=0.0666\f$. + */ class VISP_EXPORT vpViper850 : public vpViper { public: @@ -122,7 +116,8 @@ class VISP_EXPORT vpViper850 : public vpViper static const char *const CONST_GENERIC_CAMERA_NAME; //! List of possible tools that can be attached to the robot end-effector. - typedef enum { + typedef enum + { TOOL_MARLIN_F033C_CAMERA, /*!< Marlin F033C camera. */ TOOL_PTGREY_FLEA2_CAMERA, /*!< Point Grey Flea 2 camera. */ TOOL_SCHUNK_GRIPPER_CAMERA, /*!< Camera attached to the Schunk gripper. 
*/ @@ -134,15 +129,14 @@ class VISP_EXPORT vpViper850 : public vpViper static const vpToolType defaultTool; vpViper850(); - virtual ~vpViper850(){}; /** @name Inherited functionalities from vpViper850 */ //@{ void init(void); void init(const std::string &camera_extrinsic_parameters); void - init(vpViper850::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper850::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void init(vpViper850::vpToolType tool, const std::string &filename); void init(vpViper850::vpToolType tool, const vpHomogeneousMatrix &eMc_); diff --git a/modules/robot/src/haptic-device/qbdevice/vpQbSoftHand.cpp b/modules/robot/src/haptic-device/qbdevice/vpQbSoftHand.cpp index 8ddb8c1c63..5c8aca7159 100644 --- a/modules/robot/src/haptic-device/qbdevice/vpQbSoftHand.cpp +++ b/modules/robot/src/haptic-device/qbdevice/vpQbSoftHand.cpp @@ -44,13 +44,7 @@ * Default constructor that does nothing. * To connect to a device call init(). */ -vpQbSoftHand::vpQbSoftHand() : vpQbDevice() {} - -/** - * Close all the still open serial ports. - * \sa close() - */ -vpQbSoftHand::~vpQbSoftHand() {} +vpQbSoftHand::vpQbSoftHand() : vpQbDevice() { } /** * Retrieve the motor currents of the given device. @@ -129,7 +123,8 @@ void vpQbSoftHand::setPosition(const vpColVector &position, const int &id) if (commands[0] < position_limits[0]) { commands[0] = position_limits[0]; - } else if (commands[0] > position_limits[1]) { + } + else if (commands[0] > position_limits[1]) { commands[0] = position_limits[1]; } @@ -171,13 +166,15 @@ void vpQbSoftHand::setPosition(const vpColVector &position, double speed_factor, double vel = speed_factor; if (vel < 0.01) { vel = 0.01; - } else if (vel > 1.) { + } + else if (vel > 1.) { vel = 1.0; } double current_factor = stiffness; if (current_factor < 0.0) { current_factor = 0.0; - } else if (current_factor > 1.) { + } + else if (current_factor > 1.) { current_factor = 1.0; } double slope = sign * max_slope * vel; @@ -189,7 +186,8 @@ void vpQbSoftHand::setPosition(const vpColVector &position, double speed_factor, q[0] = q_mes[0] + slope * delta_t / 1000.0 * i; if (q[0] < getPositionLimits()[0]) { q[0] = getPositionLimits()[0]; - } else if (q[0] > getPositionLimits()[1]) { + } + else if (q[0] > getPositionLimits()[1]) { q[0] = getPositionLimits()[1]; } setPosition(q, id); @@ -198,7 +196,8 @@ void vpQbSoftHand::setPosition(const vpColVector &position, double speed_factor, if (std::fabs(current[0]) > current_factor * current_max) { current_failures++; - } else { + } + else { current_failures = 0; } diff --git a/modules/robot/src/real-robot/viper/vpViper.cpp b/modules/robot/src/real-robot/viper/vpViper.cpp index 0019e7fc1e..edcb72aedf 100644 --- a/modules/robot/src/real-robot/viper/vpViper.cpp +++ b/modules/robot/src/real-robot/viper/vpViper.cpp @@ -37,7 +37,7 @@ \file vpViper.cpp - Modelisation of the ADEPT Viper 650 or 850 robot. + Modelization of the ADEPT Viper 650 or 850 robot. */ diff --git a/modules/robot/src/real-robot/viper/vpViper650.cpp b/modules/robot/src/real-robot/viper/vpViper650.cpp index 4557434552..704de86e7d 100644 --- a/modules/robot/src/real-robot/viper/vpViper650.cpp +++ b/modules/robot/src/real-robot/viper/vpViper650.cpp @@ -37,7 +37,7 @@ \file vpViper650.cpp - Modelisation of the ADEPT Viper 650 robot. + Modelization of the ADEPT Viper 650 robot. 
*/ diff --git a/modules/robot/src/real-robot/viper/vpViper850.cpp b/modules/robot/src/real-robot/viper/vpViper850.cpp index b0d612fe16..53eac839cf 100644 --- a/modules/robot/src/real-robot/viper/vpViper850.cpp +++ b/modules/robot/src/real-robot/viper/vpViper850.cpp @@ -37,7 +37,7 @@ \file vpViper850.cpp - Modelisation of the ADEPT Viper 850 robot. + Modelization of the ADEPT Viper 850 robot. */ diff --git a/modules/robot/src/robot-simulator/vpRobotCamera.cpp b/modules/robot/src/robot-simulator/vpRobotCamera.cpp index 0f1f3954fd..e5ac10c3d1 100644 --- a/modules/robot/src/robot-simulator/vpRobotCamera.cpp +++ b/modules/robot/src/robot-simulator/vpRobotCamera.cpp @@ -89,12 +89,6 @@ void vpRobotCamera::init() setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s } -/*! - Destructor. - -*/ -vpRobotCamera::~vpRobotCamera() {} - /*! Get the twist transformation from camera frame to end-effector @@ -256,5 +250,5 @@ void vpRobotCamera::setPosition(const vpHomogeneousMatrix &cMw) #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_robot.a(vpRobotCamera.cpp.o) has no // symbols -void dummy_vpRobotCamera(){}; +void dummy_vpRobotCamera() { }; #endif diff --git a/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp b/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp index 5fc26d3a2b..e64efb01a9 100644 --- a/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp +++ b/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp @@ -48,19 +48,19 @@ */ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator() : vpWireFrameSimulator(), vpRobotSimulator(), I(), tcur(0), tprev(0), robotArms(NULL), size_fMi(8), fMi(NULL), - artCoord(), artVel(), velocity(), + artCoord(), artVel(), velocity(), #if defined(_WIN32) #elif defined(VISP_HAVE_PTHREAD) - thread(), attr(), + thread(), attr(), #endif - m_mutex_fMi(), m_mutex_eMc(), m_mutex_artVel(), m_mutex_artCoord(), m_mutex_velocity(), m_mutex_display(), m_mutex_robotStop(), m_mutex_frame(), m_mutex_setVelocityCalled(), m_mutex_scene(), - displayBusy(false), - robotStop(false), jointLimit(false), jointLimitArt(false), singularityManagement(true), cameraParam(), + m_mutex_fMi(), m_mutex_eMc(), m_mutex_artVel(), m_mutex_artCoord(), m_mutex_velocity(), m_mutex_display(), m_mutex_robotStop(), m_mutex_frame(), m_mutex_setVelocityCalled(), m_mutex_scene(), + displayBusy(false), + robotStop(false), jointLimit(false), jointLimitArt(false), singularityManagement(true), cameraParam(), #if defined(VISP_HAVE_DISPLAY) - display(), + display(), #endif - displayType(MODEL_3D), displayAllowed(true), constantSamplingTimeMode(false), setVelocityCalled(false), - verbose_(false) + displayType(MODEL_3D), displayAllowed(true), constantSamplingTimeMode(false), setVelocityCalled(false), + verbose_(false) { setSamplingTime(0.010); velocity.resize(6); @@ -77,19 +77,19 @@ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator() */ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator(bool do_display) : vpWireFrameSimulator(), vpRobotSimulator(), I(), tcur(0), tprev(0), robotArms(NULL), size_fMi(8), fMi(NULL), - artCoord(), artVel(), velocity(), + artCoord(), artVel(), velocity(), #if defined(_WIN32) #elif defined(VISP_HAVE_PTHREAD) - thread(), attr(), + thread(), attr(), #endif - m_mutex_fMi(), m_mutex_eMc(), m_mutex_artVel(), m_mutex_artCoord(), m_mutex_velocity(), m_mutex_display(), m_mutex_robotStop(), m_mutex_frame(), m_mutex_setVelocityCalled(), m_mutex_scene(), - displayBusy(false), robotStop(false), 
jointLimit(false), jointLimitArt(false), singularityManagement(true), - cameraParam(), + m_mutex_fMi(), m_mutex_eMc(), m_mutex_artVel(), m_mutex_artCoord(), m_mutex_velocity(), m_mutex_display(), m_mutex_robotStop(), m_mutex_frame(), m_mutex_setVelocityCalled(), m_mutex_scene(), + displayBusy(false), robotStop(false), jointLimit(false), jointLimitArt(false), singularityManagement(true), + cameraParam(), #if defined(VISP_HAVE_DISPLAY) - display(), + display(), #endif - displayType(MODEL_3D), displayAllowed(do_display), constantSamplingTimeMode(false), setVelocityCalled(false), - verbose_(false) + displayType(MODEL_3D), displayAllowed(do_display), constantSamplingTimeMode(false), setVelocityCalled(false), + verbose_(false) { setSamplingTime(0.010); velocity.resize(6); @@ -102,13 +102,6 @@ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator(bool do_display) #endif } -/*! - Basic destructor -*/ -vpRobotWireFrameSimulator::~vpRobotWireFrameSimulator() -{ -} - /*! Initialize the display. It enables to choose the type of scene which will be used to display the object at the current position and at the desired @@ -231,7 +224,8 @@ void vpRobotWireFrameSimulator::getInternalView(vpImage &I_) (std::fabs(py_int - 1) > vpMath::maximum(py_int, 1.) * std::numeric_limits::epsilon())) { u = (double)I_.getWidth() / (2 * px_int); v = (double)I_.getHeight() / (2 * py_int); - } else { + } + else { u = (double)I_.getWidth() / (vpMath::minimum(I_.getWidth(), I_.getHeight())); v = (double)I_.getHeight() / (vpMath::minimum(I_.getWidth(), I_.getHeight())); } @@ -304,7 +298,8 @@ void vpRobotWireFrameSimulator::getInternalView(vpImage &I_) (std::fabs(py_int - 1) > vpMath::maximum(py_int, 1.) * std::numeric_limits::epsilon())) { u = (double)I.getWidth() / (2 * px_int); v = (double)I.getHeight() / (2 * py_int); - } else { + } + else { u = (double)I_.getWidth() / (vpMath::minimum(I_.getWidth(), I_.getHeight())); v = (double)I_.getHeight() / (vpMath::minimum(I_.getWidth(), I_.getHeight())); } @@ -371,5 +366,5 @@ vpHomogeneousMatrix vpRobotWireFrameSimulator::get_cMo() #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: // libvisp_robot.a(vpRobotWireFrameSimulator.cpp.o) has no symbols -void dummy_vpRobotWireFrameSimulator(){}; +void dummy_vpRobotWireFrameSimulator() { }; #endif diff --git a/modules/robot/src/robot-simulator/vpSimulatorCamera.cpp b/modules/robot/src/robot-simulator/vpSimulatorCamera.cpp index f12d2a370b..889449e6da 100644 --- a/modules/robot/src/robot-simulator/vpSimulatorCamera.cpp +++ b/modules/robot/src/robot-simulator/vpSimulatorCamera.cpp @@ -72,12 +72,6 @@ void vpSimulatorCamera::init() setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s } -/*! - Destructor. - -*/ -vpSimulatorCamera::~vpSimulatorCamera() {} - /*! Get the twist transformation from camera frame to end-effector diff --git a/modules/robot/src/robot-simulator/vpSimulatorPioneer.cpp b/modules/robot/src/robot-simulator/vpSimulatorPioneer.cpp index 9193c9dd71..54368f734e 100644 --- a/modules/robot/src/robot-simulator/vpSimulatorPioneer.cpp +++ b/modules/robot/src/robot-simulator/vpSimulatorPioneer.cpp @@ -75,12 +75,6 @@ void vpSimulatorPioneer::init() wMc_ = wMe_ * cMe_.inverse(); } -/*! - Destructor. - -*/ -vpSimulatorPioneer::~vpSimulatorPioneer() {} - /*! Get the robot jacobian expressed in the end-effector frame. The jacobian expression is given in vpPioneer class. 
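Since the hunks around here touch the unicycle simulators, a short usage sketch may help situate them: the snippet below drives a vpSimulatorPioneer with a (v_x, w_z) velocity and then reads the resulting pose back through getPosition() in the reference frame, the code path modified just below. The sampling time, the control frame passed to setVelocity(), and the loop length are assumptions made for illustration only.

\code
#include <iostream>
#include <visp3/core/vpColVector.h>
#include <visp3/core/vpMath.h>
#include <visp3/robot/vpSimulatorPioneer.h>

int main()
{
  vpSimulatorPioneer robot;
  robot.setSamplingTime(0.040); // 40 ms control period (example value)
  robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);

  // Unicycle control inputs: forward velocity v_x (m/s) and rotation velocity w_z (rad/s)
  vpColVector v(2);
  v[0] = 0.1;
  v[1] = vpMath::rad(2);

  for (unsigned int i = 0; i < 100; ++i) {
    // Frame choice assumed here: the joint-space (v_x, w_z) interface
    robot.setVelocity(vpRobot::ARTICULAR_FRAME, v);
  }

  // Pose of the camera expressed in the reference (world) frame
  vpColVector q;
  robot.getPosition(vpRobot::REFERENCE_FRAME, q);
  std::cout << "Camera pose: " << q.t() << std::endl;

  return 0;
}
\endcode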
@@ -196,8 +190,8 @@ void vpSimulatorPioneer::getPosition(const vpRobot::vpControlFrameType frame, vp case vpRobot::ARTICULAR_FRAME: std::cout << "ARTICULAR_FRAME is not implemented in " - "vpSimulatorPioneer::getPosition()" - << std::endl; + "vpSimulatorPioneer::getPosition()" + << std::endl; break; case vpRobot::REFERENCE_FRAME: { // Convert wMc_ to a position diff --git a/modules/robot/src/robot-simulator/vpSimulatorPioneerPan.cpp b/modules/robot/src/robot-simulator/vpSimulatorPioneerPan.cpp index 316db9559d..a7f9f7c73f 100644 --- a/modules/robot/src/robot-simulator/vpSimulatorPioneerPan.cpp +++ b/modules/robot/src/robot-simulator/vpSimulatorPioneerPan.cpp @@ -83,12 +83,6 @@ void vpSimulatorPioneerPan::init() wMc_ = wMm_ * mMp_ * pMe_ * cMe_.inverse(); } -/*! - Destructor. - -*/ -vpSimulatorPioneerPan::~vpSimulatorPioneerPan() {} - /*! Get the robot jacobian expressed in the end-effector frame. The jacobian expression is given in vpPioneerPan class. @@ -211,8 +205,8 @@ void vpSimulatorPioneerPan::getPosition(const vpRobot::vpControlFrameType frame, case vpRobot::ARTICULAR_FRAME: std::cout << "ARTICULAR_FRAME is not implemented in " - "vpSimulatorPioneer::getPosition()" - << std::endl; + "vpSimulatorPioneer::getPosition()" + << std::endl; break; case vpRobot::REFERENCE_FRAME: { // Convert wMc_ to a position diff --git a/modules/sensor/include/visp3/sensor/vpForceTorqueAtiNetFTSensor.h b/modules/sensor/include/visp3/sensor/vpForceTorqueAtiNetFTSensor.h index ac40b94b53..80edb39c38 100755 --- a/modules/sensor/include/visp3/sensor/vpForceTorqueAtiNetFTSensor.h +++ b/modules/sensor/include/visp3/sensor/vpForceTorqueAtiNetFTSensor.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * ATI Force torque interface. - * -*****************************************************************************/ + */ #ifndef _vpForceTorqueAtiNetFTSensor_h_ #define _vpForceTorqueAtiNetFTSensor_h_ @@ -48,64 +46,63 @@ #ifdef VISP_HAVE_FUNC_INET_NTOP /*! - \class vpForceTorqueAtiNetFTSensor - - \ingroup group_sensor_ft - - Interface for ATI force/torque sensor using [Net F/T](https://www.ati-ia.com/products/ft/ft_NetFT.aspx) over UDP. - - The Network Force/Torque (Net F/T) sensor system measures six components of force and torque (Fx, Fy, Fz, Tx, Ty, Tz). - The Net F/T provides an EtherNet/IP communication interface and is compatible with standard Ethernet. The Net - F/T system is available with any of ATI transducer models. The Net F/T's web browser interface allows for easy - configuration and set up via the Ethernet connection present on all NetBox models. - - This class was tested with ATI Nano 43 F/T sensor connected to a NetBox. To use this class, you don't need to install - any specific third-party. - - To use this class, connect an Ethernet cable to the NetBox. The default IP address of the Net F/T is: 192.168.1.1. - The default Ethernet port is 49152. - You can use your favorite web browser on http://192.168.1.1 to modify Net F/T sensor settings and select sensor - calibration configuration. - - The following example shows how to use this class to get F/T measurements. 
- \code -#include - -#include - -int main(int argc, char **argv) -{ - vpForceTorqueAtiNetFTSensor ati_net_ft("192.168.1.1", 49152); - - ati_net_ft.startStreaming(); - ati_net_ft.bias(); - - while (1) { - double t = vpTime::measureTimeMs(); - if (ati_net_ft.waitForNewData()) { - vpColVector ft = ati_net_ft.getForceTorque(); - std::cout << "F/T: " << ft.t() << std::endl; - } - std::cout << "Loop time: " << vpTime::measureTimeMs() - t << " ms" << std::endl; - } -} - \endcode - - It produces the following output: - \code -F/T: -0.00150018 0.0030764 -0.00791356 -8.22294e-06 4.18799e-05 1.078288e-05 -Loop time: 0.03393554688 ms -... - \endcode - where 3 first values are forces Fx, Fy, Fz in N and the 3 last are torques Tx, Ty, Tz in Nm. - -*/ + * \class vpForceTorqueAtiNetFTSensor + * + * \ingroup group_sensor_ft + * + * Interface for ATI force/torque sensor using [Net F/T](https://www.ati-ia.com/products/ft/ft_NetFT.aspx) over UDP. + * + * The Network Force/Torque (Net F/T) sensor system measures six components of force and torque (Fx, Fy, Fz, Tx, Ty, Tz). + * The Net F/T provides an EtherNet/IP communication interface and is compatible with standard Ethernet. The Net + * F/T system is available with any of ATI transducer models. The Net F/T's web browser interface allows for easy + * configuration and set up via the Ethernet connection present on all NetBox models. + * + * This class was tested with ATI Nano 43 F/T sensor connected to a NetBox. To use this class, you don't need to install + * any specific third-party. + * + * To use this class, connect an Ethernet cable to the NetBox. The default IP address of the Net F/T is: 192.168.1.1. + * The default Ethernet port is 49152. + * You can use your favorite web browser on http://192.168.1.1 to modify Net F/T sensor settings and select sensor + * calibration configuration. + * + * The following example shows how to use this class to get F/T measurements. + * \code + * #include + * + * #include + * + * int main(int argc, char **argv) + * { + * vpForceTorqueAtiNetFTSensor ati_net_ft("192.168.1.1", 49152); + * + * ati_net_ft.startStreaming(); + * ati_net_ft.bias(); + * + * while (1) { + * double t = vpTime::measureTimeMs(); + * if (ati_net_ft.waitForNewData()) { + * vpColVector ft = ati_net_ft.getForceTorque(); + * std::cout << "F/T: " << ft.t() << std::endl; + * } + * std::cout << "Loop time: " << vpTime::measureTimeMs() - t << " ms" << std::endl; + * } + * } + * \endcode + * + * It produces the following output: + * \code + * F/T: -0.00150018 0.0030764 -0.00791356 -8.22294e-06 4.18799e-05 1.078288e-05 + * Loop time: 0.03393554688 ms + * ... + * \endcode + * where 3 first values are forces Fx, Fy, Fz in N and the 3 last are torques Tx, Ty, Tz in Nm. + */ class VISP_EXPORT vpForceTorqueAtiNetFTSensor : public vpUDPClient { public: vpForceTorqueAtiNetFTSensor(); vpForceTorqueAtiNetFTSensor(const std::string &hostname, int port); - virtual ~vpForceTorqueAtiNetFTSensor(); + virtual ~vpForceTorqueAtiNetFTSensor() override; void bias(unsigned int n_counts = 50); /*! diff --git a/modules/sensor/include/visp3/sensor/vpForceTorqueAtiSensor.h b/modules/sensor/include/visp3/sensor/vpForceTorqueAtiSensor.h index 84e9634664..de7033e568 100644 --- a/modules/sensor/include/visp3/sensor/vpForceTorqueAtiSensor.h +++ b/modules/sensor/include/visp3/sensor/vpForceTorqueAtiSensor.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. 
* Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * ATI Force torque interface. - * -*****************************************************************************/ + */ #ifndef _vpForceTorqueAtiSensor_h_ #define _vpForceTorqueAtiSensor_h_ @@ -45,57 +43,56 @@ #include /*! - \class vpForceTorqueAtiSensor - - \ingroup group_sensor_ft - - Interface for ATI force/torque sensor. This class works only under -linux-like OS. It requires Comedi 3rd party. Installation instructions are -provided here https://visp.inria.fr/3rd_comedi. - - Comedi is the linux control and measurement device interface. For more -information see http://www.comedi.org. - - This class was tested with ATI Gamma 65-SI FT sensor connected to a - National Instrument NI DAQmx PCI-6220 board. - - Synchronous F/T data acquisition is performed using getForceTorque(). The -call to the function blocks until the whole acquisition has finished. - - The following example shows how to get single measures from an ATI F/T -device each 10 ms (100 Hz). -\code -#include -#include - -int main(int argc, char** argv) -{ - vpForceTorqueAtiSensor ati; - ati.setCalibrationFile("FT12345.cal"); - ati.open(); - ati.bias(); - for(unsigned int i=0; i < 20; i++) { - std::cout << "F/T: " << ati.getForceTorque().t() << std::endl; - vpTime::wait(10); - } - ati.close(); -#endif -} - \endcode - -*/ + * \class vpForceTorqueAtiSensor + * + * \ingroup group_sensor_ft + * + * Interface for ATI force/torque sensor. This class works only under + * linux-like OS. It requires Comedi 3rd party. Installation instructions are + * provided here https://visp.inria.fr/3rd_comedi. + * + * Comedi is the linux control and measurement device interface. For more + * information see http://www.comedi.org. + * + * This class was tested with ATI Gamma 65-SI FT sensor connected to a + * National Instrument NI DAQmx PCI-6220 board. + * + * Synchronous F/T data acquisition is performed using getForceTorque(). The + * call to the function blocks until the whole acquisition has finished. + * + * The following example shows how to get single measures from an ATI F/T + * device each 10 ms (100 Hz). + * \code + * #include + * #include + * + * int main(int argc, char** argv) + * { + * vpForceTorqueAtiSensor ati; + * ati.setCalibrationFile("FT12345.cal"); + * ati.open(); + * ati.bias(); + * for(unsigned int i=0; i < 20; i++) { + * std::cout << "F/T: " << ati.getForceTorque().t() << std::endl; + * vpTime::wait(10); + * } + * ati.close(); + * #endif + * } + * \endcode + */ class VISP_EXPORT vpForceTorqueAtiSensor : public vpComedi { public: vpForceTorqueAtiSensor(); - virtual ~vpForceTorqueAtiSensor(); + virtual ~vpForceTorqueAtiSensor() override; void bias(); void close(); /*! - Return the calibration file location specified using - setCalibrationFile(). \sa setCalibrationFile() + * Return the calibration file location specified using + * setCalibrationFile(). \sa setCalibrationFile() */ std::string getCalibrationFile() const { return m_calibfile; } vpColVector getForceTorque() const; diff --git a/modules/sensor/include/visp3/sensor/vpSickLDMRS.h b/modules/sensor/include/visp3/sensor/vpSickLDMRS.h index 84a71ccdec..de42458366 100644 --- a/modules/sensor/include/visp3/sensor/vpSickLDMRS.h +++ b/modules/sensor/include/visp3/sensor/vpSickLDMRS.h @@ -51,77 +51,79 @@ #include /*! - - \file vpSickLDMRS.h - - \brief Driver for the Sick LD-MRS laser scanner. -*/ + * \file vpSickLDMRS.h + * + * \brief Driver for the Sick LD-MRS laser scanner. + */ /*! 
- - \class vpSickLDMRS - - \ingroup group_sensor_laserscanner - - \brief Driver for the Sick LD-MRS laser scanner. - - \warning For the moment, this driver works only on UNIX platform. - - The code below shows how the four laser scan provided by the Sick - LD-MRS could be acquired. - - \code -#include "visp3/sensor/vpSickLDMRS.h" - -int main() -{ -#if !defined(_WIN32) && (defined(__unix__) || defined(__unix) || -(defined(__APPLE__) && defined(__MACH__))) // UNIX std::string ip = -"131.254.12.119"; - - vpSickLDMRS laser; - laser.setIpAddress(ip); - laser.setup(); - - vpLaserScan laserscan[4]; - for ( ; ; ) { - // Get the measured points in the four layers - laser.measure(laserscan); - - // Prints all the measured points - for (int layer=0; layer<4; layer++) { - std::vector pointsInLayer = laserscan[layer].getScanPoints(); vpScanPoint p; - - for (unsigned int i=0; i < pointsInLayer.size(); i++) { - std::cout << pointsInLayer[i] << std::endl; - } - } - } -#endif -} - \endcode -*/ + * \class vpSickLDMRS + * + * \ingroup group_sensor_laserscanner + * + * \brief Driver for the Sick LD-MRS laser scanner. + * + * \warning For the moment, this driver works only on UNIX platform. + * + * The code below shows how the four laser scan provided by the Sick + * LD-MRS could be acquired. + * + * \code + * #include "visp3/sensor/vpSickLDMRS.h" + * + * int main() + * { + * #if !defined(_WIN32) && (defined(__unix__) || defined(__unix) || + * (defined(__APPLE__) && defined(__MACH__))) // UNIX std::string ip = + * "131.254.12.119"; + * + * vpSickLDMRS laser; + * laser.setIpAddress(ip); + * laser.setup(); + * + * vpLaserScan laserscan[4]; + * for ( ; ; ) { + * // Get the measured points in the four layers + * laser.measure(laserscan); + * + * // Prints all the measured points + * for (int layer=0; layer<4; layer++) { + * std::vector pointsInLayer = laserscan[layer].getScanPoints(); vpScanPoint p; + * + * for (unsigned int i=0; i < pointsInLayer.size(); i++) { + * std::cout << pointsInLayer[i] << std::endl; + * } + * } + * } + * #endif + * } + * \endcode + */ class VISP_EXPORT vpSickLDMRS : public vpLaserScanner { public: - enum MagicWord { + enum MagicWord + { MagicWordC2 = 0xAFFEC0C2 ///< The magic word that allows to identify the ///< messages that are sent by the Sick LD-MRS. }; - enum DataType { + enum DataType + { MeasuredData = 0x2202 ///< Flag to indicate that the body of a message ///< contains measured data. }; vpSickLDMRS(); + /*! Copy constructor. */ vpSickLDMRS(const vpSickLDMRS &sick) : vpLaserScanner(sick), socket_fd(-1), body(NULL), vAngle(), time_offset(0), isFirstMeasure(true), - maxlen_body(104000) + maxlen_body(104000) { *this = sick; }; - virtual ~vpSickLDMRS(); - /*! Copy constructor. */ + virtual ~vpSickLDMRS() override; + + /*! Copy operator. */ vpSickLDMRS &operator=(const vpSickLDMRS &sick) { if (this != &sick) { diff --git a/modules/tracker/blob/include/visp3/blob/vpDot.h b/modules/tracker/blob/include/visp3/blob/vpDot.h index f208d48f6c..ef8e4190f5 100644 --- a/modules/tracker/blob/include/visp3/blob/vpDot.h +++ b/modules/tracker/blob/include/visp3/blob/vpDot.h @@ -59,62 +59,63 @@ #endif /*! - \class vpDot - - \ingroup module_blob - - \brief This tracker is meant to track a dot (connected pixels with same - gray level) on a vpImage. - - The underground algorithm is based on a binarization of the image - and a connex component segmentation to determine the dot - characteristics (location, moments, size...). 
- - The following sample code shows how to grab images from a firewire camera, - track a blob and display the tracking results. - - \code -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_DC1394) - vpImage I; // Create a gray level image container - vp1394TwoGrabber g(false); // Create a grabber based on libdc1394-2.x third party lib - g.acquire(I); // Acquire an image - -#if defined(VISP_HAVE_X11) - vpDisplayX d(I, 0, 0, "Camera view"); -#endif - vpDisplay::display(I); - vpDisplay::flush(I); - - vpDot blob; - blob.initTracking(I); - blob.setGraphics(true); - - while(1) { - g.acquire(I); // Acquire an image - vpDisplay::display(I); - blob.track(I); - - vpDisplay::flush(I); - } -#endif -} - \endcode - - \sa vpDot2 -*/ + * \class vpDot + * + * \ingroup module_blob + * + * \brief This tracker is meant to track a dot (connected pixels with same + * gray level) on a vpImage. + * + * The underground algorithm is based on a binarization of the image + * and a connex component segmentation to determine the dot + * characteristics (location, moments, size...). + * + * The following sample code shows how to grab images from a firewire camera, + * track a blob and display the tracking results. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_DC1394) + * vpImage I; // Create a gray level image container + * vp1394TwoGrabber g(false); // Create a grabber based on libdc1394-2.x third party lib + * g.acquire(I); // Acquire an image + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX d(I, 0, 0, "Camera view"); + * #endif + * vpDisplay::display(I); + * vpDisplay::flush(I); + * + * vpDot blob; + * blob.initTracking(I); + * blob.setGraphics(true); + * + * while(1) { + * g.acquire(I); // Acquire an image + * vpDisplay::display(I); + * blob.track(I); + * + * vpDisplay::flush(I); + * } + * #endif + * } + * \endcode + * + * \sa vpDot2 + */ class VISP_EXPORT vpDot : public vpTracker { public: /*! \enum vpConnexityType - Type of connexity 4, or 8. - */ - typedef enum { + * Type of connexity 4, or 8. + */ + typedef enum + { CONNEXITY_4, /*!< For a given pixel 4 neighbors are considered (left, right, up, down) */ CONNEXITY_8 /*!< For a given pixel 8 neighbors are considered (left, @@ -190,19 +191,19 @@ class VISP_EXPORT vpDot : public vpTracker vpDot(); explicit vpDot(const vpImagePoint &ip); vpDot(const vpDot &d); - virtual ~vpDot(); + virtual ~vpDot() override; void display(const vpImage &I, vpColor color = vpColor::red, unsigned int thickness = 1) const; /*! - Gets the second order normalized centered moment \f$ n_{ij} \f$ - as a 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$ - such as \f$ n_{ij} = \mu_{ij}/m_{00} \f$ - - \return The 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$. - - \sa getCog(), getArea() - */ + * Gets the second order normalized centered moment \f$ n_{ij} \f$ + * as a 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$ + * such as \f$ n_{ij} = \mu_{ij}/m_{00} \f$ + * + * \return The 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$. + * + * \sa getCog(), getArea() + */ inline vpColVector get_nij() const { vpColVector nij(3); @@ -214,19 +215,17 @@ class VISP_EXPORT vpDot : public vpTracker } /*! - Gets the area of the blob corresponding also to the zero order moment. - - \return The blob area. - */ + * Gets the area of the blob corresponding also to the zero order moment. + * + * \return The blob area. + */ inline double getArea() const { return m00; } /*! - - Return the dot bounding box. 
- - \sa getWidth(), getHeight() - - */ + * Return the dot bounding box. + * + * \sa getWidth(), getHeight() + */ inline vpRect getBBox() const { vpRect bbox; @@ -236,66 +235,60 @@ class VISP_EXPORT vpDot : public vpTracker return (bbox); }; /*! - Return the location of the dot center of gravity. - - \return The coordinates of the center of gravity. - */ + * Return the location of the dot center of gravity. + * + * \return The coordinates of the center of gravity. + */ inline vpImagePoint getCog() const { return cog; } /*! - Return the list of all the image points on the border of the dot. - - \warning Doesn't return the image points inside the dot anymore. To get - those points see getConnexities(). - */ + * Return the list of all the image points on the border of the dot. + * + * \warning Doesn't return the image points inside the dot anymore. To get + * those points see getConnexities(). + */ inline std::list getEdges() const { return this->ip_edges_list; }; /*! - - Return the list of all the image points inside the dot. - - \return The list of all the images points in the dot. - This list is updated after a call to track(). - - */ + * Return the list of all the image points inside the dot. + * + * \return The list of all the images points in the dot. + * This list is updated after a call to track(). + */ inline std::list getConnexities() const { return this->ip_connexities_list; }; inline double getGamma() const { return this->gamma; }; - /*! - - Return the precision of the gray level of the dot. It is a double - precision float witch value is in ]0,1]. 1 means full precision, whereas - values close to 0 show a very bad precision. - */ + /*! + * Return the precision of the gray level of the dot. It is a double + * precision float witch value is in ]0,1]. 1 means full precision, whereas + * values close to 0 show a very bad precision. + */ double getGrayLevelPrecision() const { return grayLevelPrecision; } double getMaxDotSize() const { return this->maxDotSizePercentage; } + /*! - Return the mean gray level value of the dot. - */ + * Return the mean gray level value of the dot. + */ double getMeanGrayLevel() const { return (this->mean_gray_level); }; /*! - \return a vpPolygon made from the edges of the dot. - */ + * \return a vpPolygon made from the edges of the dot. + */ vpPolygon getPolygon() const { return (vpPolygon(ip_edges_list)); }; /*! - - Return the width of the dot. - - \sa getHeight() - - */ + * Return the width of the dot. + * + * \sa getHeight() + */ inline unsigned int getWidth() const { return (this->u_max - this->u_min + 1); }; /*! - - Return the width of the dot. - - \sa getHeight() - - */ + * Return the width of the dot. + * + * \sa getHeight() + */ inline unsigned int getHeight() const { return (this->v_max - this->v_min + 1); }; void initTracking(const vpImage &I); @@ -311,30 +304,28 @@ class VISP_EXPORT vpDot : public vpTracker void print(std::ostream &os) { os << *this << std::endl; } /*! - Initialize the dot coordinates with \e ip. - */ + * Initialize the dot coordinates with \e ip. + */ inline void setCog(const vpImagePoint &ip) { this->cog = ip; } /*! - - Activates the dot's moments computation. - - \param activate true, if you want to compute the moments. If false, - moments are not computed. - - Computed moment are vpDot::m00, vpDot::m10, vpDot::m01, vpDot::m11, - vpDot::m20, vpDot::m02 and second order centered moments vpDot::mu11, - vpDot::mu20, vpDot::mu02 computed with respect to the blob centroid. 
- - The coordinates of the region's centroid (u, v) can be computed from the - moments by \f$u=\frac{m10}{m00}\f$ and \f$v=\frac{m01}{m00}\f$. - - */ + * Activates the dot's moments computation. + * + * \param activate true, if you want to compute the moments. If false, + * moments are not computed. + * + * Computed moment are vpDot::m00, vpDot::m10, vpDot::m01, vpDot::m11, + * vpDot::m20, vpDot::m02 and second order centered moments vpDot::mu11, + * vpDot::mu20, vpDot::mu02 computed with respect to the blob centroid. + * + * The coordinates of the region's centroid (u, v) can be computed from the + * moments by \f$u=\frac{m10}{m00}\f$ and \f$v=\frac{m01}{m00}\f$. + */ void setComputeMoments(bool activate) { compute_moment = activate; } /*! - Set the type of connexity: 4 or 8. - */ + * Set the type of connexity: 4 or 8. + */ void setConnexity(vpConnexityType type) { this->connexityType = type; }; void setMaxDotSize(double percentage); void setGrayLevelMin(const unsigned int &level_min) { this->gray_level_min = level_min; }; @@ -342,25 +333,26 @@ class VISP_EXPORT vpDot : public vpTracker void setGrayLevelPrecision(const double &grayLevelPrecision); /*! - Activates the display of all the pixels of the dot during the tracking. - The default thickness of the overlayed drawings can be modified using - setGraphicsThickness(). + * Activates the display of all the pixels of the dot during the tracking. + * The default thickness of the overlayed drawings can be modified using + * setGraphicsThickness(). + * + * \warning To effectively display the dot graphics a call to + * vpDisplay::flush() is needed. + * + * \param activate true to activate the display of dot pixels, false to turn + * off the display. + * + * \sa setGraphicsThickness() + */ - \warning To effectively display the dot graphics a call to - vpDisplay::flush() is needed. - - \param activate true to activate the display of dot pixels, false to turn - off the display. - - \sa setGraphicsThickness() - */ void setGraphics(bool activate) { graphics = activate; } /*! - Modify the default thickness that is set to 1 of the drawings in overlay - when setGraphics() is enabled. - - \sa setGraphics() - */ + * Modify the default thickness that is set to 1 of the drawings in overlay + * when setGraphics() is enabled. + * + * \sa setGraphics() + */ void setGraphicsThickness(unsigned int t) { this->thickness = t; }; void track(const vpImage &I); @@ -394,8 +386,8 @@ class VISP_EXPORT vpDot : public vpTracker unsigned char gray_level_out; double mean_gray_level; // Mean gray level of the dot - unsigned int gray_level_min; // left threshold for binarisation - unsigned int gray_level_max; // right threshold for binarisation + unsigned int gray_level_min; // left threshold for binarization + unsigned int gray_level_max; // right threshold for binarization double grayLevelPrecision; // precision of the gray level of the dot. // It is a double precision float witch value is in ]0,1]. // 1 means full precision, whereas values close to 0 show a very bad diff --git a/modules/tracker/blob/include/visp3/blob/vpDot2.h b/modules/tracker/blob/include/visp3/blob/vpDot2.h index 1b02a29e06..a89dceba84 100644 --- a/modules/tracker/blob/include/visp3/blob/vpDot2.h +++ b/modules/tracker/blob/include/visp3/blob/vpDot2.h @@ -52,81 +52,80 @@ #include /*! - \class vpDot2 - - \ingroup module_blob - - \brief This tracker is meant to track a blob (connex pixels with same - gray level) on a vpImage. 
- - The underground algorithm is based on a binarisation of the image - and then on a contour detection using the Freeman chain coding to - determine the blob characteristics (location, moments, size...). - - The binarisation is done using gray level minimum and maximum values - that define the admissible gray levels of the blob. You can specify these - levels by setGrayLevelMin() and setGrayLevelMax(). These levels are also - set automatically by setGrayLevelPrecision(). The algorithm allows - to track white objects on a black background and vice versa. - - When a blob is found, some tests are done to see if it is valid: - - A blob is considered by default as ellipsoid. The found blob could - be rejected if the shape is not ellipsoid. To determine if the shape - is ellipsoid the algorithm consider an inner and outside ellipse. - Sampled points on these two ellipses should have the right gray levels. - Along the inner ellipse the sampled points should have gray levels - that are in the gray level minimum and maximum bounds, while - on the outside ellipse, the gray levels should be out of the gray level - bounds. To set the percentage of the sample points which should have the - right levels use setEllipsoidBadPointsPercentage(). The distance between the - inner ellpsoid and the blob contour, as well the distance between the - blob contour and the outside ellipse is fixed by - setEllipsoidShapePrecision(). If you want to track a non ellipsoid shape, - and turn off this validation test, you have to call - setEllipsoidShapePrecision(0). - - The width, height and surface of the blob are compared to the - corresponding values of the previous blob. If they differ to much - the blob could be rejected. To set the admissible distance you can - use setSizePrecision(). - - Note that track() and searchDotsInArea() are the most important features - of this class. - - - track() estimate the current position of the dot using its previous - position, then try to compute the new parameters of the dot. If everything - went ok, tracking succeeds, otherwise we search this dot in a window - around the last position of the dot. - - - searchDotsInArea() enable to find dots similar to this dot in a window. It - is used when there was a problem performing basic tracking of the dot, but - can also be used to find a certain type of dots in the full image. - - The following sample code available in - tutorial-blob-tracker-live-firewire.cpp shows how to grab images from a - firewire camera, track a blob and display the tracking results. - - \include tutorial-blob-tracker-live-firewire.cpp - A line by line explanation of the previous example is provided in - \ref tutorial-tracking-blob. - - This other example available in tutorial-blob-auto-tracker.cpp shows firstly - how to detect in the first image all the blobs that match some - characteristics in terms of size, area, gray level. Secondly, it shows how - to track all the dots that are detected. - - \include tutorial-blob-auto-tracker.cpp - A line by line explanation of this last example is also provided in - \ref tutorial-tracking-blob, section \ref tracking_blob_tracking. - - \sa vpDot -*/ + * \class vpDot2 + * + * \ingroup module_blob + * + * \brief This tracker is meant to track a blob (connex pixels with same + * gray level) on a vpImage. + * + * The underground algorithm is based on a binarization of the image + * and then on a contour detection using the Freeman chain coding to + * determine the blob characteristics (location, moments, size...). 
+ * + * The binarization is done using gray level minimum and maximum values + * that define the admissible gray levels of the blob. You can specify these + * levels by setGrayLevelMin() and setGrayLevelMax(). These levels are also + * set automatically by setGrayLevelPrecision(). The algorithm allows + * to track white objects on a black background and vice versa. + * + * When a blob is found, some tests are done to see if it is valid: + * - A blob is considered by default as ellipsoid. The found blob could + * be rejected if the shape is not ellipsoid. To determine if the shape + * is ellipsoid the algorithm consider an inner and outside ellipse. + * Sampled points on these two ellipses should have the right gray levels. + * Along the inner ellipse the sampled points should have gray levels + * that are in the gray level minimum and maximum bounds, while + * on the outside ellipse, the gray levels should be out of the gray level + * bounds. To set the percentage of the sample points which should have the + * right levels use setEllipsoidBadPointsPercentage(). The distance between the + * inner ellipsoid and the blob contour, as well the distance between the + * blob contour and the outside ellipse is fixed by + * setEllipsoidShapePrecision(). If you want to track a non ellipsoid shape, + * and turn off this validation test, you have to call + * setEllipsoidShapePrecision(0). + * - The width, height and surface of the blob are compared to the + * corresponding values of the previous blob. If they differ to much + * the blob could be rejected. To set the admissible distance you can + * use setSizePrecision(). + * + * Note that track() and searchDotsInArea() are the most important features + * of this class. + * + * - track() estimate the current position of the dot using its previous + * position, then try to compute the new parameters of the dot. If everything + * went ok, tracking succeeds, otherwise we search this dot in a window + * around the last position of the dot. + * + * - searchDotsInArea() enable to find dots similar to this dot in a window. It + * is used when there was a problem performing basic tracking of the dot, but + * can also be used to find a certain type of dots in the full image. + * + * The following sample code available in + * tutorial-blob-tracker-live-firewire.cpp shows how to grab images from a + * firewire camera, track a blob and display the tracking results. + * + * \include tutorial-blob-tracker-live-firewire.cpp + * A line by line explanation of the previous example is provided in + * \ref tutorial-tracking-blob. + * + * This other example available in tutorial-blob-auto-tracker.cpp shows firstly + * how to detect in the first image all the blobs that match some + * characteristics in terms of size, area, gray level. Secondly, it shows how + * to track all the dots that are detected. + * + * \include tutorial-blob-auto-tracker.cpp + * A line by line explanation of this last example is also provided in + * \ref tutorial-tracking-blob, section \ref tracking_blob_tracking. 
+ * + * \sa vpDot + */ class VISP_EXPORT vpDot2 : public vpTracker { public: vpDot2(); explicit vpDot2(const vpImagePoint &ip); vpDot2(const vpDot2 &twinDot); - virtual ~vpDot2(); static vpMatrix defineDots(vpDot2 dot[], const unsigned int &n, const std::string &dotFile, vpImage &I, vpColor col = vpColor::blue, bool trackDot = true); @@ -134,14 +133,14 @@ class VISP_EXPORT vpDot2 : public vpTracker void display(const vpImage &I, vpColor color = vpColor::red, unsigned int thickness = 1) const; /*! - Gets the second order normalized centered moment \f$ n_{ij} \f$ - as a 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$ - such as \f$ n_{ij} = \mu_{ij}/m_{00} \f$ - - \return The 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$. - - \sa getCog(), getArea() - */ + * Gets the second order normalized centered moment \f$ n_{ij} \f$ + * as a 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$ + * such as \f$ n_{ij} = \mu_{ij}/m_{00} \f$ + * + * \return The 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$. + * + * \sa getCog(), getArea() + */ inline vpColVector get_nij() const { vpColVector nij(3); @@ -153,13 +152,12 @@ class VISP_EXPORT vpDot2 : public vpTracker } double getArea() const; - /*! - - Return the dot bounding box. - \sa getWidth(), getHeight() - - */ + /*! + * Return the dot bounding box. + * + * \sa getWidth(), getHeight() + */ inline vpRect getBBox() const { vpRect bbox; @@ -169,40 +167,39 @@ class VISP_EXPORT vpDot2 : public vpTracker return (bbox); }; - /*! - Return the location of the dot center of gravity. - \return The coordinates of the center of gravity. - */ + /*! + * Return the location of the dot center of gravity. + * + * \return The coordinates of the center of gravity. + */ inline vpImagePoint getCog() const { return cog; } double getDistance(const vpDot2 &distantDot) const; /*! - - Return the list of all the image points on the dot - border. - - \param edges_list : The list of all the images points on the dot - border. This list is update after a call to track(). - - */ + * Return the list of all the image points on the dot + * border. + * + * \param edges_list : The list of all the images points on the dot + * border. This list is update after a call to track(). + */ void getEdges(std::list &edges_list) const { edges_list = this->ip_edges_list; }; - /*! - - Return the list of all the image points on the dot - border. - - \return The list of all the images points on the dot - border. This list is update after a call to track(). - */ - std::list getEdges() const { return (this->ip_edges_list); }; /*! - Get the percentage of sampled points that are considered non conform - in terms of the gray level on the inner and the ouside ellipses. + * Return the list of all the image points on the dot + * border. + * + * \return The list of all the images points on the dot + * border. This list is update after a call to track(). + */ + std::list getEdges() const { return (this->ip_edges_list); }; - \sa setEllipsoidBadPointsPercentage() - */ + /*! + * Get the percentage of sampled points that are considered non conform + * in terms of the gray level on the inner and the outside ellipses. + * + * \sa setEllipsoidBadPointsPercentage() + */ double getEllipsoidBadPointsPercentage() const { return allowedBadPointsPercentage_; } double getEllipsoidShapePrecision() const; @@ -210,28 +207,30 @@ class VISP_EXPORT vpDot2 : public vpTracker inline double getGamma() const { return this->gamma; }; /*! - Return the color level of pixels inside the dot. 
- - \sa getGrayLevelMax() - */ + * Return the color level of pixels inside the dot. + * + * \sa getGrayLevelMax() + */ inline unsigned int getGrayLevelMin() const { return gray_level_min; }; /*! - Return the color level of pixels inside the dot. - - \sa getGrayLevelMin() - */ + * Return the color level of pixels inside the dot. + * + * \sa getGrayLevelMin() + */ inline unsigned int getGrayLevelMax() const { return gray_level_max; }; double getGrayLevelPrecision() const; double getHeight() const; double getMaxSizeSearchDistancePrecision() const; + /*! - \return The mean gray level value of the dot. - */ + * \return The mean gray level value of the dot. + */ double getMeanGrayLevel() const { return (this->mean_gray_level); }; + /*! - \return a vpPolygon made from the edges of the dot. - */ + * \return a vpPolygon made from the edges of the dot. + */ vpPolygon getPolygon() const { return (vpPolygon(ip_edges_list)); }; double getSizePrecision() const; double getWidth() const; @@ -252,38 +251,37 @@ class VISP_EXPORT vpDot2 : public vpTracker void setArea(const double &area); /*! - Initialize the dot coordinates with \e ip. - */ + * Initialize the dot coordinates with \e ip. + */ inline void setCog(const vpImagePoint &ip) { this->cog = ip; } - /*! - - Activates the dot's moments computation. - - \param activate true, if you want to compute the moments. If false, - moments are not computed. - - Computed moment are vpDot::m00, vpDot::m10, vpDot::m01, vpDot::m11, - vpDot::m20, vpDot::m02 and second order centereed moments vpDot::mu11, - vpDot::mu20, vpDot::mu02 computed with respect to the blob centroid. - - The coordinates of the region's centroid (u, v) can be computed from the - moments by \f$u=\frac{m10}{m00}\f$ and \f$v=\frac{m01}{m00}\f$. - */ + /*! + * Activates the dot's moments computation. + * + * \param activate true, if you want to compute the moments. If false, + * moments are not computed. + * + * Computed moment are vpDot::m00, vpDot::m10, vpDot::m01, vpDot::m11, + * vpDot::m20, vpDot::m02 and second order centered moments vpDot::mu11, + * vpDot::mu20, vpDot::mu02 computed with respect to the blob centroid. + * + * The coordinates of the region's centroid (u, v) can be computed from the + * moments by \f$u=\frac{m10}{m00}\f$ and \f$v=\frac{m01}{m00}\f$. + */ void setComputeMoments(bool activate) { compute_moment = activate; } /*! - Set the percentage of sampled points that are considered non conform - in terms of the gray level on the inner and the ouside ellipses. - Points located on the inner ellipse should have the same gray level - than the blob, while points located on the outside ellipse should - have a different gray level. - - \param percentage : Percentage of points sampled with bad gray level - on the inner and outside ellipses that are admissible. 0 means - that all the points should have a right level, while a value of 1 - means that all the points can have a bad gray level. - */ + * Set the percentage of sampled points that are considered non conform + * in terms of the gray level on the inner and the outside ellipses. + * Points located on the inner ellipse should have the same gray level + * than the blob, while points located on the outside ellipse should + * have a different gray level. + * + * \param percentage : Percentage of points sampled with bad gray level + * on the inner and outside ellipses that are admissible. 0 means + * that all the points should have a right level, while a value of 1 + * means that all the points can have a bad gray level. 
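+   *
+   * For instance, a possible tuning (the values below are only illustrative)
+   * could be:
+   * \code
+   * vpDot2 d;
+   * d.setEllipsoidShapePrecision(0.65);     // ellipsoid shape test precision (0 disables the test)
+   * d.setEllipsoidBadPointsPercentage(0.2); // tolerate 20% of badly sampled points
+   * \endcode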
+ */ void setEllipsoidBadPointsPercentage(const double &percentage = 0.0) { if (percentage < 0.) @@ -295,39 +293,40 @@ class VISP_EXPORT vpDot2 : public vpTracker } void setEllipsoidShapePrecision(const double &ellipsoidShapePrecision); - /*! - Activates the display of the border of the dot during the tracking. - The default thickness of the overlayed drawings can be modified using - setGraphicsThickness(). - - \warning To effectively display the dot graphics a call to - vpDisplay::flush() is needed. - - \param activate If true, the border of the dot will be painted. false to - turn off border painting. - \sa setGraphicsThickness() - */ - void setGraphics(bool activate) { graphics = activate; } /*! - Modify the default thickness that is set to 1 of the drawings in overlay - when setGraphics() is enabled. + * Activates the display of the border of the dot during the tracking. + * The default thickness of the overlayed drawings can be modified using + * setGraphicsThickness(). + * + * \warning To effectively display the dot graphics a call to + * vpDisplay::flush() is needed. + * + * \param activate If true, the border of the dot will be painted. false to + * turn off border painting. + * + * \sa setGraphicsThickness() + */ + void setGraphics(bool activate) { graphics = activate; } - \sa setGraphics() - */ - void setGraphicsThickness(unsigned int t) { this->thickness = t; }; /*! + * Modify the default thickness that is set to 1 of the drawings in overlay + * when setGraphics() is enabled. + * + * \sa setGraphics() + */ + void setGraphicsThickness(unsigned int t) { this->thickness = t; }; - Set the color level of the dot to search a dot in a region of interest. This - level will be used to know if a pixel in the image belongs to the dot or - not. Only pixels with higher level can belong to the dot. If the level is - lower than the minimum level for a dot, set the level to MIN_IN_LEVEL. - - \param min : Color level of a dot to search in a region of interest. - - \sa setGrayLevelMax(), setGrayLevelPrecision() - - */ + /*! + * Set the color level of the dot to search a dot in a region of interest. This + * level will be used to know if a pixel in the image belongs to the dot or + * not. Only pixels with higher level can belong to the dot. If the level is + * lower than the minimum level for a dot, set the level to MIN_IN_LEVEL. + * + * \param min : Color level of a dot to search in a region of interest. + * + * \sa setGrayLevelMax(), setGrayLevelPrecision() + */ inline void setGrayLevelMin(const unsigned int &min) { if (min > 255) @@ -337,14 +336,13 @@ class VISP_EXPORT vpDot2 : public vpTracker }; /*! - - Set the color level of pixels surrounding the dot. This is meant to be used - to search a dot in a region of interest. - - \param max : Intensity level of a dot to search in a region of interest. - - \sa setGrayLevelMin(), setGrayLevelPrecision() - */ + * Set the color level of pixels surrounding the dot. This is meant to be used + * to search a dot in a region of interest. + * + * \param max : Intensity level of a dot to search in a region of interest. 
+ * + * \sa setGrayLevelMin(), setGrayLevelPrecision() + */ inline void setGrayLevelMax(const unsigned int &max) { if (max > 255) @@ -352,6 +350,7 @@ class VISP_EXPORT vpDot2 : public vpTracker else this->gray_level_max = max; }; + void setGrayLevelPrecision(const double &grayLevelPrecision); void setHeight(const double &height); void setMaxSizeSearchDistancePrecision(const double &maxSizeSearchDistancePrecision); @@ -451,22 +450,19 @@ class VISP_EXPORT vpDot2 : public vpTracker void computeMeanGrayLevel(const vpImage &I); /*! - - Get the starting point on a dot border. The dot border is - computed from this point. - - \sa getFirstBorder_v() - - */ + Get the starting point on a dot border. The dot border is + computed from this point. + * + \sa getFirstBorder_v() + */ unsigned int getFirstBorder_u() const { return this->firstBorder_u; } - /*! - Get the starting point on a dot border. The dot border is - computed from this point. - - \sa getFirstBorder_u() - - */ + /*! + Get the starting point on a dot border. The dot border is + computed from this point. + * + \sa getFirstBorder_u() + */ unsigned int getFirstBorder_v() const { return this->firstBorder_v; } bool computeFreemanChainElement(const vpImage &I, const unsigned int &u, const unsigned int &v, @@ -522,7 +518,7 @@ class VISP_EXPORT vpDot2 : public vpTracker // Bounding box int bbox_u_min, bbox_u_max, bbox_v_min, bbox_v_max; - // The first point coodinate on the dot border + // The first point coordinate on the dot border unsigned int firstBorder_u; unsigned int firstBorder_v; diff --git a/modules/tracker/blob/src/dots/vpDot2.cpp b/modules/tracker/blob/src/dots/vpDot2.cpp index 4c2d5607be..f3453957db 100644 --- a/modules/tracker/blob/src/dots/vpDot2.cpp +++ b/modules/tracker/blob/src/dots/vpDot2.cpp @@ -102,12 +102,11 @@ void vpDot2::init() */ vpDot2::vpDot2() : m00(0.), m10(0.), m01(0.), m11(0.), m20(0.), m02(0.), mu11(0.), mu20(0.), mu02(0.), cog(), width(0), height(0), - surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), grayLevelPrecision(0.8), gamma(1.5), - sizePrecision(0.65), ellipsoidShapePrecision(0.65), maxSizeSearchDistancePrecision(0.65), - allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), compute_moment(false), graphics(false), - thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), firstBorder_u(0), firstBorder_v() -{ -} + surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), grayLevelPrecision(0.8), gamma(1.5), + sizePrecision(0.65), ellipsoidShapePrecision(0.65), maxSizeSearchDistancePrecision(0.65), + allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), compute_moment(false), graphics(false), + thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), firstBorder_u(0), firstBorder_v() +{ } /*! 
@@ -119,23 +118,22 @@ vpDot2::vpDot2() */ vpDot2::vpDot2(const vpImagePoint &ip) : m00(0.), m10(0.), m01(0.), m11(0.), m20(0.), m02(0.), mu11(0.), mu20(0.), mu02(0.), cog(ip), width(0), height(0), - surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), grayLevelPrecision(0.8), gamma(1.5), - sizePrecision(0.65), ellipsoidShapePrecision(0.65), maxSizeSearchDistancePrecision(0.65), - allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), compute_moment(false), graphics(false), - thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), firstBorder_u(0), firstBorder_v() -{ -} + surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), grayLevelPrecision(0.8), gamma(1.5), + sizePrecision(0.65), ellipsoidShapePrecision(0.65), maxSizeSearchDistancePrecision(0.65), + allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), compute_moment(false), graphics(false), + thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), firstBorder_u(0), firstBorder_v() +{ } /*! Copy constructor. */ vpDot2::vpDot2(const vpDot2 &twinDot) : vpTracker(twinDot), m00(0.), m10(0.), m01(0.), m11(0.), m20(0.), m02(0.), mu11(0.), mu20(0.), mu02(0.), cog(), - width(0), height(0), surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), - grayLevelPrecision(0.8), gamma(1.5), sizePrecision(0.65), ellipsoidShapePrecision(0.65), - maxSizeSearchDistancePrecision(0.65), allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), - compute_moment(false), graphics(false), thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), - firstBorder_u(0), firstBorder_v() + width(0), height(0), surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), + grayLevelPrecision(0.8), gamma(1.5), sizePrecision(0.65), ellipsoidShapePrecision(0.65), + maxSizeSearchDistancePrecision(0.65), allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), + compute_moment(false), graphics(false), thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), + firstBorder_u(0), firstBorder_v() { *this = twinDot; } @@ -191,11 +189,6 @@ vpDot2 &vpDot2::operator=(const vpDot2 &twinDot) return (*this); } -/*! - Destructor... do nothing for the moment. 
-*/ -vpDot2::~vpDot2() {} - /****************************************************************************** * * PUBLIC METHODS @@ -261,7 +254,8 @@ void vpDot2::initTracking(const vpImage &I, unsigned int size) if (Ip - (1 - grayLevelPrecision) < 0) { gray_level_min = 0; - } else { + } + else { gray_level_min = (unsigned int)(255 * pow(Ip - (1 - grayLevelPrecision), gamma)); if (gray_level_min > 255) gray_level_min = 255; @@ -275,8 +269,9 @@ void vpDot2::initTracking(const vpImage &I, unsigned int size) try { track(I); - } catch (const vpException &e) { - // vpERROR_TRACE("Error caught") ; + } + catch (const vpException &e) { + // vpERROR_TRACE("Error caught") ; throw(e); } } @@ -319,7 +314,8 @@ void vpDot2::initTracking(const vpImage &I, const vpImagePoint &i if (Ip - (1 - grayLevelPrecision) < 0) { gray_level_min = 0; - } else { + } + else { gray_level_min = (unsigned int)(255 * pow(Ip - (1 - grayLevelPrecision), gamma)); if (gray_level_min > 255) gray_level_min = 255; @@ -333,8 +329,9 @@ void vpDot2::initTracking(const vpImage &I, const vpImagePoint &i try { track(I); - } catch (const vpException &e) { - // vpERROR_TRACE("Error caught") ; + } + catch (const vpException &e) { + // vpERROR_TRACE("Error caught") ; throw(e); } } @@ -391,8 +388,9 @@ void vpDot2::initTracking(const vpImage &I, const vpImagePoint &i try { track(I); - } catch (const vpException &e) { - // vpERROR_TRACE("Error caught") ; + } + catch (const vpException &e) { + // vpERROR_TRACE("Error caught") ; throw(e); } } @@ -486,10 +484,12 @@ void vpDot2::track(const vpImage &I, bool canMakeTheWindowGrow) std::fabs(getHeight()) <= std::numeric_limits::epsilon()) { searchWindowWidth = 80.; searchWindowHeight = 80.; - } else if (canMakeTheWindowGrow) { + } + else if (canMakeTheWindowGrow) { searchWindowWidth = getWidth() * 5; searchWindowHeight = getHeight() * 5; - } else { + } + else { searchWindowWidth = getWidth(); searchWindowHeight = getHeight(); } @@ -547,7 +547,8 @@ void vpDot2::track(const vpImage &I, bool canMakeTheWindowGrow) // getMeanGrayLevel(I); if (Ip - (1 - grayLevelPrecision) < 0) { gray_level_min = 0; - } else { + } + else { gray_level_min = (unsigned int)(255 * pow(Ip - (1 - grayLevelPrecision), gamma)); if (gray_level_min > 255) gray_level_min = 255; @@ -719,9 +720,11 @@ void vpDot2::setGrayLevelPrecision(const double &precision) double epsilon = 0.05; if (grayLevelPrecision < epsilon) { this->grayLevelPrecision = epsilon; - } else if (grayLevelPrecision > 1) { + } + else if (grayLevelPrecision > 1) { this->grayLevelPrecision = 1.0; - } else { + } + else { this->grayLevelPrecision = precision; } } @@ -746,9 +749,11 @@ void vpDot2::setSizePrecision(const double &precision) { if (sizePrecision < 0) { this->sizePrecision = 0; - } else if (sizePrecision > 1) { + } + else if (sizePrecision > 1) { this->sizePrecision = 1.0; - } else { + } + else { this->sizePrecision = precision; } } @@ -790,9 +795,11 @@ void vpDot2::setEllipsoidShapePrecision(const double &precision) if (ellipsoidShapePrecision < 0) { this->ellipsoidShapePrecision = 0; - } else if (ellipsoidShapePrecision > 1) { + } + else if (ellipsoidShapePrecision > 1) { this->ellipsoidShapePrecision = 1.0; - } else { + } + else { this->ellipsoidShapePrecision = precision; } } @@ -817,9 +824,11 @@ void vpDot2::setMaxSizeSearchDistancePrecision(const double &precision) double epsilon = 0.05; if (maxSizeSearchDistancePrecision < epsilon) { this->maxSizeSearchDistancePrecision = epsilon; - } else if (maxSizeSearchDistancePrecision > 1) { + } + else if 
(maxSizeSearchDistancePrecision > 1) { this->maxSizeSearchDistancePrecision = 1.0; - } else { + } + else { this->maxSizeSearchDistancePrecision = precision; } } @@ -1062,10 +1071,10 @@ void vpDot2::searchDotsInArea(const vpImage &I, int area_u, int a // if( border_u == cogBadDot.get_u() && v == cogBadDot.get_v()) { if ((std::fabs(border_u - cogBadDot.get_u()) <= vpMath::maximum(std::fabs((double)border_u), std::fabs(cogBadDot.get_u())) * - std::numeric_limits::epsilon()) && + std::numeric_limits::epsilon()) && (std::fabs(v - cogBadDot.get_v()) <= vpMath::maximum(std::fabs((double)v), std::fabs(cogBadDot.get_v())) * - std::numeric_limits::epsilon())) { + std::numeric_limits::epsilon())) { good_germ = false; } ++it_edges; @@ -1179,8 +1188,9 @@ void vpDot2::searchDotsInArea(const vpImage &I, int area_u, int a if (itnice == niceDots.end() && stopLoop == false) { niceDots.push_back(*dotToTest); } - } else { - // Store bad dots + } + else { + // Store bad dots badDotsVector.push_front(*dotToTest); } } @@ -1231,10 +1241,10 @@ bool vpDot2::isValid(const vpImage &I, const vpDot2 &wantedDot) #ifdef DEBUG std::cout << "test size precision......................\n"; std::cout << "wanted dot: " - << "w=" << wantedDot.getWidth() << " h=" << wantedDot.getHeight() << " s=" << wantedDot.getArea() - << " precision=" << size_precision << " epsilon=" << epsilon << std::endl; + << "w=" << wantedDot.getWidth() << " h=" << wantedDot.getHeight() << " s=" << wantedDot.getArea() + << " precision=" << size_precision << " epsilon=" << epsilon << std::endl; std::cout << "dot found: " - << "w=" << getWidth() << " h=" << getHeight() << " s=" << getArea() << std::endl; + << "w=" << getWidth() << " h=" << getHeight() << " s=" << getArea() << std::endl; #endif if ((wantedDot.getWidth() * size_precision - epsilon < getWidth()) == false) { @@ -1440,7 +1450,8 @@ bool vpDot2::hasGoodLevel(const vpImage &I, const unsigned int &u if (I[v][u] >= gray_level_min && I[v][u] <= gray_level_max) { return true; - } else { + } + else { return false; } } @@ -1465,7 +1476,8 @@ bool vpDot2::hasReverseLevel(const vpImage &I, const unsigned int if (I[v][u] < gray_level_min || I[v][u] > gray_level_max) { return true; - } else { + } + else { return false; } } @@ -1702,7 +1714,8 @@ bool vpDot2::computeParameters(const vpImage &I, const double &_u std::fabs(m00 - 1.) <= vpMath::maximum(std::fabs(m00), 1.) 
* std::numeric_limits::epsilon()) { vpDEBUG_TRACE(3, "The center of gravity of the dot wasn't properly detected"); return false; - } else // compute the center + } + else // compute the center { // this magic formula gives the coordinates of the center of gravity double tmpCenter_u = m10 / m00; @@ -1808,58 +1821,66 @@ bool vpDot2::computeFreemanChainElement(const vpImage &I, const u updateFreemanPosition(_u, _v, (element + 2) % 8); if (hasGoodLevel(I, _u, _v)) { element = (element + 2) % 8; // turn right - } else { + } + else { unsigned int _u1 = u; unsigned int _v1 = v; updateFreemanPosition(_u1, _v1, (element + 1) % 8); if (hasGoodLevel(I, _u1, _v1)) { element = (element + 1) % 8; // turn diag right - } else { + } + else { unsigned int _u2 = u; unsigned int _v2 = v; updateFreemanPosition(_u2, _v2, element); // same direction if (hasGoodLevel(I, _u2, _v2)) { // element = element; // keep same dir - } else { + } + else { unsigned int _u3 = u; unsigned int _v3 = v; updateFreemanPosition(_u3, _v3, (element + 7) % 8); // diag left if (hasGoodLevel(I, _u3, _v3)) { element = (element + 7) % 8; // turn diag left - } else { + } + else { unsigned int _u4 = u; unsigned int _v4 = v; updateFreemanPosition(_u4, _v4, (element + 6) % 8); // left if (hasGoodLevel(I, _u4, _v4)) { element = (element + 6) % 8; // turn left - } else { + } + else { unsigned int _u5 = u; unsigned int _v5 = v; updateFreemanPosition(_u5, _v5, (element + 5) % 8); // left if (hasGoodLevel(I, _u5, _v5)) { element = (element + 5) % 8; // turn diag down - } else { + } + else { unsigned int _u6 = u; unsigned int _v6 = v; updateFreemanPosition(_u6, _v6, (element + 4) % 8); // left if (hasGoodLevel(I, _u6, _v6)) { element = (element + 4) % 8; // turn down - } else { + } + else { unsigned int _u7 = u; unsigned int _v7 = v; updateFreemanPosition(_u7, _v7, (element + 3) % 8); // diag if (hasGoodLevel(I, _u7, _v7)) { element = (element + 3) % 8; // turn diag right down - } else { - // No neighbor with a good level - // + } + else { + // No neighbor with a good level + // return false; } } @@ -2208,14 +2229,16 @@ void vpDot2::computeMeanGrayLevel(const vpImage &I) if (nb_pixels < 10) { // could be good to choose the min nb points from area of dot // add diagonals points to have enough point int imin, imax; - if ((cog_u - bbox_u_min) > (cog_v - bbox_v_min)) { + if ((cog_u - bbox_u_min) >(cog_v - bbox_v_min)) { imin = cog_v - bbox_v_min; - } else { + } + else { imin = cog_u - bbox_u_min; } if ((bbox_u_max - cog_u) > (bbox_v_max - cog_v)) { imax = bbox_v_max - cog_v; - } else { + } + else { imax = bbox_u_max - cog_u; } for (int i = -imin; i <= imax; i++) { @@ -2228,12 +2251,14 @@ void vpDot2::computeMeanGrayLevel(const vpImage &I) if ((cog_u - bbox_u_min) > (bbox_v_max - cog_v)) { imin = bbox_v_max - cog_v; - } else { + } + else { imin = cog_u - bbox_u_min; } if ((bbox_u_max - cog_u) > (cog_v - bbox_v_min)) { imax = cog_v - bbox_v_min; - } else { + } + else { imax = bbox_u_max - cog_u; } @@ -2249,7 +2274,8 @@ void vpDot2::computeMeanGrayLevel(const vpImage &I) if (nb_pixels == 0) { // should never happen throw(vpTrackingException(vpTrackingException::notEnoughPointError, "No point was found")); - } else { + } + else { mean_gray_level = sum_value / nb_pixels; } } @@ -2303,7 +2329,8 @@ vpMatrix vpDot2::defineDots(vpDot2 dot[], const unsigned int &n, const std::stri vpDisplay::displayCross(I, cog, 10, col); } } - } catch (...) { + } + catch (...) 
{ std::cout << "Cannot track dots from file" << std::endl; fromFile = false; } @@ -2331,7 +2358,8 @@ vpMatrix vpDot2::defineDots(vpDot2 dot[], const unsigned int &n, const std::stri dot[i].setGraphics(true); dot[i].initTracking(I); cog = dot[i].getCog(); - } else { + } + else { vpDisplay::getClick(I, cog); dot[i].setCog(cog); } diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h index b804b965f9..b42f849550 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h @@ -49,26 +49,26 @@ class VISP_EXPORT vpMbDepthDenseTracker : public virtual vpMbTracker { public: vpMbDepthDenseTracker(); - virtual ~vpMbDepthDenseTracker(); + virtual ~vpMbDepthDenseTracker() override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; - virtual inline vpColVector getError() const { return m_error_depthDense; } + virtual inline vpColVector getError() const override { return m_error_depthDense; } virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual inline vpColVector getRobustWeights() const { return m_w_depthDense; } + virtual inline vpColVector getRobustWeights() const override { return m_w_depthDense; } - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false); @@ -77,9 +77,9 @@ class VISP_EXPORT vpMbDepthDenseTracker : public virtual vpMbTracker const vpHomogeneousMatrix &cMo, bool verbose = false); #endif - virtual void resetTracker(); + virtual void resetTracker() override; - virtual void setCameraParameters(const vpCameraParameters &camera); + virtual void setCameraParameters(const vpCameraParameters &camera) override; virtual void setDepthDenseFilteringMaxDistance(double maxDistance); virtual void setDepthDenseFilteringMethod(int method); @@ -97,22 +97,22 @@ class VISP_EXPORT vpMbDepthDenseTracker : public virtual vpMbTracker m_depthDenseSamplingStepY = stepY; } - virtual void setOgreVisibilityTest(const bool &v); + virtual void setOgreVisibilityTest(const bool &v) override; - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; #ifdef VISP_HAVE_PCL virtual void setPose(const pcl::PointCloud::ConstPtr &point_cloud, const vpHomogeneousMatrix &cdMo); #endif - virtual void 
setScanLineVisibilityTest(const bool &v); + virtual void setScanLineVisibilityTest(const bool &v) override; void setUseDepthDenseTracking(const std::string &name, const bool &useDepthDenseTracking); - virtual void testTracking(); + virtual void testTracking() override; - virtual void track(const vpImage &); - virtual void track(const vpImage &); + virtual void track(const vpImage &) override; + virtual void track(const vpImage &) override; #ifdef VISP_HAVE_PCL virtual void track(const pcl::PointCloud::ConstPtr &point_cloud); #endif @@ -151,20 +151,20 @@ class VISP_EXPORT vpMbDepthDenseTracker : public virtual vpMbTracker void computeVisibility(unsigned int width, unsigned int height); void computeVVS(); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; virtual void computeVVSWeights(); using vpMbTracker::computeVVSWeights; virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; - virtual void initFaceFromCorners(vpMbtPolygon &polygon); + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; - virtual void initFaceFromLines(vpMbtPolygon &polygon); + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; #ifdef VISP_HAVE_PCL void segmentPointCloud(const pcl::PointCloud::ConstPtr &point_cloud); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h index 290d51bf9d..72678f823e 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h @@ -49,31 +49,31 @@ class VISP_EXPORT vpMbDepthNormalTracker : public virtual vpMbTracker { public: vpMbDepthNormalTracker(); - virtual ~vpMbDepthNormalTracker(); + virtual ~vpMbDepthNormalTracker() override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual inline vpMbtFaceDepthNormal::vpFeatureEstimationType getDepthFeatureEstimationMethod() const { return m_depthNormalFeatureEstimationMethod; } - virtual inline vpColVector getError() const { return m_error_depthNormal; } + virtual inline vpColVector getError() const override { return m_error_depthNormal; } virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual inline vpColVector getRobustWeights() const { return m_w_depthNormal; } + virtual inline vpColVector getRobustWeights() const override { return m_w_depthNormal; } - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; - 
virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false); @@ -82,9 +82,9 @@ class VISP_EXPORT vpMbDepthNormalTracker : public virtual vpMbTracker const vpHomogeneousMatrix &cMo, bool verbose = false); #endif - virtual void resetTracker(); + virtual void resetTracker() override; - virtual void setCameraParameters(const vpCameraParameters &camera); + virtual void setCameraParameters(const vpCameraParameters &camera) override; virtual void setDepthNormalFaceCentroidMethod(const vpMbtFaceDepthNormal::vpFaceCentroidType &method); @@ -100,22 +100,22 @@ class VISP_EXPORT vpMbDepthNormalTracker : public virtual vpMbTracker // virtual void setDepthNormalUseRobust(bool use); - virtual void setOgreVisibilityTest(const bool &v); + virtual void setOgreVisibilityTest(const bool &v) override; - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; #if defined(VISP_HAVE_PCL) virtual void setPose(const pcl::PointCloud::ConstPtr &point_cloud, const vpHomogeneousMatrix &cdMo); #endif - virtual void setScanLineVisibilityTest(const bool &v); + virtual void setScanLineVisibilityTest(const bool &v) override; void setUseDepthNormalTracking(const std::string &name, const bool &useDepthNormalTracking); - virtual void testTracking(); + virtual void testTracking() override; - virtual void track(const vpImage &); - virtual void track(const vpImage &I_color); + virtual void track(const vpImage &) override; + virtual void track(const vpImage &I_color) override; #if defined(VISP_HAVE_PCL) virtual void track(const pcl::PointCloud::ConstPtr &point_cloud); #endif @@ -166,20 +166,20 @@ class VISP_EXPORT vpMbDepthNormalTracker : public virtual vpMbTracker void computeVisibility(unsigned int width, unsigned int height); void computeVVS(); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; virtual std::vector > getFeaturesForDisplayDepthNormal(); virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; - virtual void initFaceFromCorners(vpMbtPolygon &polygon); + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; - virtual void initFaceFromLines(vpMbtPolygon &polygon); + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; #ifdef VISP_HAVE_PCL void segmentPointCloud(const pcl::PointCloud::ConstPtr &point_cloud); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h index 68ef176a30..73152356c9 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h @@ -232,18 +232,18 @@ class VISP_EXPORT vpMbEdgeKltTracker : 
virtual ~vpMbEdgeKltTracker(); virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; - virtual inline vpColVector getError() const { return m_error_hybrid; } + virtual inline vpColVector getError() const override { return m_error_hybrid; } virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual inline vpColVector getRobustWeights() const { return m_w_hybrid; } + virtual inline vpColVector getRobustWeights() const override { return m_w_hybrid; } /*! Get the near distance for clipping. @@ -252,13 +252,13 @@ class VISP_EXPORT vpMbEdgeKltTracker : */ virtual inline double getNearClippingDistance() const { return vpMbKltTracker::getNearClippingDistance(); } - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, - bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); - void resetTracker(); + bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; + void resetTracker() override; - virtual void setCameraParameters(const vpCameraParameters &cam); + virtual void setCameraParameters(const vpCameraParameters &cam) override; /*! Specify which clipping to use. @@ -267,21 +267,21 @@ class VISP_EXPORT vpMbEdgeKltTracker : \param flags : New clipping flags. */ - virtual void setClipping(const unsigned int &flags) { vpMbEdgeTracker::setClipping(flags); } + virtual void setClipping(const unsigned int &flags) override { vpMbEdgeTracker::setClipping(flags); } /*! Set the far distance for clipping. \param dist : Far clipping value. */ - virtual void setFarClippingDistance(const double &dist) { vpMbEdgeTracker::setFarClippingDistance(dist); } + virtual void setFarClippingDistance(const double &dist) override { vpMbEdgeTracker::setFarClippingDistance(dist); } /*! Set the near distance for clipping. \param dist : Near clipping value. */ - virtual void setNearClippingDistance(const double &dist) { vpMbEdgeTracker::setNearClippingDistance(dist); } + virtual void setNearClippingDistance(const double &dist) override { vpMbEdgeTracker::setNearClippingDistance(dist); } /*! 
Use Ogre3D for visibility tests @@ -291,7 +291,7 @@ class VISP_EXPORT vpMbEdgeKltTracker : \param v : True to use it, False otherwise */ - virtual void setOgreVisibilityTest(const bool &v) + virtual void setOgreVisibilityTest(const bool &v) override { vpMbTracker::setOgreVisibilityTest(v); #ifdef VISP_HAVE_OGRE @@ -304,40 +304,43 @@ class VISP_EXPORT vpMbEdgeKltTracker : \param v : True to use it, False otherwise */ - virtual void setScanLineVisibilityTest(const bool &v) + virtual void setScanLineVisibilityTest(const bool &v) override { vpMbEdgeTracker::setScanLineVisibilityTest(v); vpMbKltTracker::setScanLineVisibilityTest(v); } - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; /*! - Set if the projection error criteria has to be computed. - - \param flag : True if the projection error criteria has to be computed, - false otherwise - */ - virtual void setProjectionErrorComputation(const bool &flag) { vpMbEdgeTracker::setProjectionErrorComputation(flag); } + * Set if the projection error criteria has to be computed. + * + * \param flag : True if the projection error criteria has to be computed, + * false otherwise + */ + virtual void setProjectionErrorComputation(const bool &flag) override + { + vpMbEdgeTracker::setProjectionErrorComputation(flag); + } - virtual void testTracking() {} - virtual void track(const vpImage &I); - virtual void track(const vpImage &I_color); + virtual void testTracking() override { } + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I_color) override; protected: virtual void computeVVS(const vpImage &I, const unsigned int &nbInfos, unsigned int &nbrow, unsigned int lvl = 0, double *edge_residual = NULL, double *klt_residual = NULL); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; using vpMbTracker::computeCovarianceMatrixVVS; using vpMbTracker::computeVVSPoseEstimation; - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; virtual void initCircle(const vpPoint &, const vpPoint &, const vpPoint &, double r, int idFace = 0, - const std::string &name = ""); - virtual void initCylinder(const vpPoint &, const vpPoint &, double r, int idFace, const std::string &name = ""); - virtual void initFaceFromCorners(vpMbtPolygon &polygon); - virtual void initFaceFromLines(vpMbtPolygon &polygon); + const std::string &name = "") override; + virtual void initCylinder(const vpPoint &, const vpPoint &, double r, int idFace, const std::string &name = "") override; + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; unsigned int initMbtTracking(unsigned int level = 0); bool postTracking(const vpImage &I, vpColVector &w_mbt, vpColVector &w_klt, unsigned int lvl = 0); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeTracker.h index 8fc8ce6c70..4f5073230f 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeTracker.h @@ -1,5 +1,4 @@ 
-/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,13 +29,12 @@ * * Description: * Make the complete tracking of an object by using its CAD model - * -*****************************************************************************/ + */ /*! - \file vpMbEdgeTracker.h - \brief Make the complete tracking of an object by using its CAD model. -*/ + * \file vpMbEdgeTracker.h + * \brief Make the complete tracking of an object by using its CAD model. + */ #ifndef vpMbEdgeTracker_HH #define vpMbEdgeTracker_HH @@ -76,168 +74,166 @@ #endif /*! - \class vpMbEdgeTracker - \ingroup group_mbt_trackers - \brief Make the complete tracking of an object by using its CAD model. - \warning This class is deprecated for user usage. You should rather use the high level - vpMbGenericTracker class. - - This class allows to track an object or a scene given its 3D model. A - video can be found on YouTube \e https://www.youtube.com/watch?v=UK10KMMJFCI - The \ref tutorial-tracking-mb-deprecated is also a good starting point to use this class. - - The tracker requires the knowledge of the 3D model that could be provided in - a vrml or in a cao file. The cao format is described in loadCAOModel(). It may - also use an xml file used to tune the behavior of the tracker and an init file - used to compute the pose at the very first image. - - The following code shows the simplest way to use the tracker. - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ - vpMbEdgeTracker tracker; // Create a model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose computed using the tracker. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Edge Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required - // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the - // cube.init file. - tracker.initClick(I, "cube.init"); - - while(true){ - // Acquire a new image - vpDisplay::display(I); - tracker.track(I); // Track the object on this image - tracker.getPose(cMo); // Get the pose - - tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -} -\endcode - - For application with large inter-images displacement, multi-scale tracking - is also possible, by setting the number of scales used and by activating (or - not) them using a vector of booleans, as presented in the following code: - -\code - ... - vpHomogeneousMatrix cMo; // Pose computed using the tracker. 
- vpCameraParameters cam; - - std::vector< bool > scales(3); //Three scales used - scales.push_back(true); //First scale : active - scales.push_back(false); //Second scale (/2) : not active - scales.push_back(true); //Third scale (/4) : active - tracker.setScales(scales); // Set active scales for multi-scale tracking - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - ... -\endcode - - The tracker can also be used without display, in that case the initial pose - must be known (object always at the same initial pose for example) or - computed using another method: - -\code -#include -#include -#include -#include -#include - -int main() -{ - vpMbEdgeTracker tracker; // Create a model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. - - //acquire an image - vpImageIo::read(I, "cube.pgm"); // Example of acquisition - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - tracker.initFromPose(I, cMo); // initialize the tracker with the given pose. - - while(true){ - // acquire a new image - tracker.track(I); // track the object on this image - tracker.getPose(cMo); // get the pose - } - - return 0; -} -\endcode - - Finally it can be used not to track an object but just to display a model at - a given pose: - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ - vpMbEdgeTracker tracker; // Create a model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used to display the model. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Edge Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - // load the 3d model, to read .wrl model coin is required, if coin is not installed - // .cao file can be used. - tracker.loadModel("cube.cao"); - - while(true){ - // acquire a new image - // Get the pose using any method - vpDisplay::display(I); - tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -} -\endcode - -*/ - + * \class vpMbEdgeTracker + * \ingroup group_mbt_trackers + * \brief Make the complete tracking of an object by using its CAD model. + * \warning This class is deprecated for user usage. You should rather use the high level + * vpMbGenericTracker class. + * + * This class allows to track an object or a scene given its 3D model. A + * video can be found on YouTube \e https://www.youtube.com/watch?v=UK10KMMJFCI + * The \ref tutorial-tracking-mb-deprecated is also a good starting point to use this class. + * + * The tracker requires the knowledge of the 3D model that could be provided in + * a vrml or in a cao file. The cao format is described in loadCAOModel(). It may + * also use an xml file used to tune the behavior of the tracker and an init file + * used to compute the pose at the very first image. + * + * The following code shows the simplest way to use the tracker. 
+ * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpMbEdgeTracker tracker; // Create a model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose computed using the tracker. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Edge Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required + * // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the + * // cube.init file. + * tracker.initClick(I, "cube.init"); + * + * while(true){ + * // Acquire a new image + * vpDisplay::display(I); + * tracker.track(I); // Track the object on this image + * tracker.getPose(cMo); // Get the pose + * + * tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. + * vpDisplay::flush(I); + * } + * + * return 0; + * } + * \endcode + * + * For application with large inter-images displacement, multi-scale tracking + * is also possible, by setting the number of scales used and by activating (or + * not) them using a vector of booleans, as presented in the following code: + * + * \code + * ... + * vpHomogeneousMatrix cMo; // Pose computed using the tracker. + * vpCameraParameters cam; + * + * std::vector< bool > scales(3); //Three scales used + * scales.push_back(true); //First scale : active + * scales.push_back(false); //Second scale (/2) : not active + * scales.push_back(true); //Third scale (/4) : active + * tracker.setScales(scales); // Set active scales for multi-scale tracking + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * ... + * \endcode + * + * The tracker can also be used without display, in that case the initial pose + * must be known (object always at the same initial pose for example) or + * computed using another method: + * + * \code + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpMbEdgeTracker tracker; // Create a model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. + * + * //acquire an image + * vpImageIo::read(I, "cube.pgm"); // Example of acquisition + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * tracker.initFromPose(I, cMo); // initialize the tracker with the given pose. + * + * while(true){ + * // acquire a new image + * tracker.track(I); // track the object on this image + * tracker.getPose(cMo); // get the pose + * } + * + * return 0; + * } + * \endcode + * + * Finally it can be used not to track an object but just to display a model at + * a given pose: + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpMbEdgeTracker tracker; // Create a model based tracker. 
+ * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used to display the model. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Edge Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * // load the 3d model, to read .wrl model coin is required, if coin is not installed + * // .cao file can be used. + * tracker.loadModel("cube.cao"); + * + * while(true){ + * // acquire a new image + * // Get the pose using any method + * vpDisplay::display(I); + * tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. + * vpDisplay::flush(I); + * } + * + * return 0; + * } + $ \endcode + */ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker { protected: @@ -314,15 +310,15 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker public: vpMbEdgeTracker(); - virtual ~vpMbEdgeTracker(); + virtual ~vpMbEdgeTracker() override; /** @name Inherited functionalities from vpMbEdgeTracker */ //@{ virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; void getLline(std::list &linesList, unsigned int level = 0) const; void getLcircle(std::list &circlesList, unsigned int level = 0) const; @@ -331,57 +327,57 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; /*! - Get the moving edge parameters. - - \param p_me [out] : an instance of the moving edge parameters used by the - tracker. - */ + * Get the moving edge parameters. + * + * \param p_me [out] : an instance of the moving edge parameters used by the + * tracker. + */ virtual inline void getMovingEdge(vpMe &p_me) const { p_me = this->me; } /*! - Get the moving edge parameters. - - \return an instance of the moving edge parameters used by the tracker. - */ + * Get the moving edge parameters. + * + * \return an instance of the moving edge parameters used by the tracker. + */ virtual inline vpMe getMovingEdge() const { return this->me; } virtual unsigned int getNbPoints(unsigned int level = 0) const; /*! - Return the scales levels used for the tracking. - - \return The scales levels used for the tracking. - */ + * Return the scales levels used for the tracking. + * + * \return The scales levels used for the tracking. + */ std::vector getScales() const { return scales; } /*! - \return The threshold value between 0 and 1 over good moving edges ratio. - It allows to decide if the tracker has enough valid moving edges to - compute a pose. 
1 means that all moving edges should be considered as - good to have a valid pose, while 0.1 means that 10% of the moving edge - are enough to declare a pose valid. - - \sa setGoodMovingEdgesRatioThreshold() + * \return The threshold value between 0 and 1 over good moving edges ratio. + * It allows to decide if the tracker has enough valid moving edges to + * compute a pose. 1 means that all moving edges should be considered as + * good to have a valid pose, while 0.1 means that 10% of the moving edge + * are enough to declare a pose valid. + * + * \sa setGoodMovingEdgesRatioThreshold() */ inline double getGoodMovingEdgesRatioThreshold() const { return percentageGdPt; } - virtual inline vpColVector getError() const { return m_error_edge; } + virtual inline vpColVector getError() const override { return m_error_edge; } - virtual inline vpColVector getRobustWeights() const { return m_w_edge; } + virtual inline vpColVector getRobustWeights() const override { return m_w_edge; } - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); - void resetTracker(); - - /*! - Set the camera parameters. + void resetTracker() override; - \param cam : The new camera parameters. + /*! + * Set the camera parameters. + * + * \param cam : The new camera parameters. */ - virtual void setCameraParameters(const vpCameraParameters &cam) + virtual void setCameraParameters(const vpCameraParameters &cam) override { m_cam = cam; @@ -403,21 +399,21 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker } } - virtual void setClipping(const unsigned int &flags); + virtual void setClipping(const unsigned int &flags) override; - virtual void setFarClippingDistance(const double &dist); + virtual void setFarClippingDistance(const double &dist) override; - virtual void setNearClippingDistance(const double &dist); + virtual void setNearClippingDistance(const double &dist) override; /*! - Use Ogre3D for visibility tests - - \warning This function has to be called before the initialization of the - tracker. - - \param v : True to use it, False otherwise - */ - virtual void setOgreVisibilityTest(const bool &v) + * Use Ogre3D for visibility tests + * + * \warning This function has to be called before the initialization of the + * tracker. + * + * \param v : True to use it, False otherwise + */ + virtual void setOgreVisibilityTest(const bool &v) override { vpMbTracker::setOgreVisibilityTest(v); #ifdef VISP_HAVE_OGRE @@ -426,11 +422,11 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker } /*! - Use Scanline algorithm for visibility tests - - \param v : True to use it, False otherwise - */ - virtual void setScanLineVisibilityTest(const bool &v) + * Use Scanline algorithm for visibility tests + * + * \param v : True to use it, False otherwise + */ + virtual void setScanLineVisibilityTest(const bool &v) override { vpMbTracker::setScanLineVisibilityTest(v); @@ -444,31 +440,31 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker } /*! - Set the threshold value between 0 and 1 over good moving edges ratio. It - allows to decide if the tracker has enough valid moving edges to compute - a pose. 
1 means that all moving edges should be considered as good to - have a valid pose, while 0.1 means that 10% of the moving edge are enough - to declare a pose valid. - - \param threshold : Value between 0 and 1 that corresponds to the ratio of - good moving edges that is necessary to consider that the estimated pose - is valid. Default value is 0.4. - - \sa getGoodMovingEdgesRatioThreshold() + * Set the threshold value between 0 and 1 over good moving edges ratio. It + * allows to decide if the tracker has enough valid moving edges to compute + * a pose. 1 means that all moving edges should be considered as good to + * have a valid pose, while 0.1 means that 10% of the moving edge are enough + * to declare a pose valid. + * + * \param threshold : Value between 0 and 1 that corresponds to the ratio of + * good moving edges that is necessary to consider that the estimated pose + * is valid. Default value is 0.4. + * + * \sa getGoodMovingEdgesRatioThreshold() */ void setGoodMovingEdgesRatioThreshold(double threshold) { percentageGdPt = threshold; } void setMovingEdge(const vpMe &me); - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; void setScales(const std::vector &_scales); void setUseEdgeTracking(const std::string &name, const bool &useEdgeTracking); - virtual void track(const vpImage &I); - virtual void track(const vpImage &I); + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I) override; //@} protected: @@ -487,8 +483,8 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker void computeVVSFirstPhase(const vpImage &I, unsigned int iter, double &count, unsigned int lvl = 0); void computeVVSFirstPhaseFactor(const vpImage &I, unsigned int lvl = 0); void computeVVSFirstPhasePoseEstimation(unsigned int iter, bool &isoJoIdentity); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; virtual void computeVVSInteractionMatrixAndResidu(const vpImage &I); virtual void computeVVSWeights(); using vpMbTracker::computeVVSWeights; @@ -497,13 +493,13 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker void displayFeaturesOnImage(const vpImage &I); void downScale(const unsigned int _scale); virtual std::vector > getFeaturesForDisplayEdge(); - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); - virtual void initFaceFromCorners(vpMbtPolygon &polygon); - virtual void initFaceFromLines(vpMbtPolygon &polygon); + const std::string &name = "") override; + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; unsigned int initMbtTracking(unsigned int &nberrors_lines, unsigned int &nberrors_cylinders, unsigned int &nberrors_circles); void initMovingEdge(const vpImage &I, const vpHomogeneousMatrix &_cMo); @@ -514,7 +510,7 @@ 
class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker void removeCylinder(const std::string &name); void removeLine(const std::string &name); void resetMovingEdge(); - virtual void testTracking(); + virtual void testTracking() override; void trackMovingEdge(const vpImage &I); void updateMovingEdge(const vpImage &I); void updateMovingEdgeWeights(); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index c8a40c05dd..fd8e63dc8d 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -214,17 +214,17 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker explicit vpMbGenericTracker(const std::vector &trackerTypes); vpMbGenericTracker(const std::vector &cameraNames, const std::vector &trackerTypes); - virtual ~vpMbGenericTracker(); + virtual ~vpMbGenericTracker() override; virtual double computeCurrentProjectionError(const vpImage &I, const vpHomogeneousMatrix &_cMo, - const vpCameraParameters &_cam); + const vpCameraParameters &_cam) override; virtual double computeCurrentProjectionError(const vpImage &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam); virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I1, const vpImage &I2, const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo, const vpCameraParameters &cam1, @@ -246,7 +246,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual std::vector getCameraNames() const; using vpMbTracker::getCameraParameters; - virtual void getCameraParameters(vpCameraParameters &camera) const; + virtual void getCameraParameters(vpCameraParameters &camera) const override; virtual void getCameraParameters(vpCameraParameters &cam1, vpCameraParameters &cam2) const; virtual void getCameraParameters(std::map &mapOfCameraParameters) const; @@ -256,9 +256,9 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void getClipping(unsigned int &clippingFlag1, unsigned int &clippingFlag2) const; virtual void getClipping(std::map &mapOfClippingFlags) const; - virtual inline vpColVector getError() const { return m_error; } + virtual inline vpColVector getError() const override { return m_error; } - virtual vpMbHiddenFaces &getFaces(); + virtual vpMbHiddenFaces &getFaces() override; virtual vpMbHiddenFaces &getFaces(const std::string &cameraName); #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) @@ -303,7 +303,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; virtual void getModelForDisplay(std::map > > &mapOfModels, const std::map &mapOfwidths, const std::map &mapOfheights, @@ -337,30 +337,30 @@ class VISP_EXPORT 
vpMbGenericTracker : public vpMbTracker virtual unsigned int getNbPoints(unsigned int level = 0) const; virtual void getNbPoints(std::map &mapOfNbPoints, unsigned int level = 0) const; - virtual unsigned int getNbPolygon() const; + virtual unsigned int getNbPolygon() const override; virtual void getNbPolygon(std::map &mapOfNbPolygons) const; - virtual vpMbtPolygon *getPolygon(unsigned int index); + virtual vpMbtPolygon *getPolygon(unsigned int index) override; virtual vpMbtPolygon *getPolygon(const std::string &cameraName, unsigned int index); virtual std::pair, std::vector > > - getPolygonFaces(bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false); + getPolygonFaces(bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false) override; virtual void getPolygonFaces(std::map > &mapOfPolygons, std::map > > &mapOfPoints, bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false); using vpMbTracker::getPose; - virtual void getPose(vpHomogeneousMatrix &cMo) const; + virtual void getPose(vpHomogeneousMatrix &cMo) const override; virtual void getPose(vpHomogeneousMatrix &c1Mo, vpHomogeneousMatrix &c2Mo) const; virtual void getPose(std::map &mapOfCameraPoses) const; virtual std::string getReferenceCameraName() const; - virtual inline vpColVector getRobustWeights() const { return m_w; } + virtual inline vpColVector getRobustWeights() const override { return m_w; } virtual int getTrackerType() const; - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; #ifdef VISP_HAVE_MODULE_GUI using vpMbTracker::initClick; @@ -395,7 +395,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker const std::map &mapOfInitPoints); using vpMbTracker::initFromPose; - virtual void initFromPose(const vpImage &I, const vpHomogeneousMatrix &cMo); + virtual void initFromPose(const vpImage &I, const vpHomogeneousMatrix &cMo) override; virtual void initFromPose(const vpImage &I1, const vpImage &I2, const std::string &initFile1, const std::string &initFile2); virtual void initFromPose(const vpImage &I_color1, const vpImage &I_color2, @@ -416,7 +416,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void initFromPose(const std::map *> &mapOfColorImages, const std::map &mapOfCameraPoses); - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void loadConfigFile(const std::string &configFile1, const std::string &configFile2, bool verbose = true); virtual void loadConfigFile(const std::map &mapOfConfigFiles, bool verbose = true); @@ -425,7 +425,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker #endif virtual void loadModel(const std::string &modelFile, bool verbose = false, - const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); + const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; virtual void loadModel(const std::string &modelFile1, const std::string &modelFile2, bool verbose = false, const vpHomogeneousMatrix &T1 = vpHomogeneousMatrix(), const vpHomogeneousMatrix &T2 = vpHomogeneousMatrix()); @@ -461,17 +461,17 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker const std::map &mapOfCameraPoses, bool verbose = false, const std::map &mapOfT = std::map()); - virtual void resetTracker(); + virtual void resetTracker() override; - virtual void setAngleAppear(const double &a); + virtual void setAngleAppear(const double &a) override; virtual void 
setAngleAppear(const double &a1, const double &a2); virtual void setAngleAppear(const std::map &mapOfAngles); - virtual void setAngleDisappear(const double &a); + virtual void setAngleDisappear(const double &a) override; virtual void setAngleDisappear(const double &a1, const double &a2); virtual void setAngleDisappear(const std::map &mapOfAngles); - virtual void setCameraParameters(const vpCameraParameters &camera); + virtual void setCameraParameters(const vpCameraParameters &camera) override; virtual void setCameraParameters(const vpCameraParameters &camera1, const vpCameraParameters &camera2); virtual void setCameraParameters(const std::map &mapOfCameraParameters); @@ -480,7 +480,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setCameraTransformationMatrix(const std::map &mapOfTransformationMatrix); - virtual void setClipping(const unsigned int &flags); + virtual void setClipping(const unsigned int &flags) override; virtual void setClipping(const unsigned int &flags1, const unsigned int &flags2); virtual void setClipping(const std::map &mapOfClippingFlags); @@ -497,9 +497,9 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setDepthNormalPclPlaneEstimationRansacThreshold(double threshold); virtual void setDepthNormalSamplingStep(unsigned int stepX, unsigned int stepY); - virtual void setDisplayFeatures(bool displayF); + virtual void setDisplayFeatures(bool displayF) override; - virtual void setFarClippingDistance(const double &dist); + virtual void setFarClippingDistance(const double &dist) override; virtual void setFarClippingDistance(const double &dist1, const double &dist2); virtual void setFarClippingDistance(const std::map &mapOfClippingDists); @@ -524,28 +524,28 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setKltThresholdAcceptation(double th); #endif - virtual void setLod(bool useLod, const std::string &name = ""); + virtual void setLod(bool useLod, const std::string &name = "") override; - virtual void setMask(const vpImage &mask); + virtual void setMask(const vpImage &mask) override; - virtual void setMinLineLengthThresh(double minLineLengthThresh, const std::string &name = ""); - virtual void setMinPolygonAreaThresh(double minPolygonAreaThresh, const std::string &name = ""); + virtual void setMinLineLengthThresh(double minLineLengthThresh, const std::string &name = "") override; + virtual void setMinPolygonAreaThresh(double minPolygonAreaThresh, const std::string &name = "") override; virtual void setMovingEdge(const vpMe &me); virtual void setMovingEdge(const vpMe &me1, const vpMe &me2); virtual void setMovingEdge(const std::map &mapOfMe); - virtual void setNearClippingDistance(const double &dist); + virtual void setNearClippingDistance(const double &dist) override; virtual void setNearClippingDistance(const double &dist1, const double &dist2); virtual void setNearClippingDistance(const std::map &mapOfDists); - virtual void setOgreShowConfigDialog(bool showConfigDialog); - virtual void setOgreVisibilityTest(const bool &v); + virtual void setOgreShowConfigDialog(bool showConfigDialog) override; + virtual void setOgreVisibilityTest(const bool &v) override; - virtual void setOptimizationMethod(const vpMbtOptimizationMethod &opt); + virtual void setOptimizationMethod(const vpMbtOptimizationMethod &opt) override; - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage 
&I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; virtual void setPose(const vpImage &I1, const vpImage &I2, const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo); @@ -557,15 +557,15 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setPose(const std::map *> &mapOfColorImages, const std::map &mapOfCameraPoses); - virtual void setProjectionErrorComputation(const bool &flag); + virtual void setProjectionErrorComputation(const bool &flag) override; - virtual void setProjectionErrorDisplay(bool display); - virtual void setProjectionErrorDisplayArrowLength(unsigned int length); - virtual void setProjectionErrorDisplayArrowThickness(unsigned int thickness); + virtual void setProjectionErrorDisplay(bool display) override; + virtual void setProjectionErrorDisplayArrowLength(unsigned int length) override; + virtual void setProjectionErrorDisplayArrowThickness(unsigned int thickness) override; virtual void setReferenceCameraName(const std::string &referenceCameraName); - virtual void setScanLineVisibilityTest(const bool &v); + virtual void setScanLineVisibilityTest(const bool &v) override; virtual void setTrackerType(int type); virtual void setTrackerType(const std::map &mapOfTrackerTypes); @@ -577,10 +577,10 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setUseKltTracking(const std::string &name, const bool &useKltTracking); #endif - virtual void testTracking(); + virtual void testTracking() override; - virtual void track(const vpImage &I); - virtual void track(const vpImage &I_color); + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I_color) override; virtual void track(const vpImage &I1, const vpImage &I2); virtual void track(const vpImage &I_color1, const vpImage &I_color2); @@ -609,23 +609,23 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void computeVVS(std::map *> &mapOfImages); - virtual void computeVVSInit(); + virtual void computeVVSInit() override; virtual void computeVVSInit(std::map *> &mapOfImages); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInteractionMatrixAndResidu() override; virtual void computeVVSInteractionMatrixAndResidu(std::map *> &mapOfImages, std::map &mapOfVelocityTwist); using vpMbTracker::computeVVSWeights; virtual void computeVVSWeights(); virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; - virtual void initFaceFromCorners(vpMbtPolygon &polygon); + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; - virtual void initFaceFromLines(vpMbtPolygon &polygon); + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; virtual void loadConfigFileXML(const std::string &configFile, bool verbose = true); #ifdef VISP_HAVE_NLOHMANN_JSON @@ -670,63 +670,60 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker TrackerWrapper(); explicit TrackerWrapper(int trackerType); - virtual ~TrackerWrapper(); - + virtual inline vpColVector getError() const override { return m_error; } - virtual inline vpColVector getError() const { return m_error; } - - virtual inline vpColVector getRobustWeights() const { return m_w; } + 
virtual inline vpColVector getRobustWeights() const override { return m_w; } virtual inline int getTrackerType() const { return m_trackerType; } virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual std::vector > getFeaturesForDisplay(); virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false, - const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); + const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; virtual void reInitModel(const vpImage &I_color, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); - virtual void resetTracker(); + virtual void resetTracker() override; - virtual void setCameraParameters(const vpCameraParameters &camera); + virtual void setCameraParameters(const vpCameraParameters &camera) override; - virtual void setClipping(const unsigned int &flags); + virtual void setClipping(const unsigned int &flags) override; - virtual void setFarClippingDistance(const double &dist); + virtual void setFarClippingDistance(const double &dist) override; - virtual void setNearClippingDistance(const double &dist); + virtual void setNearClippingDistance(const double &dist) override; - virtual void setOgreVisibilityTest(const bool &v); + virtual void setOgreVisibilityTest(const bool &v) override; - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; - virtual void setProjectionErrorComputation(const bool &flag); + virtual void setProjectionErrorComputation(const bool &flag) override; - virtual void setScanLineVisibilityTest(const bool &v); + virtual void setScanLineVisibilityTest(const bool &v) override; virtual void setTrackerType(int type); - virtual void testTracking(); + virtual void testTracking() override; - virtual void track(const vpImage &I); - virtual void track(const vpImage &I_color); + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I_color) override; #ifdef VISP_HAVE_PCL // Fix error: using declaration ‘using vpMbDepthDenseTracker::setPose’ conflicts with a previous // using declaration that occurs with g++ 4.6.3 on Ubuntu 12.04 @@ -741,22 +738,22 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker protected: virtual void 
computeVVS(const vpImage *const ptr_I); - virtual void computeVVSInit(); + virtual void computeVVSInit() override; virtual void computeVVSInit(const vpImage *const ptr_I); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInteractionMatrixAndResidu() override; using vpMbEdgeTracker::computeVVSInteractionMatrixAndResidu; virtual void computeVVSInteractionMatrixAndResidu(const vpImage *const ptr_I); using vpMbTracker::computeVVSWeights; - virtual void computeVVSWeights(); + virtual void computeVVSWeights() override; virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; - virtual void initFaceFromCorners(vpMbtPolygon &polygon); - virtual void initFaceFromLines(vpMbtPolygon &polygon); + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; virtual void initMbtTracking(const vpImage *const ptr_I); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h index 504356f31a..b5a5cea431 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h @@ -251,7 +251,7 @@ template class vpMbHiddenFaces Ogre rendering options) when Ogre visibility is enabled. By default, this functionality is turned off. */ - inline void setOgreShowConfigDialog(bool showConfigDialog) { ogreShowConfigDialog = showConfigDialog; } + inline void setOgreShowConfigDialog(bool showConfigDialog) override { ogreShowConfigDialog = showConfigDialog; } #endif unsigned int setVisible(unsigned int width, unsigned int height, const vpCameraParameters &cam, @@ -331,9 +331,9 @@ template vpMbHiddenFaces::vpMbHiddenFaces(const vpMbHiddenFaces ©) : Lpol(), nbVisiblePolygon(copy.nbVisiblePolygon), scanlineRender(copy.scanlineRender) #ifdef VISP_HAVE_OGRE - , - ogreBackground(copy.ogreBackground), ogreInitialised(copy.ogreInitialised), nbRayAttempts(copy.nbRayAttempts), - ratioVisibleRay(copy.ratioVisibleRay), ogre(NULL), lOgrePolygons(), ogreShowConfigDialog(copy.ogreShowConfigDialog) + , + ogreBackground(copy.ogreBackground), ogreInitialised(copy.ogreInitialised), nbRayAttempts(copy.nbRayAttempts), + ratioVisibleRay(copy.ratioVisibleRay), ogre(NULL), lOgrePolygons(), ogreShowConfigDialog(copy.ogreShowConfigDialog) #endif { // Copy the list of polygons @@ -594,7 +594,7 @@ bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix & if (useOgre) #ifdef VISP_HAVE_OGRE testDisappear = - ((!Lpol[i]->isVisible(cMo, angleDisappears, true, cam, width, height)) || !isVisibleOgre(cameraPos, i)); + ((!Lpol[i]->isVisible(cMo, angleDisappears, true, cam, width, height)) || !isVisibleOgre(cameraPos, i)); #else { (void)cameraPos; // Avoid warning @@ -611,21 +611,23 @@ bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix & // std::endl; changed = true; Lpol[i]->isvisible = false; - } else { - // nbVisiblePolygon++; + } + else { + // nbVisiblePolygon++; Lpol[i]->isvisible = true; // if(nbCornerInsidePrev > Lpol[i]->getNbCornerInsidePrevImage()) // changed = true; } - } else { + } + else { bool testAppear = true; if (testAppear) { if (useOgre) #ifdef VISP_HAVE_OGRE testAppear = - 
((Lpol[i]->isVisible(cMo, angleAppears, true, cam, width, height)) && isVisibleOgre(cameraPos, i)); + ((Lpol[i]->isVisible(cMo, angleAppears, true, cam, width, height)) && isVisibleOgre(cameraPos, i)); #else testAppear = (Lpol[i]->isVisible(cMo, angleAppears, false, cam, width, height)); #endif @@ -638,8 +640,9 @@ bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix & Lpol[i]->isvisible = true; changed = true; // nbVisiblePolygon++; - } else { - // std::cout << "Problem" << std::endl; + } + else { + // std::cout << "Problem" << std::endl; Lpol[i]->isvisible = false; } } @@ -755,7 +758,8 @@ template void vpMbHiddenFaces::displayOgre(cons for (unsigned int i = 0; i < Lpol.size(); i++) { if (Lpol[i]->isVisible()) { lOgrePolygons[i]->setVisible(true); - } else + } + else lOgrePolygons[i]->setVisible(false); } ogre->display(ogreBackground, cMo); @@ -883,7 +887,8 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came if (it != result.end()) { if (it->movable->getName() == Ogre::StringConverter::toString(index)) { nbVisible++; - } else { + } + else { distance = it->distance; // Cannot use epsilon for comparison as ray length is slightly // different from the collision distance returned by @@ -892,9 +897,11 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came 1e-6 /*std::fabs(distance) * std::numeric_limits::epsilon()*/) nbVisible++; } - } else + } + else nbVisible++; // Collision not detected but present. - } else { + } + else { if (it != result.end()) { distance = it->distance; double distancePrev = distance; @@ -904,7 +911,8 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came if (it->movable->getName() == Ogre::StringConverter::toString(index)) { nbVisible++; - } else { + } + else { ++it; while (it != result.end()) { distance = it->distance; @@ -918,7 +926,8 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came } ++it; distancePrev = distance; - } else + } + else break; } } @@ -938,7 +947,8 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came if (visible) { lOgrePolygons[index]->setVisible(true); Lpol[index]->isvisible = true; - } else { + } + else { lOgrePolygons[index]->setVisible(false); Lpol[index]->isvisible = false; } diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h index 0746228d76..87b99217b2 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h @@ -258,9 +258,9 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker void addCircle(const vpPoint &P1, const vpPoint &P2, const vpPoint &P3, double r, const std::string &name = ""); virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; /*! Return the address of the circle feature list. 
*/ virtual std::list &getFeaturesCircle() { return circles_disp; } @@ -309,22 +309,22 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker */ inline double getKltThresholdAcceptation() const { return threshold_outlier; } - virtual inline vpColVector getError() const { return m_error_klt; } + virtual inline vpColVector getError() const override { return m_error_klt; } - virtual inline vpColVector getRobustWeights() const { return m_w_klt; } + virtual inline vpColVector getRobustWeights() const override { return m_w_klt; } virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, - bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); - void resetTracker(); + bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; + void resetTracker() override; - void setCameraParameters(const vpCameraParameters &cam); + void setCameraParameters(const vpCameraParameters &cam) override; /*! Set the erosion of the mask used on the Model faces. @@ -355,7 +355,7 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker \param v : True to use it, False otherwise */ - virtual void setOgreVisibilityTest(const bool &v) + virtual void setOgreVisibilityTest(const bool &v) override { vpMbTracker::setOgreVisibilityTest(v); #ifdef VISP_HAVE_OGRE @@ -368,7 +368,7 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker \param v : True to use it, False otherwise */ - virtual void setScanLineVisibilityTest(const bool &v) + virtual void setScanLineVisibilityTest(const bool &v) override { vpMbTracker::setScanLineVisibilityTest(v); @@ -376,8 +376,8 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker (*it)->useScanLine = v; } - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; /*! Set if the projection error criteria has to be computed. @@ -385,19 +385,19 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker \param flag : True if the projection error criteria has to be computed, false otherwise */ - virtual void setProjectionErrorComputation(const bool &flag) + virtual void setProjectionErrorComputation(const bool &flag) override { if (flag) std::cerr << "This option is not yet implemented in vpMbKltTracker, " - "projection error computation set to false." - << std::endl; + "projection error computation set to false." + << std::endl; } void setUseKltTracking(const std::string &name, const bool &useKltTracking); - virtual void testTracking(); - virtual void track(const vpImage &I); - virtual void track(const vpImage &I_color); + virtual void testTracking() override; + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I_color) override; /*! 
@name Deprecated functions @@ -453,16 +453,16 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker /** @name Protected Member Functions Inherited from vpMbKltTracker */ //@{ void computeVVS(); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; virtual std::vector > getFeaturesForDisplayKlt(); - virtual void init(const vpImage &I); - virtual void initFaceFromCorners(vpMbtPolygon &polygon); - virtual void initFaceFromLines(vpMbtPolygon &polygon); - virtual void initCircle(const vpPoint &, const vpPoint &, const vpPoint &, double, int, const std::string &name = ""); - virtual void initCylinder(const vpPoint &, const vpPoint &, double, int, const std::string &name = ""); + virtual void init(const vpImage &I) override; + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; + virtual void initCircle(const vpPoint &, const vpPoint &, const vpPoint &, double, int, const std::string &name = "") override; + virtual void initCylinder(const vpPoint &, const vpPoint &, double, int, const std::string &name = "") override; void preTracking(const vpImage &I); bool postTracking(const vpImage &I, vpColVector &w); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h index 36a12ac67a..32949e7c1b 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h @@ -268,8 +268,8 @@ class VISP_EXPORT vpMbTracker // vpTRACE("Warning : The covariance matrix has not been computed. // See setCovarianceComputation() to do it."); std::cerr << "Warning : The covariance matrix has not been computed. " - "See setCovarianceComputation() to do it." - << std::endl; + "See setCovarianceComputation() to do it." + << std::endl; } return covarianceMatrix; @@ -402,15 +402,15 @@ class VISP_EXPORT vpMbTracker } virtual std::pair, std::vector > > - getPolygonFaces(bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false); + getPolygonFaces(bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false); - /*! - Get the current pose between the object and the camera. - cMo is the matrix which can be used to express - coordinates from the object frame to camera frame. + /*! + Get the current pose between the object and the camera. + cMo is the matrix which can be used to express + coordinates from the object frame to camera frame. - \param cMo : the pose - */ + \param cMo : the pose + */ virtual inline void getPose(vpHomogeneousMatrix &cMo) const { cMo = m_cMo; } /*! @@ -551,10 +551,10 @@ class VISP_EXPORT vpMbTracker virtual void setNearClippingDistance(const double &dist); /*! - Set the optimization method used during the tracking. - - \param opt : Optimization method to use. - */ + * Set the optimization method used during the tracking. + * + * \param opt : Optimization method to use. + */ virtual inline void setOptimizationMethod(const vpMbtOptimizationMethod &opt) { m_optimizationMethod = opt; } void setProjectionErrorMovingEdge(const vpMe &me); @@ -585,18 +585,21 @@ class VISP_EXPORT vpMbTracker virtual void setProjectionErrorComputation(const bool &flag) { computeProjError = flag; } /*! - Display or not gradient and model orientation when computing the projection error. 
- */ + * Display or not gradient and model orientation when computing the projection error. + */ virtual void setProjectionErrorDisplay(bool display) { m_projectionErrorDisplay = display; } /*! - Arrow length used to display gradient and model orientation for projection error computation. - */ - virtual void setProjectionErrorDisplayArrowLength(unsigned int length) { m_projectionErrorDisplayLength = length; } + * Arrow length used to display gradient and model orientation for projection error computation. + */ + virtual void setProjectionErrorDisplayArrowLength(unsigned int length) + { + m_projectionErrorDisplayLength = length; + } /*! - Arrow thickness used to display gradient and model orientation for projection error computation. - */ + * Arrow thickness used to display gradient and model orientation for projection error computation. + */ virtual void setProjectionErrorDisplayArrowThickness(unsigned int thickness) { m_projectionErrorDisplayThickness = thickness; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h index 3ff14f3be1..2a712dc9d4 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,13 +29,12 @@ * * Description: * Moving edges. - * -*****************************************************************************/ + */ /*! - \file vpMbtMeEllipse.h - \brief Moving edges on an ellipse -*/ + * \file vpMbtMeEllipse.h + * \brief Moving edges on an ellipse + */ #ifndef vpMbtMeEllipse_HH #define vpMbtMeEllipse_HH @@ -49,12 +47,12 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - \class vpMbtMeEllipse - \ingroup group_mbt_features - - \brief Class that tracks an ellipse moving edges with specific capabilities for - model-based tracking. -*/ + * \class vpMbtMeEllipse + * \ingroup group_mbt_features + * + * \brief Class that tracks an ellipse moving edges with specific capabilities for + * model-based tracking. + */ class VISP_EXPORT vpMbtMeEllipse : public vpMeEllipse { public: @@ -62,7 +60,6 @@ class VISP_EXPORT vpMbtMeEllipse : public vpMeEllipse vpMbtMeEllipse(); vpMbtMeEllipse(const vpMbtMeEllipse &me_ellipse); - virtual ~vpMbtMeEllipse(); void computeProjectionError(const vpImage &_I, double &_sumErrorRad, unsigned int &_nbFeatures, const vpMatrix &SobelX, const vpMatrix &SobelY, bool display, unsigned int length, @@ -77,7 +74,7 @@ class VISP_EXPORT vpMbtMeEllipse : public vpMeEllipse private: void reSample(const vpImage &I); - void sample(const vpImage &I, bool doNotTrack = false); + void sample(const vpImage &I, bool doNotTrack = false) override; void suppressPoints(); }; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeLine.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeLine.h index 53991fe4c1..9cf88f3cfd 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeLine.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeLine.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Implementation of a line used by the model-based tracker. 
- * - * Authors: - * Romain Tallonneau - * -*****************************************************************************/ + */ /*! - \file vpMbtMeLine.h - \brief Implementation of a line used by the model-based tracker. -*/ + * \file vpMbtMeLine.h + * \brief Implementation of a line used by the model-based tracker. + */ #ifndef vpMbtMeLine_HH #define vpMbtMeLine_HH @@ -51,10 +46,9 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - \class vpMbtMeLine - \brief Implementation of a line used by the model-based tracker. - \ingroup group_mbt_features - + * \class vpMbtMeLine + * \brief Implementation of a line used by the model-based tracker. + * \ingroup group_mbt_features */ class VISP_EXPORT vpMbtMeLine : public vpMeTracker { @@ -72,7 +66,7 @@ class VISP_EXPORT vpMbtMeLine : public vpMeTracker public: vpMbtMeLine(); - virtual ~vpMbtMeLine(); + virtual ~vpMbtMeLine() override; void computeProjectionError(const vpImage &_I, double &_sumErrorRad, unsigned int &_nbFeatures, const vpMatrix &SobelX, const vpMatrix &SobelY, bool display, unsigned int length, @@ -82,27 +76,27 @@ class VISP_EXPORT vpMbtMeLine : public vpMeTracker using vpMeTracker::display; /*! - Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j - \; sin(\theta) - \rho = 0 \f$ - - \return : The a coefficient of the moving edge - */ + * Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j + * \; sin(\theta) - \rho = 0 \f$ + * + * \return : The a coefficient of the moving edge + */ inline double get_a() const { return this->a; } /*! - Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j - \; sin(\theta) - \rho = 0 \f$ - - \return : The b coefficient of the moving edge - */ + * Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j + * \; sin(\theta) - \rho = 0 \f$ + * + * \return : The b coefficient of the moving edge + */ inline double get_b() const { return this->b; } /*! - Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j - \; sin(\theta) - \rho = 0 \f$ - - \return : The c coefficient of the moving edge - */ + * Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j + * \; sin(\theta) - \rho = 0 \f$ + * + * \return : The c coefficient of the moving edge + */ inline double get_c() const { return this->c; } void initTracking(const vpImage &I, const vpImagePoint &ip1, const vpImagePoint &ip2, double rho, @@ -117,7 +111,7 @@ class VISP_EXPORT vpMbtMeLine : public vpMeTracker private: void bubbleSortI(); void bubbleSortJ(); - virtual void sample(const vpImage &image, bool doNotTrack = false); + void sample(const vpImage &image, bool doNotTrack = false) override; void seekExtremities(const vpImage &I); void setExtremities(); void suppressPoints(const vpImage &I); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtPolygon.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtPolygon.h index a1b6810021..7b26bbd01c 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtPolygon.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtPolygon.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,12 @@ * * Description: * Implements a polygon of the model used by the model-based tracker. - * - * Authors: - * Romain Tallonneau - * Aurelien Yol - * -*****************************************************************************/ + */ /*! 
- \file vpMbtPolygon.h - \brief Implements a polygon of the model used by the model-based tracker. -*/ + * \file vpMbtPolygon.h + *\brief Implements a polygon of the model used by the model-based tracker. + */ #ifndef vpMbtPolygon_HH #define vpMbtPolygon_HH @@ -53,13 +47,12 @@ #include /*! - \class vpMbtPolygon - - \brief Implementation of a polygon of the model used by the model-based - tracker. - - \ingroup group_mbt_faces - + * \class vpMbtPolygon + * + * \brief Implementation of a polygon of the model used by the model-based + * tracker. + * + * \ingroup group_mbt_faces */ class VISP_EXPORT vpMbtPolygon : public vpPolygon3D { @@ -89,19 +82,18 @@ class VISP_EXPORT vpMbtPolygon : public vpPolygon3D public: vpMbtPolygon(); vpMbtPolygon(const vpMbtPolygon &mbtp); - virtual ~vpMbtPolygon(); /*! - Get the index of the face. - - \return index : the index of the face. - */ + * Get the index of the face. + * + * \return index : the index of the face. + */ inline int getIndex() const { return index; } /*! - Get the name of the face. - - \return Name of the face. + * Get the name of the face. + * + * \return Name of the face. */ inline std::string getName() const { return name; } @@ -115,51 +107,51 @@ class VISP_EXPORT vpMbtPolygon : public vpPolygon3D vpMbtPolygon &operator=(const vpMbtPolygon &mbtp); /*! - Set the index of the face. - - \param i : the new index of the face. - */ + * Set the index of the face. + * + * \param i : the new index of the face. + */ virtual inline void setIndex(int i) { index = i; } // Due to a doxygen warning include the sample code in the doc, we remove // the inline and put the doc in the *.cpp file void setLod(bool use_lod); /*! - Set the threshold for the minimum line length to be considered as visible - in the LOD (level of detail) case. This threshold is only used when - setLoD() is turned on. - - \param min_line_length : threshold for the minimum line length in pixel. - When a single line that doesn't belong to a face is considered by the - tracker, this line is tracked only if its length in pixel is greater than - \e min_line_length. - - \sa setLoD() + * Set the threshold for the minimum line length to be considered as visible + * in the LOD (level of detail) case. This threshold is only used when + * setLoD() is turned on. + * + * \param min_line_length : threshold for the minimum line length in pixel. + * When a single line that doesn't belong to a face is considered by the + * tracker, this line is tracked only if its length in pixel is greater than + * \e min_line_length. + * + * \sa setLoD() */ inline void setMinLineLengthThresh(double min_line_length) { this->minLineLengthThresh = min_line_length; } /*! - Set the minimum polygon area to be considered as visible in the LOD (level - of detail) case. This threshold is only used when setLoD() is turned on. - - \param min_polygon_area : threshold for the minimum polygon area in pixel. - When a face is considered by the tracker, this face is tracked only if its - area in pixel is greater than \e min_polygon_area. - - \sa setLoD() - */ + * Set the minimum polygon area to be considered as visible in the LOD (level + * of detail) case. This threshold is only used when setLoD() is turned on. + * + * \param min_polygon_area : threshold for the minimum polygon area in pixel. + * When a face is considered by the tracker, this face is tracked only if its + * area in pixel is greater than \e min_polygon_area. 
+ * + * \sa setLoD() + */ inline void setMinPolygonAreaThresh(double min_polygon_area) { this->minPolygonAreaThresh = min_polygon_area; } /*! - Set the name of the face. - - \param face_name : name of the face. + * Set the name of the face. + * + * \param face_name : name of the face. */ inline void setName(const std::string &face_name) { this->name = face_name; } /*! - Set if the polygon is oriented or not. - - \param oriented : True if the polygon is oriented, false otherwise. + * Set if the polygon is oriented or not. + * + * \param oriented : True if the polygon is oriented, false otherwise. */ inline void setIsPolygonOriented(const bool &oriented) { this->hasOrientation = oriented; } }; diff --git a/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp b/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp index 2eb6de1986..e7e81ad3f6 100644 --- a/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp @@ -56,10 +56,6 @@ vpMbtMeEllipse::vpMbtMeEllipse() : vpMeEllipse() { } Copy constructor. */ vpMbtMeEllipse::vpMbtMeEllipse(const vpMbtMeEllipse &me_ellipse) : vpMeEllipse(me_ellipse) { } -/*! - Destructor. -*/ -vpMbtMeEllipse::~vpMbtMeEllipse() { } /*! Compute the projection error of the ellipse. diff --git a/modules/tracker/mbt/src/vpMbGenericTracker.cpp b/modules/tracker/mbt/src/vpMbGenericTracker.cpp index 64489dca7c..71b8ee65b0 100644 --- a/modules/tracker/mbt/src/vpMbGenericTracker.cpp +++ b/modules/tracker/mbt/src/vpMbGenericTracker.cpp @@ -5789,8 +5789,6 @@ vpMbGenericTracker::TrackerWrapper::TrackerWrapper(int trackerType) #endif } -vpMbGenericTracker::TrackerWrapper::~TrackerWrapper() { } - // Implemented only for debugging purposes: use TrackerWrapper as a standalone tracker void vpMbGenericTracker::TrackerWrapper::computeVVS(const vpImage *const ptr_I) { diff --git a/modules/tracker/mbt/src/vpMbTracker.cpp b/modules/tracker/mbt/src/vpMbTracker.cpp index 68b7132512..0aa00ffc5f 100644 --- a/modules/tracker/mbt/src/vpMbTracker.cpp +++ b/modules/tracker/mbt/src/vpMbTracker.cpp @@ -1096,7 +1096,7 @@ void vpMbTracker::initFromPose(const vpImage *const I, const vpIm // The six value of the pose vector 0.0000 // \ 0.0000 // | - 1.0000 // | Exemple of value for the pose vector where Z = 1 meter + 1.0000 // | Example of value for the pose vector where Z = 1 meter 0.0000 // | 0.0000 // | 0.0000 // / @@ -1119,7 +1119,7 @@ void vpMbTracker::initFromPose(const vpImage &I, const std::strin // The six value of the pose vector 0.0000 // \ 0.0000 // | - 1.0000 // | Exemple of value for the pose vector where Z = 1 meter + 1.0000 // | Example of value for the pose vector where Z = 1 meter 0.0000 // | 0.0000 // | 0.0000 // / diff --git a/modules/tracker/mbt/src/vpMbtPolygon.cpp b/modules/tracker/mbt/src/vpMbtPolygon.cpp index 4ea7c8485c..5f9a2998c1 100644 --- a/modules/tracker/mbt/src/vpMbtPolygon.cpp +++ b/modules/tracker/mbt/src/vpMbtPolygon.cpp @@ -53,14 +53,13 @@ */ vpMbtPolygon::vpMbtPolygon() : index(-1), isvisible(false), isappearing(false), useLod(false), minLineLengthThresh(50.0), - minPolygonAreaThresh(2500.0), name(""), hasOrientation(true) -{ -} + minPolygonAreaThresh(2500.0), name(""), hasOrientation(true) +{ } vpMbtPolygon::vpMbtPolygon(const vpMbtPolygon &mbtp) : vpPolygon3D(mbtp), index(mbtp.index), isvisible(mbtp.isvisible), isappearing(mbtp.isappearing), useLod(mbtp.useLod), - minLineLengthThresh(mbtp.minLineLengthThresh), minPolygonAreaThresh(mbtp.minPolygonAreaThresh), name(mbtp.name), - hasOrientation(mbtp.hasOrientation) + 
minLineLengthThresh(mbtp.minLineLengthThresh), minPolygonAreaThresh(mbtp.minPolygonAreaThresh), name(mbtp.name), + hasOrientation(mbtp.hasOrientation) { //*this = mbtp; // Should not be called by copy constructor to avoid multiple // assignements. @@ -81,11 +80,6 @@ vpMbtPolygon &vpMbtPolygon::operator=(const vpMbtPolygon &mbtp) return (*this); } -/*! - Basic destructor. -*/ -vpMbtPolygon::~vpMbtPolygon() {} - /*! Check if the polygon is visible in the image and if the angle between the normal to the face and the line vector going from the optical center to the @@ -215,9 +209,11 @@ bool vpMbtPolygon::isVisible(const vpHomogeneousMatrix &cMo, double alpha, const if (angle < alpha + vpMath::rad(1)) { isappearing = true; - } else if (modulo && (M_PI - angle) < alpha + vpMath::rad(1)) { + } + else if (modulo && (M_PI - angle) < alpha + vpMath::rad(1)) { isappearing = true; - } else { + } + else { isappearing = false; } diff --git a/modules/tracker/me/include/visp3/me/vpMeEllipse.h b/modules/tracker/me/include/visp3/me/vpMeEllipse.h index a15317dcda..cf60d79183 100644 --- a/modules/tracker/me/include/visp3/me/vpMeEllipse.h +++ b/modules/tracker/me/include/visp3/me/vpMeEllipse.h @@ -98,10 +98,11 @@ class VISP_EXPORT vpMeEllipse : public vpMeTracker * Copy constructor. */ vpMeEllipse(const vpMeEllipse &me_ellipse); + /*! * Destructor. */ - virtual ~vpMeEllipse(); + virtual ~vpMeEllipse() override; /*! * Display the ellipse or arc of ellipse * diff --git a/modules/tracker/me/include/visp3/me/vpMeLine.h b/modules/tracker/me/include/visp3/me/vpMeLine.h index 45bca9be7c..fa03bdc306 100644 --- a/modules/tracker/me/include/visp3/me/vpMeLine.h +++ b/modules/tracker/me/include/visp3/me/vpMeLine.h @@ -181,7 +181,7 @@ class VISP_EXPORT vpMeLine : public vpMeTracker /*! * Destructor. */ - virtual ~vpMeLine(); + virtual ~vpMeLine() override; /*! * Display line. diff --git a/modules/tracker/me/include/visp3/me/vpMeNurbs.h b/modules/tracker/me/include/visp3/me/vpMeNurbs.h index 87c6e9bba9..c6f9d5ec28 100644 --- a/modules/tracker/me/include/visp3/me/vpMeNurbs.h +++ b/modules/tracker/me/include/visp3/me/vpMeNurbs.h @@ -164,11 +164,6 @@ class VISP_EXPORT vpMeNurbs : public vpMeTracker */ vpMeNurbs(const vpMeNurbs &menurbs); - /*! - * Destructor. - */ - virtual ~vpMeNurbs(); - /*! * Sets the number of control points used to compute the Nurbs. * diff --git a/modules/tracker/me/include/visp3/me/vpMeTracker.h b/modules/tracker/me/include/visp3/me/vpMeTracker.h index 24df003659..743799af19 100644 --- a/modules/tracker/me/include/visp3/me/vpMeTracker.h +++ b/modules/tracker/me/include/visp3/me/vpMeTracker.h @@ -100,7 +100,7 @@ class VISP_EXPORT vpMeTracker : public vpTracker /*! * Destructor. */ - virtual ~vpMeTracker(); + virtual ~vpMeTracker() override; /** @name Public Member Functions Inherited from vpMeTracker */ //@{ diff --git a/modules/tracker/me/include/visp3/me/vpNurbs.h b/modules/tracker/me/include/visp3/me/vpNurbs.h index 09c7170d5d..ee9fad05f7 100644 --- a/modules/tracker/me/include/visp3/me/vpNurbs.h +++ b/modules/tracker/me/include/visp3/me/vpNurbs.h @@ -167,10 +167,6 @@ class VISP_EXPORT vpNurbs : public vpBSpline */ vpNurbs(const vpNurbs &nurbs); - /*! - * Destructor. - */ - virtual ~vpNurbs(); /*! * Gets all the weights relative to the control points. 
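Editorial note: the level-of-detail (LOD) thresholds documented above in vpMbtPolygon, and exposed at tracker level through the vpMbGenericTracker setters (setLod, setMinLineLengthThresh, setMinPolygonAreaThresh), can be exercised as in the sketch below. This example is not part of the patch: the "cube.xml" and "cube.cao" file names are placeholders, and the numeric values simply restate the defaults visible in the vpMbtPolygon constructor above (50.0 px minimum line length, 2500.0 px^2 minimum polygon area).

#include <visp3/mbt/vpMbGenericTracker.h>

int main()
{
  // Default-constructed generic tracker (a single edge-based camera by default).
  vpMbGenericTracker tracker;

  tracker.loadConfigFile("cube.xml"); // placeholder configuration file
  tracker.loadModel("cube.cao");      // placeholder CAD model

  // Enable LOD filtering once the model is loaded; leaving the optional name
  // argument empty applies the setting to every line and face of the model.
  tracker.setLod(true);
  tracker.setMinLineLengthThresh(50.0);    // lines shorter than 50 px are not tracked
  tracker.setMinPolygonAreaThresh(2500.0); // faces smaller than 2500 px^2 are not tracked

  return 0;
}
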
diff --git a/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp b/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp index 041f51cb5e..d6c28a541b 100644 --- a/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp +++ b/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp @@ -198,8 +198,6 @@ vpMeNurbs::vpMeNurbs(const vpMeNurbs &menurbs) cannyTh2 = menurbs.cannyTh2; } -vpMeNurbs::~vpMeNurbs() { } - void vpMeNurbs::initTracking(const vpImage &I) { std::list ptList; diff --git a/modules/tracker/me/src/moving-edges/vpNurbs.cpp b/modules/tracker/me/src/moving-edges/vpNurbs.cpp index ec9b0ec379..c253f55ea7 100644 --- a/modules/tracker/me/src/moving-edges/vpNurbs.cpp +++ b/modules/tracker/me/src/moving-edges/vpNurbs.cpp @@ -50,8 +50,6 @@ vpNurbs::vpNurbs() : weights() { p = 3; } vpNurbs::vpNurbs(const vpNurbs &nurbs) : vpBSpline(nurbs), weights(nurbs.weights) { } -vpNurbs::~vpNurbs() { } - vpImagePoint vpNurbs::computeCurvePoint(double l_u, unsigned int l_i, unsigned int l_p, std::vector &l_knots, std::vector &l_controlPoints, std::vector &l_weights) { diff --git a/modules/tracker/me/test/testNurbs.cpp b/modules/tracker/me/test/testNurbs.cpp index 97dc6af1b6..4eceb74edc 100644 --- a/modules/tracker/me/test/testNurbs.cpp +++ b/modules/tracker/me/test/testNurbs.cpp @@ -28,7 +28,7 @@ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Description: - * Exemple of a Nurbs curve. + * Example of a Nurbs curve. */ /*! \example testNurbs.cpp diff --git a/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerWarpHomographySL3.h b/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerWarpHomographySL3.h index 634901ac68..9a531b5efb 100644 --- a/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerWarpHomographySL3.h +++ b/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerWarpHomographySL3.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,13 @@ * * Description: * Template tracker. - * - * Authors: - * Amaury Dame - * Aurelien Yol - * -*****************************************************************************/ + */ + /*! - \file vpTemplateTrackerWarpHomographySL3.h - \brief warping function of an homography: the homography is defined on the - sl3 lie algebra H=exp(Sum(p[i]* A_i)) A_i is the basis of the SL3 Algebra -*/ + *\file vpTemplateTrackerWarpHomographySL3.h + *\brief warping function of an homography: the homography is defined on the + *sl3 lie algebra H=exp(Sum(p[i]* A_i)) A_i is the basis of the SL3 Algebra + */ #ifndef vpTemplateTrackerWarpHomographySL3_hh #define vpTemplateTrackerWarpHomographySL3_hh @@ -51,9 +46,9 @@ #include /*! 
- \class vpTemplateTrackerWarpHomographySL3 - \ingroup group_tt_warp -*/ + * \class vpTemplateTrackerWarpHomographySL3 + * \ingroup group_tt_warp + */ class VISP_EXPORT vpTemplateTrackerWarpHomographySL3 : public vpTemplateTrackerWarp { protected: @@ -63,7 +58,6 @@ class VISP_EXPORT vpTemplateTrackerWarpHomographySL3 : public vpTemplateTrackerW public: vpTemplateTrackerWarpHomographySL3(); - virtual ~vpTemplateTrackerWarpHomographySL3(); void computeCoeff(const vpColVector &p); void computeDenom(vpColVector &X, const vpColVector &); @@ -95,7 +89,7 @@ class VISP_EXPORT vpTemplateTrackerWarpHomographySL3 : public vpTemplateTrackerW void warpX(const int &v1, const int &u1, double &v2, double &u2, const vpColVector &); #ifndef DOXYGEN_SHOULD_SKIP_THIS - void warpXInv(const vpColVector &, vpColVector &, const vpColVector &) {} + void warpXInv(const vpColVector &, vpColVector &, const vpColVector &) { } #endif }; #endif diff --git a/modules/tracker/tt/src/warp/vpTemplateTrackerWarpHomographySL3.cpp b/modules/tracker/tt/src/warp/vpTemplateTrackerWarpHomographySL3.cpp index 9dcf5fdecd..666ee5827b 100644 --- a/modules/tracker/tt/src/warp/vpTemplateTrackerWarpHomographySL3.cpp +++ b/modules/tracker/tt/src/warp/vpTemplateTrackerWarpHomographySL3.cpp @@ -109,8 +109,9 @@ void vpTemplateTrackerWarpHomographySL3::findWarp(const double *ut0, const doubl vpMatrix::computeHLM(H, lambda, HLM); try { dp = HLM.inverseByLU() * G_; - } catch (const vpException &e) { - // std::cout<<"Cannot inverse the matrix by LU "< /*! - \class vpTemplateTrackerMI - \ingroup group_tt_mi_tracker -*/ + * \class vpTemplateTrackerMI + * \ingroup group_tt_mi_tracker + */ class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker { public: /*! Hessian approximation. */ - typedef enum { + typedef enum + { HESSIAN_NONSECOND = -1, HESSIAN_0, HESSIAN_d2I, @@ -120,12 +115,10 @@ class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker void computeMI(double &MI); void computeProba(int &nbpoint); - double getCost(const vpImage &I, const vpColVector &tp); + double getCost(const vpImage &I, const vpColVector &tp) override; double getCost(const vpImage &I) { return getCost(I, p); } double getNormalizedCost(const vpImage &I, const vpColVector &tp); double getNormalizedCost(const vpImage &I) { return getNormalizedCost(I, p); } - virtual void initHessienDesired(const vpImage &I) = 0; - virtual void trackNoPyr(const vpImage &I) = 0; void zeroProbabilities(); // private: @@ -149,18 +142,16 @@ class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker //#endif public: - // constructeur //! Default constructor. 
vpTemplateTrackerMI() : vpTemplateTracker(), hessianComputation(USE_HESSIEN_NORMAL), ApproxHessian(HESSIAN_0), lambda(0), temp(NULL), - Prt(NULL), dPrt(NULL), Pt(NULL), Pr(NULL), d2Prt(NULL), PrtTout(NULL), dprtemp(NULL), PrtD(NULL), dPrtD(NULL), - influBspline(0), bspline(0), Nc(0), Ncb(0), d2Ix(), d2Iy(), d2Ixy(), MI_preEstimation(0), MI_postEstimation(0), - NMI_preEstimation(0), NMI_postEstimation(0), covarianceMatrix(), computeCovariance(false), m_du(), m_dv(), m_A(), - m_dB(), m_d2u(), m_d2v(), m_dA() - { - } + Prt(NULL), dPrt(NULL), Pt(NULL), Pr(NULL), d2Prt(NULL), PrtTout(NULL), dprtemp(NULL), PrtD(NULL), dPrtD(NULL), + influBspline(0), bspline(0), Nc(0), Ncb(0), d2Ix(), d2Iy(), d2Ixy(), MI_preEstimation(0), MI_postEstimation(0), + NMI_preEstimation(0), NMI_postEstimation(0), covarianceMatrix(), computeCovariance(false), m_du(), m_dv(), m_A(), + m_dB(), m_d2u(), m_d2v(), m_dA() + { } explicit vpTemplateTrackerMI(vpTemplateTrackerWarp *_warp); - virtual ~vpTemplateTrackerMI(); + virtual ~vpTemplateTrackerMI() override; vpMatrix getCovarianceMatrix() const { return covarianceMatrix; } double getMI() const { return MI_postEstimation; } double getMI(const vpImage &I, int &nc, const int &bspline, vpColVector &tp); diff --git a/modules/vision/include/visp3/vision/vpHomography.h b/modules/vision/include/visp3/vision/vpHomography.h index 6811dd0c98..5aa9f40b58 100644 --- a/modules/vision/include/visp3/vision/vpHomography.h +++ b/modules/vision/include/visp3/vision/vpHomography.h @@ -183,8 +183,6 @@ class VISP_EXPORT vpHomography : public vpArray2D vpHomography(const vpThetaUVector &tu, const vpTranslationVector &atb, const vpPlane &bP); //! Construction from translation and rotation and a plane. vpHomography(const vpPoseVector &arb, const vpPlane &bP); - //! Destructor. - virtual ~vpHomography() { }; //! Construction from translation and rotation and a plane void buildFrom(const vpRotationMatrix &aRb, const vpTranslationVector &atb, const vpPlane &bP); diff --git a/modules/vision/include/visp3/vision/vpPoseFeatures.h b/modules/vision/include/visp3/vision/vpPoseFeatures.h index 21f7ff7691..2f2ca927b4 100644 --- a/modules/vision/include/visp3/vision/vpPoseFeatures.h +++ b/modules/vision/include/visp3/vision/vpPoseFeatures.h @@ -305,20 +305,20 @@ class vpPoseSpecificFeatureTemplate : public vpPoseSpecificFeature m_tuple = new std::tuple(args...); } - virtual ~vpPoseSpecificFeatureTemplate() { delete m_tuple; } + virtual ~vpPoseSpecificFeatureTemplate() override { delete m_tuple; } - virtual void createDesired() { buildDesiredFeatureWithTuple(m_desiredFeature, func_ptr, *m_tuple); } + virtual void createDesired() override { buildDesiredFeatureWithTuple(m_desiredFeature, func_ptr, *m_tuple); } - virtual vpColVector error() + virtual vpColVector error() override { // std::cout << "Getting S... 
: " << std::get<0>(*tuple).get_s() << // std::endl; return m_currentFeature.error(m_desiredFeature); } - virtual vpMatrix currentInteraction() { return m_currentFeature.interaction(); } + virtual vpMatrix currentInteraction() override { return m_currentFeature.interaction(); } - virtual void createCurrent(const vpHomogeneousMatrix &cMo) + virtual void createCurrent(const vpHomogeneousMatrix &cMo) override { buildCurrentFeatureWithTuple(m_currentFeature, cMo, func_ptr, *m_tuple); } @@ -353,15 +353,15 @@ class vpPoseSpecificFeatureTemplateObject : public vpPoseSpecificFeature m_obj = o; } - virtual ~vpPoseSpecificFeatureTemplateObject() { delete m_tuple; } + virtual ~vpPoseSpecificFeatureTemplateObject() override { delete m_tuple; } - virtual void createDesired() { buildDesiredFeatureObjectWithTuple(m_obj, m_desiredFeature, func_ptr, *m_tuple); } + virtual void createDesired() override { buildDesiredFeatureObjectWithTuple(m_obj, m_desiredFeature, func_ptr, *m_tuple); } - virtual vpColVector error() { return m_currentFeature.error(m_desiredFeature); } + virtual vpColVector error() override { return m_currentFeature.error(m_desiredFeature); } - virtual vpMatrix currentInteraction() { return m_currentFeature.interaction(); } + virtual vpMatrix currentInteraction() override { return m_currentFeature.interaction(); } - virtual void createCurrent(const vpHomogeneousMatrix &cMo) + virtual void createCurrent(const vpHomogeneousMatrix &cMo) override { buildCurrentFeatureObjectWithTuple(m_obj, m_currentFeature, cMo, func_ptr, *m_tuple); } diff --git a/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h b/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h index 486199e0ba..7c61681309 100644 --- a/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h +++ b/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h @@ -69,21 +69,20 @@ // #define FEATURE_LINE8 0x80 /*! - \class vpBasicFeature - \ingroup group_core_features - \brief class that defines what is a visual feature -*/ + * \class vpBasicFeature + * \ingroup group_core_features + * \brief class that defines what is a visual feature + */ class VISP_EXPORT vpBasicFeature { -public: // Public constantes +public: static const unsigned int FEATURE_LINE[32]; enum { FEATURE_ALL = 0xffff }; /*! - \enum vpBasicFeatureDeallocatorType - Indicates who should deallocate the feature. - - */ + * \enum vpBasicFeatureDeallocatorType + * Indicates who should deallocate the feature. + */ typedef enum { user, vpServo } vpBasicFeatureDeallocatorType; protected: @@ -92,7 +91,7 @@ class VISP_EXPORT vpBasicFeature //! Dimension of the visual feature. unsigned int dim_s; // int featureLine[8] ; - //! Ensure that all the parameters needed to compute the iteraction matrix + //! Ensure that all the parameters needed to compute the interaction matrix //! are set. bool *flags; //! Number of parameters needed to compute the interaction matrix. diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureDepth.h b/modules/visual_features/include/visp3/visual_features/vpFeatureDepth.h index d8fd609aff..7b4e1ae37f 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureDepth.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureDepth.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. 
* @@ -30,16 +29,15 @@ * * Description: * 2D point visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeatureDepth_H #define vpFeatureDepth_H /*! - \file vpFeatureDepth.h - \brief Class that defines 3D point visual feature -*/ + * \file vpFeatureDepth.h + * \brief Class that defines 3D point visual feature + */ #include #include @@ -48,112 +46,111 @@ #include /*! - \class vpFeatureDepth - \ingroup group_visual_features - - \brief Class that defines a 3D point visual feature \f$ s\f$ which - is composed by one parameters that is \f$ log( \frac{Z}{Z^*}) \f$ - that defines the current depth relative to the desired depth. Here - \f$ Z \f$ represents the current depth and \f$ Z^* \f$ the desired - depth. - - In this class \f$ x \f$ and \f$ y \f$ are the 2D coordinates in the - camera frame and are given in meter. \f$ x \f$, \f$ y \f$ and \f$ Z - \f$ are needed during the computation of the interaction matrix \f$ - L \f$. - - The visual features can be set easily thanks to the buildFrom() method. - - As the visual feature \f$ s \f$ represents the current depth - relative to the desired depth, the desired visual feature \f$ s^* - \f$ is set to zero. Once the value of the visual feature is set, the - interaction() method allows to compute the interaction matrix \f$ L - \f$ associated to the visual feature, while the error() method - computes the error vector \f$(s - s^*)\f$ between the current visual - feature and the desired one which is here set to zero. - - The code below shows how to create a eye-in hand visual servoing - task using a 3D depth feature \f$ log( \frac{Z}{Z^*}) \f$ that - corresponds to the current depth relative to the desired depth. To - control six degrees of freedom, at least five other features must be - considered. First we create a current (\f$s\f$) 3D depth - feature. Then we set the task to use the interaction matrix - associated to the current feature \f$L_s\f$. And finally we compute - the camera velocity \f$v=-\lambda \; L_s^+ \; (s-s^*)\f$. The - current feature \f$s\f$ is updated in the while() loop. - - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpFeatureDepth s; //The current point feature. - //Set the current parameters x, y, Z and the desired depth Zs - double x; //You have to compute the value of x. - double y; //You have to compute the value of y. - double Z; //You have to compute the value of Z. - double Zs; //You have to define the desired depth Zs. - //Set the point feature thanks to the current parameters. - s.buildfrom(x, y, Z, log(Z/Zs)); - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the desired visual features sd - task.setInteractionMatrixType(vpServo::CURRENT); - - // Add the 3D depth feature to the task - task.addFeature(s); // s* is here considered as zero - - // Control loop - for ( ; ; ) { - // The new parameters x, y and Z must be computed here. 
- - // Update the current point visual feature - s.buildfrom(x, y, Z, log(Z/Zs)); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - - If you want to build your own control law, this other example shows how - to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual - feature, compute the corresponding error vector \f$(s-s^*)\f$ and finally - build the interaction matrix \f$L_s\f$. - - \code -#include -#include -#include - -int main() -{ - vpFeatureDepth s; //The current point feature. - //Set the current parameters x, y, Z and the desired depth Zs - double x; //You have to compute the value of x. - double y; //You have to compute the value of y. - double Z; //You have to compute the value of Z. - double Zs; //You have to define the desired depth Zs. - //Set the point feature thanks to the current parameters. - s.buildfrom(x, y, Z, log(Z/Zs)); - - // Compute the interaction matrix L_s for the current point feature - vpMatrix L = s.interaction(); - - // Compute the error vector (s-s*) for the point feature with s* considered as 0. - vpColVector s_star(1); // The dimension is 1. - s_star(1) = 0; // The value of s* is 0. - s.error(s_star); -} - \endcode -*/ - + * \class vpFeatureDepth + * \ingroup group_visual_features + * + * \brief Class that defines a 3D point visual feature \f$ s\f$ which + * is composed by one parameters that is \f$ log( \frac{Z}{Z^*}) \f$ + * that defines the current depth relative to the desired depth. Here + * \f$ Z \f$ represents the current depth and \f$ Z^* \f$ the desired + * depth. + * + * In this class \f$ x \f$ and \f$ y \f$ are the 2D coordinates in the + * camera frame and are given in meter. \f$ x \f$, \f$ y \f$ and \f$ Z + * \f$ are needed during the computation of the interaction matrix \f$ + * L \f$. + * + * The visual features can be set easily thanks to the buildFrom() method. + * + * As the visual feature \f$ s \f$ represents the current depth + * relative to the desired depth, the desired visual feature \f$ s^* + * \f$ is set to zero. Once the value of the visual feature is set, the + * interaction() method allows to compute the interaction matrix \f$ L + * \f$ associated to the visual feature, while the error() method + * computes the error vector \f$(s - s^*)\f$ between the current visual + * feature and the desired one which is here set to zero. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 3D depth feature \f$ log( \frac{Z}{Z^*}) \f$ that + * corresponds to the current depth relative to the desired depth. To + * control six degrees of freedom, at least five other features must be + * considered. First we create a current (\f$s\f$) 3D depth + * feature. Then we set the task to use the interaction matrix + * associated to the current feature \f$L_s\f$. And finally we compute + * the camera velocity \f$v=-\lambda \; L_s^+ \; (s-s^*)\f$. The + * current feature \f$s\f$ is updated in the while() loop. + * + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpFeatureDepth s; //The current point feature. + * //Set the current parameters x, y, Z and the desired depth Zs + * double x; // You have to compute the value of x. + * double y; // You have to compute the value of y. + * double Z; // You have to compute the value of Z. + * double Zs; // You have to define the desired depth Zs. + * //Set the point feature thanks to the current parameters. 
+ * s.buildfrom(x, y, Z, log(Z/Zs)); + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the desired visual features sd + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * // Add the 3D depth feature to the task + * task.addFeature(s); // s* is here considered as zero + * + * // Control loop + * for ( ; ; ) { + * // The new parameters x, y and Z must be computed here. + * + * // Update the current point visual feature + * s.buildfrom(x, y, Z, log(Z/Zs)); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * If you want to build your own control law, this other example shows how + * to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual + * feature, compute the corresponding error vector \f$(s-s^*)\f$ and finally + * build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpFeatureDepth s; //The current point feature. + * //Set the current parameters x, y, Z and the desired depth Zs + * double x; // You have to compute the value of x. + * double y; // You have to compute the value of y. + * double Z; // You have to compute the value of Z. + * double Zs; // You have to define the desired depth Zs. + * //Set the point feature thanks to the current parameters. + * s.buildfrom(x, y, Z, log(Z/Zs)); + * + * // Compute the interaction matrix L_s for the current point feature + * vpMatrix L = s.interaction(); + * + * // Compute the error vector (s-s*) for the point feature with s* considered as 0. + * vpColVector s_star(1); // The dimension is 1. + * s_star(1) = 0; // The value of s* is 0. + * s.error(s_star); + * } + * \endcode + */ class VISP_EXPORT vpFeatureDepth : public vpBasicFeature { @@ -170,8 +167,6 @@ class VISP_EXPORT vpFeatureDepth : public vpBasicFeature public: vpFeatureDepth(); - //! Destructor. 
- virtual ~vpFeatureDepth() {} /* section Set coordinates @@ -180,11 +175,11 @@ class VISP_EXPORT vpFeatureDepth : public vpBasicFeature void buildFrom(double x, double y, double Z, double LogZoverZstar); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; - vpFeatureDepth *duplicate() const; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + unsigned int thickness = 1) const override; + vpFeatureDepth *duplicate() const override; + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; double get_x() const; @@ -194,10 +189,10 @@ class VISP_EXPORT vpFeatureDepth : public vpBasicFeature double get_LogZoverZstar() const; - void init(); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + void init() override; + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void set_x(double x); void set_y(double y); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureEllipse.h b/modules/visual_features/include/visp3/visual_features/vpFeatureEllipse.h index a6c0381932..6dc382d56b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureEllipse.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureEllipse.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D ellipse visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeatureEllipse_H #define vpFeatureEllipse_H /*! - \file vpFeatureEllipse.h - \brief Class that defines 2D ellipse visual feature -*/ + * \file vpFeatureEllipse.h + * \brief Class that defines 2D ellipse visual feature + */ #include #include @@ -48,10 +46,10 @@ #include /*! - \class vpFeatureEllipse - \ingroup group_visual_features - \brief Class that defines 2D ellipse visual feature. -*/ + * \class vpFeatureEllipse + * \ingroup group_visual_features + * \brief Class that defines 2D ellipse visual feature. + */ class VISP_EXPORT vpFeatureEllipse : public vpBasicFeature { /* @@ -66,8 +64,6 @@ class VISP_EXPORT vpFeatureEllipse : public vpBasicFeature public: //! Default constructor. vpFeatureEllipse(); - //! Destructor. - virtual ~vpFeatureEllipse() { } /*! \section Set coordinates @@ -80,16 +76,15 @@ class VISP_EXPORT vpFeatureEllipse : public vpBasicFeature void buildFrom(double x, double y, double n20, double n11, double n02, double A, double B, double C); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; //! Feature duplication - vpFeatureEllipse *duplicate() const; + vpFeatureEllipse *duplicate() const override; //! 
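Background note on the vpFeatureDepth hunks above: the interaction matrix of \f$ s = \log(Z/Z^*) \f$ has a simple closed form. The sketch below assumes the usual velocity-screw ordering \f$ v = (v_x, v_y, v_z, \omega_x, \omega_y, \omega_z) \f$ and the standard point kinematics \f$ \dot{Z} = -v_z - \omega_x Y + \omega_y X \f$; it is given for reference, not quoted from the class.

\f[
 \dot{s} = \frac{\dot{Z}}{Z} = -\frac{v_z}{Z} - y\,\omega_x + x\,\omega_y
 \qquad \Longrightarrow \qquad
 L_s = \left[ \begin{array}{cccccc} 0 & 0 & -\frac{1}{Z} & -y & x & 0 \end{array} \right],
\f]

using \f$ X = xZ \f$ and \f$ Y = yZ \f$.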
compute the error between two visual features from a subset //! a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); - + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; /*! * Returns the visual feature corresponding to the ellipse centroid coordinate along camera x-axis. @@ -116,12 +111,12 @@ class VISP_EXPORT vpFeatureEllipse : public vpBasicFeature double get_n02() const { return s[4]; } //! Default initialization. - void init(); + void init() override; //! compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - //! print the name of the feature - void print(unsigned int select = FEATURE_ALL) const; + //! Print the name of the feature + void print(unsigned int select = FEATURE_ALL) const override; void set_x(double x); void set_y(double y); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureLine.h b/modules/visual_features/include/visp3/visual_features/vpFeatureLine.h index f41777f395..807b732fa2 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureLine.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureLine.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D line visual feature. - * -*****************************************************************************/ + */ #ifndef _vpFeatureLine_h_ #define _vpFeatureLine_h_ /*! - \file vpFeatureLine.h - \brief Class that defines 2D line visual feature -*/ + * \file vpFeatureLine.h + * \brief Class that defines 2D line visual feature + */ #include #include @@ -48,191 +46,185 @@ #include /*! - \class vpFeatureLine - \ingroup group_visual_features - - \brief Class that defines a 2D line visual feature \f$ s\f$ which is - composed by two parameters that are \f$ \rho \f$ and \f$ \theta \f$, - the polar coordinates of a line. - - In this class, the equation of the line in the image plane is given by : - \f[ x \; cos(\theta) + y \; sin(\theta) -\rho = 0 \f] Here - \f$ x \f$ and \f$ y \f$ are the coordinates of a point belonging to - the line and they are given in meter. The following image shows the - meanings of the distance \f$\rho\f$ and the angle \f$\theta\f$. - - \image html vpFeatureLine.gif - \image latex vpFeatureLine.ps width=10cm - - You have to note that the \f$ \theta \f$ angle has its value between - \f$ -\pi \f$ and \f$ \pi \f$ and that the \f$ \rho \f$ distance can - be positive or negative. The conventions are illustrated by the - image above. - - The visual features can be set easily from an instance of the - classes vpLine, vpMeLine or vpCylinder. For more precision see the - class vpFeatureBuilder. - - Once the values of the visual features are set, the interaction() - method allows to compute the interaction matrix \f$ L \f$ associated - to the visual feature, while the error() method computes the error - vector \f$(s - s^*)\f$ between the current visual feature and the - desired one. - - The code below shows how to create a eye-in hand visual servoing - task using a 2D line feature \f$(\rho,\theta)\f$ that correspond to - the 2D equation of a line in the image plan. 
To control six - degrees of freedom, at least four other features must be considered - like two other line features for example. First we create a current - (\f$s\f$) 2D line feature. Then we set the task to use the - interaction matrix associated to the current feature \f$L_s\f$. And - finally we compute the camera velocity \f$v=-\lambda \; L_s^+ \; - (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() - loop. - - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpFeatureLine sd; //The desired line feature. - // Sets the desired features rho and theta - double rhod = 0; - double thetad = 0; - // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. - // The line described by the features belongs to this plan. - // Normally two plans are needed to describe a line. But to compute the interaction matrix only - // one equation of the two plans is needed. - // Notes that the Dd value must not be equal to zero ! - double Ad = 0; - double Bd = 0; - double Cd = 1; - double Dd = -1; - // Set the line feature thanks to the desired parameters. - sd.buildfrom(rhod, thetad, Ad,Bd, Cd, Dd); - - vpFeatureLine s; //The current line feature. - // Sets the current features rho and theta - double rho; // You have to compute the value of rho. - double theta; // You have to compute the value of theta. - // Set the line feature thanks to the current parameters. - s.buildfrom(rho, theta); - // In this case the parameters A, B, C, D are not needed because the interaction matrix is computed - // with the desired visual feature. - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the desired visual features sd - task.setInteractionMatrixType(vpServo::DESIRED); - - // Add the 2D line feature to the task - task.addFeature(s, sd); - - // Control loop - for ( ; ; ) { - // The new parameters rho and theta must be computed here. - - // Update the current line visual feature - s.buildfrom(rho, theta); - - // Compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - - If you want to build your own control law, this other example shows how to -create a current (\f$s\f$) and desired (\f$s^*\f$) 2D line visual feature, -compute the corresponding error vector \f$(s-s^*)\f$ and finally build the -interaction matrix \f$L_s\f$. - - \code -#include -#include - -int main() -{ - vpFeatureLine sd; //The desired line feature. - // Sets the desired features rho and theta - double rhod = 0; - double thetad = 0; - // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. - double Ad = 0; double Bd = 0; double Cd = 1; double Dd = -1; - // Set the line feature thanks to the desired parameters. - sd.buildfrom(rhod, thetad, Ad,Bd, Cd, Dd); - - vpFeatureLine s; // The current line feature. - // Sets the current features rho and theta - double rho; // You have to compute the value of rho. - double theta; // You have to compute the value of theta. - // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. - double A; // You have to compute the value of A. - double B; // You have to compute the value of B. - double C; // You have to compute the value of C. - double D; // You have to compute the value of D. D must not be equal to zero ! 
- // Set the line feature thanks to the current parameters. - s.buildfrom(rho, theta, A, B, C, D); - - // Compute the interaction matrix L_s for the current line feature - vpMatrix L = s.interaction(); - // You can also compute the interaction matrix L_s for the desired line feature - // The corresponding line of code is : vpMatrix L = sd.interaction(); - - // Compute the error vector (s-sd) for the line feature - s.error(s_star); -} - \endcode - -*/ + * \class vpFeatureLine + * \ingroup group_visual_features + * + * \brief Class that defines a 2D line visual feature \f$ s\f$ which is + * composed by two parameters that are \f$ \rho \f$ and \f$ \theta \f$, + * the polar coordinates of a line. + * + * In this class, the equation of the line in the image plane is given by : + * \f[ x \; cos(\theta) + y \; sin(\theta) -\rho = 0 \f] Here + * \f$ x \f$ and \f$ y \f$ are the coordinates of a point belonging to + * the line and they are given in meter. The following image shows the + * meanings of the distance \f$\rho\f$ and the angle \f$\theta\f$. + * + * \image html vpFeatureLine.gif + * \image latex vpFeatureLine.ps width=10cm + * + * You have to note that the \f$ \theta \f$ angle has its value between + * \f$ -\pi \f$ and \f$ \pi \f$ and that the \f$ \rho \f$ distance can + * be positive or negative. The conventions are illustrated by the + * image above. + * + * The visual features can be set easily from an instance of the + * classes vpLine, vpMeLine or vpCylinder. For more precision see the + * class vpFeatureBuilder. + * + * Once the values of the visual features are set, the interaction() + * method allows to compute the interaction matrix \f$ L \f$ associated + * to the visual feature, while the error() method computes the error + * vector \f$(s - s^*)\f$ between the current visual feature and the + * desired one. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 2D line feature \f$(\rho,\theta)\f$ that correspond to + * the 2D equation of a line in the image plan. To control six + * degrees of freedom, at least four other features must be considered + * like two other line features for example. First we create a current + * (\f$s\f$) 2D line feature. Then we set the task to use the + * interaction matrix associated to the current feature \f$L_s\f$. And + * finally we compute the camera velocity \f$v=-\lambda \; L_s^+ \; + * (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() + * loop. + * + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpFeatureLine sd; //The desired line feature. + * // Sets the desired features rho and theta + * double rhod = 0; + * double thetad = 0; + * // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. + * // The line described by the features belongs to this plan. + * // Normally two plans are needed to describe a line. But to compute the interaction matrix only + * // one equation of the two plans is needed. + * // Notes that the Dd value must not be equal to zero ! + * double Ad = 0; + * double Bd = 0; + * double Cd = 1; + * double Dd = -1; + * // Set the line feature thanks to the desired parameters. + * sd.buildfrom(rhod, thetad, Ad,Bd, Cd, Dd); + * + * vpFeatureLine s; //The current line feature. + * // Sets the current features rho and theta + * double rho; // You have to compute the value of rho. + * double theta; // You have to compute the value of theta. 
+ * // Set the line feature thanks to the current parameters. + * s.buildfrom(rho, theta); + * // In this case the parameters A, B, C, D are not needed because the interaction matrix is computed + * // with the desired visual feature. + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the desired visual features sd + * task.setInteractionMatrixType(vpServo::DESIRED); + * + * // Add the 2D line feature to the task + * task.addFeature(s, sd); + * + * // Control loop + * for ( ; ; ) { + * // The new parameters rho and theta must be computed here. + * + * // Update the current line visual feature + * s.buildfrom(rho, theta); + * + * // Compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * If you want to build your own control law, this other example shows how to + * create a current (\f$s\f$) and desired (\f$s^*\f$) 2D line visual feature, + * compute the corresponding error vector \f$(s-s^*)\f$ and finally build the + * interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * + * int main() + * { + * vpFeatureLine sd; //The desired line feature. + * // Sets the desired features rho and theta + * double rhod = 0; + * double thetad = 0; + * // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. + * double Ad = 0; double Bd = 0; double Cd = 1; double Dd = -1; + * // Set the line feature thanks to the desired parameters. + * sd.buildfrom(rhod, thetad, Ad,Bd, Cd, Dd); + * + * vpFeatureLine s; // The current line feature. + * // Sets the current features rho and theta + * double rho; // You have to compute the value of rho. + * double theta; // You have to compute the value of theta. + * // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. + * double A; // You have to compute the value of A. + * double B; // You have to compute the value of B. + * double C; // You have to compute the value of C. + * double D; // You have to compute the value of D. D must not be equal to zero ! + * // Set the line feature thanks to the current parameters. + * s.buildfrom(rho, theta, A, B, C, D); + * + * // Compute the interaction matrix L_s for the current line feature + * vpMatrix L = s.interaction(); + * // You can also compute the interaction matrix L_s for the desired line feature + * // The corresponding line of code is : vpMatrix L = sd.interaction(); + * + * // Compute the error vector (s-sd) for the line feature + * s.error(s_star); + * } + * \endcode + */ class VISP_EXPORT vpFeatureLine : public vpBasicFeature { /*! - attributes and members directly related to the vpBasicFeature needs - other functionalities ar useful but not mandatory - */ + * Attributes and members directly related to the vpBasicFeature needs + * other functionalities ar useful but not mandatory + */ private: //! FeatureLine depth (required to compute the interaction matrix) - //! equation of a plane + //! equation of a plane double A, B, C, D; public: vpFeatureLine(); - //! Destructor. 
- virtual ~vpFeatureLine() { } - // void buildFrom(const vpLine &l) ; - // void buildFrom(const vpCylinder &c, int l) ; void buildFrom(double rho, double theta); void buildFrom(double rho, double theta, double A, double B, double C, double D); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; - vpFeatureLine *duplicate() const; + unsigned int thickness = 1) const override; + vpFeatureLine *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); - // vpColVector error(const int select = FEATURE_ALL) ; + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; /*! - Return the \f$ \rho \f$ subset value of the visual feature \f$ s \f$. - */ + * Return the \f$ \rho \f$ subset value of the visual feature \f$ s \f$. + */ double getRho() const { return s[0]; } /*! - Return the \f$ \theta \f$ subset value of the visual feature \f$ s \f$. - */ + * Return the \f$ \theta \f$ subset value of the visual feature \f$ s \f$. + */ double getTheta() const { return s[1]; } - void init(); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + void init() override; + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void setRhoTheta(double rho, double theta); void setABCD(double A, double B, double C, double D); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureLuminance.h b/modules/visual_features/include/visp3/visual_features/vpFeatureLuminance.h index 0067c88104..5fc3e56f5b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureLuminance.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureLuminance.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Luminance based feature. - * -*****************************************************************************/ + */ #ifndef vpFeatureLuminance_h #define vpFeatureLuminance_h @@ -41,20 +39,20 @@ #include /*! - \file vpFeatureLuminance.h - \brief Class that defines the image luminance visual feature - - For more details see \cite Collewet08c. -*/ + * \file vpFeatureLuminance.h + * \brief Class that defines the image luminance visual feature + * + * For more details see \cite Collewet08c. + */ #ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - \class vpLuminance - \brief Class that defines the luminance and gradient of a point - - \sa vpFeatureLuminance -*/ + * \class vpLuminance + * \brief Class that defines the luminance and gradient of a point. + * + * \sa vpFeatureLuminance + */ class VISP_EXPORT vpLuminance { public: @@ -66,13 +64,12 @@ class VISP_EXPORT vpLuminance #endif /*! - \class vpFeatureLuminance - \ingroup group_visual_features - \brief Class that defines the image luminance visual feature - - For more details see \cite Collewet08c. 
-*/ - + * \class vpFeatureLuminance + * \ingroup group_visual_features + * \brief Class that defines the image luminance visual feature + * + * For more details see \cite Collewet08c. + */ class VISP_EXPORT vpFeatureLuminance : public vpBasicFeature { protected: @@ -95,30 +92,30 @@ class VISP_EXPORT vpFeatureLuminance : public vpBasicFeature vpFeatureLuminance(); vpFeatureLuminance(const vpFeatureLuminance &f); //! Destructor. - virtual ~vpFeatureLuminance(); + virtual ~vpFeatureLuminance() override; void buildFrom(vpImage &I); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; - vpFeatureLuminance *duplicate() const; + vpFeatureLuminance *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; void error(const vpBasicFeature &s_star, vpColVector &e); double get_Z() const; - void init(); + void init() override; void init(unsigned int _nbr, unsigned int _nbc, double _Z); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; void interaction(vpMatrix &L); vpFeatureLuminance &operator=(const vpFeatureLuminance &f); - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void setCameraParameters(vpCameraParameters &_cam); void set_Z(double Z); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h index 9bfe4344da..2517506534 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,18 +29,15 @@ * * Description: * Base for all moment features - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ -/*! -\file vpFeatureMoment.h -\brief Base class for moment features. + */ -Handles common system operations like selection, duplication. Functionality is -computed in derived classes. -*/ +/*! + * \file vpFeatureMoment.h + * \brief Base class for moment features. + * + * Handles common system operations like selection, duplication. Functionality is + * computed in derived classes. + */ #ifndef _vpFeatureMoment_h_ #define _vpFeatureMoment_h_ @@ -56,106 +52,106 @@ class vpFeatureMomentDatabase; class vpMoment; /*! -\class vpFeatureMoment - -\ingroup group_visual_features - -\brief This class defines shared system methods/attributes for 2D moment -features but no functional code. It is used to compute interaction matrices -for moment features. - -While vpMoment-type classes do only compute moment values and can by used for -almost anything, vpFeatureMoment-type classes are specifically designed for -visual servoing. 
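Background note on the vpFeatureLuminance hunks above: the photometric feature of \cite Collewet08c can be summarized in one relation, stated here from the brightness-constancy assumption used in that framework rather than from this header.

\f[
 s = I(x, y), \qquad
 \dot{s} = -\nabla I^{\top} \dot{\mathbf{x}} = -\nabla I^{\top} L_{\mathbf{x}}\, v
 \qquad \Longrightarrow \qquad
 L_I = -\nabla I^{\top} L_{\mathbf{x}},
\f]

where \f$ \nabla I \f$ is the spatial gradient of the luminance at \f$ (x, y) \f$ and \f$ L_{\mathbf{x}} \f$ is the \f$ 2 \times 6 \f$ interaction matrix of the image point.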
More importantly, a vpFeatureMoment is used to compute the -interaction matrix associated to it's moment primitive. - -This class is virtual and cannot be used directly. It defines the following -characteristics common to all moment features: -- Plane orientation parameters (A,B,C): -Each camera frame corresponds to a physical planar object contained in a -plane. This plane's equation has the following form: \f$ A \times x+B \times y -+ C = \frac{1}{Z} \f$. These parameters can be updated anytime. -- Get corresponding moment primitive: for example a vpFeatureMomentCInvariant -will provide access to a vpMomentCInvariant instance. -- Provide access to a feature database (vpFeatureMomentDatabase). -- All interaction matrices (different from vpBasicFeature::interaction which -selects the required interaction matrix). - -Like vpMoment, vpFeatureMoment provides a vpFeatureMoment::update() method. -But unlike vpMoment::update() which only acknowledges the new object, the -vpFeatureMoment::update() acknowledges the new plane parameters AND computes -the interaction matrices associated with the feature. - -A vpFeatureMoment will be often part of a vpFeatureMomentDatabase in the same -way a vpMoment is part of a vpMomentDatabase. This database is specified -inside the vpFeatureMoment::vpFeatureMoment() constructor. As a result, a -vpFeatureMoment will be able to access other vpFeatureMoments through this -database. - -A vpBasicFeature can be duplicated into a vpMomentGenericFeature. In that -case, all data in the vpBasicFeature is copied but the feature's name is lost. -For example if a vpFeatureMomentCInvariant is duplicated, the duplicata will -be operational but could not be used in a vpFeatureMomentDatabase. - -Note that you can use vpFeatureMoment to do visual servoing but it is not it's -only purpose. You may compute your interaction matrices with -vpFeatureMoment::update() and use them for any purpose. - -\attention - A vpFeatureMoment is not responsible for updating the moment -primitives it depends on. Make sure your vpMoments are all up to date before -computing an interaction matrix using vpFeatureMoment. - -\attention - Be careful with orders. Often, computing a feature of order n -requires vpMoment primitives of order n+1. Make sure to check the -documentation of the specialised vpFeatureMoment classes when deciding to -which order you want to initialize the object. An object of order 6 should be -sufficient for all classic implementations of vpFeatureMoment. - -Here is an example of how to use a vpFeatureMoment (in this case -vpFeatureMomentBasic). -\code -#include -#include -#include -#include -#include -#include - -int main() -{ - vpPoint p; - std::vector vec_p; // vector that contains the vertices - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - // Init object of order 3 because we need vpFeatureMomentBasic of order 2 which - // implies third-order moment primitives - vpMomentObject obj(3); - obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object - obj.fromVector(vec_p); - - vpMomentDatabase mdb; //database for moment primitives. This will - //only contain the basic moment. 
- vpMomentBasic bm; //basic moment (this particular moment is nothing - //more than a shortcut to the vpMomentObject) - bm.linkTo(mdb); //add basic moment to moment database - - vpFeatureMomentBasic fmb(mdb,0,0,1,NULL); - - //update and compute the vpMoment BEFORE doing any operations with vpFeatureMoment - bm.update(obj); - bm.compute(); - - fmb.update(0,0,1); //update the vpFeatureMoment with a plane - //configuration - std::cout << fmb.interaction(1,1) << std::endl; -} -\endcode -*/ + * \class vpFeatureMoment + * + * \ingroup group_visual_features + * + * \brief This class defines shared system methods/attributes for 2D moment + * features but no functional code. It is used to compute interaction matrices + * for moment features. + * + * While vpMoment-type classes do only compute moment values and can by used for + * almost anything, vpFeatureMoment-type classes are specifically designed for + * visual servoing. More importantly, a vpFeatureMoment is used to compute the + * interaction matrix associated to it's moment primitive. + * + * This class is virtual and cannot be used directly. It defines the following + * characteristics common to all moment features: + * - Plane orientation parameters (A,B,C): + * Each camera frame corresponds to a physical planar object contained in a + * plane. This plane's equation has the following form: \f$ A \times x+B \times y + * + C = \frac{1}{Z} \f$. These parameters can be updated anytime. + * - Get corresponding moment primitive: for example a vpFeatureMomentCInvariant + * will provide access to a vpMomentCInvariant instance. + * - Provide access to a feature database (vpFeatureMomentDatabase). + * - All interaction matrices (different from vpBasicFeature::interaction which + * selects the required interaction matrix). + * + * Like vpMoment, vpFeatureMoment provides a vpFeatureMoment::update() method. + * But unlike vpMoment::update() which only acknowledges the new object, the + * vpFeatureMoment::update() acknowledges the new plane parameters AND computes + * the interaction matrices associated with the feature. + * + * A vpFeatureMoment will be often part of a vpFeatureMomentDatabase in the same + * way a vpMoment is part of a vpMomentDatabase. This database is specified + * inside the vpFeatureMoment::vpFeatureMoment() constructor. As a result, a + * vpFeatureMoment will be able to access other vpFeatureMoments through this + * database. + * + * A vpBasicFeature can be duplicated into a vpMomentGenericFeature. In that + * case, all data in the vpBasicFeature is copied but the feature's name is lost. + * For example if a vpFeatureMomentCInvariant is duplicated, the duplicate will + * be operational but could not be used in a vpFeatureMomentDatabase. + * + * Note that you can use vpFeatureMoment to do visual servoing but it is not it's + * only purpose. You may compute your interaction matrices with + * vpFeatureMoment::update() and use them for any purpose. + * + * \attention - A vpFeatureMoment is not responsible for updating the moment + * primitives it depends on. Make sure your vpMoments are all up to date before + * computing an interaction matrix using vpFeatureMoment. + * + * \attention - Be careful with orders. Often, computing a feature of order n + * requires vpMoment primitives of order n+1. Make sure to check the + * documentation of the specialized vpFeatureMoment classes when deciding to + * which order you want to initialize the object. 
An object of order 6 should be + * sufficient for all classic implementations of vpFeatureMoment. + * + * Here is an example of how to use a vpFeatureMoment (in this case + * vpFeatureMomentBasic). + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpPoint p; + * std::vector vec_p; // vector that contains the vertices + * + * p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * // Init object of order 3 because we need vpFeatureMomentBasic of order 2 which + * // implies third-order moment primitives + * vpMomentObject obj(3); + * obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object + * obj.fromVector(vec_p); + * + * vpMomentDatabase mdb; //database for moment primitives. This will + * //only contain the basic moment. + * vpMomentBasic bm; //basic moment (this particular moment is nothing + * //more than a shortcut to the vpMomentObject) + * bm.linkTo(mdb); //add basic moment to moment database + * + * vpFeatureMomentBasic fmb(mdb,0,0,1,NULL); + * + * //update and compute the vpMoment BEFORE doing any operations with vpFeatureMoment + * bm.update(obj); + * bm.compute(); + * + * fmb.update(0,0,1); //update the vpFeatureMoment with a plane + * //configuration + * std::cout << fmb.interaction(1,1) << std::endl; + * } + * \endcode + */ class VISP_EXPORT vpFeatureMoment : public vpBasicFeature { protected: @@ -188,52 +184,49 @@ class VISP_EXPORT vpFeatureMoment : public vpBasicFeature public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane, feature database and matrix size. \param - data_base : Moment database. The database of moment primitives (first - parameter) is mandatory. It is used to access different moment values later - used to compute the final matrix. \param A_ : Plane coefficient in a \f$ A - \times x+B \times y + C = \frac{1}{Z} \f$ plane. \param B_ : Plane - coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = - \frac{1}{Z} \f$ plane. \param featureMoments : Feature database \param - nbmatrices : If you want to create a new vpFeatureMoment implementation, - your feature will often have a matrix size of n lines. You can specify the - number of lines by this parameter. - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane, feature database and matrix size. + * \param data_base : Moment database. The database of moment primitives (first + * parameter) is mandatory. It is used to access different moment values later + * used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database + * \param nbmatrices : If you want to create a new vpFeatureMoment implementation, + * your feature will often have a matrix size of n lines. You can specify the + * number of lines by this parameter. 
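Background note on the plane parameters \f$ (A, B, C) \f$ used by vpFeatureMoment above: their link with a 3-D plane equation is a one-line computation.

\f[
 aX + bY + cZ + d = 0 \;\;(d \neq 0)
 \quad \Longrightarrow \quad
 \frac{1}{Z} = -\frac{a}{d}\,x - \frac{b}{d}\,y - \frac{c}{d} = A\,x + B\,y + C,
\f]

obtained by dividing by \f$ Z \f$, substituting \f$ x = X/Z \f$ and \f$ y = Y/Z \f$, and solving for \f$ 1/Z \f$.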
+ */ vpFeatureMoment(vpMomentDatabase &data_base, double A_ = 0.0, double B_ = 0.0, double C_ = 0.0, vpFeatureMomentDatabase *featureMoments = NULL, unsigned int nbmatrices = 1) : vpBasicFeature(), moment(NULL), moments(data_base), featureMomentsDataBase(featureMoments), - interaction_matrices(nbmatrices), A(A_), B(B_), C(C_), _name() - { - } - - virtual ~vpFeatureMoment(); + interaction_matrices(nbmatrices), A(A_), B(B_), C(C_), _name() + { } /** @name Inherited functionalities from vpFeatureMoment */ //@{ virtual void compute_interaction(void); - vpBasicFeature *duplicate() const; + vpBasicFeature *duplicate() const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; int getDimension(unsigned int select = FEATURE_ALL) const; - void init(void); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + void init(void) override; + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; void linkTo(vpFeatureMomentDatabase &featureMoments); /*! - Name of the moment corresponding to the feature. This allows to locate - the moment associated with the feature in the provided database. - */ + * Name of the moment corresponding to the feature. This allows to locate + * the moment associated with the feature in the provided database. + */ virtual const char *momentName() const = 0; /*! - Name of the feature used to locate it in the database of features. - */ + * Name of the feature used to locate it in the database of features. + */ virtual const char *name() const = 0; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; virtual void printDependencies(std::ostream &os) const; void update(double A, double B, double C); @@ -243,23 +236,22 @@ class VISP_EXPORT vpFeatureMoment : public vpBasicFeature }; /*! -\class vpMomentGenericFeature - -\ingroup group_visual_features - -\brief This class defines a generic feature used for moment feature -duplication. - -A vpBasicFeature can be duplicated into a vpMomentGenericFeature. In that -case, all data in the vpBasicFeature is copied but the feature's name is lost. -For example if a vpFeatureMomentCInvariant is duplicated, the duplicata will -be operational but could not be used in a vpFeatureMomentDatabase. The reason -for this is that a vpMomentGenericFeature can refer to anything therefore it -has no specific name. - -Duplication is mostly used internally in ViSP. - -*/ + * \class vpMomentGenericFeature + * + * \ingroup group_visual_features + * + * \brief This class defines a generic feature used for moment feature + * duplication. + * + * A vpBasicFeature can be duplicated into a vpMomentGenericFeature. In that + * case, all data in the vpBasicFeature is copied but the feature's name is lost. + * For example if a vpFeatureMomentCInvariant is duplicated, the duplicate will + * be operational but could not be used in a vpFeatureMomentDatabase. The reason + * for this is that a vpMomentGenericFeature can refer to anything therefore it + * has no specific name. + * + * Duplication is mostly used internally in ViSP. 
+ */ class VISP_EXPORT vpMomentGenericFeature : public vpFeatureMoment { public: @@ -269,13 +261,15 @@ class VISP_EXPORT vpMomentGenericFeature : public vpFeatureMoment { this->moment = p_moment; } + /*! - No specific moment name. - */ + * No specific moment name. + */ const char *momentName() const { return NULL; } + /*! - No specific feature name. - */ + * No specific feature name. + */ virtual const char *name() const { return NULL; } }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h index 1ff7eb45e3..f8214c084d 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentAlpha.h - \brief Implementation of the interaction matrix computation for - vpMomentAlpha. -*/ + * \file vpFeatureMomentAlpha.h + * \brief Implementation of the interaction matrix computation for + * vpMomentAlpha. + */ #ifndef _vpFeatureMomentAlpha_h_ #define _vpFeatureMomentAlpha_h_ @@ -49,88 +45,87 @@ class vpMomentDatabase; /*! - \class vpFeatureMomentAlpha - - \ingroup group_visual_features - - \brief Functionality computation for in-plane rotation moment feature \f$ \alpha \f$: - computes the interaction matrix associated with vpMomentAlpha. - - The interaction matrix for the feature can be deduced from \cite Tahri05z. - - This class computes the interaction matrix associated to \f$ \alpha = - \frac{1}{2} arctan(\frac{2\mu_{11}}{\mu_{20}-\mu_{02}}) \f$ moment primitive. - - The interaction matrix for the feature has the following form: - \f[{ - \left[ \begin {array}{c} {\frac {\mu_{{1,1}}{\it DA}\,A}{d}}+{\frac { - \left( {\it DA}\,\mu_{{0,2}}+1/2\,d-1/2\,{{\it DA}}^{2} \right) B}{d} -}\\ \noalign{\medskip}{\frac { \left( {\it DA}\,\mu_{{0,2}}-1/2\,d-1/2 -\,{{\it DA}}^{2} \right) A}{d}}-{\frac {B\mu_{{1,1}}{\it DA}}{d}} -\\ \noalign{\medskip}Bw_{{x}}-Aw_{{y}}\\ \noalign{\medskip}{\frac { -\beta\, \left( \mu_{{1,2}} \left( \mu_{{2,0}}-\mu_{{0,2}} \right) +\mu -_{{1,1}} \left( \mu_{{0,3}}-\mu_{{2,1}} \right) \right) +\gamma\,x_{{ -g}} \left( \mu_{{0,2}} \left( \mu_{{2,0}}-\mu_{{0,2}} \right) -2\,{\mu -_{{1,1}}}^{2} \right) +\gamma\,y_{{g}}\mu_{{1,1}} \left( \mu_{{2,0}}+ -\mu_{{0,2}} \right) }{d}}\\ \noalign{\medskip}{\frac {\beta\, \left( -\mu_{{2,1}} \left( \mu_{{0,2}}-\mu_{{2,0}} \right) +\mu_{{1,1}} - \left( \mu_{{3,0}}-\mu_{{1,2}} \right) \right) +\gamma\,x_{{g}}\mu_{ -{1,1}} \left( \mu_{{2,0}}+\mu_{{0,2}} \right) +\gamma\,y_{{g}} \left( -\mu_{{2,0}} \left( \mu_{{0,2}}-\mu_{{2,0}} \right) -2\,{\mu_{{1,1}}}^{ -2} \right) }{d}}\\ \noalign{\medskip}-1\end {array} \right] -}^t -\f] -with \f${\it DA} = \mu_{{2,0}}-\mu_{{0,2}}\f$ and \f${\it d} = -DA^2+4{\mu_{1,1}}^2\f$. - - - In the discrete case: - \f$beta = 4\f$,\f$gamma = 2\f$. - - In the dense case: - \f$beta = 5\f$,\f$gamma = 1\f$. 
- - - The interaction matrix computed is single-dimension (no selection possible) - and can be obtained by calling vpFeatureMomentAlpha::interaction(). - - This feature is often used in moment-based visual servoing to control the - planar rotation parameter. - - Minimum vpMomentObject order needed to compute this feature: 4. - - This feature depends on: - - vpMomentCentered - - vpMomentGravityCenter -*/ + * \class vpFeatureMomentAlpha + * + * \ingroup group_visual_features + * + * \brief Functionality computation for in-plane rotation moment feature \f$ \alpha \f$: + * computes the interaction matrix associated with vpMomentAlpha. + * + * The interaction matrix for the feature can be deduced from \cite Tahri05z. + * + * This class computes the interaction matrix associated to \f$ \alpha = + * \frac{1}{2} arctan(\frac{2\mu_{11}}{\mu_{20}-\mu_{02}}) \f$ moment primitive. + * + * The interaction matrix for the feature has the following form: + * \f[{ + * \left[ \begin {array}{c} {\frac {\mu_{{1,1}}{\it DA}\,A}{d}}+{\frac { + * \left( {\it DA}\,\mu_{{0,2}}+1/2\,d-1/2\,{{\it DA}}^{2} \right) B}{d} + * }\\ \noalign{\medskip}{\frac { \left( {\it DA}\,\mu_{{0,2}}-1/2\,d-1/2 + * \,{{\it DA}}^{2} \right) A}{d}}-{\frac {B\mu_{{1,1}}{\it DA}}{d}} + * \\ \noalign{\medskip}Bw_{{x}}-Aw_{{y}}\\ \noalign{\medskip}{\frac { + * \beta\, \left( \mu_{{1,2}} \left( \mu_{{2,0}}-\mu_{{0,2}} \right) +\mu + * _{{1,1}} \left( \mu_{{0,3}}-\mu_{{2,1}} \right) \right) +\gamma\,x_{{ + * g}} \left( \mu_{{0,2}} \left( \mu_{{2,0}}-\mu_{{0,2}} \right) -2\,{\mu + * _{{1,1}}}^{2} \right) +\gamma\,y_{{g}}\mu_{{1,1}} \left( \mu_{{2,0}}+ + * \mu_{{0,2}} \right) }{d}}\\ \noalign{\medskip}{\frac {\beta\, \left( + * \mu_{{2,1}} \left( \mu_{{0,2}}-\mu_{{2,0}} \right) +\mu_{{1,1}} + * \left( \mu_{{3,0}}-\mu_{{1,2}} \right) \right) +\gamma\,x_{{g}}\mu_{ + * {1,1}} \left( \mu_{{2,0}}+\mu_{{0,2}} \right) +\gamma\,y_{{g}} \left( + * \mu_{{2,0}} \left( \mu_{{0,2}}-\mu_{{2,0}} \right) -2\,{\mu_{{1,1}}}^{ + * 2} \right) }{d}}\\ \noalign{\medskip}-1\end {array} \right] + * }^t + * \f] + * with \f${\it DA} = \mu_{{2,0}}-\mu_{{0,2}}\f$ and \f${\it d} = + * DA^2+4{\mu_{1,1}}^2\f$. + * + * - In the discrete case: + * \f$beta = 4\f$,\f$gamma = 2\f$. + * - In the dense case: + * \f$beta = 5\f$,\f$gamma = 1\f$. + * + * The interaction matrix computed is single-dimension (no selection possible) + * and can be obtained by calling vpFeatureMomentAlpha::interaction(). + * + * This feature is often used in moment-based visual servoing to control the + * planar rotation parameter. + * + * Minimum vpMomentObject order needed to compute this feature: 4. + * + * This feature depends on: + * - vpMomentCentered + * - vpMomentGravityCenter + */ class VISP_EXPORT vpFeatureMomentAlpha : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. 
- - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentAlpha(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) - { - } + { } + + void compute_interaction() override; - void compute_interaction(); /*! - Associated moment name. - */ - const char *momentName() const { return "vpMomentAlpha"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentAlpha"; } + /*! - Feature name. - */ - const char *name() const { return "vpFeatureMomentAlpha"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentAlpha"; } - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; }; #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h index e45ee07240..08d16431f9 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Definition of vpFeatureMomentArea associated to vpMomentArea - * - * Authors: - * Manikandan Bakthavatchalam - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentArea.h - \brief Implementation of the interaction matrix computation for - vpMomentArea. -*/ + * \file vpFeatureMomentArea.h + * \brief Implementation of the interaction matrix computation for + * vpMomentArea. + */ #ifndef _vpFeatureMomentArea_h_ #define _vpFeatureMomentArea_h_ @@ -48,43 +44,40 @@ class vpMomentDatabase; /*! - \class vpFeatureMomentArea - - \ingroup group_visual_features - - \brief Surface moment feature. Computes the interaction matrix associated - with vpMomentArea. - -*/ - + * \class vpFeatureMomentArea + * + * \ingroup group_visual_features + * + * \brief Surface moment feature. Computes the interaction matrix associated + * with vpMomentArea. + */ class VISP_EXPORT vpFeatureMomentArea : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. 
- \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentArea(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) - { - } + { } - void compute_interaction(); + void compute_interaction() override; /*! - associated moment name - */ - const char *momentName() const { return "vpMomentArea"; } + * associated moment name + */ + const char *momentName() const override { return "vpMomentArea"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentArea"; } + * feature name + */ + const char *name() const override { return "vpFeatureMomentArea"; } }; #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h index 61c074ba47..4cc159960b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentAreaNormalized.h - \brief Implementation of the interaction matrix computation for - vpMomentAreaNormalized. -*/ + * \file vpFeatureMomentAreaNormalized.h + * \brief Implementation of the interaction matrix computation for + * vpMomentAreaNormalized. + */ #ifndef _vpFeatureMomentAreaNormalized_h_ #define _vpFeatureMomentAreaNormalized_h_ @@ -49,162 +45,161 @@ class vpMomentDatabase; /*! - \class vpFeatureMomentAreaNormalized - - \ingroup group_visual_features - - \brief Functionality computation for normalized surface moment feature. - Computes the interaction matrix associated with vpMomentAreaNormalized. - - The interaction matrix for the moment feature can be deduced from \cite - Tahri05z. - - To do so, one must derive it and obtain a combination of interaction - matrices by using (1). It allows to compute the interaction matrix for \f$ - a_n \f$. - - The interaction matrix computed is single-dimension (no selection possible) - and can be obtained by calling - vpFeatureMomentGravityCenterNormalized::interaction. - - This feature is often used in moment-based visual servoing to control the - depth parameter. 
- - Minimum vpMomentObject order needed to compute this feature: 1 in dense mode - and 3 in discrete mode. - - This feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentAreaNormalized - - vpFeatureMomentBasic - + * \class vpFeatureMomentAreaNormalized + * + * \ingroup group_visual_features + * + * \brief Functionality computation for normalized surface moment feature. + * Computes the interaction matrix associated with vpMomentAreaNormalized. + * + * The interaction matrix for the moment feature can be deduced from \cite + * Tahri05z. + * + * To do so, one must derive it and obtain a combination of interaction + * matrices by using (1). It allows to compute the interaction matrix for \f$ + * a_n \f$. + * + * The interaction matrix computed is single-dimension (no selection possible) + * and can be obtained by calling + * vpFeatureMomentGravityCenterNormalized::interaction. + * + * This feature is often used in moment-based visual servoing to control the + * depth parameter. + * + * Minimum vpMomentObject order needed to compute this feature: 1 in dense mode + * and 3 in discrete mode. + * + * This feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentAreaNormalized + * - vpFeatureMomentBasic */ class VISP_EXPORT vpFeatureMomentAreaNormalized : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param database : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A : Plane coefficient in a \f$ A \times x+B \times y + C = - \frac{1}{Z} \f$ plane. - \param B : Plane coefficient in a \f$ A \times x+B - \times y + C = \frac{1}{Z} \f$ plane. - \param C : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param database : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A : Plane coefficient in a \f$ A \times x+B \times y + C = + * \frac{1}{Z} \f$ plane. + * \param B : Plane coefficient in a \f$ A \times x+B + * \times y + C = \frac{1}{Z} \f$ plane. + * \param C : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentAreaNormalized(vpMomentDatabase &database, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 1) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! - associated moment name - */ - const char *momentName() const { return "vpMomentAreaNormalized"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentAreaNormalized"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentAreaNormalized"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentAreaNormalized"; } }; #else class vpMomentDatabase; /*! - \class vpFeatureMomentAreaNormalized - - \ingroup group_visual_features - - \brief Functionality computation for normalized surface moment feature. 
- Computes the interaction matrix associated with vpMomentAreaNormalized. - - The interaction matrix for the moment has the following form: - - In the discrete case: - \f[ - L_{a_n} = - { - \left[ - \begin {array}{c} - a_{{n}}Ae_{{2,0}}+a_{{n}}Be_{{1,1}} \\ - \noalign{\medskip}a_{{n}}Ae_{{1,1}}+a_{{n}}Be_{{0,2}} \\ - \noalign{\medskip}-a_{{n}}C+Bw_{{x}}-Aw_{{y}} \\ - \noalign{\medskip}- \left( e_{{2,0}}+2\,e_{{0,2}} \right) y_{{g}}-e_{{2,1}}-x_{{g}}e_{{1,1}}+\eta_{{1,1}}e_{{1,0}}-e_{{0,3}}+\eta_{{0,2}}e_{{0,1}} \\ - \noalign{\medskip} \left( 2\,e_{{2,0}}+e_{{0,2}} \right) x_{{g}}+e_{{3,0}}+y_{{g}}e_{{1,1}}-\eta_{{2,0}}e_{{1,0}}+e_{{1,2}}-\eta_{{1,1}}e_{{0,1}} \\ - \noalign{\medskip}0 - \end {array} - \right] - }^t - \f] - - In the dense case: - \f[ - L_{a_n} = - { - \left[ - \begin {array}{c} - 1/2\,a_{{n}}A \\ - \noalign{\medskip}1/2\,a_{{n}}B \\ - \noalign{\medskip}-a_{{n}}C-3/2\,Ax_{{n}}-3/2\,By_{{n}} \\ - \noalign{\medskip}-3/2\,y_{{n}} \\ - \noalign{\medskip}3/2\,x_{{n}} \\ - \noalign{\medskip}0 - \end {array} - \right] - }^t - \f] - with: - - \f$e_{i,j}=\frac{\mu_{i,j}}{NA}\f$ - - \f$NA=\mu_{2,0}+\mu_{0,2}\f$ - - \f$\eta\f$ is the centered and normalized moment. - To do so, one must derive it and obtain a combination of interaction - matrices by using (1). It allows to compute the interaction matrix for \f$ a_n \f$. - - The interaction matrix computed is single-dimension (no selection possible) - and can be obtained by calling vpFeatureMomentGravityCenterNormalized::interaction. - - This feature is often used in moment-based visual servoing to control the depth parameter. - - Minimum vpMomentObject order needed to compute this feature: 1 in dense mode - and 3 in discrete mode. - - This feature depends on: - - vpMomentCentered - - vpMomentAreaNormalized - - vpMomentGravityCenter - -*/ + * \class vpFeatureMomentAreaNormalized + * + * \ingroup group_visual_features + * + * \brief Functionality computation for normalized surface moment feature. + * Computes the interaction matrix associated with vpMomentAreaNormalized. + * + * The interaction matrix for the moment has the following form: + * - In the discrete case: + * \f[ + * L_{a_n} = + * { + * \left[ + * \begin {array}{c} + * a_{{n}}Ae_{{2,0}}+a_{{n}}Be_{{1,1}} \\ + * \noalign{\medskip}a_{{n}}Ae_{{1,1}}+a_{{n}}Be_{{0,2}} \\ + * \noalign{\medskip}-a_{{n}}C+Bw_{{x}}-Aw_{{y}} \\ + * \noalign{\medskip}- \left( e_{{2,0}}+2\,e_{{0,2}} \right) y_{{g}}-e_{{2,1}}-x_{{g}}e_{{1,1}}+\eta_{{1,1}}e_{{1,0}}-e_{{0,3}}+\eta_{{0,2}}e_{{0,1}} \\ + * \noalign{\medskip} \left( 2\,e_{{2,0}}+e_{{0,2}} \right) x_{{g}}+e_{{3,0}}+y_{{g}}e_{{1,1}}-\eta_{{2,0}}e_{{1,0}}+e_{{1,2}}-\eta_{{1,1}}e_{{0,1}} \\ + * \noalign{\medskip}0 + * \end {array} + * \right] + * }^t + * \f] + * + * - In the dense case: + * \f[ + * L_{a_n} = + * { + * \left[ + * \begin {array}{c} + * 1/2\,a_{{n}}A \\ + * \noalign{\medskip}1/2\,a_{{n}}B \\ + * \noalign{\medskip}-a_{{n}}C-3/2\,Ax_{{n}}-3/2\,By_{{n}} \\ + * \noalign{\medskip}-3/2\,y_{{n}} \\ + * \noalign{\medskip}3/2\,x_{{n}} \\ + * \noalign{\medskip}0 + * \end {array} + * \right] + * }^t + * \f] + * with: + * - \f$e_{i,j}=\frac{\mu_{i,j}}{NA}\f$ + * - \f$NA=\mu_{2,0}+\mu_{0,2}\f$ + * - \f$\eta\f$ is the centered and normalized moment. + * To do so, one must derive it and obtain a combination of interaction + * matrices by using (1). It allows to compute the interaction matrix for \f$ a_n \f$. 
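+ *
+ * For instance, specializing the dense-case matrix above to a fronto-parallel
+ * object plane (\f$ A = B = 0 \f$, \f$ C = 1/Z \f$) gives
+ * \f[
+ * L_{a_n} =
+ * {
+ * \left[
+ * \begin {array}{c}
+ * 0 \\
+ * \noalign{\medskip}0 \\
+ * \noalign{\medskip}-a_{{n}}C \\
+ * \noalign{\medskip}-3/2\,y_{{n}} \\
+ * \noalign{\medskip}3/2\,x_{{n}} \\
+ * \noalign{\medskip}0
+ * \end {array}
+ * \right]
+ * }^t
+ * \f]
+ * so that, in this configuration, only the translation along the optical axis
+ * and the two out-of-plane rotations act on the normalized area feature.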
+ * + * The interaction matrix computed is single-dimension (no selection possible) + * and can be obtained by calling vpFeatureMomentGravityCenterNormalized::interaction. + * + * This feature is often used in moment-based visual servoing to control the depth parameter. + * + * Minimum vpMomentObject order needed to compute this feature: 1 in dense mode + * and 3 in discrete mode. + * + * This feature depends on: + * - vpMomentCentered + * - vpMomentAreaNormalized + * - vpMomentGravityCenter + */ class VISP_EXPORT vpFeatureMomentAreaNormalized : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentAreaNormalized(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! - associated moment name - */ - const char *momentName() const { return "vpMomentAreaNormalized"; } + * Associated moment name + */ + const char *momentName() const override { return "vpMomentAreaNormalized"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentAreaNormalized"; } + * Feature name + */ + const char *name() const override { return "vpFeatureMomentAreaNormalized"; } }; #endif #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h index 4708d1cea9..9a8855bb27 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentBasic.h - \brief Implementation of the interaction matrix computation for - vpMomentBasic. 
-*/ + * \file vpFeatureMomentBasic.h + * \brief Implementation of the interaction matrix computation for + * vpMomentBasic. + */ #ifndef _vpFeatureMomentBasic_h_ #define _vpFeatureMomentBasic_h_ @@ -50,33 +46,33 @@ class vpMomentDatabase; /*! - \class vpFeatureMomentBasic - - \ingroup group_visual_features - - \brief Functionality computation for basic moment feature. Computes the - interaction matrix associated with vpMomentBasic. - - The interaction matrix for the basic moment feature is defined in - \cite Tahri05z, equation (13). This vpFeatureMoment, as well as it's - corresponding moment primitive is double-indexed. The interaction matrix \f$ - L_{m_{ij}} \f$ is obtained by calling vpFeatureMomentBasic::interaction - (i,j) and is associated to \f$ m_{ij} \f$ obtained by vpMomentBasic::get - (i,j). vpFeatureMomentBasic computes interaction matrices all interaction - matrices up to vpMomentObject::getOrder()-1. \attention The maximum order - reached by vpFeatureMomentBasic is NOT the maximum order of the - vpMomentObject, it is one unit smaller. For example if you define your - vpMomentObject up to order n then vpFeatureMomentBasic will be able to - compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with - \f$ i+j<=n-1 \f$. - - You can see an example of vpFeatureMomentBasic by looking at the - documentation of the vpFeatureMoment class. - - This feature depends on: - - vpMomentBasic - -*/ + * \class vpFeatureMomentBasic + * + * \ingroup group_visual_features + * + * \brief Functionality computation for basic moment feature. Computes the + * interaction matrix associated with vpMomentBasic. + * + * The interaction matrix for the basic moment feature is defined in + * \cite Tahri05z, equation (13). This vpFeatureMoment, as well as it's + * corresponding moment primitive is double-indexed. The interaction matrix \f$ + * L_{m_{ij}} \f$ is obtained by calling vpFeatureMomentBasic::interaction + * (i,j) and is associated to \f$ m_{ij} \f$ obtained by vpMomentBasic::get + * (i,j). vpFeatureMomentBasic computes interaction matrices all interaction + * matrices up to vpMomentObject::getOrder()-1. + * + * \attention The maximum order reached by vpFeatureMomentBasic is NOT the maximum order of the + * vpMomentObject, it is one unit smaller. For example if you define your + * vpMomentObject up to order n then vpFeatureMomentBasic will be able to + * compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with + * \f$ i+j<=n-1 \f$. + * + * You can see an example of vpFeatureMomentBasic by looking at the + * documentation of the vpFeatureMoment class. + * + * This feature depends on: + * - vpMomentBasic + */ class VISP_EXPORT vpFeatureMomentBasic : public vpFeatureMoment { protected: @@ -85,24 +81,26 @@ class VISP_EXPORT vpFeatureMomentBasic : public vpFeatureMoment public: vpFeatureMomentBasic(vpMomentDatabase &moments, double A, double B, double C, vpFeatureMomentDatabase *featureMoments = NULL); - void compute_interaction(); + void compute_interaction() override; #ifndef DOXYGEN_SHOULD_SKIP_THIS /* Add function due to pure virtual definition in vpBasicFeature.h */ - vpMatrix interaction(unsigned int /* select = FEATURE_ALL */) + vpMatrix interaction(unsigned int /* select = FEATURE_ALL */) override { throw vpException(vpException::functionNotImplementedError, "Not implemented!"); } #endif vpMatrix interaction(unsigned int select_one, unsigned int select_two) const; + /*! - Associated moment name. 
- */ - const char *momentName() const { return "vpMomentBasic"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentBasic"; } + /*! - Feature name. - */ - const char *name() const { return "vpFeatureMomentBasic"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentBasic"; } }; #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h index 9f04fa0a68..466fd87c58 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentCInvariant.h - \brief Implementation of the interaction matrix computation for - vpMomentCInvariant. -*/ + * \file vpFeatureMomentCInvariant.h + * \brief Implementation of the interaction matrix computation for vpMomentCInvariant. + */ #ifndef _vpFeatureMomentCInvariant_h_ #define _vpFeatureMomentCInvariant_h_ @@ -47,135 +42,134 @@ #ifdef VISP_MOMENTS_COMBINE_MATRICES /*! - \class vpFeatureMomentCInvariant - - \ingroup group_visual_features - - \brief Functionality computation for 2D rotation/translation/scale - non-symmetric invariant moment feature. Computes the interaction matrix - associated with vpMomentCInvariant. - - The interaction matrix for the moment feature can be deduced from \cite - Tahri05z, equations (9). To do so, one must derive them and obtain a - combination of interaction matrices by using (1). It allows to compute the - interaction matrix for \f$ c_i, i \in [1..10] \f$. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentCInvariant::interaction(). The selection by the - vpFeatureMomentCInvariant::selectCi method for \f$ L_{c_i} \f$. For example, - to select \f$ L_{c_1} \f$ you should input - vpFeatureMomentCInvariant::selectC1() into ViSP's selector. Special matrices - for features \f$ S_x \f$ and \f$ S_y \f$ are selected by - vpFeatureMomentCInvariant::selectSx() and - vpFeatureMomentCInvariant::selectSy() respectively. Special matrices for - features \f$ P_x \f$ and \f$ P_y \f$ are selected by - vpFeatureMomentCInvariant::selectPx() and - vpFeatureMomentCInvariant::selectPy() respectively. - - These features are often used in moment-based visual servoing to control the - two out-of-plane rotations. - - Be careful about the nature of your object when selecting the right - features. Use \f$ L_{S_{x}} \f$ and \f$ L_{S_{y}} \f$ when you're dealing - with a symmetric object all other features otherwise. - - Minimum vpMomentObject order needed to compute this feature: 6. This is the - highest ordrer required by classic features. - - This feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentCInvariant - - vpFeatureMomentBasic - - An example of how to use vpFeatureMomentCInvariant in a complete visual - servoing example is given in vpFeatureMomentCommon. 
- -*/ + * \class vpFeatureMomentCInvariant + * + * \ingroup group_visual_features + * + * \brief Functionality computation for 2D rotation/translation/scale + * non-symmetric invariant moment feature. Computes the interaction matrix + * associated with vpMomentCInvariant. + * + * The interaction matrix for the moment feature can be deduced from \cite + * Tahri05z, equations (9). To do so, one must derive them and obtain a + * combination of interaction matrices by using (1). It allows to compute the + * interaction matrix for \f$ c_i, i \in [1..10] \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentCInvariant::interaction(). The selection by the + * vpFeatureMomentCInvariant::selectCi method for \f$ L_{c_i} \f$. For example, + * to select \f$ L_{c_1} \f$ you should input + * vpFeatureMomentCInvariant::selectC1() into ViSP's selector. Special matrices + * for features \f$ S_x \f$ and \f$ S_y \f$ are selected by + * vpFeatureMomentCInvariant::selectSx() and + * vpFeatureMomentCInvariant::selectSy() respectively. Special matrices for + * features \f$ P_x \f$ and \f$ P_y \f$ are selected by + * vpFeatureMomentCInvariant::selectPx() and + * vpFeatureMomentCInvariant::selectPy() respectively. + * + * These features are often used in moment-based visual servoing to control the + * two out-of-plane rotations. + * + * Be careful about the nature of your object when selecting the right + * features. Use \f$ L_{S_{x}} \f$ and \f$ L_{S_{y}} \f$ when you're dealing + * with a symmetric object all other features otherwise. + * + * Minimum vpMomentObject order needed to compute this feature: 6. This is the + * highest order required by classic features. + * + * This feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentCInvariant + * - vpFeatureMomentBasic + * + * An example of how to use vpFeatureMomentCInvariant in a complete visual + * servoing example is given in vpFeatureMomentCommon. + */ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param moments : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param moments : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. 
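+ *
+ * A minimal construction sketch, assuming a vpMomentDatabase mdb and a
+ * vpFeatureMomentDatabase fmdb that already hold the dependencies listed
+ * above (vpMomentCentered, vpFeatureMomentCentered, ...):
+ * \code
+ * vpFeatureMomentCInvariant fci(mdb, 0., 0., 1., &fmdb); // object plane 1/Z = 1
+ * fci.linkTo(fmdb);       // register the feature in the feature database
+ * fci.update(0., 0., 1.); // update the plane parameters and compute the interaction matrices
+ * vpMatrix L_c1 = fci.interaction(vpFeatureMomentCInvariant::selectC1());
+ * \endcode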
+ */ vpFeatureMomentCInvariant(vpMomentDatabase &moments, double A, double B, double C, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(moments, A, B, C, featureMoments, 16) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! - associated moment name - */ - const char *momentName() const { return "vpMomentCInvariant"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentCInvariant"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentCInvariant"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentCInvariant"; } /*! - Shortcut selector for \f$C_1\f$. - */ + * Shortcut selector for \f$C_1\f$. + */ static unsigned int selectC1() { return 1 << 0; } /*! - Shortcut selector for \f$C_2\f$. - */ + * Shortcut selector for \f$C_2\f$. + */ static unsigned int selectC2() { return 1 << 1; } /*! - Shortcut selector for \f$C_3\f$. - */ + * Shortcut selector for \f$C_3\f$. + */ static unsigned int selectC3() { return 1 << 2; } /*! - Shortcut selector for \f$C_4\f$. - */ + * Shortcut selector for \f$C_4\f$. + */ static unsigned int selectC4() { return 1 << 3; } /*! - Shortcut selector for \f$C_5\f$. - */ + * Shortcut selector for \f$C_5\f$. + */ static unsigned int selectC5() { return 1 << 4; } /*! - Shortcut selector for \f$C_6\f$. - */ + * Shortcut selector for \f$C_6\f$. + */ static unsigned int selectC6() { return 1 << 5; } /*! - Shortcut selector for \f$C_7\f$. - */ + * Shortcut selector for \f$C_7\f$. + */ static unsigned int selectC7() { return 1 << 6; } /*! - Shortcut selector for \f$C_8\f$. - */ + * Shortcut selector for \f$C_8\f$. + */ static unsigned int selectC8() { return 1 << 7; } /*! - Shortcut selector for \f$C_9\f$. - */ + * Shortcut selector for \f$C_9\f$. + */ static unsigned int selectC9() { return 1 << 8; } /*! - Shortcut selector for \f$C_{10}\f$. - */ + * Shortcut selector for \f$C_{10}\f$. + */ static unsigned int selectC10() { return 1 << 9; } /*! - Shortcut selector for \f$S_x\f$. - */ + * Shortcut selector for \f$S_x\f$. + */ static unsigned int selectSx() { return 1 << 10; } /*! - Shortcut selector for \f$S_y\f$. - */ + * Shortcut selector for \f$S_y\f$. + */ static unsigned int selectSy() { return 1 << 11; } /*! - Shortcut selector for \f$P_x\f$. - */ + * Shortcut selector for \f$P_x\f$. + */ static unsigned int selectPx() { return 1 << 12; } /*! - Shortcut selector for \f$P_y\f$. - */ + * Shortcut selector for \f$P_y\f$. + */ static unsigned int selectPy() { return 1 << 13; } }; @@ -183,51 +177,50 @@ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment class vpMomentDatabase; /*! - \class vpFeatureMomentCInvariant - - \ingroup group_visual_features - - \brief Functionality computation for 2D rotation/translation/scale - non-symmetric invariant moment feature. Computes the interaction matrix - associated with vpMomentCInvariant. - - The interaction matrix for the moment feature can be deduced from - \cite Tahri05z, equations (9). To do so, one must derive them and obtain a - combination of interaction matrices by using (1). It allows to compute the - interaction matrix for \f$ c_i, i \in [1..10] \f$. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentCInvariant::interaction(). The selection by the - vpFeatureMomentCInvariant::selectCi method for \f$ L_{c_i} \f$. 
For example, - to select \f$ L_{c_1} \f$ you should input - vpFeatureMomentCInvariant::selectC1() into ViSP's selector. Special matrices - for features \f$ S_x \f$ and \f$ S_y \f$ are selected by - vpFeatureMomentCInvariant::selectSx() and - vpFeatureMomentCInvariant::selectSy() respectively. Special matrices for - features \f$ P_x \f$ and \f$ P_y \f$ are selected by - vpFeatureMomentCInvariant::selectPx() and - vpFeatureMomentCInvariant::selectPy() respectively. - - These features are often used in moment-based visual servoing to control the - two out-of-plane rotations. - - Be careful about the nature of your object when selecting the right - features. Use \f$ L_{S_{x}} \f$ and \f$ L_{S_{y}} \f$ when you're dealing - with a symmetric object all other features otherwise. - - Minimum vpMomentObject order needed to compute this feature: 6. This is the - highest ordrer required by classic features. - - This feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentCInvariant - - vpFeatureMomentBasic - - An example of how to use vpFeatureMomentCInvariant in a complete visual - servoing example is given in vpFeatureMomentCommon. - -*/ + * \class vpFeatureMomentCInvariant + * + * \ingroup group_visual_features + * + * \brief Functionality computation for 2D rotation/translation/scale + * non-symmetric invariant moment feature. Computes the interaction matrix + * associated with vpMomentCInvariant. + * + * The interaction matrix for the moment feature can be deduced from + * \cite Tahri05z, equations (9). To do so, one must derive them and obtain a + * combination of interaction matrices by using (1). It allows to compute the + * interaction matrix for \f$ c_i, i \in [1..10] \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentCInvariant::interaction(). The selection by the + * vpFeatureMomentCInvariant::selectCi method for \f$ L_{c_i} \f$. For example, + * to select \f$ L_{c_1} \f$ you should input + * vpFeatureMomentCInvariant::selectC1() into ViSP's selector. Special matrices + * for features \f$ S_x \f$ and \f$ S_y \f$ are selected by + * vpFeatureMomentCInvariant::selectSx() and + * vpFeatureMomentCInvariant::selectSy() respectively. Special matrices for + * features \f$ P_x \f$ and \f$ P_y \f$ are selected by + * vpFeatureMomentCInvariant::selectPx() and + * vpFeatureMomentCInvariant::selectPy() respectively. + * + * These features are often used in moment-based visual servoing to control the + * two out-of-plane rotations. + * + * Be careful about the nature of your object when selecting the right + * features. Use \f$ L_{S_{x}} \f$ and \f$ L_{S_{y}} \f$ when you're dealing + * with a symmetric object all other features otherwise. + * + * Minimum vpMomentObject order needed to compute this feature: 6. This is the + * highest order required by classic features. + * + * This feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentCInvariant + * - vpFeatureMomentBasic + * + * An example of how to use vpFeatureMomentCInvariant in a complete visual + * servoing example is given in vpFeatureMomentCommon. + */ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment { private: @@ -235,91 +228,89 @@ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. 
The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentCInvariant(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 16), LI(16) - { - } + { } - void compute_interaction(); + void compute_interaction() override; /*! - associated moment name - */ - const char *momentName() const { return "vpMomentCInvariant"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentCInvariant"; } /*! - feature name - */ - const char *name() const { return "vpFeatureMomentCInvariant"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentCInvariant"; } /*! - Shortcut selector for \f$C_1\f$. - */ + * Shortcut selector for \f$C_1\f$. + */ static unsigned int selectC1() { return 1 << 0; } /*! - Shortcut selector for \f$C_2\f$. - */ + * Shortcut selector for \f$C_2\f$. + */ static unsigned int selectC2() { return 1 << 1; } /*! - Shortcut selector for \f$C_3\f$. - */ + * Shortcut selector for \f$C_3\f$. + */ static unsigned int selectC3() { return 1 << 2; } /*! - Shortcut selector for \f$C_4\f$. - */ + * Shortcut selector for \f$C_4\f$. + */ static unsigned int selectC4() { return 1 << 3; } /*! - Shortcut selector for \f$C_5\f$. - */ + * Shortcut selector for \f$C_5\f$. + */ static unsigned int selectC5() { return 1 << 4; } /*! - Shortcut selector for \f$C_6\f$. - */ + * Shortcut selector for \f$C_6\f$. + */ static unsigned int selectC6() { return 1 << 5; } /*! - Shortcut selector for \f$C_7\f$. - */ + * Shortcut selector for \f$C_7\f$. + */ static unsigned int selectC7() { return 1 << 6; } /*! - Shortcut selector for \f$C_8\f$. - */ + * Shortcut selector for \f$C_8\f$. + */ static unsigned int selectC8() { return 1 << 7; } /*! - Shortcut selector for \f$C_9\f$. - */ + * Shortcut selector for \f$C_9\f$. + */ static unsigned int selectC9() { return 1 << 8; } /*! - Shortcut selector for \f$C_{10}\f$. - */ + * Shortcut selector for \f$C_{10}\f$. + */ static unsigned int selectC10() { return 1 << 9; } /*! - Shortcut selector for \f$S_x\f$. - */ + * Shortcut selector for \f$S_x\f$. + */ static unsigned int selectSx() { return 1 << 10; } /*! - Shortcut selector for \f$S_y\f$. - */ + * Shortcut selector for \f$S_y\f$. + */ static unsigned int selectSy() { return 1 << 11; } /*! - Shortcut selector for \f$P_x\f$. - */ + * Shortcut selector for \f$P_x\f$. 
+ */ static unsigned int selectPx() { return 1 << 12; } /*! - Shortcut selector for \f$P_y\f$. - */ + * Shortcut selector for \f$P_y\f$. + */ static unsigned int selectPy() { return 1 << 13; } /*! - Print all the interaction matrices of the moment invariants + * Print all the interaction matrices of the moment invariants */ void printLsofInvariants(std::ostream &os) const; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h index 3e19e4324a..2733ea9d0b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * Manikandan Bakthavatchalam - *****************************************************************************/ + */ + /*! - \file vpFeatureMomentCentered.h - \brief Implementation of the interaction matrix computation for - vpMomentCentered. -*/ + * \file vpFeatureMomentCentered.h + * \brief Implementation of the interaction matrix computation for vpMomentCentered. + */ #ifndef _vpFeatureMomentCentered_h_ #define _vpFeatureMomentCentered_h_ @@ -48,41 +43,41 @@ class vpMomentDatabase; /*! - \class vpFeatureMomentCentered - - \ingroup group_visual_features - - \brief Functionality computation for centered moment feature. Computes the - interaction matrix associated with vpMomentCentered. - - The interaction matrix for the feature is defined in \cite Tahri05z, - equation (17). This vpFeatureMoment, as well as it's corresponding moment - primitive is double-indexed. The interaction matrix \f$ L_{\mu_{ij}} \f$ is - obtained by calling vpFeatureMomentBasic::interaction (i,j) and is - associated to \f$ \mu_{ij} \f$ obtained by vpFeatureMomentCentered::get - (i,j). - - vpFeatureMomentCentered computes interaction matrices all interaction - matrices up to vpMomentObject::getOrder()-1. \attention The maximum order - reached by vpFeatureMomentBasic is NOT the maximum order of the - vpMomentObject, it is one unit smaller. For example if you define your - vpMomentObject up to order n then vpFeatureMomentBasic will be able to - compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with - \f$ i+j<=n-1 \f$. - - This feature depends on: - - vpFeatureMomentBasic - - vpFeatureMomentGravityCenter - - vpMomentGravityCenter -*/ + * \class vpFeatureMomentCentered + * + * \ingroup group_visual_features + * + * \brief Functionality computation for centered moment feature. Computes the + * interaction matrix associated with vpMomentCentered. + * + * The interaction matrix for the feature is defined in \cite Tahri05z, + * equation (17). This vpFeatureMoment, as well as it's corresponding moment + * primitive is double-indexed. The interaction matrix \f$ L_{\mu_{ij}} \f$ is + * obtained by calling vpFeatureMomentBasic::interaction (i,j) and is + * associated to \f$ \mu_{ij} \f$ obtained by vpFeatureMomentCentered::get + * (i,j). + * + * vpFeatureMomentCentered computes interaction matrices all interaction + * matrices up to vpMomentObject::getOrder()-1. 
\attention The maximum order + * reached by vpFeatureMomentBasic is NOT the maximum order of the + * vpMomentObject, it is one unit smaller. For example if you define your + * vpMomentObject up to order n then vpFeatureMomentBasic will be able to + * compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with + * \f$ i+j<=n-1 \f$. + * + * This feature depends on: + * - vpFeatureMomentBasic + * - vpFeatureMomentGravityCenter + * - vpMomentGravityCenter + */ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment { protected: unsigned int order; /*! - Core computation of interaction matrix for moment m_pq - */ + * Core computation of interaction matrix for moment m_pq. + */ vpMatrix compute_Lmu_pq(const unsigned int &p, const unsigned int &q, const double &xg, const double &yg, const vpMatrix &L_xg, const vpMatrix &L_yg, const vpMomentBasic &m, const vpFeatureMomentBasic &feature_moment_m) const; @@ -90,31 +85,32 @@ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment public: vpFeatureMomentCentered(vpMomentDatabase &moments, double A, double B, double C, vpFeatureMomentDatabase *featureMoments = NULL); - void compute_interaction(); + void compute_interaction() override; #ifndef DOXYGEN_SHOULD_SKIP_THIS /* Add function due to pure virtual definition in vpBasicFeature.h */ - vpMatrix interaction(unsigned int /* select = FEATURE_ALL */) + vpMatrix interaction(unsigned int /* select = FEATURE_ALL */) override { throw vpException(vpException::functionNotImplementedError, "Not implemented!"); } #endif /*! - Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment - \param select_one : first index (i) - \param select_two : second index (j) - \return Interaction matrix corresponding to the moment - */ + * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment + * \param select_one : first index (i) + * \param select_two : second index (j) + * \return Interaction matrix corresponding to the moment + */ vpMatrix interaction(unsigned int select_one, unsigned int select_two) const; /*! - associated moment name - */ - const char *momentName() const { return "vpMomentCentered"; } + * Associated moment name + */ + const char *momentName() const override { return "vpMomentCentered"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentCentered"; } + * Feature name + */ + const char *name() const override { return "vpFeatureMomentCentered"; } friend VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpFeatureMomentCentered &v); }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCommon.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCommon.h index b5047577fc..39a26f163f 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCommon.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCommon.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -31,17 +30,13 @@ * Description: * Pre-filled pseudo-database used to handle dependencies between common *moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ /*! - \file vpFeatureMomentCommon.h - \brief Pre-filled pseudo-database used to handle dependencies between common - moment features. 
-*/ + * \file vpFeatureMomentCommon.h + * \brief Pre-filled pseudo-database used to handle dependencies between common + * moment features. + */ #ifndef _vpFeatureMomentCommon_h_ #define _vpFeatureMomentCommon_h_ @@ -59,168 +54,165 @@ class vpMomentDatabase; class vpServo; /*! - \class vpFeatureMomentCommon - - \ingroup group_visual_features - - \brief This class allows to access common vpFeatureMoments in a pre-filled -database. - - It is a vpMomentDatabase filled with the following moments: - - vpFeatureMomentGravityCenter - - vpFeatureMomentGravityCenterNormalized - - vpFeatureMomentAreaNormalized - - vpFeatureMomentCInvariant - - vpFeatureMomentAlpha - - vpFeatureMomentCentered - - vpFeatureMomentBasic - - - There is no need to do the linkTo operations manually nor is it necessary to -care about the order of feature computation. - - This class has an vpMomentCommon::updateAll method capable of updating the -plane parameters AND computing interaction matrices inside the features. - - The moment features computed by this class are classical moments - features used in moment-based visual servoing. For more - information see \cite Tahri05z. - - To initialize this feature set, the user needs to supply a vpMomentDatabase -containing at least the contents of vpMomentCommon. - - The features can be retrieved like from a normal vpFeatureMomentDatabase. -However, some shortcuts to retrieve the features are provided. - - \attention Make sure your object is at least of order 6 when using this -pre-filled database. - - The following code demonstrates the construction of a 6x6 interaction matrix -as described in [1]. -\code -#include -#include -#include -#include -#include -#include -#include -#include - -int main() -{ - // Define source polygon - vpPoint p; - std::vector vec_p; // vector that contains the vertices of the contour polygon - - p.set_x(-0.2); p.set_y(0.1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(+0.3); p.set_y(0.1); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(+0.2); p.set_y(-0.1); // coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(-0.2); p.set_y(-0.15); // coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - p.set_x(-0.2); p.set_y(0.1); // close the contour (vertex 5 = vertex 1) - vec_p.push_back(p); - - - vpMomentObject src(6); // Create a source moment object with 6 as maximum order - src.setType(vpMomentObject::DENSE_POLYGON); // The object is defined by a countour polygon - src.fromVector(vec_p); // Init the dense object with the source polygon - vec_p.clear(); - - //Define destination polygon. 
This is the source polygon translated - //of 0.1 on x-axis - p.set_x(-0.1); p.set_y(0.1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(+0.4); p.set_y(0.1); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(+0.3); p.set_y(-0.1); // coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(-0.1); p.set_y(-0.15); // coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - p.set_x(-0.1); p.set_y(0.1); // close the contour (vertex 5 = vertex 1) - vec_p.push_back(p); - - vpMomentObject dst(6); // Create a destination moment object with 6 as maximum order - dst.setType(vpMomentObject::DENSE_POLYGON); // The object is defined by a countour polygon - dst.fromVector(vec_p); // Init the dense object with the destination - // polygon - - - //init classic moment primitives (for source) - vpMomentCommon mdb_src(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),1.); - //Init classic features - vpFeatureMomentCommon fmdb_src(mdb_src); - - ////init classic moment primitives (for destination) - vpMomentCommon mdb_dst(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),1.); - //Init classic features - vpFeatureMomentCommon fmdb_dst(mdb_dst); - - //update+compute moment primitives from object (for source) - mdb_src.updateAll(src); - //update+compute features (+interaction matrices) from plane - fmdb_src.updateAll(0.,0.,1.); - - //update+compute moment primitives from object (for destination) - mdb_dst.updateAll(dst); - //update+compute features (+interaction matrices) from plane - fmdb_dst.updateAll(0.,0.,1.); - - //define visual servoing task - vpServo task; - task.setServo(vpServo::EYEINHAND_CAMERA); - task.setInteractionMatrixType(vpServo::CURRENT); - - //Add all classic features to the task - //In this example, source and destination features are translated by 0.1 - //will produce a movement of 0.1 on x-axis. - task.addFeature(fmdb_src.getFeatureGravityNormalized(),fmdb_dst.getFeatureGravityNormalized()); - task.addFeature(fmdb_src.getFeatureAn(),fmdb_dst.getFeatureAn()); - //the object is NOT symmetric - //select C4 and C6 - task.addFeature(fmdb_src.getFeatureCInvariant(),fmdb_dst.getFeatureCInvariant(),(1 << 3) | (1 << 5)); - task.addFeature(fmdb_src.getFeatureAlpha(),fmdb_dst.getFeatureAlpha()); - - task.setLambda(1) ; - vpColVector v = task.computeControlLaw() ; - - task.print(); - - return 0; -} - \endcode -This code produces the following output: -\code -Visual servoing task: -Type of control law -Eye-in-hand configuration -Control in the camera frame -List of visual features : s -0.0166667,-0.00833333, -1, --0.312148,0.0249916, --1.43449, -List of desired visual features : s* -0.116667,-0.00833333, -1, --0.312148,0.0249916, --1.43449, -Interaction Matrix Ls --1 0 -6.938893904e-18 0.007291666667 -1.06875 -0.008333333333 -0 -1 3.469446952e-18 1.0171875 -0.007291666667 -0.01666666667 -0 0 -1 0.0125 0.025 0 -0 0 -4.585529113e-15 -0.2983860943 0.5832596643 -4.376751552e-16 -0 0 -3.58244462e-15 0.08633028234 -0.2484618767 3.63421192e-16 -4.353086256e-17 -1.339411156e-16 -0 -0.03019436997 -0.0168230563 -1 -Error vector (s-s*) --0.1 0 0 1.831867991e-15 -1.072059108e-15 0 -Gain : Zero= 1 Inf= 1 Deriv= 0 - -\endcode -*/ + * \class vpFeatureMomentCommon + * + * \ingroup group_visual_features + * + * \brief This class allows to access common vpFeatureMoments in a pre-filled + * database. 
+ * + * It is a vpMomentDatabase filled with the following moments: + * - vpFeatureMomentGravityCenter + * - vpFeatureMomentGravityCenterNormalized + * - vpFeatureMomentAreaNormalized + * - vpFeatureMomentCInvariant + * - vpFeatureMomentAlpha + * - vpFeatureMomentCentered + * - vpFeatureMomentBasic + * + * There is no need to do the linkTo operations manually nor is it necessary to + * care about the order of feature computation. + * + * This class has an vpMomentCommon::updateAll method capable of updating the + * plane parameters AND computing interaction matrices inside the features. + * + * The moment features computed by this class are classical moments + * features used in moment-based visual servoing. For more + * information see \cite Tahri05z. + * + * To initialize this feature set, the user needs to supply a vpMomentDatabase + * containing at least the contents of vpMomentCommon. + * + * The features can be retrieved like from a normal vpFeatureMomentDatabase. + * However, some shortcuts to retrieve the features are provided. + * + * \attention Make sure your object is at least of order 6 when using this + * pre-filled database. + * + * The following code demonstrates the construction of a 6x6 interaction matrix + * as described in [1]. + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * // Define source polygon + * vpPoint p; + * std::vector vec_p; // vector that contains the vertices of the contour polygon + * + * p.set_x(-0.2); p.set_y(0.1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(+0.3); p.set_y(0.1); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * p.set_x(+0.2); p.set_y(-0.1); // coordinates in meters in the image plane (vertex 3) + * vec_p.push_back(p); + * p.set_x(-0.2); p.set_y(-0.15); // coordinates in meters in the image plane (vertex 4) + * vec_p.push_back(p); + * p.set_x(-0.2); p.set_y(0.1); // close the contour (vertex 5 = vertex 1) + * vec_p.push_back(p); + * + * vpMomentObject src(6); // Create a source moment object with 6 as maximum order + * src.setType(vpMomentObject::DENSE_POLYGON); // The object is defined by a contour polygon + * src.fromVector(vec_p); // Init the dense object with the source polygon + * vec_p.clear(); + * + * //Define destination polygon. 
This is the source polygon translated + * //of 0.1 on x-axis + * p.set_x(-0.1); p.set_y(0.1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(+0.4); p.set_y(0.1); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * p.set_x(+0.3); p.set_y(-0.1); // coordinates in meters in the image plane (vertex 3) + * vec_p.push_back(p); + * p.set_x(-0.1); p.set_y(-0.15); // coordinates in meters in the image plane (vertex 4) + * vec_p.push_back(p); + * p.set_x(-0.1); p.set_y(0.1); // close the contour (vertex 5 = vertex 1) + * vec_p.push_back(p); + * + * vpMomentObject dst(6); // Create a destination moment object with 6 as maximum order + * dst.setType(vpMomentObject::DENSE_POLYGON); // The object is defined by a contour polygon + * dst.fromVector(vec_p); // Init the dense object with the destination + * // polygon + * + * //init classic moment primitives (for source) + * vpMomentCommon mdb_src(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),1.); + * //Init classic features + * vpFeatureMomentCommon fmdb_src(mdb_src); + * + * ////init classic moment primitives (for destination) + * vpMomentCommon mdb_dst(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),1.); + * //Init classic features + * vpFeatureMomentCommon fmdb_dst(mdb_dst); + * + * //update+compute moment primitives from object (for source) + * mdb_src.updateAll(src); + * //update+compute features (+interaction matrices) from plane + * fmdb_src.updateAll(0.,0.,1.); + * + * //update+compute moment primitives from object (for destination) + * mdb_dst.updateAll(dst); + * //update+compute features (+interaction matrices) from plane + * fmdb_dst.updateAll(0.,0.,1.); + * + * //define visual servoing task + * vpServo task; + * task.setServo(vpServo::EYEINHAND_CAMERA); + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * //Add all classic features to the task + * //In this example, source and destination features are translated by 0.1 + * //will produce a movement of 0.1 on x-axis. 
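+ * //Each call below pairs a current feature with its desired counterpart;
+ * //the (1 << 3) | (1 << 5) selector used for the C-invariant is equivalent to
+ * //vpFeatureMomentCInvariant::selectC4() | vpFeatureMomentCInvariant::selectC6().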
+ * task.addFeature(fmdb_src.getFeatureGravityNormalized(),fmdb_dst.getFeatureGravityNormalized()); + * task.addFeature(fmdb_src.getFeatureAn(),fmdb_dst.getFeatureAn()); + * //the object is NOT symmetric + * //select C4 and C6 + * task.addFeature(fmdb_src.getFeatureCInvariant(),fmdb_dst.getFeatureCInvariant(),(1 << 3) | (1 << 5)); + * task.addFeature(fmdb_src.getFeatureAlpha(),fmdb_dst.getFeatureAlpha()); + * + * task.setLambda(1) ; + * vpColVector v = task.computeControlLaw() ; + * + * task.print(); + * + * return 0; + * } + * \endcode + * This code produces the following output: + * \code + * Visual servoing task: + * Type of control law + * Eye-in-hand configuration + * Control in the camera frame + * List of visual features : s + * 0.0166667,-0.00833333, + * 1, + * -0.312148,0.0249916, + * -1.43449, + * List of desired visual features : s* + * 0.116667,-0.00833333, + * 1, + * -0.312148,0.0249916, + * -1.43449, + * Interaction Matrix Ls + * -1 0 -6.938893904e-18 0.007291666667 -1.06875 -0.008333333333 + * 0 -1 3.469446952e-18 1.0171875 -0.007291666667 -0.01666666667 + * 0 0 -1 0.0125 0.025 0 + * 0 0 -4.585529113e-15 -0.2983860943 0.5832596643 -4.376751552e-16 + * 0 0 -3.58244462e-15 0.08633028234 -0.2484618767 3.63421192e-16 + * 4.353086256e-17 -1.339411156e-16 -0 -0.03019436997 -0.0168230563 -1 + * Error vector (s-s*) + * -0.1 0 0 1.831867991e-15 -1.072059108e-15 0 + * Gain : Zero= 1 Inf= 1 Deriv= 0 + * + * \endcode + */ class VISP_EXPORT vpFeatureMomentCommon : public vpFeatureMomentDatabase { private: @@ -237,38 +229,41 @@ class VISP_EXPORT vpFeatureMomentCommon : public vpFeatureMomentDatabase vpFeatureMomentCommon(vpMomentDatabase &moments, double A = 0.0, double B = 0.0, double C = 1.0); void updateAll(double A, double B, double C); /*! - Returns alpha. - */ + * Returns alpha. + */ vpFeatureMomentAlpha &getFeatureAlpha() { return featureAlpha; } /*! - Returns normalized surface. - */ + * Returns normalized surface. + */ vpFeatureMomentAreaNormalized &getFeatureAn() { return featureAn; } /*! - Returns basic moment. - */ + * Returns basic moment. + */ vpFeatureMomentBasic &getFeatureMomentBasic() { return featureMomentBasic; } /*! - Returns centered moments. - */ + * Returns centered moments. + */ vpFeatureMomentCentered &getFeatureCentered() { return featureCentered; } /*! - Returns non-symmetric invariants. + * Returns non-symmetric invariants. */ vpFeatureMomentCInvariant &getFeatureCInvariant() { return featureCInvariant; } + /*! - Returns normalized gravity center. - */ + * Returns normalized gravity center. + */ vpFeatureMomentGravityCenterNormalized &getFeatureGravityNormalized() { return featureGravityNormalized; } + /*! - Returns the area - */ + * Returns the area. + */ vpFeatureMomentArea &getFeatureArea() { return feature_moment_area; } + /*! - Returns gravity center - */ + * Returns gravity center. + */ vpFeatureMomentGravityCenter &getFeatureGravityCenter() { return featureGravity; } }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h index 6ee0450bc1..c4856154eb 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. 
All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Pseudo-database used to handle dependencies between moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ /*! - \file vpFeatureMomentDatabase.h - \brief Pseudo-database used to handle dependencies between moment features. -*/ + * \file vpFeatureMomentDatabase.h + * \brief Pseudo-database used to handle dependencies between moment features. + */ #ifndef _vpFeatureMomentDatabase_h_ #define _vpFeatureMomentDatabase_h_ @@ -52,112 +47,113 @@ class vpFeatureMoment; class vpMomentObject; /*! - \class vpFeatureMomentDatabase - - \ingroup group_visual_features - - \brief This class allows to register all feature moments (implemented in -vpFeatureMoment... classes) so they can access each other according to their -dependencies. - - Like moments (implemented in vpMoment... classes), a vpFeatureMoment needs -to have access to other vpFeatureMoment's values to be computed. In most -cases, a vpFeatureMoment needs both: vpMoments and vpFeatureMoments which -explains the two databases (see vpFeatureMoment::vpFeatureMoment). For example -vpFeatureMomentAlpha needs additionnal information about centered moments -vpMomentCentered AND their interaction matrices obtained by -vpFeatureMomentCentered in order to compute the moment's value from a -vpMomentObject. Like the vpMomentCentered is stored in a vpMomentDatabase, the -vpFeatureMomentCentered should be stored in a vpFeatureMomentDatabase. - - All moment features in a database can access each other freely at any time. -They can also verify if a moment feature is present in the database or not. - This code illustrates the use of both databases to handle dependencies -between moment primitives and moment features: - -\code -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -int main() -{ - try { - - vpPoint p; - std::vector vec_p; // vector that contains the vertices - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - vpMomentObject obj(6); // Init object of order 6 because we are - // computing C-invariants - obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object - obj.fromVector(vec_p); - - vpMomentDatabase mdb; // database for moment primitives. This will - // only contain the basic moment. 
- vpMomentCentered mc; // Centered moment - vpMomentBasic bm; // Basic moment - vpMomentGravityCenter gc; // gravity center - vpMomentCInvariant ci; // C-type invariant - - bm.linkTo(mdb); //add basic moment to moment database - mc.linkTo(mdb); //add centered moment to moment database - gc.linkTo(mdb); //add gravity center to moment database - ci.linkTo(mdb); //add C-invariant to moment database - - vpFeatureMomentDatabase fmdb; // feature moment database to store - // feature dependencies - - // Declare and link moments to database - vpFeatureMomentBasic fmb(mdb,0.,0.,1.,&fmdb); fmb.linkTo(fmdb); - vpFeatureMomentCentered fmc(mdb,0.,0.,1.,&fmdb); fmc.linkTo(fmdb); - vpFeatureMomentCInvariant fci(mdb,0.,0.,1.,&fmdb); fci.linkTo(fmdb); - - // update the whole moment database - mdb.updateAll(obj); - - // Compute moments in the correct order with the object - bm.compute(); - gc.compute(); - mc.compute(); - ci.compute(); - - // update the whole feature moment database with a plane - fmb.update(0.,0.,1.); - fmc.update(0.,0.,1.); - fci.update(0.,0.,1.); - - std::cout << fci.interaction(vpFeatureMomentCInvariant::selectC1()) << std::endl; - } - catch(const vpException &e){ - std::cout << e.getMessage() << std::endl; - } - - return 0; -} -\endcode -*/ + * \class vpFeatureMomentDatabase + * + * \ingroup group_visual_features + * + * \brief This class allows to register all feature moments (implemented in + * vpFeatureMoment... classes) so they can access each other according to their + * dependencies. + * + * Like moments (implemented in vpMoment... classes), a vpFeatureMoment needs + * to have access to other vpFeatureMoment's values to be computed. In most + * cases, a vpFeatureMoment needs both: vpMoments and vpFeatureMoments which + * explains the two databases (see vpFeatureMoment::vpFeatureMoment). For example + * vpFeatureMomentAlpha needs additional information about centered moments + * vpMomentCentered AND their interaction matrices obtained by + * vpFeatureMomentCentered in order to compute the moment's value from a + * vpMomentObject. Like the vpMomentCentered is stored in a vpMomentDatabase, the + * vpFeatureMomentCentered should be stored in a vpFeatureMomentDatabase. + * + * All moment features in a database can access each other freely at any time. + * They can also verify if a moment feature is present in the database or not. + * This code illustrates the use of both databases to handle dependencies + * between moment primitives and moment features: + * + * \code + * #include + * + * #include + * #include + * #include + * #include + * #include + * #include + * + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * try { + * + * vpPoint p; + * std::vector vec_p; // vector that contains the vertices + * + * p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * vpMomentObject obj(6); // Init object of order 6 because we are + * // computing C-invariants + * obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object + * obj.fromVector(vec_p); + * + * vpMomentDatabase mdb; // database for moment primitives. This will + * // only contain the basic moment. 
+ * vpMomentCentered mc; // Centered moment + * vpMomentBasic bm; // Basic moment + * vpMomentGravityCenter gc; // gravity center + * vpMomentCInvariant ci; // C-type invariant + * + * bm.linkTo(mdb); //add basic moment to moment database + * mc.linkTo(mdb); //add centered moment to moment database + * gc.linkTo(mdb); //add gravity center to moment database + * ci.linkTo(mdb); //add C-invariant to moment database + * + * vpFeatureMomentDatabase fmdb; // feature moment database to store + * // feature dependencies + * + * // Declare and link moments to database + * vpFeatureMomentBasic fmb(mdb,0.,0.,1.,&fmdb); fmb.linkTo(fmdb); + * vpFeatureMomentCentered fmc(mdb,0.,0.,1.,&fmdb); fmc.linkTo(fmdb); + * vpFeatureMomentCInvariant fci(mdb,0.,0.,1.,&fmdb); fci.linkTo(fmdb); + * + * // update the whole moment database + * mdb.updateAll(obj); + * + * // Compute moments in the correct order with the object + * bm.compute(); + * gc.compute(); + * mc.compute(); + * ci.compute(); + * + * // update the whole feature moment database with a plane + * fmb.update(0.,0.,1.); + * fmc.update(0.,0.,1.); + * fci.update(0.,0.,1.); + * + * std::cout << fci.interaction(vpFeatureMomentCInvariant::selectC1()) << std::endl; + * } + * catch(const vpException &e){ + * std::cout << e.getMessage() << std::endl; + * } + * + * return 0; + * } + * \endcode + */ class VISP_EXPORT vpFeatureMomentDatabase { private: - struct vpCmpStr_t { + struct vpCmpStr_t + { bool operator()(const char *a, const char *b) const { return std::strcmp(a, b) < 0; } char *operator=(const char *) { return NULL; } // Only to avoid a warning under Visual with /Wall flag }; @@ -166,13 +162,15 @@ class VISP_EXPORT vpFeatureMomentDatabase public: /*! - Default constructor. - */ - vpFeatureMomentDatabase() : featureMomentsDataBase() {} + * Default constructor. + */ + vpFeatureMomentDatabase() : featureMomentsDataBase() { } + /*! - Virtual destructor that does nothing. - */ - virtual ~vpFeatureMomentDatabase() {} + * Virtual destructor that does nothing. + */ + virtual ~vpFeatureMomentDatabase() { } + virtual void updateAll(double A = 0.0, double B = 0.0, double C = 1.0); vpFeatureMoment &get(const char *type, bool &found); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h index 2a5cea3d14..28c148890a 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentGravityCenter.h - \brief Implementation of the interaction matrix computation for - vpMomentGravityCenter. -*/ + * \file vpFeatureMomentGravityCenter.h + * \brief Implementation of the interaction matrix computation for + * vpMomentGravityCenter. + */ #ifndef _vpFeatureMomentGravityCenter_h_ #define _vpFeatureMomentGravityCenter_h_ @@ -48,212 +44,215 @@ #ifdef VISP_MOMENTS_COMBINE_MATRICES class vpMomentDatabase; /*! 
- \class vpFeatureMomentGravityCenter - - \ingroup group_visual_features - - \brief Functionality computation for gravity center moment feature. Computes -the interaction matrix associated with vpMomentGravityCenter. - - The interaction matrix for the is defined in \cite Tahri05z, equation (16). - It allows to compute the interaction matrices for \f$ (x_g,y_g) \f$. - - These interaction matrices may be selected afterwards by calling -vpFeatureMomentGravityCenter::interaction(). The selection is done by the -following methods: vpFeatureMomentGravityCenter::selectXg for \f$ L_{x_{g}} -\f$ and vpFeatureMomentGravityCenter::selectYg for \f$ L_{y_{g}} \f$. The -following code demonstrates a selection of \f$ L_{y_{g}} \f$: - - \code -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -int main() -{ - vpPoint p; - std::vector vec_p; // vector that contains the vertices - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - vpMomentObject obj(2); // Init object of order 2 because we need - // vpFeatureMomentBasic of order 1 (for vpFeatureMomentGravityCenter) which - // implies third-order moment primitives - obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object - obj.fromVector(vec_p); - - - vpMomentDatabase mdb; //database for moment primitives. This will - //only contain the basic moment. - vpMomentBasic bm; //basic moment (this particular moment is nothing - //more than a shortcut to the vpMomentObject) - vpMomentGravityCenter gc; //gravity center - - bm.linkTo(mdb); //add basic moment to moment database - gc.linkTo(mdb); //add gravity center to moment database - - vpFeatureMomentDatabase fmdb; //feature moment database to store - //feature dependencies - - //Declare and link moments to database - vpFeatureMomentBasic fmb(mdb,0.,0.,1.,&fmdb); fmb.linkTo(fmdb); - vpFeatureMomentGravityCenter fgc(mdb,0.,0.,1.,&fmdb); fgc.linkTo(fmdb); - - //update and compute the vpMomentBasic before computing vpMomentGravityCenter - bm.update(obj); - bm.compute(); - //update and compute the vpMomentGravityCenter before computing vpFeatureMomentBasic - gc.update(obj); - gc.compute(); - - fmb.update(0.,0.,1.); //update the vpFeatureMoment with a plane - //configuration and compute interaction matrix - - fgc.update(0.,0.,1.); //update the plane configuration for gravity - //center feature and compute it's associated matrix. - - std::cout << fgc.interaction(1 << 1) << std::endl; - - return 0; -} - \endcode - - This code produces the following output: - \code -0 -1 1.5 3.5 -2.5 -1.5 - \endcode - - You can also use the shortcut selectors -vpFeatureMomentGravityCenter::selectXg or -vpFeatureMomentGravityCenter::selectYg as follows: - - \code - task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), - vpFeatureMomentGravityCenter::selectXg() | vpFeatureMomentGravityCenter::selectYg()); - \endcode - This feature depends on: - - vpFeatureMomentBasic - - Minimum vpMomentObject order needed to compute this feature: 2. -*/ + * \class vpFeatureMomentGravityCenter + * + * \ingroup group_visual_features + * + * \brief Functionality computation for gravity center moment feature. Computes + * the interaction matrix associated with vpMomentGravityCenter. 
+ * + * The interaction matrix for the is defined in \cite Tahri05z, equation (16). + * It allows to compute the interaction matrices for \f$ (x_g,y_g) \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentGravityCenter::interaction(). The selection is done by the + * following methods: vpFeatureMomentGravityCenter::selectXg for \f$ L_{x_{g}} + * \f$ and vpFeatureMomentGravityCenter::selectYg for \f$ L_{y_{g}} \f$. The + * following code demonstrates a selection of \f$ L_{y_{g}} \f$: + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpPoint p; + * std::vector vec_p; // vector that contains the vertices + * + * p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * vpMomentObject obj(2); // Init object of order 2 because we need + * // vpFeatureMomentBasic of order 1 (for vpFeatureMomentGravityCenter) which + * // implies third-order moment primitives + * obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object + * obj.fromVector(vec_p); + * + * vpMomentDatabase mdb; //database for moment primitives. This will + * //only contain the basic moment. + * vpMomentBasic bm; //basic moment (this particular moment is nothing + * //more than a shortcut to the vpMomentObject) + * vpMomentGravityCenter gc; //gravity center + * + * bm.linkTo(mdb); //add basic moment to moment database + * gc.linkTo(mdb); //add gravity center to moment database + * + * vpFeatureMomentDatabase fmdb; //feature moment database to store + * //feature dependencies + * + * //Declare and link moments to database + * vpFeatureMomentBasic fmb(mdb,0.,0.,1.,&fmdb); fmb.linkTo(fmdb); + * vpFeatureMomentGravityCenter fgc(mdb,0.,0.,1.,&fmdb); fgc.linkTo(fmdb); + * + * //update and compute the vpMomentBasic before computing vpMomentGravityCenter + * bm.update(obj); + * bm.compute(); + * //update and compute the vpMomentGravityCenter before computing vpFeatureMomentBasic + * gc.update(obj); + * gc.compute(); + * + * fmb.update(0.,0.,1.); //update the vpFeatureMoment with a plane + * //configuration and compute interaction matrix + * + * fgc.update(0.,0.,1.); //update the plane configuration for gravity + * //center feature and compute it's associated matrix. + * + * std::cout << fgc.interaction(1 << 1) << std::endl; + * + * return 0; + * } + * \endcode + * + * This code produces the following output: + * \code + * 0 -1 1.5 3.5 -2.5 -1.5 + * \endcode + * + * You can also use the shortcut selectors + * vpFeatureMomentGravityCenter::selectXg or + * vpFeatureMomentGravityCenter::selectYg as follows: + * + * \code + * task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), + * vpFeatureMomentGravityCenter::selectXg() | vpFeatureMomentGravityCenter::selectYg()); + * \endcode + * This feature depends on: + * - vpFeatureMomentBasic + * + * Minimum vpMomentObject order needed to compute this feature: 2. + */ class VISP_EXPORT vpFeatureMomentGravityCenter : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param database : Moment database. 
The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param database : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentGravityCenter(vpMomentDatabase &database, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 2) - { - } - void compute_interaction(); + { } + + void compute_interaction() override; + /*! - Associated moment name. - */ - const char *momentName() const { return "vpMomentGravityCenter"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentGravityCenter"; } + /*! - Feature name. - */ - const char *name() const { return "vpFeatureMomentGravityCenter"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentGravityCenter"; } /*! - Shortcut selector for \f$x_g\f$. - */ + * Shortcut selector for \f$x_g\f$. + */ static unsigned int selectXg() { return 1 << 0; } /*! - Shortcut selector for \f$y_g\f$. - */ + * Shortcut selector for \f$y_g\f$. + */ static unsigned int selectYg() { return 1 << 1; } }; #else class vpMomentDatabase; /*! - \class vpFeatureMomentGravityCenter - - \ingroup group_visual_features - - \brief Functionality computation for gravity center moment feature. Computes - the interaction matrix associated with vpMomentGravityCenter. - - The interaction matrix for the is defined in \cite Tahri05z, equation (16). - It allows to compute the interaction matrices for \f$ (x_g,y_g) \f$. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentGravityCenter::interaction(). The selection is done by the - following methods: vpFeatureMomentGravityCenter::selectXg for \f$ L_{x_{g}} - \f$ and vpFeatureMomentGravityCenter::selectYg for \f$ L_{y_{g}} \f$. - - You can use the selectors vpFeatureMomentGravityCenter::selectXg or - vpFeatureMomentGravityCenter::selectYg as follows: - - \code - task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), - vpFeatureMomentGravityCenter::selectXg() | vpFeatureMomentGravityCenter::selectYg()); - \endcode - This feature depends on: - - vpMomentCentered - - vpMomentGravityCenter - - Minimum vpMomentObject order needed to compute this feature: 2. -*/ + * \class vpFeatureMomentGravityCenter + * + * \ingroup group_visual_features + * + * \brief Functionality computation for gravity center moment feature. Computes + * the interaction matrix associated with vpMomentGravityCenter. 
+ * + * The interaction matrix for the is defined in \cite Tahri05z, equation (16). + * It allows to compute the interaction matrices for \f$ (x_g,y_g) \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentGravityCenter::interaction(). The selection is done by the + * following methods: vpFeatureMomentGravityCenter::selectXg for \f$ L_{x_{g}} + * \f$ and vpFeatureMomentGravityCenter::selectYg for \f$ L_{y_{g}} \f$. + * + * You can use the selectors vpFeatureMomentGravityCenter::selectXg or + * vpFeatureMomentGravityCenter::selectYg as follows: + * + * \code + * task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), + * vpFeatureMomentGravityCenter::selectXg() | vpFeatureMomentGravityCenter::selectYg()); + * \endcode + * This feature depends on: + * - vpMomentCentered + * - vpMomentGravityCenter + * + * Minimum vpMomentObject order needed to compute this feature: 2. + */ class VISP_EXPORT vpFeatureMomentGravityCenter : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentGravityCenter(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 2) - { - } - void compute_interaction(); + { } + + void compute_interaction() override; + /*! - Associated moment name. + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentGravityCenter"; } + + /*! + * Feature name. */ - const char *momentName() const { return "vpMomentGravityCenter"; } - /*! - Feature name. - */ - const char *name() const { return "vpFeatureMomentGravityCenter"; } + const char *name() const override { return "vpFeatureMomentGravityCenter"; } /*! - Shortcut selector for \f$x_g\f$. - */ + * Shortcut selector for \f$x_g\f$. + */ static unsigned int selectXg() { return 1 << 0; } /*! - Shortcut selector for \f$y_g\f$. - */ + * Shortcut selector for \f$y_g\f$. 
+ */ static unsigned int selectYg() { return 1 << 1; } }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h index dccfc21dcb..eb408e2b97 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentGravityCenterNormalized.h - \brief Implementation of the interaction matrix computation for - vpMomentGravityCenterNormalized. -*/ + * \file vpFeatureMomentGravityCenterNormalized.h + * \brief Implementation of the interaction matrix computation for + * vpMomentGravityCenterNormalized. + */ #ifndef _vpFeatureMomentGravityCenterNormalized_h_ #define _vpFeatureMomentGravityCenterNormalized_h_ @@ -48,230 +44,228 @@ #ifdef VISP_MOMENTS_COMBINE_MATRICES class vpMomentDatabase; /*! - \class vpFeatureMomentGravityCenterNormalized - - \ingroup group_visual_features - - \brief Functionality computation for centered and normalized moment feature. - Computes the interaction matrix associated with - vpMomentGravityCenterNormalized. - - The interaction matrix for the moment feature can be deduced from \cite - Tahri05z, equation (19). To do so, one must derive it and obtain a - combination of interaction matrices by using (1). It allows to compute the - interaction matrices for \f$ (x_n,y_n) \f$. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentGravityCenterNormalized::interaction. The selection is done - by the following methods: vpFeatureMomentGravityCenterNormalized::selectXn - for \f$ L_{x_{n}} \f$ and vpFeatureMomentGravityCenterNormalized::selectYn - for \f$ L_{y_{n}} \f$. You can use these shortcut selectors as follows: - - \code - task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), - vpFeatureMomentGravityCenterNormalized::selectXn() | - vpFeatureMomentGravityCenterNormalized::selectYn()); \endcode - - The behaviour of this feature is very similar to - vpFeatureMomentGravityCenter which also contains a sample code demonstrating - a selection. - - This feature is often used in moment-based visual servoing to control the - planar translation parameters. - - Minimum vpMomentObject order needed to compute this feature: 2 in dense mode - and 3 in discrete mode. - - This feature depends on: - - vpFeatureMomentGravityCenter - - vpMomentGravityCenter - - vpMomentAreaNormalized - - vpFeatureMomentAreaNormalized - -*/ + * \class vpFeatureMomentGravityCenterNormalized + * + * \ingroup group_visual_features + * + * \brief Functionality computation for centered and normalized moment feature. + * Computes the interaction matrix associated with + * vpMomentGravityCenterNormalized. + * + * The interaction matrix for the moment feature can be deduced from \cite + * Tahri05z, equation (19). To do so, one must derive it and obtain a + * combination of interaction matrices by using (1). 
It allows to compute the + * interaction matrices for \f$ (x_n,y_n) \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentGravityCenterNormalized::interaction. The selection is done + * by the following methods: vpFeatureMomentGravityCenterNormalized::selectXn + * for \f$ L_{x_{n}} \f$ and vpFeatureMomentGravityCenterNormalized::selectYn + * for \f$ L_{y_{n}} \f$. You can use these shortcut selectors as follows: + * + * \code + * task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), + * vpFeatureMomentGravityCenterNormalized::selectXn() | vpFeatureMomentGravityCenterNormalized::selectYn()); + * \endcode + * + * The behaviour of this feature is very similar to + * vpFeatureMomentGravityCenter which also contains a sample code demonstrating + * a selection. + * + * This feature is often used in moment-based visual servoing to control the + * planar translation parameters. + * + * Minimum vpMomentObject order needed to compute this feature: 2 in dense mode + * and 3 in discrete mode. + * + * This feature depends on: + * - vpFeatureMomentGravityCenter + * - vpMomentGravityCenter + * - vpMomentAreaNormalized + * - vpFeatureMomentAreaNormalized + */ class VISP_EXPORT vpFeatureMomentGravityCenterNormalized : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param database : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param database : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentGravityCenterNormalized(vpMomentDatabase &database, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 2) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! - associated moment name - */ - const char *momentName() const { return "vpMomentGravityCenterNormalized"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentGravityCenterNormalized"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentGravityCenterNormalized"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentGravityCenterNormalized"; } /*! - Shortcut selector for \f$x_n\f$. - */ + * Shortcut selector for \f$x_n\f$. + */ static unsigned int selectXn() { return 1 << 0; } /*! 
- Shortcut selector for \f$y_n\f$. - */ + * Shortcut selector for \f$y_n\f$. + */ static unsigned int selectYn() { return 1 << 1; } }; #else class vpMomentDatabase; /*! - \class vpFeatureMomentGravityCenterNormalized - - \ingroup group_visual_features - - \brief Functionality computation for centered and normalized moment feature. - Computes the interaction matrix associated with vpMomentGravityCenterNormalized. - - It computes the interaction matrices for \f$ (x_n,y_n) \f$. - The interaction matrix for the moment feature has the following expression: - - In the discrete case: - \f[ - L_{x_n} = - { - \left[ - \begin{array}{c} - -Ax_{{n}}\theta+ \left( x_{{n}}e_{{1,1}}-y_{{n}} \right) B-a_{{n}}C \\ - \noalign{\medskip}Ax_{{n}}e_{{1,1}}+Bx_{{n}} \theta \\ - \noalign{\medskip} - \left( - -a_{{n}}-w_{{y}} - \right) - A+Bw_{{x}} \\ - \noalign{\medskip}a_{{n}}e_{{1,1}}{\it NA}+ \left( \eta_{{1,0}}e_{{1,1}}+\eta_{{0,1}}-e_{{2,1}}-x_{{g}}e_{{1,1}}+\eta_{{0,1}}\theta \right) x_{{n}}+ \left( \eta_{{1,0}}-x_{{g}}\theta \right) y_{{n}}-{\frac {x_{{n}}\eta_{{0,3}}}{{\it NA}}} \\ - \noalign{\medskip} \left( -1+\theta \right) a_{{n}}{\it NA}+ \left( e_{{1,2}}+x_{{g}}-\eta_{{0,1}}e_{{1,1}}-2\,\eta_{{1,0}}+e_{{3,0}}+ \left( -x_{{g}}+\eta_{{1,0}} \right) \theta \right) x_{{n}}+e_{{1,1}}x_{{g}}y_{{n}}-a_{{n}} \\ - \noalign{\medskip}y_{{n}} - \end{array} - \right] - }^t - \f] - - \f[ - L_{y_n} = - { - \left[ - \begin{array}{c} - \left( 1-\theta \right) y_{{n}}A+y_{{n}}e_{{1,1}}B \\ - \noalign{\medskip} \left( -x_{{n}}+y_{{n}}e_{{1,1}} - \right) - A+ \left( -1+\theta \right) y_{{n}}B-a_{{n}}C \\ - \noalign{\medskip}-Aw_{{y}}+ \left( -a_{{n}}+w_{{x}} \right) B \\ - \noalign{\medskip}\theta\,a_{{n}}{\it NA}+ - \left( -e_{{2,1}}+\eta_{{1,0}}e_{{1,1}}+\eta_{{0,1}}-x_{{g}}e_{{1,1}}+ \left( \eta_{{0,1}}-y_{{g}} \right) \theta - \right) y_{{n}}+a_{{n}}-{\frac {y_{{n}}\eta_{{0,3}}}{{\it NA}}} \\ - \noalign{\medskip}-a_{{n}}e_{{1,1}}{\it NA}-x_{{n}}\eta_{{0,1}}+\left( e_{{1,2}}+y_{{g}}e_{{1,1}}-\eta_{{0,1}}e_{{1,1}}+x_{{g}}+e_{{3,0}}-2\,\eta_{{1,0}}+ \left( -x_{{g}}+\eta_{{1,0}} \right) \theta - \right) y_{{n}} \\ - \noalign{\medskip}-x_{{n}} - \end{array} - \right] - }^t - \f] - - In the dense case: - \f[ - L_{x_n} = - { - \left[ - \begin {array}{c} -a_{{n}}C-1/2\,Ax_{{n}}-By_{{n}} \\ - \noalign{\medskip}1/2\,Bx_{{n}} \\ - \noalign{\medskip} \left( -a_{{n}}-w_{{y}} \right) A+Bw_{{x}} \\ - \noalign{\medskip} \left( 4\,\eta_{{1,0}}-1/2\,x_{{g}} \right) y_{{n}}+4\,a_{{n}}\eta_{{1,1}}+4\,x_{{n}}\eta_{{0,1}} \\ - \noalign{\medskip} \left( -4\,\eta_{{1,0}}+1/2\,x_{{g}} \right) x_{{n}}+ \left( -1-4\,\eta_{{2,0}} \right) a_{{n}} \\ - \noalign{\medskip}y_{{n}}\end {array} - \right] - }^t - L_{y_n} = - { - \left[ - \begin {array}{c} - 1/2\,Ay_{{n}} \\ - \noalign{\medskip}-1/2\,By_{{n}}-a_{{n}}C-Ax_{{n}} \\ - \noalign{\medskip}-Aw_{{y}}+ \left( -a_{{n}}+w_{{x}} \right) B \\ - \noalign{\medskip}4\,\theta\,a_{{n}}{\it NA}+ \left( 4\,\eta_{{0,1}}-1/2\,y_{{g}} \right) y_{{n}}+a_{{n}} \\ - \noalign{\medskip} \left( -4\,\eta_{{1,0}}+1/2\,x_{{g}} \right) y_{{n}}-4\,a_{{n}}\eta_{{1,1}}-4\,x_{{n}}\eta_{{0,1}} \\ - \noalign{\medskip}-x_{{n}} - \end {array} - \right] - }^t - \f] - with: - - \f$e_{i,j}=\frac{\mu_{i,j}}{NA}\f$ - - \f$NA=\mu_{2,0}+\mu_{0,2}\f$ - - \f$\theta=\frac{\eta_{0,2}}{NA}\f$ - - \f$\eta\f$ is the centered and normalized moment. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentGravityCenterNormalized::interaction. 
The selection is done by - the following methods: vpFeatureMomentGravityCenterNormalized::selectXn for - \f$ L_{x_{n}} \f$ and vpFeatureMomentGravityCenterNormalized::selectYn for \f$L_{y_{n}} \f$. - You can use these shortcut selectors as follows: - - \code - task.addFeature(db_src.getFeatureGravityNormalized(),db_dst.getFeatureGravityNormalized(), - vpFeatureMomentGravityCenterNormalized::selectXn() | - vpFeatureMomentGravityCenterNormalized::selectYn()); - \endcode - - The behaviour of this feature is very similar to - vpFeatureMomentGravityCenter which also contains a sample code demonstrating a - selection. - - This feature is often used in moment-based visual servoing to control the - planar translation parameters. - - Minimum vpMomentObject order needed to compute this feature: 2 in dense mode - and 3 in discrete mode. - - This feature depends on: - - vpFeatureMomentGravityCenter - - vpMomentGravityCenter - - vpMomentAreaNormalized - - vpFeatureMomentAreaNormalized - -*/ + * \class vpFeatureMomentGravityCenterNormalized + * + * \ingroup group_visual_features + * + * \brief Functionality computation for centered and normalized moment feature. + * Computes the interaction matrix associated with vpMomentGravityCenterNormalized. + * + * It computes the interaction matrices for \f$ (x_n,y_n) \f$. + * The interaction matrix for the moment feature has the following expression: + * - In the discrete case: + * \f[ + * L_{x_n} = + * { + * \left[ + * \begin{array}{c} + * -Ax_{{n}}\theta+ \left( x_{{n}}e_{{1,1}}-y_{{n}} \right) B-a_{{n}}C \\ + * \noalign{\medskip}Ax_{{n}}e_{{1,1}}+Bx_{{n}} \theta \\ + * \noalign{\medskip} + * \left( + * -a_{{n}}-w_{{y}} + * \right) + * A+Bw_{{x}} \\ + * \noalign{\medskip}a_{{n}}e_{{1,1}}{\it NA}+ \left( \eta_{{1,0}}e_{{1,1}}+\eta_{{0,1}}-e_{{2,1}}-x_{{g}}e_{{1,1}}+\eta_{{0,1}}\theta \right) x_{{n}}+ \left( \eta_{{1,0}}-x_{{g}}\theta \right) y_{{n}}-{\frac {x_{{n}}\eta_{{0,3}}}{{\it NA}}} \\ + * \noalign{\medskip} \left( -1+\theta \right) a_{{n}}{\it NA}+ \left( e_{{1,2}}+x_{{g}}-\eta_{{0,1}}e_{{1,1}}-2\,\eta_{{1,0}}+e_{{3,0}}+ \left( -x_{{g}}+\eta_{{1,0}} \right) \theta \right) x_{{n}}+e_{{1,1}}x_{{g}}y_{{n}}-a_{{n}} \\ + * \noalign{\medskip}y_{{n}} + * \end{array} + * \right] + * }^t + * \f] + * + * \f[ + * L_{y_n} = + * { + * \left[ + * \begin{array}{c} + * \left( 1-\theta \right) y_{{n}}A+y_{{n}}e_{{1,1}}B \\ + * \noalign{\medskip} \left( -x_{{n}}+y_{{n}}e_{{1,1}} + * \right) + * A+ \left( -1+\theta \right) y_{{n}}B-a_{{n}}C \\ + * \noalign{\medskip}-Aw_{{y}}+ \left( -a_{{n}}+w_{{x}} \right) B \\ + * \noalign{\medskip}\theta\,a_{{n}}{\it NA}+ + * \left( -e_{{2,1}}+\eta_{{1,0}}e_{{1,1}}+\eta_{{0,1}}-x_{{g}}e_{{1,1}}+ \left( \eta_{{0,1}}-y_{{g}} \right) \theta + * \right) y_{{n}}+a_{{n}}-{\frac {y_{{n}}\eta_{{0,3}}}{{\it NA}}} \\ + * \noalign{\medskip}-a_{{n}}e_{{1,1}}{\it NA}-x_{{n}}\eta_{{0,1}}+\left( e_{{1,2}}+y_{{g}}e_{{1,1}}-\eta_{{0,1}}e_{{1,1}}+x_{{g}}+e_{{3,0}}-2\,\eta_{{1,0}}+ \left( -x_{{g}}+\eta_{{1,0}} \right) \theta + * \right) y_{{n}} \\ + * \noalign{\medskip}-x_{{n}} + * \end{array} + * \right] + * }^t + * \f] + * + * - In the dense case: + * \f[ + * L_{x_n} = + * { + * \left[ + * \begin {array}{c} -a_{{n}}C-1/2\,Ax_{{n}}-By_{{n}} \\ + * \noalign{\medskip}1/2\,Bx_{{n}} \\ + * \noalign{\medskip} \left( -a_{{n}}-w_{{y}} \right) A+Bw_{{x}} \\ + * \noalign{\medskip} \left( 4\,\eta_{{1,0}}-1/2\,x_{{g}} \right) y_{{n}}+4\,a_{{n}}\eta_{{1,1}}+4\,x_{{n}}\eta_{{0,1}} \\ + * \noalign{\medskip} \left( -4\,\eta_{{1,0}}+1/2\,x_{{g}} \right) x_{{n}}+ \left( 
-1-4\,\eta_{{2,0}} \right) a_{{n}} \\ + * \noalign{\medskip}y_{{n}}\end {array} + * \right] + * }^t + * L_{y_n} = + * { + * \left[ + * \begin {array}{c} + * 1/2\,Ay_{{n}} \\ + * \noalign{\medskip}-1/2\,By_{{n}}-a_{{n}}C-Ax_{{n}} \\ + * \noalign{\medskip}-Aw_{{y}}+ \left( -a_{{n}}+w_{{x}} \right) B \\ + * \noalign{\medskip}4\,\theta\,a_{{n}}{\it NA}+ \left( 4\,\eta_{{0,1}}-1/2\,y_{{g}} \right) y_{{n}}+a_{{n}} \\ + * \noalign{\medskip} \left( -4\,\eta_{{1,0}}+1/2\,x_{{g}} \right) y_{{n}}-4\,a_{{n}}\eta_{{1,1}}-4\,x_{{n}}\eta_{{0,1}} \\ + * \noalign{\medskip}-x_{{n}} + * \end {array} + * \right] + * }^t + * \f] + * with: + * - \f$e_{i,j}=\frac{\mu_{i,j}}{NA}\f$ + * - \f$NA=\mu_{2,0}+\mu_{0,2}\f$ + * - \f$\theta=\frac{\eta_{0,2}}{NA}\f$ + * - \f$\eta\f$ is the centered and normalized moment. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentGravityCenterNormalized::interaction. The selection is done by + * the following methods: vpFeatureMomentGravityCenterNormalized::selectXn for + * \f$ L_{x_{n}} \f$ and vpFeatureMomentGravityCenterNormalized::selectYn for \f$L_{y_{n}} \f$. + * You can use these shortcut selectors as follows: + * + * \code + * task.addFeature(db_src.getFeatureGravityNormalized(),db_dst.getFeatureGravityNormalized(), + * vpFeatureMomentGravityCenterNormalized::selectXn() | vpFeatureMomentGravityCenterNormalized::selectYn()); + * \endcode + * + * The behaviour of this feature is very similar to + * vpFeatureMomentGravityCenter which also contains a sample code demonstrating a + * selection. + * + * This feature is often used in moment-based visual servoing to control the + * planar translation parameters. + * + * Minimum vpMomentObject order needed to compute this feature: 2 in dense mode + * and 3 in discrete mode. + * + * This feature depends on: + * - vpFeatureMomentGravityCenter + * - vpMomentGravityCenter + * - vpMomentAreaNormalized + * - vpFeatureMomentAreaNormalized + */ class VISP_EXPORT vpFeatureMomentGravityCenterNormalized : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. 
+ */ vpFeatureMomentGravityCenterNormalized(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 2) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! * Associated moment name. */ - const char *momentName() const { return "vpMomentGravityCenterNormalized"; } + const char *momentName() const override { return "vpMomentGravityCenterNormalized"; } + /*! - * feature name + * Feature name. */ - const char *name() const { return "vpFeatureMomentGravityCenterNormalized"; } + const char *name() const override { return "vpFeatureMomentGravityCenterNormalized"; } /*! * Shortcut selector for \f$x_n\f$. @@ -279,7 +273,7 @@ class VISP_EXPORT vpFeatureMomentGravityCenterNormalized : public vpFeatureMomen static unsigned int selectXn() { return 1 << 0; } /*! - * Shortcut selector for \f$y_n\f$. + * Shortcut selector for \f$y_n\f$. */ static unsigned int selectYn() { return 1 << 1; } }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeaturePoint.h b/modules/visual_features/include/visp3/visual_features/vpFeaturePoint.h index 6d55e9faff..5949fab755 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeaturePoint.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeaturePoint.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D point visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeaturePoint_H #define vpFeaturePoint_H /*! - \file vpFeaturePoint.h - \brief Class that defines 2D point visual feature -*/ + * \file vpFeaturePoint.h + * \brief Class that defines 2D point visual feature + */ #include #include @@ -49,132 +47,130 @@ #include /*! - \class vpFeaturePoint - \ingroup group_visual_features - - \brief Class that defines a 2D point visual feature \f$ s\f$ which - is composed by two parameters that are the cartesian coordinates \f$ - x \f$ and \f$ y \f$. - - In this class \f$ x \f$ and \f$ y \f$ are the 2D coordinates in the - image plane and are given in meter. \f$ Z \f$ which is the 3D - coordinate representing the depth is also a parameter of the - point. It is needed during the computation of the interaction matrix - \f$ L \f$. - - The visual features can be set easily from an instance of the - classes vpPoint, vpDot or vpDot2. For more precision see the - vpFeatureBuilder class. - - Once the values of the visual features are set, the interaction() - method allows to compute the interaction matrix \f$ L \f$ associated - to the visual feature, while the error() method computes the error - vector \f$(s - s^*)\f$ between the current visual feature and the - desired one. - - The code below shows how to create a eye-in hand visual servoing - task using a 2D point feature \f$(x,y)\f$ that correspond to the 2D - coordinates of a point in the image plane. To control six degrees - of freedom, at least four other features must be considered like two - other point features for example. First we create a current - (\f$s\f$) 2D point feature. Then we set the task to use the - interaction matrix associated to the current feature \f$L_s\f$. And - finally we compute the camera velocity \f$v=-\lambda \; L_s^+ \; - (s-s^*)\f$. 
The current feature \f$s\f$ is updated in the while() - loop. - - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpFeaturePoint sd; //The desired point feature. - //Set the desired features x and y - double xd = 0; - double yd = 0; - //Set the depth of the point in the camera frame. - double Zd = 1; - //Set the point feature thanks to the desired parameters. - sd.buildFrom(xd, yd, Zd); - - vpFeaturePoint s; //The current point feature. - //Set the current features x and y - double x; //You have to compute the value of x. - double y; //You have to compute the value of y. - double Z; //You have to compute the value of Z. - //Set the point feature thanks to the current parameters. - s.buildFrom(x, y, Z); - //In this case the parameter Z is not necessary because the interaction matrix is computed - //with the desired visual feature. - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the desired visual features sd - task.setInteractionMatrixType(vpServo::DESIRED); - - // Add the 2D point feature to the task - task.addFeature(s, sd); - - // Control loop - for ( ; ; ) { - // The new parameters x and y must be computed here. - - // Update the current point visual feature - s.buildFrom(x, y, Z); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - - If you want to build your own control law, this other example shows how - to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual - feature, compute the corresponding error vector \f$(s-s^*)\f$ and finally - build the interaction matrix \f$L_s\f$. - - \code -#include -#include - -int main() -{ - vpFeaturePoint sd; //The desired point feature. - //Set the desired features x and y - double xd = 0; - double yd = 0; - //Set the depth of the point in the camera frame. - double Zd = 1; - //Set the point feature thanks to the desired parameters. - sd.buildFrom(xd, yd, Zd); - - vpFeaturePoint s; //The current point feature. - //Set the current features x and y - double x; //You have to compute the value of x. - double y; //You have to compute the value of y. - double Z; //You have to compute the value of Z. - //Set the point feature thanks to the current parameters. - s.buildFrom(x, y, Z); - - // Compute the interaction matrix L_s for the current point feature - vpMatrix L = s.interaction(); - // You can also compute the interaction matrix L_s for the desired point feature - // The corresponding line of code is : vpMatrix L = sd.interaction(); - - // Compute the error vector (s-sd) for the point feature - s.error(s_star); -} - \endcode - - An other fully explained example is given in the \ref tutorial-ibvs. - -*/ - + * \class vpFeaturePoint + * \ingroup group_visual_features + * + * \brief Class that defines a 2D point visual feature \f$ s\f$ which + * is composed by two parameters that are the cartesian coordinates \f$ + * x \f$ and \f$ y \f$. + * + * In this class \f$ x \f$ and \f$ y \f$ are the 2D coordinates in the + * image plane and are given in meter. \f$ Z \f$ which is the 3D + * coordinate representing the depth is also a parameter of the + * point. It is needed during the computation of the interaction matrix + * \f$ L \f$. + * + * The visual features can be set easily from an instance of the + * classes vpPoint, vpDot or vpDot2. For more precision see the + * vpFeatureBuilder class. 
+ * + * Once the values of the visual features are set, the interaction() + * method allows to compute the interaction matrix \f$ L \f$ associated + * to the visual feature, while the error() method computes the error + * vector \f$(s - s^*)\f$ between the current visual feature and the + * desired one. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 2D point feature \f$(x,y)\f$ that correspond to the 2D + * coordinates of a point in the image plane. To control six degrees + * of freedom, at least four other features must be considered like two + * other point features for example. First we create a current + * (\f$s\f$) 2D point feature. Then we set the task to use the + * interaction matrix associated to the current feature \f$L_s\f$. And + * finally we compute the camera velocity \f$v=-\lambda \; L_s^+ \; + * (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() + * loop. + * + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpFeaturePoint sd; //The desired point feature. + * // Set the desired features x and y + * double xd = 0; + * double yd = 0; + * // Set the depth of the point in the camera frame. + * double Zd = 1; + * // Set the point feature thanks to the desired parameters. + * sd.buildFrom(xd, yd, Zd); + * + * vpFeaturePoint s; //The current point feature. + * // Set the current features x and y + * double x; // You have to compute the value of x. + * double y; // You have to compute the value of y. + * double Z; // You have to compute the value of Z. + * // Set the point feature thanks to the current parameters. + * s.buildFrom(x, y, Z); + * // In this case the parameter Z is not necessary because the interaction matrix is computed + * // with the desired visual feature. + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the desired visual features sd + * task.setInteractionMatrixType(vpServo::DESIRED); + * + * // Add the 2D point feature to the task + * task.addFeature(s, sd); + * + * // Control loop + * for ( ; ; ) { + * // The new parameters x and y must be computed here. + * + * // Update the current point visual feature + * s.buildFrom(x, y, Z); + * + * // Compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * If you want to build your own control law, this other example shows how + * to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual + * feature, compute the corresponding error vector \f$(s-s^*)\f$ and finally + * build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * + * int main() + * { + * vpFeaturePoint sd; //The desired point feature. + * // Set the desired features x and y + * double xd = 0; + * double yd = 0; + * // Set the depth of the point in the camera frame. + * double Zd = 1; + * // Set the point feature thanks to the desired parameters. + * sd.buildFrom(xd, yd, Zd); + * + * vpFeaturePoint s; //The current point feature. + * // Set the current features x and y + * double x; // You have to compute the value of x. + * double y; // You have to compute the value of y. + * double Z; // You have to compute the value of Z. + * // Set the point feature thanks to the current parameters. 
+ * s.buildFrom(x, y, Z); + * + * // Compute the interaction matrix L_s for the current point feature + * vpMatrix L = s.interaction(); + * // You can also compute the interaction matrix L_s for the desired point feature + * // The corresponding line of code is : vpMatrix L = sd.interaction(); + * + * // Compute the error vector (s-sd) for the point feature + * s.error(s_star); + * } + * \endcode + * + * An other fully explained example is given in the \ref tutorial-ibvs. + */ class VISP_EXPORT vpFeaturePoint : public vpBasicFeature { private: @@ -184,19 +180,17 @@ class VISP_EXPORT vpFeaturePoint : public vpBasicFeature public: vpFeaturePoint(); - //! Destructor. - virtual ~vpFeaturePoint() { } void buildFrom(double x, double y, double Z); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; - vpFeaturePoint *duplicate() const; + vpFeaturePoint *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; double get_x() const; @@ -204,10 +198,10 @@ class VISP_EXPORT vpFeaturePoint : public vpBasicFeature double get_Z() const; - void init(); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + void init() override; + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void set_x(double x); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeaturePoint3D.h b/modules/visual_features/include/visp3/visual_features/vpFeaturePoint3D.h index b04397b6f8..52908f7da3 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeaturePoint3D.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeaturePoint3D.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 3D point visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeaturePoint3d_H #define vpFeaturePoint3d_H /*! - \file vpFeaturePoint3D.h - \brief class that defines the 3D point visual feature. -*/ + * \file vpFeaturePoint3D.h + * \brief class that defines the 3D point visual feature. + */ #include #include @@ -49,171 +47,166 @@ #include /*! - \class vpFeaturePoint3D - \ingroup group_visual_features - \brief Class that defines the 3D point visual feature. - - A 3D point visual feature corresponds to a 3D point with \f$ - {\bf X} = (X,Y,Z)\f$ coordinates in the camera frame. - - This class is intended to manipulate the 3D point visual feature - \f$ s = (X,Y,Z) \f$. The interaction matrix related to \f$ s \f$ is given -by: \f[ L = \left[ \begin{array}{rrrrrr} - -1 & 0 & 0 & 0 & -Z & Y \\ - 0 & -1 & 0 & Z & 0 & -X \\ - 0 & 0 & -1 & -Y & X & 0 \\ - \end{array} - \right] - \f] - - Two ways are allowed to initialize the feature. 
- - - The first way by setting the feature values \f$(X,Y,Z)\f$ using - vpFeaturePoint3D member functions like set_X(), set_Y(), set_Z(), - or also buildFrom(). - - - The second by using the feature builder functionalities to - initialize the feature from a point structure like - vpFeatureBuilder::create (vpFeaturePoint3D &, const vpPoint &). - - The interaction() method allows to compute the interaction matrix - \f$ L\f$ associated to the 3D point visual feature, while the - error() method computes the error vector \f$ (s - s^*)\f$ between the - current visual feature and the desired one. - - The code below shows how to create a eye-in hand visual servoing - task using a 3D point feature \f$(X,Y,Z)\f$ that correspond to the - 3D point coordinates in the camera frame. To control six degrees of - freedom, at least three other features must be considered like - vpFeatureThetaU visual features. First we create a current (\f$s\f$) - and desired (\f$s^*\f$) 3D point feature, set the task to use the - interaction matrix associated to the desired feature \f$L_{s^*}\f$ - and than compute the camera velocity \f$v=-\lambda \; {L_{s^*}}^+ \; - (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() - loop while \f$s^*\f$ is set to \f$Z^*=1\f$. - - \code -#include -#include -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - // Set the 3D point coordinates in the object frame: oP - vpPoint point(0.1, -0.1, 0); - - vpHomogeneousMatrix cMo; // Pose between the camera and the object frame - cMo.buildFrom(0, 0, 1.2, 0, 0, 0); - // ... cMo need here to be computed from a pose estimation - - point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP - - // Creation of the current feature s - vpFeaturePoint3D s; - s.buildFrom(point); // Initialize the feature from the 3D point coordinates in the camera frame: s=(X,Y,Z) - s.print(); - - // Creation of the desired feature s*. - vpFeaturePoint3D s_star; - s_star.buildFrom(0, 0, 1); // Z*=1 meter - s_star.print(); - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the desired visual features s* - task.setInteractionMatrixType(vpServo::DESIRED); - // Set the constant gain - double lambda = 0.8; - task.setLambda(lambda); - - // Add the 3D point feature to the task - task.addFeature(s, s_star); - - // Control loop - for ( ; ; ) { - // ... cMo need here to be estimated from for example a pose estimation. - point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP - - // Update the current 3D point visual feature - s.buildFrom(point); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } -} - \endcode - - If you want to deal only with the \f$(X,Y)\f$ subset feature from the 3D - point feature, you have just to modify the addFeature() call in - the previous example by the following line. In that case, the dimension - of \f$s\f$ is two. - - \code - // Add the (X,Y) subset feature from the 3D point visual feature to the task - task.addFeature(s, s_star, vpFeaturePoint3D::selectX() | vpFeaturePoint3D::selectY()); - \endcode - - If you want to build your own control law, this other example shows - how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D - point visual feature, compute the corresponding error - vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. 
- - \code -#include -#include -#include -#include - -int main() -{ - // Set the 3D point coordinates in the object frame: oP - vpPoint point(0.1, -0.1, 0); - - vpHomogeneousMatrix cMo; // Pose between the camera and the object frame - cMo.buildFrom(0, 0, 1.2, 0, 0, 0); - // ... cMo need here to be computed from a pose estimation - - point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP - - // Creation of the current feature s - vpFeaturePoint3D s; - s.buildFrom(point); // Initialize the feature from the 3D point coordinates in the camera frame - s.print(); - - // Creation of the desired feature s*. - vpFeaturePoint3D s_star; - s_star.buildFrom(0, 0, 1); // Z*=1 meter - s_star.print(); - - // Compute the L_s interaction matrix associated to the current feature - vpMatrix L = s.interaction(); - std::cout << "L: " << L << std::endl; - - // Compute the error vector (s-s*) for the 3D point feature - vpColVector e = s.error(s_star); // e = (s-s*) - - std::cout << "e: " << e << std::endl; -} - \endcode - -*/ + * \class vpFeaturePoint3D + * \ingroup group_visual_features + * \brief Class that defines the 3D point visual feature. + * + * A 3D point visual feature corresponds to a 3D point with \f$ + * {\bf X} = (X,Y,Z)\f$ coordinates in the camera frame. + * + * This class is intended to manipulate the 3D point visual feature + * \f$ s = (X,Y,Z) \f$. The interaction matrix related to \f$ s \f$ is given + * by: \f[ L = \left[ \begin{array}{rrrrrr} + * -1 & 0 & 0 & 0 & -Z & Y \\ + * 0 & -1 & 0 & Z & 0 & -X \\ + * 0 & 0 & -1 & -Y & X & 0 \\ + * \end{array} + * \right] + * \f] + * + * Two ways are allowed to initialize the feature. + * + * - The first way by setting the feature values \f$(X,Y,Z)\f$ using + * vpFeaturePoint3D member functions like set_X(), set_Y(), set_Z(), + * or also buildFrom(). + * + * - The second by using the feature builder functionalities to + * initialize the feature from a point structure like + * vpFeatureBuilder::create (vpFeaturePoint3D &, const vpPoint &). + * + * The interaction() method allows to compute the interaction matrix + * \f$ L\f$ associated to the 3D point visual feature, while the + * error() method computes the error vector \f$ (s - s^*)\f$ between the + * current visual feature and the desired one. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 3D point feature \f$(X,Y,Z)\f$ that correspond to the + * 3D point coordinates in the camera frame. To control six degrees of + * freedom, at least three other features must be considered like + * vpFeatureThetaU visual features. First we create a current (\f$s\f$) + * and desired (\f$s^*\f$) 3D point feature, set the task to use the + * interaction matrix associated to the desired feature \f$L_{s^*}\f$ + * and than compute the camera velocity \f$v=-\lambda \; {L_{s^*}}^+ \; + * (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() + * loop while \f$s^*\f$ is set to \f$Z^*=1\f$. + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * // Set the 3D point coordinates in the object frame: oP + * vpPoint point(0.1, -0.1, 0); + * + * vpHomogeneousMatrix cMo; // Pose between the camera and the object frame + * cMo.buildFrom(0, 0, 1.2, 0, 0, 0); + * // ... 
cMo need here to be computed from a pose estimation + * + * point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP + * + * // Creation of the current feature s + * vpFeaturePoint3D s; + * s.buildFrom(point); // Initialize the feature from the 3D point coordinates in the camera frame: s=(X,Y,Z) + * s.print(); + * + * // Creation of the desired feature s*. + * vpFeaturePoint3D s_star; + * s_star.buildFrom(0, 0, 1); // Z*=1 meter + * s_star.print(); + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the desired visual features s* + * task.setInteractionMatrixType(vpServo::DESIRED); + * // Set the constant gain + * double lambda = 0.8; + * task.setLambda(lambda); + * + * // Add the 3D point feature to the task + * task.addFeature(s, s_star); + * + * // Control loop + * for ( ; ; ) { + * // ... cMo need here to be estimated from for example a pose estimation. + * point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP + * + * // Update the current 3D point visual feature + * s.buildFrom(point); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + * + * If you want to deal only with the \f$(X,Y)\f$ subset feature from the 3D + * point feature, you have just to modify the addFeature() call in + * the previous example by the following line. In that case, the dimension + * of \f$s\f$ is two. + * + * \code + * // Add the (X,Y) subset feature from the 3D point visual feature to the task + * task.addFeature(s, s_star, vpFeaturePoint3D::selectX() | vpFeaturePoint3D::selectY()); + * \endcode + * + * If you want to build your own control law, this other example shows + * how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D + * point visual feature, compute the corresponding error + * vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * // Set the 3D point coordinates in the object frame: oP + * vpPoint point(0.1, -0.1, 0); + * + * vpHomogeneousMatrix cMo; // Pose between the camera and the object frame + * cMo.buildFrom(0, 0, 1.2, 0, 0, 0); + * // ... cMo need here to be computed from a pose estimation + * + * point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP + * + * // Creation of the current feature s + * vpFeaturePoint3D s; + * s.buildFrom(point); // Initialize the feature from the 3D point coordinates in the camera frame + * s.print(); + * + * // Creation of the desired feature s*. + * vpFeaturePoint3D s_star; + * s_star.buildFrom(0, 0, 1); // Z*=1 meter + * s_star.print(); + * + * // Compute the L_s interaction matrix associated to the current feature + * vpMatrix L = s.interaction(); + * std::cout << "L: " << L << std::endl; + * + * // Compute the error vector (s-s*) for the 3D point feature + * vpColVector e = s.error(s_star); // e = (s-s*) + * + * std::cout << "e: " << e << std::endl; + * } + * \endcode + */ class VISP_EXPORT vpFeaturePoint3D : public vpBasicFeature - { - public: // basic constructor vpFeaturePoint3D(); - //! Destructor. Does nothing. 
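// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the patch): the
// vpFeaturePoint3D documentation above names a second initialization path,
// vpFeatureBuilder::create(vpFeaturePoint3D &, const vpPoint &), but does not
// show it. A minimal sketch follows, assuming the usual ViSP 3 include paths;
// it also reuses the selectX()/selectY() selectors cited above to restrict the
// interaction matrix to the (X, Y) subset.
// ---------------------------------------------------------------------------
#include <iostream>

#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpMatrix.h>
#include <visp3/core/vpPoint.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeaturePoint3D.h>

int main()
{
  // 3D point expressed in the object frame: oP
  vpPoint point(0.1, -0.1, 0);

  // Camera-to-object pose; in a real application it comes from a pose estimation
  vpHomogeneousMatrix cMo;
  cMo.buildFrom(0, 0, 1.2, 0, 0, 0);

  // Express the point in the camera frame: cP = cMo * oP
  point.changeFrame(cMo);

  // Second initialization way: build s = (X, Y, Z) through the feature builder
  vpFeaturePoint3D s;
  vpFeatureBuilder::create(s, point);
  s.print();

  // Interaction matrix restricted to the (X, Y) subset: a 2-by-6 matrix
  vpMatrix L_xy = s.interaction(vpFeaturePoint3D::selectX() | vpFeaturePoint3D::selectY());
  std::cout << "L_xy:\n" << L_xy << std::endl;

  return 0;
}
// ---------------------------------------------------------------------------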
- virtual ~vpFeaturePoint3D() {} /* - /section Set coordinates - */ + * Set coordinates + */ // build feature from a point (vpPoint) void buildFrom(const vpPoint &p); @@ -221,16 +214,16 @@ class VISP_EXPORT vpFeaturePoint3D : public vpBasicFeature void buildFrom(double X, double Y, double Z); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; // feature duplication - vpFeaturePoint3D *duplicate() const; + vpFeaturePoint3D *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; // get the point X-coordinates double get_X() const; @@ -240,12 +233,12 @@ class VISP_EXPORT vpFeaturePoint3D : public vpBasicFeature double get_Z() const; // basic construction - void init(); + void init() override; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; // print the name of the feature - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; // set the point X-coordinates void set_X(double X); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeaturePointPolar.h b/modules/visual_features/include/visp3/visual_features/vpFeaturePointPolar.h index 6e69fb9aa8..cee08b8a78 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeaturePointPolar.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeaturePointPolar.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D point with polar coordinates visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeaturePointPolar_H #define vpFeaturePointPolar_H /*! - \file vpFeaturePointPolar.h - \brief Class that defines a 2D point visual feature with polar coordinates. -*/ + * \file vpFeaturePointPolar.h + * \brief Class that defines a 2D point visual feature with polar coordinates. + */ #include #include @@ -49,209 +47,207 @@ #include /*! - \class vpFeaturePointPolar - \ingroup group_visual_features - - \brief Class that defines 2D image point visual feature with - polar coordinates \f$(\rho,\theta)\f$ described in \cite Corke09a. - - Let us denote \f$(\rho,\theta)\f$ the polar coordinates of an image - point, with \f$\rho\f$ the radius of the feature point with respect - to the optical center and \f$\theta\f$ the angle. 
From cartesian - coordinates \f$(x,y)\f$ of a image point, polar coordinates are - obtained by: - - \f[\rho = \sqrt{x^2+y^2} \hbox{,}\; \; \theta = \arctan \frac{y}{x}\f] - - From polar coordinates, cartesian coordinates of the feature point - can be obtained by: - - \f[x = \rho \cos\theta \hbox{,}\; \; y = \rho \sin\theta\f] - - This class is intended to manipulate the 2D image point visual - feature in polar coordinates \f$ s = (\rho, \theta) \f$. The - interaction matrix related to \f$ s \f$ is given by: - - \f[ - L = \left[ - \begin{array}{l} - L_{\rho} \\ - \; \\ - L_{\theta}\\ - \end{array} - \right] - = - \left[ - \begin{array}{cccccc} - \frac{-\cos \theta}{Z} & \frac{-\sin \theta}{Z} & \frac{\rho}{Z} & - (1+\rho^2)\sin\theta & -(1+\rho^2)\cos\theta & 0 \\ - \;\\ \ - \frac{\sin\theta}{\rho Z} & \frac{-\cos\theta}{\rho Z} & 0 & \cos\theta - /\rho & \sin\theta/\rho & -1 \\ \end{array} \right] \f] - - where \f$Z\f$ is the 3D depth of the considered point in the camera frame. - - Two ways are allowed to initialize the feature. - - - The first way by setting the feature values \f$(\rho,\theta,Z)\f$ - using vpFeaturePointPolar members like set_rho(), set_theta(), - set_Z(), or set_rhoThetaZ(), or also buildFrom(). - - - The second way by using the feature builder functionalities to - initialize the feature from a dot tracker, like - vpFeatureBuilder::create (vpFeaturePointPolar &, const - vpCameraParameters &, const vpDot &) or vpFeatureBuilder::create - (vpFeaturePointPolar &, const vpCameraParameters &, const vpDot2 - &). Be aware, that in that case only \f$(\rho,\theta)\f$ are - initialized. You may also initialize the 3D depth \f$Z\f$. It is - also possible to initialize the feature from a point structure, - like vpFeatureBuilder::create(vpFeaturePointPolar &, const vpPoint - &) or vpFeatureBuilder::create(vpFeaturePointPolar &, const - vpCameraParameters &, const vpCameraParameters &, const vpPoint - &). In that case all the feature parameters \f$(\rho,\theta,Z)\f$ - would be initialized. - - The interaction() method allows to compute the interaction matrix - \f$L\f$ associated to the visual feature, while the error() method - computes the error vector \f$(s - s^*)\f$ between the current visual - feature and the desired one. - - The code below shows how to create a eye-in hand visual servoing - task using four 2D point features with polar coordinates. First we - create four current features \f$s\f$ (p var name in the code) and - four desired \f$s^*\f$ (pd var name in the code) point features with - polar coordinates, set the task to use the interaction matrix - associated to the current feature \f$L_{s}\f$ and than compute the - camera velocity \f$v=-\lambda \; {L_{s}}^+ \; (s-s^*)\f$. The - current feature \f$s\f$ is updated in the while() loop, while - \f$s^*\f$ is initialized at the beginning. 
- - \code - #include - #include - #include - #include - - int main() - { - - // Create 4 points to specify the object of interest - vpPoint point[4]; - - // Set the 3D point coordinates in the object frame: oP - point[0].setWorldCoordinates(-0.1, -0.1, 0); - point[1].setWorldCoordinates( 0.1, -0.1, 0); - point[2].setWorldCoordinates( 0.1, 0.1, 0); - point[3].setWorldCoordinates(-0.1, 0.1, 0); - - // Initialize the desired pose between the camera and the object frame - vpHomogeneousMatrix cMod; - cMod.buildFrom(0, 0, 1, 0, 0, 0); - - // Compute the desired position of the point - for (int i = 0 ; i < 4 ; i++) { - // Compute the 3D point coordinates in the camera frame cP = cMod * oP - point[i].changeFrame(cMod); - // Compute the perspective projection to set (x,y) - point[i].projection(); - } - - // Create 4 desired visual features as 2D points with polar coordinates - vpFeaturePointPolar pd[4]; - // Initialize the desired visual feature from the desired point positions - for (int i = 0 ; i < 4 ; i++) - vpFeatureBuilder::create(pd[i], point[i]); - - // Initialize the current pose between the camera and the object frame - vpHomogeneousMatrix cMo; - cMo.buildFrom(0, 0, 1.2, 0, 0, M_PI); - // ... cMo need here to be computed from a pose estimation - - for (int i = 0 ; i < 4 ; i++) { - // Compute the 3D point coordinates in the camera frame cP = cMo * oP - point[i].changeFrame(cMo); - // Compute the perspective projection to set (x,y) - point[i].projection(); - } - // Create 4 current visual features as 2D points with polar coordinates - vpFeaturePointPolar p[4]; - // Initialize the current visual feature from the current point positions - for (int i = 0 ; i < 4 ; i++) - vpFeatureBuilder::create(p[i], point[i]); - - // Visual servo task initialization - vpServo task; - // - Camera is mounted on the robot end-effector and velocities are - // computed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // - Interaction matrix is computed with the current visual features s - task.setInteractionMatrixType(vpServo::CURRENT); - // - Set the constant gain to 1 - task.setLambda(1); - // - Add current and desired features - for (int i = 0 ; i < 4 ; i++) - task.addFeature(p[i], pd[i]); - - // Control loop - for ( ; ; ) { - // ... cMo need here to be estimated from for example a pose estimation. - // Computes the point coordinates in the camera frame and its 2D - // coordinates in the image plane - for (int i = 0 ; i < 4 ; i++) - point[i].track(cMo) ; - - // Update the current 2D point visual feature with polar coordinates - for (int i = 0 ; i < 4 ; i++) - vpFeatureBuilder::create(p[i], point[i]); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - } - \endcode - - If you want to deal only with the \f$\rho\f$ subset feature from the 2D - point feature set, you have just to modify the addFeature() call in the - previous example by the following line. In that case, the dimension of - \f$s\f$ is four. - - \code - // Add the rho subset feature from the 2D point polar coordinates visual features - task.addFeature(p[i], pd[i], vpFeaturePointPolar::selectRho()); - \endcode - - If you want to build your own control law, this other example shows how - to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual - feature with polar coordinates, compute the corresponding error vector - \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. 
- - \code - #include - #include - - int main() - { - // Creation of the current feature s - vpFeaturePointPolar s; - // Initialize the current feature - s.buildFrom(0.1, M_PI, 1); // rho=0.1m, theta=pi, Z=1m - - // Creation of the desired feature s - vpFeaturePointPolar s_star; - // Initialize the desired feature - s.buildFrom(0.15, 0, 0.8); // rho=0.15m, theta=0, Z=0.8m - - // Compute the interaction matrix L_s for the current feature - vpMatrix L = s.interaction(); - - // Compute the error vector (s-s*) for the point feature with polar coordinates - s.error(s_star); - - return 0; - } - \endcode - -*/ + * \class vpFeaturePointPolar + * \ingroup group_visual_features + * + * \brief Class that defines 2D image point visual feature with + * polar coordinates \f$(\rho,\theta)\f$ described in \cite Corke09a. + * + * Let us denote \f$(\rho,\theta)\f$ the polar coordinates of an image + * point, with \f$\rho\f$ the radius of the feature point with respect + * to the optical center and \f$\theta\f$ the angle. From cartesian + * coordinates \f$(x,y)\f$ of a image point, polar coordinates are + * obtained by: + * + * \f[\rho = \sqrt{x^2+y^2} \hbox{,}\; \; \theta = \arctan \frac{y}{x}\f] + * + * From polar coordinates, cartesian coordinates of the feature point + * can be obtained by: + * + * \f[x = \rho \cos\theta \hbox{,}\; \; y = \rho \sin\theta\f] + * + * This class is intended to manipulate the 2D image point visual + * feature in polar coordinates \f$ s = (\rho, \theta) \f$. The + * interaction matrix related to \f$ s \f$ is given by: + * + * \f[ + * L = \left[ + * \begin{array}{l} + * L_{\rho} \\ + * \; \\ + * L_{\theta}\\ + * \end{array} + * \right] + * = + * \left[ + * \begin{array}{cccccc} + * \frac{-\cos \theta}{Z} & \frac{-\sin \theta}{Z} & \frac{\rho}{Z} & + * (1+\rho^2)\sin\theta& -(1+\rho^2)\cos\theta & 0 \\ + * \;\\ \ + * \frac{\sin\theta}{\rho Z} & \frac{-\cos\theta}{\rho Z} & 0 & \cos\theta + * /\rho & \sin\theta/\rho & -1 \\ \end{array} \right] \f] + * + * where \f$Z\f$ is the 3D depth of the considered point in the camera frame. + * + * Two ways are allowed to initialize the feature. + * + * - The first way by setting the feature values \f$(\rho,\theta,Z)\f$ + * using vpFeaturePointPolar members like set_rho(), set_theta(), + * set_Z(), or set_rhoThetaZ(), or also buildFrom(). + * + * - The second way by using the feature builder functionalities to + * initialize the feature from a dot tracker, like + * vpFeatureBuilder::create (vpFeaturePointPolar &, const + * vpCameraParameters &, const vpDot &) or vpFeatureBuilder::create + * (vpFeaturePointPolar &, const vpCameraParameters &, const vpDot2 + * &). Be aware, that in that case only \f$(\rho,\theta)\f$ are + * initialized. You may also initialize the 3D depth \f$Z\f$. It is + * also possible to initialize the feature from a point structure, + * like vpFeatureBuilder::create(vpFeaturePointPolar &, const vpPoint + * &) or vpFeatureBuilder::create(vpFeaturePointPolar &, const + * vpCameraParameters &, const vpCameraParameters &, const vpPoint + * &). In that case all the feature parameters \f$(\rho,\theta,Z)\f$ + * would be initialized. + * + * The interaction() method allows to compute the interaction matrix + * \f$L\f$ associated to the visual feature, while the error() method + * computes the error vector \f$(s - s^*)\f$ between the current visual + * feature and the desired one. + * + * The code below shows how to create a eye-in hand visual servoing + * task using four 2D point features with polar coordinates. 
First we + * create four current features \f$s\f$ (p var name in the code) and + * four desired \f$s^*\f$ (pd var name in the code) point features with + * polar coordinates, set the task to use the interaction matrix + * associated to the current feature \f$L_{s}\f$ and than compute the + * camera velocity \f$v=-\lambda \; {L_{s}}^+ \; (s-s^*)\f$. The + * current feature \f$s\f$ is updated in the while() loop, while + * \f$s^*\f$ is initialized at the beginning. + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * // Create 4 points to specify the object of interest + * vpPoint point[4]; + * + * // Set the 3D point coordinates in the object frame: oP + * point[0].setWorldCoordinates(-0.1, -0.1, 0); + * point[1].setWorldCoordinates( 0.1, -0.1, 0); + * point[2].setWorldCoordinates( 0.1, 0.1, 0); + * point[3].setWorldCoordinates(-0.1, 0.1, 0); + * + * // Initialize the desired pose between the camera and the object frame + * vpHomogeneousMatrix cMod; + * cMod.buildFrom(0, 0, 1, 0, 0, 0); + * + * // Compute the desired position of the point + * for (int i = 0 ; i < 4 ; i++) { + * // Compute the 3D point coordinates in the camera frame cP = cMod * oP + * point[i].changeFrame(cMod); + * // Compute the perspective projection to set (x,y) + * point[i].projection(); + * } + * + * // Create 4 desired visual features as 2D points with polar coordinates + * vpFeaturePointPolar pd[4]; + * // Initialize the desired visual feature from the desired point positions + * for (int i = 0 ; i < 4 ; i++) + * vpFeatureBuilder::create(pd[i], point[i]); + * + * // Initialize the current pose between the camera and the object frame + * vpHomogeneousMatrix cMo; + * cMo.buildFrom(0, 0, 1.2, 0, 0, M_PI); + * // ... cMo need here to be computed from a pose estimation + * + * for (int i = 0 ; i < 4 ; i++) { + * // Compute the 3D point coordinates in the camera frame cP = cMo * oP + * point[i].changeFrame(cMo); + * // Compute the perspective projection to set (x,y) + * point[i].projection(); + * } + * // Create 4 current visual features as 2D points with polar coordinates + * vpFeaturePointPolar p[4]; + * // Initialize the current visual feature from the current point positions + * for (int i = 0 ; i < 4 ; i++) + * vpFeatureBuilder::create(p[i], point[i]); + * + * // Visual servo task initialization + * vpServo task; + * // - Camera is mounted on the robot end-effector and velocities are + * // computed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // - Interaction matrix is computed with the current visual features s + * task.setInteractionMatrixType(vpServo::CURRENT); + * // - Set the constant gain to 1 + * task.setLambda(1); + * // - Add current and desired features + * for (int i = 0 ; i < 4 ; i++) + * task.addFeature(p[i], pd[i]); + * + * // Control loop + * for ( ; ; ) { + * // ... cMo need here to be estimated from for example a pose estimation. 
+ * // Computes the point coordinates in the camera frame and its 2D + * // coordinates in the image plane + * for (int i = 0 ; i < 4 ; i++) + * point[i].track(cMo) ; + + * // Update the current 2D point visual feature with polar coordinates + * for (int i = 0 ; i < 4 ; i++) + * vpFeatureBuilder::create(p[i], point[i]); + + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + * + * If you want to deal only with the \f$\rho\f$ subset feature from the 2D + * point feature set, you have just to modify the addFeature() call in the + * previous example by the following line. In that case, the dimension of + * \f$s\f$ is four. + * + * \code + * // Add the rho subset feature from the 2D point polar coordinates visual features + * task.addFeature(p[i], pd[i], vpFeaturePointPolar::selectRho()); + * \endcode + * + * If you want to build your own control law, this other example shows how + * to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual + * feature with polar coordinates, compute the corresponding error vector + * \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * + * int main() + * { + * // Creation of the current feature s + * vpFeaturePointPolar s; + * // Initialize the current feature + * s.buildFrom(0.1, M_PI, 1); // rho=0.1m, theta=pi, Z=1m + * + * // Creation of the desired feature s + * vpFeaturePointPolar s_star; + * // Initialize the desired feature + * s.buildFrom(0.15, 0, 0.8); // rho=0.15m, theta=0, Z=0.8m + * + * // Compute the interaction matrix L_s for the current feature + * vpMatrix L = s.interaction(); + * + * // Compute the error vector (s-s*) for the point feature with polar coordinates + * s.error(s_star); + * + * return 0; + * } + * \endcode + */ class VISP_EXPORT vpFeaturePointPolar : public vpBasicFeature { private: @@ -262,25 +258,23 @@ class VISP_EXPORT vpFeaturePointPolar : public vpBasicFeature public: // basic constructor vpFeaturePointPolar(); - //! Destructor. Does nothing. 
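// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the patch): a tiny sketch of
// the cartesian-to-polar conversion stated in the vpFeaturePointPolar
// documentation above, feeding the result to buildFrom(rho, theta, Z).
// std::atan2(y, x) is used instead of the arctan(y/x) written in the formula
// so that the quadrant is preserved; include paths are assumptions based on
// the ViSP 3 module layout.
// ---------------------------------------------------------------------------
#include <cmath>

#include <visp3/visual_features/vpFeaturePointPolar.h>

int main()
{
  // Cartesian image-plane coordinates (x, y) of the point and its depth Z
  double x = 0.10, y = -0.05, Z = 0.8;

  // rho = sqrt(x^2 + y^2), theta = arctan(y/x)  (cf. the class documentation)
  double rho = std::sqrt(x * x + y * y);
  double theta = std::atan2(y, x);

  // First initialization way: set (rho, theta, Z) directly
  vpFeaturePointPolar s;
  s.buildFrom(rho, theta, Z); // Z is only needed to compute the interaction matrix
  s.print();

  return 0;
}
// ---------------------------------------------------------------------------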
- virtual ~vpFeaturePointPolar() { } void buildFrom(double rho, double theta, double Z); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; // feature duplication - vpFeaturePointPolar *duplicate() const; + vpFeaturePointPolar *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; // basic construction - void init(); + void init() override; // get the point rho-coordinates double get_rho() const; @@ -290,10 +284,10 @@ class VISP_EXPORT vpFeaturePointPolar : public vpBasicFeature double get_Z() const; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; // print the name of the feature - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; // set the point rho-coordinates void set_rho(double rho); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureSegment.h b/modules/visual_features/include/visp3/visual_features/vpFeatureSegment.h index 09a1312220..fa074df8eb 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureSegment.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureSegment.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,19 +29,15 @@ * * Description: * Segment visual feature. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #ifndef vpFeatureSegment_H #define vpFeatureSegment_H /*! - \file vpFeatureSegment.h - \brief class that defines the Segment visual feature -*/ + * \file vpFeatureSegment.h + * \brief class that defines the Segment visual feature + */ #include #include @@ -51,106 +46,100 @@ #include /*! - \class vpFeatureSegment - \ingroup group_visual_features - - \brief Class that defines a 2D segment visual features. - This class allow to consider two sets of visual features: - - the non normalised features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ where - \f$(x_c,y_c)\f$ are the coordinates of the segment center, \f$ l \f$ the - segment length and \f$ \alpha \f$ the orientation of the segment with - respect to the \f$ x \f$ axis. - - or the normalized features \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$ with - \f$x_n = x_c/l\f$, \f$y_n = y_c/l\f$ and \f$l_n = 1/l\f$. - - - - The selection of the feature set is done either during construction using - vpFeatureSegment(bool), or by setNormalized(bool). - -*/ + * \class vpFeatureSegment + * \ingroup group_visual_features + * + * \brief Class that defines a 2D segment visual features. 
+ * This class allow to consider two sets of visual features: + * - the non normalized features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ where + * \f$(x_c,y_c)\f$ are the coordinates of the segment center, \f$ l \f$ the + * segment length and \f$ \alpha \f$ the orientation of the segment with + * respect to the \f$ x \f$ axis. + * - or the normalized features \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$ with + * \f$x_n = x_c/l\f$, \f$y_n = y_c/l\f$ and \f$l_n = 1/l\f$. + * + * The selection of the feature set is done either during construction using + * vpFeatureSegment(bool), or by setNormalized(bool). + */ class VISP_EXPORT vpFeatureSegment : public vpBasicFeature { public: // empty constructor explicit vpFeatureSegment(bool normalized = false); - //! Destructor. Does nothing. - virtual ~vpFeatureSegment() {} // change values of the segment void buildFrom(double x1, double y1, double Z1, double x2, double y2, double Z2); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; //! Feature duplication. - vpFeatureSegment *duplicate() const; + vpFeatureSegment *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; /*! - Get the x coordinate of the segment center in the image plane. - - \return If normalized features are used, return \f$ x_n = x_c / l \f$. - Otherwise return \f$ x_c \f$. + * Get the x coordinate of the segment center in the image plane. + * + * \return If normalized features are used, return \f$ x_n = x_c / l \f$. + * Otherwise return \f$ x_c \f$. */ inline double getXc() const { return s[0]; } /*! - Get the y coordinate of the segment center in the image plane. - - \return If normalized features are used, return \f$ y_n = y_c / l \f$. - Otherwise return \f$ y_c \f$. - */ + * Get the y coordinate of the segment center in the image plane. + * + * \return If normalized features are used, return \f$ y_n = y_c / l \f$. + * Otherwise return \f$ y_c \f$. + */ inline double getYc() const { return s[1]; } /*! - Get the length of the segment. - - \return If normalized features are used, return \f$ l_n = 1 / l \f$. - Otherwise return \f$ l \f$. - - */ + * Get the length of the segment. + * + * \return If normalized features are used, return \f$ l_n = 1 / l \f$. + * Otherwise return \f$ l \f$. + */ inline double getL() const { return s[2]; } /*! - Get the value of \f$ \alpha \f$ which represents the orientation of - the segment. - - \return The value of \f$ \alpha \f$. - */ + * Get the value of \f$ \alpha \f$ which represents the orientation of + * the segment. + * + * \return The value of \f$ \alpha \f$. + */ inline double getAlpha() const { return s[3]; } /*! - Get the value of \f$ Z_1 \f$ which represents the Z coordinate in the - camera frame of the 3D point that corresponds to the segment first point. - - \return The value of the depth \f$ Z_1 \f$. - */ + * Get the value of \f$ Z_1 \f$ which represents the Z coordinate in the + * camera frame of the 3D point that corresponds to the segment first point. 
+ * + * \return The value of the depth \f$ Z_1 \f$. + */ inline double getZ1() const { return Z1_; } /*! - Get the value of \f$ Z_2 \f$ which represents the Z coordinate in the - camera frame of the 3D point that corresponds to the segment second - point. - - \return The value of the depth \f$ Z_2 \f$. - */ + * Get the value of \f$ Z_2 \f$ which represents the Z coordinate in the + * camera frame of the 3D point that corresponds to the segment second + * point. + * + * \return The value of the depth \f$ Z_2 \f$. + */ inline double getZ2() const { return Z2_; } // Basic construction. - void init(); + void init() override; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; /*! - Indicates if the normalized features are considered. - */ + * Indicates if the normalized features are considered. + */ bool isNormalized() { return normalized_; }; static unsigned int selectXc(); @@ -159,61 +148,61 @@ class VISP_EXPORT vpFeatureSegment : public vpBasicFeature static unsigned int selectAlpha(); /*! - Set the king of feature to consider. - \param normalized : If true, use normalized features \f${\bf s} = (x_n, - y_n, l_n, \alpha)\f$. If false, use non normalized features \f${\bf s} = - (x_c, y_c, l_c, \alpha)\f$. - */ + * Set the king of feature to consider. + * \param normalized : If true, use normalized features \f${\bf s} = (x_n, + * y_n, l_n, \alpha)\f$. If false, use non normalized features \f${\bf s} = + * (x_c, y_c, l_c, \alpha)\f$. + */ void setNormalized(bool normalized) { normalized_ = normalized; }; - /*! - Set the value of the x coordinate of the segment center - in the image plane. It is one parameter of the visual feature \f$ s \f$. - - \param val : Value to set, that is either equal to \f$ x_n = x_c/l \f$ - when normalized features are considered, or equal to \f$ x_c \f$ - otherwise. - */ + /*! + * Set the value of the x coordinate of the segment center + * in the image plane. It is one parameter of the visual feature \f$ s \f$. + * + * \param val : Value to set, that is either equal to \f$ x_n = x_c/l \f$ + * when normalized features are considered, or equal to \f$ x_c \f$ + * otherwise. + */ inline void setXc(double val) { s[0] = xc_ = val; flags[0] = true; } - /*! - Set the value of the y coordinate of the segment center - in the image plane. It is one parameter of the visual feature \f$ s \f$. - - \param val : Value to set, that is either equal to \f$ y_n = y_c/l \f$ - when normalized features are considered, or equal to \f$ y_c \f$ - otherwise. - */ + /*! + * Set the value of the y coordinate of the segment center + * in the image plane. It is one parameter of the visual feature \f$ s \f$. + * + * \param val : Value to set, that is either equal to \f$ y_n = y_c/l \f$ + * when normalized features are considered, or equal to \f$ y_c \f$ + * otherwise. + */ inline void setYc(double val) { s[1] = yc_ = val; flags[1] = true; } - /*! - - Set the value of the segment length in the image plane. It is one - parameter of the visual feature \f$ s \f$. - \param val : Value to set, that is either equal to \f$l_n= 1/l \f$ when - normalized features are considered, or equal to \f$ l \f$ otherwise. - */ + /*! + * Set the value of the segment length in the image plane. It is one + * parameter of the visual feature \f$ s \f$. 
+ * + * \param val : Value to set, that is either equal to \f$l_n= 1/l \f$ when + * normalized features are considered, or equal to \f$ l \f$ otherwise. + */ inline void setL(double val) { s[2] = l_ = val; flags[2] = true; } - /*! - - Set the value of \f$ \alpha \f$ which represents the orientation of the - segment in the image plane. It is one parameter of the visual feature \f$ - s \f$. - \param val : \f$ \alpha \f$ value to set. - */ + /*! + * Set the value of \f$ \alpha \f$ which represents the orientation of the + * segment in the image plane. It is one parameter of the visual feature \f$ + * s \f$. + * + * \param val : \f$ \alpha \f$ value to set. + */ inline void setAlpha(double val) { s[3] = alpha_ = val; @@ -223,17 +212,16 @@ class VISP_EXPORT vpFeatureSegment : public vpBasicFeature } /*! - - Set the value of \f$ Z_1 \f$ which represents the Z coordinate in the - camera frame of the 3D point that corresponds to the segment first point. - - This value is requested to compute the interaction matrix. - - \param val : \f$ Z_1 \f$ value to set. - - \exception vpFeatureException::badInitializationError : If Z1 is behind - the camera or equal to zero. - */ + * Set the value of \f$ Z_1 \f$ which represents the Z coordinate in the + * camera frame of the 3D point that corresponds to the segment first point. + * + * This value is requested to compute the interaction matrix. + * + * \param val : \f$ Z_1 \f$ value to set. + * + * \exception vpFeatureException::badInitializationError : If Z1 is behind + * the camera or equal to zero. + */ inline void setZ1(double val) { Z1_ = val; @@ -256,17 +244,16 @@ class VISP_EXPORT vpFeatureSegment : public vpBasicFeature } /*! - - Set the value of \f$ Z_2 \f$ which represents the Z coordinate in the - camera frame of the 3D point that corresponds to the segment second point. - - This value is requested to compute the interaction matrix. - - \param val : \f$ Z_2 \f$ value to set. - - \exception vpFeatureException::badInitializationError : If Z2 is behind - the camera or equal to zero. - */ + * Set the value of \f$ Z_2 \f$ which represents the Z coordinate in the + * camera frame of the 3D point that corresponds to the segment second point. + * + * This value is requested to compute the interaction matrix. + * + * \param val : \f$ Z_2 \f$ value to set. + * + * \exception vpFeatureException::badInitializationError : If Z2 is behind + * the camera or equal to zero. + */ inline void setZ2(double val) { Z2_ = val; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureThetaU.h b/modules/visual_features/include/visp3/visual_features/vpFeatureThetaU.h index 1d239979e3..35356cabbb 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureThetaU.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureThetaU.h @@ -49,178 +49,177 @@ #include /*! - \class vpFeatureThetaU - \ingroup group_visual_features - - \brief Class that defines a 3D visual feature \f$ s\f$ from a \f$ \theta - u \f$ axis/angle parametrization that represent the rotation between - to frames. - - Let us denote \f$ \theta u = (\theta u_x, \theta u_y, \theta u_z)\f$ . - - It is convenient to consider two coordinate frames: the current - camera frame \f$ {\cal{F}}_c \f$ and the desired camera frame \f$ - {\cal{F}}_{c^*} \f$. - - Let \f$^{c^*}R_c \f$ be the rotation matrix that gives the - orientation of the current camera frame relative to the desired camera - frame. 
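// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the patch): unlike the other
// features touched by this patch, the vpFeatureSegment class documented in the
// hunks above ships no usage snippet. The hypothetical sketch below is built
// only from the members visible in that header (the normalized/non-normalized
// constructor flag, buildFrom() and the getters); include paths are
// assumptions based on the ViSP 3 module layout.
// ---------------------------------------------------------------------------
#include <iostream>

#include <visp3/core/vpMatrix.h>
#include <visp3/visual_features/vpFeatureSegment.h>

int main()
{
  // Segment extremities: image-plane coordinates (x, y) and depths Z
  double x1 = -0.10, y1 = 0.05, Z1 = 1.0;
  double x2 = 0.20, y2 = 0.10, Z2 = 1.2;

  // true: use the normalized set s = (x_n, y_n, l_n, alpha)
  vpFeatureSegment s(true);
  s.buildFrom(x1, y1, Z1, x2, y2, Z2);

  std::cout << "x_n: " << s.getXc() << "  y_n: " << s.getYc()
            << "  l_n: " << s.getL() << "  alpha: " << s.getAlpha() << std::endl;

  // Interaction matrix of the full feature set: a 4-by-6 matrix
  vpMatrix L = s.interaction();
  std::cout << "L:\n" << L << std::endl;

  return 0;
}
// ---------------------------------------------------------------------------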
Let \f$ \theta u_{^{c^*}R_c} \f$ to corresponding axis/angle - representation of this rotation. - - Furthermore, let \f$^{c}R_{c^*} \f$ the rotation matrix that gives the - orientation of the desired camera frame relative to the current - camera frame. Let \f$ \theta u_{^{c}R_{c^*}} \f$ to corresponding - axis/angle representation of this rotation. - - This class can be used to manipulate two kind of visual features: - - - \f$ s = \theta u_{^{c^*}R_c} \f$ if the orientation of current - camera frame relative to the desired frame has to be - considered. The desired visual feature \f$ s^* \f$ is equal to - zero. The corresponding error is than equal to \f$ e=(s-s^*) = - \theta u_{^{c^*}R_c} \f$. In this case, the interaction matrix - related to \f$ s \f$ is given by \f[ L = \left[ \begin{array}{cc} - 0_3 & L_{\theta u} \end{array} \right] \f] with \f[ - L_{\theta u} = I_3 + \frac{\theta}{2} \; [u]_\times + - \left(1 - \frac{sinc \theta}{sinc^2 \frac{\theta}{2}}\right) - [u]^2_\times \f] where \f$ 0_3 \f$ is a \f$ 3 \times 3 \f$ nul - matrix, \f$ I_3 \f$ is the \f$3 \times 3\f$ identity matrix, and - for more readability \f$ \theta \f$ and \f$ u \f$ respectively the - angle and the axis coordinates of the \f$ \theta u_{^{c^*}R_c} \f$ - representation. - - - \f$ s = \theta u_{^{c}R_{c^*}} \f$ if it is more the orientation - of the desired camera frame relative to the current frame that has - to be considered. The desired visual feature \f$ s^* \f$ is equal - to zero. The corresponding error is than equal to \f$e=(s-s^*) = - \theta u_{^{c}R_{c^*}} \f$. In this case, the interaction matrix - related to \f$ s \f$ is given by \f[ L = \left[ \begin{array}{cc} - 0_3 & L_{\theta u} \end{array} \right] \f] with \f[ - L_{\theta u} = -I_3 + \frac{\theta}{2} \; [u]_\times - - \left(1 - \frac{sinc \theta}{sinc^2 \frac{\theta}{2}}\right) - [u]^2_\times \f] where \f$ 0_3 \f$ is a \f$ 3 \times 3 \f$ nul - matrix, \f$ I_3 \f$ is the \f$3 \times 3\f$ identity matrix, and - for more readability \f$ \theta \f$ and \f$ u \f$ respectively the - angle and the axis coordinates of the \f$ \theta u_{^{c}R_{c^*}} - \f$ representation. - - The kind of visual feature is to set during the construction of the - vpFeatureThetaU() object by using the selector - vpFeatureThetaU::vpFeatureThetaURotationRepresentationType. - - To initialize the feature \f$(\theta u_x, \theta u_y, \theta u_z)\f$ - you may use vpFeatureThetaU member functions like set_TUx(), - set_TUy(), set_TUz(), or also buildFrom() functions. - - Depending on the choice of the visual feature representation, the - interaction() method allows to compute the interaction matrix \f$ - L \f$ associated to the visual feature, while the error() - method computes the error vector \f$(s - s^*)\f$ between the current - visual feature and the desired one. - - To know more on the \f$ \theta u \f$ axis/angle representation for a - 3D rotation see the vpThetaUVector class. - - The code below shows how to create a eye-in hand visual servoing - task using a 3D \f$\theta u\f$ feature \f$(\theta u_x,\theta u_y, - \theta u_z)\f$ that correspond to the 3D rotation between the - current camera frame and the desired camera frame. To control six - degrees of freedom, at least three other features must be considered - like vpFeatureTranslation visual features. 
First we create a current - (\f$s\f$) 3D \f$\theta u\f$ feature, than set the - task to use the interaction matrix associated to the current feature - \f$L_s\f$ and than compute the camera velocity \f$v=-\lambda \; - L_s^+ \; (s-s^*)\f$. The current feature \f$s\f$ is updated in the - while() loop while \f$s^*\f$ is considered as zero. - - \code -#include -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpHomogeneousMatrix cMcd; - // ... cMcd need here to be initialized from for example a pose estimation. - - // Creation of the current feature s that correspond to the rotation - // in angle/axis parametrization between the current camera frame - // and the desired camera frame - vpFeatureThetaU s(vpFeatureThetaU::cRcd); - s.buildFrom(cMcd); // Initialization of the feature - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features s - task.setInteractionMatrixType(vpServo::CURRENT); - - // Add the 3D ThetaU feature to the task - task.addFeature(s); // s* is here considered as zero - - // Control loop - for ( ; ; ) { - // ... cMcd need here to be initialized from for example a pose estimation. - - // Update the current ThetaU visual feature - s.buildFrom(cMcd); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } -} - \endcode - - If you want to deal only with the \f$(\theta u_x,\theta u_y)\f$ subset - feature from the 3D \f$\theta u\f$ , you have just to modify the - addFeature() call in the previous example by the following line. In - that case, the dimension of \f$s\f$ is two. - - \code - // Add the (ThetaU_x, ThetaU_y) subset features from the 3D ThetaU - // rotation to the task - task.addFeature(s, vpFeatureThetaU::selectTUx() | vpFeatureThetaU::selectTUy()); - \endcode - - If you want to build your own control law, this other example shows - how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D - \f$\theta u\f$ visual feature, compute the corresponding error - vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. - - \code -#include -#include -#include - -int main() -{ - vpHomogeneousMatrix cdMc; - // ... cdMc need here to be initialized from for example a pose estimation. - - // Creation of the current feature s - vpFeatureThetaU s(vpFeatureThetaU::cdRc); - s.buildFrom(cdMc); // Initialization of the feature - - // Creation of the desired feature s*. By default this feature is - // initialized to zero - vpFeatureThetaU s_star(vpFeatureThetaU::cdRc); - - // Compute the interaction matrix L_s for the current ThetaU feature - vpMatrix L = s.interaction(); - - // Compute the error vector (s-s*) for the ThetaU feature - s.error(s_star); -} - \endcode - - -*/ + * \class vpFeatureThetaU + * \ingroup group_visual_features + * + * \brief Class that defines a 3D visual feature \f$ s\f$ from a \f$ \theta + * u \f$ axis/angle parametrization that represent the rotation between + * to frames. + * + * Let us denote \f$ \theta u = (\theta u_x, \theta u_y, \theta u_z)\f$ . + * + * It is convenient to consider two coordinate frames: the current + * camera frame \f$ {\cal{F}}_c \f$ and the desired camera frame \f$ + * {\cal{F}}_{c^*} \f$. + * + * Let \f$^{c^*}R_c \f$ be the rotation matrix that gives the + * orientation of the current camera frame relative to the desired camera + * frame. 
Let \f$ \theta u_{^{c^*}R_c} \f$ to corresponding axis/angle + * representation of this rotation. + * + * Furthermore, let \f$^{c}R_{c^*} \f$ the rotation matrix that gives the + * orientation of the desired camera frame relative to the current + * camera frame. Let \f$ \theta u_{^{c}R_{c^*}} \f$ to corresponding + * axis/angle representation of this rotation. + * + * This class can be used to manipulate two kind of visual features: + * + * - \f$ s = \theta u_{^{c^*}R_c} \f$ if the orientation of current + * camera frame relative to the desired frame has to be + * considered. The desired visual feature \f$ s^* \f$ is equal to + * zero. The corresponding error is than equal to \f$ e=(s-s^*) = + * \theta u_{^{c^*}R_c} \f$. In this case, the interaction matrix + * related to \f$ s \f$ is given by \f[ L = \left[ \begin{array}{cc} + * 0_3 & L_{\theta u} \end{array} \right] \f] with \f[ + * L_{\theta u} = I_3 + \frac{\theta}{2} \; [u]_\times + + * \left(1 - \frac{sinc \theta}{sinc^2 \frac{\theta}{2}}\right) + * [u]^2_\times \f] where \f$ 0_3 \f$ is a \f$ 3 \times 3 \f$ nul + * matrix, \f$ I_3 \f$ is the \f$3 \times 3\f$ identity matrix, and + * for more readability \f$ \theta \f$ and \f$ u \f$ respectively the + * angle and the axis coordinates of the \f$ \theta u_{^{c^*}R_c} \f$ + * representation. + * + * - \f$ s = \theta u_{^{c}R_{c^*}} \f$ if it is more the orientation + * of the desired camera frame relative to the current frame that has + * to be considered. The desired visual feature \f$ s^* \f$ is equal + * to zero. The corresponding error is than equal to \f$e=(s-s^*) = + * \theta u_{^{c}R_{c^*}} \f$. In this case, the interaction matrix + * related to \f$ s \f$ is given by \f[ L = \left[ \begin{array}{cc} + * 0_3 & L_{\theta u} \end{array} \right] \f] with \f[ + * L_{\theta u} = -I_3 + \frac{\theta}{2} \; [u]_\times + * - \left(1 - \frac{sinc \theta}{sinc^2 \frac{\theta}{2}}\right) + * [u]^2_\times \f] where \f$ 0_3 \f$ is a \f$ 3 \times 3 \f$ nul + * matrix, \f$ I_3 \f$ is the \f$3 \times 3\f$ identity matrix, and + * for more readability \f$ \theta \f$ and \f$ u \f$ respectively the + * angle and the axis coordinates of the \f$ \theta u_{^{c}R_{c^*}} + * \f$ representation. + * + * The kind of visual feature is to set during the construction of the + * vpFeatureThetaU() object by using the selector + * vpFeatureThetaU::vpFeatureThetaURotationRepresentationType. + * + * To initialize the feature \f$(\theta u_x, \theta u_y, \theta u_z)\f$ + * you may use vpFeatureThetaU member functions like set_TUx(), + * set_TUy(), set_TUz(), or also buildFrom() functions. + * + * Depending on the choice of the visual feature representation, the + * interaction() method allows to compute the interaction matrix \f$ + * L \f$ associated to the visual feature, while the error() + * method computes the error vector \f$(s - s^*)\f$ between the current + * visual feature and the desired one. + * + * To know more on the \f$ \theta u \f$ axis/angle representation for a + * 3D rotation see the vpThetaUVector class. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 3D \f$\theta u\f$ feature \f$(\theta u_x,\theta u_y, + * \theta u_z)\f$ that correspond to the 3D rotation between the + * current camera frame and the desired camera frame. To control six + * degrees of freedom, at least three other features must be considered + * like vpFeatureTranslation visual features. 
First we create a current + * (\f$s\f$) 3D \f$\theta u\f$ feature, than set the + * task to use the interaction matrix associated to the current feature + * \f$L_s\f$ and than compute the camera velocity \f$v=-\lambda \; + * L_s^+ \; (s-s^*)\f$. The current feature \f$s\f$ is updated in the + * while() loop while \f$s^*\f$ is considered as zero. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpHomogeneousMatrix cMcd; + * // ... cMcd need here to be initialized from for example a pose estimation. + * + * // Creation of the current feature s that correspond to the rotation + * // in angle/axis parametrization between the current camera frame + * // and the desired camera frame + * vpFeatureThetaU s(vpFeatureThetaU::cRcd); + * s.buildFrom(cMcd); // Initialization of the feature + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features s + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * // Add the 3D ThetaU feature to the task + * task.addFeature(s); // s* is here considered as zero + * + * // Control loop + * for ( ; ; ) { + * // ... cMcd need here to be initialized from for example a pose estimation. + * + * // Update the current ThetaU visual feature + * s.buildFrom(cMcd); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + * + * If you want to deal only with the \f$(\theta u_x,\theta u_y)\f$ subset + * feature from the 3D \f$\theta u\f$ , you have just to modify the + * addFeature() call in the previous example by the following line. In + * that case, the dimension of \f$s\f$ is two. + * + * \code + * // Add the (ThetaU_x, ThetaU_y) subset features from the 3D ThetaU + * // rotation to the task + * task.addFeature(s, vpFeatureThetaU::selectTUx() | vpFeatureThetaU::selectTUy()); + * \endcode + * + * If you want to build your own control law, this other example shows + * how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D + * \f$\theta u\f$ visual feature, compute the corresponding error + * vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpHomogeneousMatrix cdMc; + * // ... cdMc need here to be initialized from for example a pose estimation. + * + * // Creation of the current feature s + * vpFeatureThetaU s(vpFeatureThetaU::cdRc); + * s.buildFrom(cdMc); // Initialization of the feature + * + * // Creation of the desired feature s*. By default this feature is + * // initialized to zero + * vpFeatureThetaU s_star(vpFeatureThetaU::cdRc); + * + * // Compute the interaction matrix L_s for the current ThetaU feature + * vpMatrix L = s.interaction(); + * + * // Compute the error vector (s-s*) for the ThetaU feature + * s.error(s_star); + * } + * \endcode + */ class VISP_EXPORT vpFeatureThetaU : public vpBasicFeature { public: - typedef enum { + typedef enum + { TUx = 1, /*!< Select the subset \f$ \theta u_x \f$ visual feature from the \f$ \theta u\f$ angle/axis representation. */ TUy = 2, /*!< Select the subset \f$ \theta u_y \f$ visual feature @@ -228,7 +227,8 @@ class VISP_EXPORT vpFeatureThetaU : public vpBasicFeature TUz = 4 /*!< Select the subset \f$ \theta u_z \f$ visual feature from the \f$ \theta u\f$ angle/axis representation. 
*/ } vpFeatureThetaUType; - typedef enum { + typedef enum + { cdRc, /*!< Selector used to manipulate the visual feature \f$ s = \theta u_{^{c^*}R_c} \f$. This visual feature represent the orientation of the current camera frame @@ -250,26 +250,24 @@ class VISP_EXPORT vpFeatureThetaU : public vpBasicFeature vpFeatureThetaU(vpThetaUVector &tu, vpFeatureThetaURotationRepresentationType r); vpFeatureThetaU(vpRotationMatrix &R, vpFeatureThetaURotationRepresentationType r); vpFeatureThetaU(vpHomogeneousMatrix &M, vpFeatureThetaURotationRepresentationType r); - //! Destructor. Does nothing. - virtual ~vpFeatureThetaU() {} void buildFrom(vpThetaUVector &tu); // build from a rotation matrix void buildFrom(const vpRotationMatrix &R); - // build from an homogeneous matrix + // build from an homogeneous matrix void buildFrom(const vpHomogeneousMatrix &M); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; //! Feature duplication. - vpFeatureThetaU *duplicate() const; + vpFeatureThetaU *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; vpFeatureThetaURotationRepresentationType getFeatureThetaURotationType() const; @@ -278,11 +276,11 @@ class VISP_EXPORT vpFeatureThetaU : public vpBasicFeature double get_TUz() const; // Basic construction. - void init(); + void init() override; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void set_TUx(double tu_x); void set_TUy(double tu_y); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureTranslation.h b/modules/visual_features/include/visp3/visual_features/vpFeatureTranslation.h index d8d6ce9134..91ea77ca9b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureTranslation.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureTranslation.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * 3D translation visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeatureTranslation_H #define vpFeatureTranslation_H @@ -48,238 +46,236 @@ #include /*! - \class vpFeatureTranslation - \ingroup group_visual_features - - \brief Class that defines the translation visual feature - \f$s=(t_x,t_y,t_z)\f$. - - It is convenient to consider two coordinate frames noted here \f$ -{\cal{F}}_1 \f$ and \f$ - {\cal{F}}_{2} \f$. - - Let \f$^{{\cal{F}}_2}M_{{\cal{F}}_1} \f$ be the homogeneous matrix that -gives the orientation and the translation of the frame \f$ {\cal{F}}_1 \f$ -with respect to the frame \f$ {\cal{F}}_2 \f$. 
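// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the patch): the
// vpFeatureThetaU documentation above mentions the set_TUx(), set_TUy() and
// set_TUz() initialization path without showing it. A minimal sketch follows,
// assuming the usual ViSP 3 include paths; the subset selection reuses the
// selectTUx()/selectTUy() selectors already used in that documentation.
// ---------------------------------------------------------------------------
#include <iostream>

#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpMatrix.h>
#include <visp3/core/vpThetaUVector.h>
#include <visp3/visual_features/vpFeatureThetaU.h>

int main()
{
  // Displacement between the desired and the current camera frame; in a real
  // application it comes from a pose estimation
  vpHomogeneousMatrix cdMc;
  cdMc.buildFrom(0, 0, 0, 0, vpMath::rad(10), vpMath::rad(-5));

  // Extract the theta-u representation of the rotation cdRc
  vpThetaUVector tu(cdMc.getRotationMatrix());

  // Initialize the feature component by component instead of using buildFrom()
  vpFeatureThetaU s(vpFeatureThetaU::cdRc);
  s.set_TUx(tu[0]);
  s.set_TUy(tu[1]);
  s.set_TUz(tu[2]);

  // Interaction matrix restricted to (theta-u_x, theta-u_y): a 2-by-6 matrix
  vpMatrix L_xy = s.interaction(vpFeatureThetaU::selectTUx() | vpFeatureThetaU::selectTUy());
  std::cout << "L_xy:\n" << L_xy << std::endl;

  return 0;
}
// ---------------------------------------------------------------------------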
- - \f[ - ^{{\cal{F}}_2}M_{{\cal{F}}_1} = \left(\begin{array}{cc} - ^{{\cal{F}}_2}R_{{\cal{F}}_1} & ^{{\cal{F}}_2}t_{{\cal{F}}_1} \\ - {\bf 0}_{1\times 3} & 1 - \end{array} - \right) - \f] - - with \f$^{{\cal{F}}_2}R_{{\cal{F}}_1} \f$ the rotation matrix that gives the -orientation of the frame \f$ {\cal{F}}_1 \f$ relative to the frame \f$ -{\cal{F}}_2 \f$ and \f$^{{\cal{F}}_2}t_{{\cal{F}}_1} \f$ the translation -vector that gives the position of the frame \f$ {\cal{F}}_1 \f$ relative to -the frame \f$ {\cal{F}}_2 \f$. To know more about homogeneous matrices see -vpHomogeneousMatrix documentation. - - This class can be used to manipulate three kind of visual features: - - - This class can be used to manipulate the translation visual feature - \f$s= ^{c^*}t_c\f$ which gives the position of - the current camera frame relative to the desired camera frame. It is -composed by the three components \f$(t_x,t_y,t_z)\f$. The desired visual -feature \f$ s^* \f$ is equal to zero. The corresponding error is than equal to -\f$ e=(s-s^*) = ^{c^*}t_c \f$. In this case, the interaction matrix related to -\f$ s \f$ is given by \f[ L = [ - ^{c^*}R_c \;\; 0_3] \f] - - - This class can also be used to manipulate the translation visual feature - \f$s= ^{c}t_{c^*}\f$ which gives the position of - the desired camera frame relative to the current camera frame. It is -composed by the three components \f$(t_x,t_y,t_z)\f$. The desired visual -feature \f$ s^* \f$ is equal to zero. The corresponding error is than equal to -\f$ e=(s-s^*) = ^{c}t_{c^*} \f$. In this case, the interaction matrix related -to \f$ s \f$ is given by \f[ L = [ -I_3 \;\; [^{c}t_{c^*}]_\times] \f] - - - Actually, this class can also be used to manipulate the - translation visual feature \f$s= ^{c}t_o\f$ which gives the position - of the object frame relative to the current camera frame. It is - composed by the three components \f$(t_x,t_y,t_z)\f$ too. The - desired visual feature \f$ s^* \f$ is the translation visual feature - \f$s^*= ^{c^*}t_o\f$ which gives the position of the object frame - relative to the desired camera frame. The corresponding error is - than equal to \f$ e=(s-s^*) = ^{c}t_o - ^{c^*}t_o \f$. In this case, - the interaction matrix related to \f$ s \f$ is given by \f[ L = [ - -I_3 \;\; [^{c}t_o]_\times] \f] - - To initialize the feature \f$(t_x, t_y, t_z)\f$ you may use member - functions like set_Tx(), set_Ty(), set_Tz(), or also buildFrom() - functions. - - The interaction() method allows to compute the interaction matrix - \f$ L\f$ associated to the translation visual feature, while the - error() method computes the error vector \f$(s - s^*)\f$ between the - current visual feature and the desired one. - - The code below shows how to create a eye-in hand visual servoing - task using a 3D translation feature \f$(t_x,t_y,t_z)\f$ that - correspond to the 3D translation between the desired camera frame - and the current camera frame. To control six degrees of freedom, at - least three other features must be considered like vpFeatureThetaU - visual features. First we create a current (\f$s\f$) and desired - (\f$s^*\f$) 3D translation feature, set the task to use the - interaction matrix associated to the current feature \f$L_s\f$ and - than compute the camera velocity \f$v=-\lambda \; L_s^+ \; - (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() loop - while \f$s^*\f$ is set to zero. 
- - \code -#include -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpHomogeneousMatrix cdMc; - // ... cdMc need here to be initialized from for example a pose estimation. - - // Creation of the current visual feature s - vpFeatureTranslation s(vpFeatureTranslation::cdMc); - s.buildFrom(cdMc); // Initialization of the current feature s=(tx,ty,tz) - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features s - task.setInteractionMatrixType(vpServo::CURRENT); - // Set the constant gain - double lambda = 0.8; - task.setLambda(lambda); - - // Add the 3D translation feature to the task - task.addFeature(s); // s* is here considered as zero - - // Control loop - for ( ; ; ) { - // ... cdMc need here to be initialized from for example a pose estimation. - - // Update the current 3D translation visual feature - s.buildFrom(cdMc); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } -} - \endcode - - If you want to deal only with the \f$(t_x,t_y)\f$ subset feature from the 3D - translation, you have just to modify the addFeature() call in - the previous example by the following line. In that case, the dimension - of \f$s\f$ is two. - - \code - // Add the (tx,ty) subset features from 3D translation to the task - task.addFeature(s, vpFeatureTranslation::selectTx() | vpFeatureTranslation::selectTy()); - \endcode - - If you want to build your own control law, this other example shows - how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D - translation visual feature, compute the corresponding error - vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. - - \code -#include -#include -#include - -int main() -{ - vpHomogeneousMatrix cdMc; - // ... cdMc need here to be initialized from for example a pose estimation. - - // Creation of the current feature s - vpFeatureTranslation s(vpFeatureTranslation::cdMc); - s.buildFrom(cdMc); // Initialization of the feature - - // Creation of the desired feature s*. By default this feature is - // initialized to zero - vpFeatureTranslation s_star(vpFeatureTranslation::cdMc); - - // Compute the interaction matrix for the translation feature - vpMatrix L = s.interaction(); - - // Compute the error vector (s-s*) for the translation feature - vpColVector e = s.error(s_star); // e = (s-s*) -} - \endcode - - The code below shows how to create an eye-in hand visual servoing - task using a 3D translation feature \f$(t_x,t_y,t_z)\f$ that - correspond to the 3D translation between the current camera frame - and the object frame. Like with the previous examples, to - control six degrees of freedom, at least three other features must be - considered like vpFeatureThetaU visual features. The way to initialize - the visual features is quite the same as before. The difference is that - the cMo method must be precised and the desired feature is note - necessary equal to zero. - - \code -#include -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpHomogeneousMatrix cdMo; - // ... cdMo need here to be initialized from for example a pose estimation. - - // Creation of the desired visual feature s* - vpFeatureTranslation s_star(vpFeatureTranslation::cMo); - s_star.buildFrom(cdMo); // Initialization of the desired feature s*=(tx*,ty*,tz*) - - vpHomogeneousMatrix cMo; - // ... 
cMo need here to be computed. - - // Creation of the current visual feature s - vpFeatureTranslation s(vpFeatureTranslation::cMo); - s.buildFrom(cMo); // Initialization of the current feature s=(tx,ty,tz) - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features s - task.setInteractionMatrixType(vpServo::CURRENT); - // Set the constant gain - double lambda = 0.8; - task.setLambda(lambda); - - // Add the 3D translation feature to the task - task.addFeature(s, s_star); // s* is here considered as zero - - // Control loop - for ( ; ; ) { - // ... cMo need here to be computed from for example a pose estimation. - - // Update the current 3D translation visual feature - s.buildFrom(cMo); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } -} - \endcode - -*/ + * \class vpFeatureTranslation + * \ingroup group_visual_features + * + * \brief Class that defines the translation visual feature + * \f$s=(t_x,t_y,t_z)\f$. + * + * It is convenient to consider two coordinate frames noted here \f$ + * {\cal{F}}_1 \f$ and \f$ {\cal{F}}_{2} \f$. + * + * Let \f$^{{\cal{F}}_2}M_{{\cal{F}}_1} \f$ be the homogeneous matrix that + * gives the orientation and the translation of the frame \f$ {\cal{F}}_1 \f$ + * with respect to the frame \f$ {\cal{F}}_2 \f$. + * + * \f[ + * ^{{\cal{F}}_2}M_{{\cal{F}}_1} = \left(\begin{array}{cc} + * ^{{\cal{F}}_2}R_{{\cal{F}}_1} & ^{{\cal{F}}_2}t_{{\cal{F}}_1} \\ + * {\bf 0}_{1\times 3} & 1 + * \end{array} + * \right) + * \f] + * + * with \f$^{{\cal{F}}_2}R_{{\cal{F}}_1} \f$ the rotation matrix that gives the + * orientation of the frame \f$ {\cal{F}}_1 \f$ relative to the frame \f$ + * {\cal{F}}_2 \f$ and \f$^{{\cal{F}}_2}t_{{\cal{F}}_1} \f$ the translation + * vector that gives the position of the frame \f$ {\cal{F}}_1 \f$ relative to + * the frame \f$ {\cal{F}}_2 \f$. To know more about homogeneous matrices see + * vpHomogeneousMatrix documentation. + * + * This class can be used to manipulate three kind of visual features: + * + * - This class can be used to manipulate the translation visual feature + * \f$s= ^{c^*}t_c\f$ which gives the position of + * the current camera frame relative to the desired camera frame. It is + * composed by the three components \f$(t_x,t_y,t_z)\f$. The desired visual + * feature \f$ s^* \f$ is equal to zero. The corresponding error is than equal to + * \f$ e=(s-s^*) = ^{c^*}t_c \f$. In this case, the interaction matrix related to + * \f$ s \f$ is given by \f[ L = [ ^{c^*}R_c \;\; 0_3] \f] + * + * - This class can also be used to manipulate the translation visual feature + * \f$s= ^{c}t_{c^*}\f$ which gives the position of + * the desired camera frame relative to the current camera frame. It is + * composed by the three components \f$(t_x,t_y,t_z)\f$. The desired visual + * feature \f$ s^* \f$ is equal to zero. The corresponding error is than equal to + * \f$ e=(s-s^*) = ^{c}t_{c^*} \f$. In this case, the interaction matrix related + * to \f$ s \f$ is given by \f[ L = [ -I_3 \;\; [^{c}t_{c^*}]_\times] \f] + * + * - Actually, this class can also be used to manipulate the + * translation visual feature \f$s= ^{c}t_o\f$ which gives the position + * of the object frame relative to the current camera frame. It is + * composed by the three components \f$(t_x,t_y,t_z)\f$ too. 
The + * desired visual feature \f$ s^* \f$ is the translation visual feature + * \f$s^*= ^{c^*}t_o\f$ which gives the position of the object frame + * relative to the desired camera frame. The corresponding error is + * than equal to \f$ e=(s-s^*) = ^{c}t_o - ^{c^*}t_o \f$. In this case, + * the interaction matrix related to \f$ s \f$ is given by \f[ L = [ + * -I_3 \;\; [^{c}t_o]_\times] \f] + * + * To initialize the feature \f$(t_x, t_y, t_z)\f$ you may use member + * functions like set_Tx(), set_Ty(), set_Tz(), or also buildFrom() + * functions. + * + * The interaction() method allows to compute the interaction matrix + * \f$ L\f$ associated to the translation visual feature, while the + * error() method computes the error vector \f$(s - s^*)\f$ between the + * current visual feature and the desired one. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 3D translation feature \f$(t_x,t_y,t_z)\f$ that + * correspond to the 3D translation between the desired camera frame + * and the current camera frame. To control six degrees of freedom, at + * least three other features must be considered like vpFeatureThetaU + * visual features. First we create a current (\f$s\f$) and desired + * (\f$s^*\f$) 3D translation feature, set the task to use the + * interaction matrix associated to the current feature \f$L_s\f$ and + * than compute the camera velocity \f$v=-\lambda \; L_s^+ \; + * (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() loop + * while \f$s^*\f$ is set to zero. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpHomogeneousMatrix cdMc; + * // ... cdMc need here to be initialized from for example a pose estimation. + * + * // Creation of the current visual feature s + * vpFeatureTranslation s(vpFeatureTranslation::cdMc); + * s.buildFrom(cdMc); // Initialization of the current feature s=(tx,ty,tz) + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features s + * task.setInteractionMatrixType(vpServo::CURRENT); + * // Set the constant gain + * double lambda = 0.8; + * task.setLambda(lambda); + * + * // Add the 3D translation feature to the task + * task.addFeature(s); // s* is here considered as zero + * + * // Control loop + * for ( ; ; ) { + * // ... cdMc need here to be initialized from for example a pose estimation. + * + * // Update the current 3D translation visual feature + * s.buildFrom(cdMc); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + * + * If you want to deal only with the \f$(t_x,t_y)\f$ subset feature from the 3D + * translation, you have just to modify the addFeature() call in + * the previous example by the following line. In that case, the dimension + * of \f$s\f$ is two. + * + * \code + * // Add the (tx,ty) subset features from 3D translation to the task + * task.addFeature(s, vpFeatureTranslation::selectTx() | vpFeatureTranslation::selectTy()); + * \endcode + * + * If you want to build your own control law, this other example shows + * how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D + * translation visual feature, compute the corresponding error + * vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. 
+ * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpHomogeneousMatrix cdMc; + * // ... cdMc need here to be initialized from for example a pose estimation. + * + * // Creation of the current feature s + * vpFeatureTranslation s(vpFeatureTranslation::cdMc); + * s.buildFrom(cdMc); // Initialization of the feature + * + * // Creation of the desired feature s*. By default this feature is + * // initialized to zero + * vpFeatureTranslation s_star(vpFeatureTranslation::cdMc); + * + * // Compute the interaction matrix for the translation feature + * vpMatrix L = s.interaction(); + * + * // Compute the error vector (s-s*) for the translation feature + * vpColVector e = s.error(s_star); // e = (s-s*) + * } + * \endcode + * + * The code below shows how to create an eye-in hand visual servoing + * task using a 3D translation feature \f$(t_x,t_y,t_z)\f$ that + * correspond to the 3D translation between the current camera frame + * and the object frame. Like with the previous examples, to + * control six degrees of freedom, at least three other features must be + * considered like vpFeatureThetaU visual features. The way to initialize + * the visual features is quite the same as before. The difference is that + * the cMo method must be precised and the desired feature is note + * necessary equal to zero. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpHomogeneousMatrix cdMo; + * // ... cdMo need here to be initialized from for example a pose estimation. + * + * // Creation of the desired visual feature s* + * vpFeatureTranslation s_star(vpFeatureTranslation::cMo); + * s_star.buildFrom(cdMo); // Initialization of the desired feature s*=(tx*,ty*,tz*) + * + * vpHomogeneousMatrix cMo; + * // ... cMo need here to be computed. + * + * // Creation of the current visual feature s + * vpFeatureTranslation s(vpFeatureTranslation::cMo); + * s.buildFrom(cMo); // Initialization of the current feature s=(tx,ty,tz) + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features s + * task.setInteractionMatrixType(vpServo::CURRENT); + * // Set the constant gain + * double lambda = 0.8; + * task.setLambda(lambda); + * + * // Add the 3D translation feature to the task + * task.addFeature(s, s_star); // s* is here considered as zero + * + * // Control loop + * for ( ; ; ) { + * // ... cMo need here to be computed from for example a pose estimation. + * + * // Update the current 3D translation visual feature + * s.buildFrom(cMo); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + */ class VISP_EXPORT vpFeatureTranslation : public vpBasicFeature { public: /*! - \enum vpFeatureTranslationRepresentationType - Kind of implemented 3D translation feature. + * \enum vpFeatureTranslationRepresentationType + * Kind of implemented 3D translation feature. */ - typedef enum { - /*! Selector used to manipulate the visual feature \f$s= - ^{c^*}t_c\f$ which gives the position of the current camera frame - relative to the desired camera frame.*/ + typedef enum + { +/*! Selector used to manipulate the visual feature \f$s= + ^{c^*}t_c\f$ which gives the position of the current camera frame + relative to the desired camera frame.*/ cdMc, /*! 
Selector used to manipulate the visual feature \f$s= ^{c}t_{c^*}\f$ which gives the position of the desired camera frame @@ -298,24 +294,22 @@ class VISP_EXPORT vpFeatureTranslation : public vpBasicFeature // constructor : build from an homogeneous matrix // cdMc is the displacement that the camera has to realize vpFeatureTranslation(vpHomogeneousMatrix &f2Mf1, vpFeatureTranslationRepresentationType r); - //! Destructor. Does nothing. - virtual ~vpFeatureTranslation() {} // build from an homogeneous matrix // cdMc is the displacement that the camera has to realize void buildFrom(const vpHomogeneousMatrix &f2Mf1); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; //! Feature duplication - vpFeatureTranslation *duplicate() const; + vpFeatureTranslation *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; vpFeatureTranslationRepresentationType getFeatureTranslationType() const; @@ -324,12 +318,12 @@ class VISP_EXPORT vpFeatureTranslation : public vpBasicFeature double get_Tz() const; // basic construction - void init(); + void init() override; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; // print the name of the feature - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void set_Tx(double t_x); void set_Ty(double t_y); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureVanishingPoint.h b/modules/visual_features/include/visp3/visual_features/vpFeatureVanishingPoint.h index 491c7a53d1..e4da3cb7cb 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureVanishingPoint.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureVanishingPoint.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,19 +29,15 @@ * * Description: * 2D vanishing point visual feature (Z coordinate in 3D space is infinity) - * - * Authors: - * Odile Bourquardez - * -*****************************************************************************/ + */ #ifndef vpFeatureVanishingPoint_H #define vpFeatureVanishingPoint_H /*! - \file vpFeatureVanishingPoint.h \brief Class that defines 2D vanishing - point visual feature (Z coordinate in 3D space is infinity) -*/ + * \file vpFeatureVanishingPoint.h \brief Class that defines 2D vanishing + * point visual feature (Z coordinate in 3D space is infinity) + */ #include #include @@ -52,26 +47,26 @@ #include /*! - \class vpFeatureVanishingPoint - \ingroup group_visual_features - - Class that defines 2D vanishing point visual features. 
Various features can be considered: - - - Either the cartesian coordinates \f$ (x, y) \f$ of the vanishing point obtained from the intersection of two lines; - in that case \f$ {\bf s} = (x, y) \f$ and the corresponding interaction matrices are: - \f[ L_x = \left[ \begin{array}{cccccc} 0 & 0 & 0 & x y & -(1 + x^2) & y \end{array} \right] \f] - \f[ L_y = \left[ \begin{array}{cccccc} 0 & 0 & 0 & 1 + y * y & -xy & -x \end{array} \right] \f] - - - Rather features function of the polar coordinates of the vanishing point obtained themselves from the polar - coordinates of the two lines \f$(\rho_1, \theta_1)\f$ and \f$(\rho_2, \theta_2)\f$; in that case \f$ {\bf s} = - (\arctan(1/\rho), 1/\rho, \alpha) \f$ with: \f[ 1/\rho = \frac{\sin(\theta_1 - \theta_2)}{\sqrt{\rho_1^2 + \rho_2^2 - - 2 \rho_1 \rho_2 cos(\theta_1 - \theta_2)}} \f] \f[ \alpha = \frac{\rho_1 \cos \theta_2 - \rho_2 cos - \theta_1}{\sqrt{\rho_1^2 + \rho_2^2 - 2 \rho_1 \rho_2 cos(\theta_1 - \theta_2)}} \f] The corresponding interaction - matrices are: \f[ L_{\arctan(\frac{1}{\rho})} = \left[ \begin{array}{cccccc} 0 & 0 & 0 & - \sin \alpha & \cos \alpha & - 0 \end{array} \right] \f] \f[ L_{\frac{1}{\rho}} = \left[ \begin{array}{cccccc} 0 & 0 & 0 & -(1 + \frac{1}{\rho^2}) - \sin \alpha & (1 + \frac{1}{\rho^2}) \cos \alpha & 0 \end{array} \right] \f] \f[ L_{\alpha} = \left[ - \begin{array}{cccccc} 0 & 0 & 0 & \frac{\cos \alpha}{\rho} & \frac{\sin \alpha}{\rho} & -1 \end{array} \right] \f] -*/ + * \class vpFeatureVanishingPoint + * \ingroup group_visual_features + * + * Class that defines 2D vanishing point visual features. Various features can be considered: + + * - Either the cartesian coordinates \f$ (x, y) \f$ of the vanishing point obtained from the intersection of two lines; + * in that case \f$ {\bf s} = (x, y) \f$ and the corresponding interaction matrices are: + * \f[ L_x = \left[ \begin{array}{cccccc} 0 & 0 & 0 & x y & -(1 + x^2) & y \end{array} \right] \f] + * \f[ L_y = \left[ \begin{array}{cccccc} 0 & 0 & 0 & 1 + y * y & -xy & -x \end{array} \right] \f] + * + * - Rather features function of the polar coordinates of the vanishing point obtained themselves from the polar + * coordinates of the two lines \f$(\rho_1, \theta_1)\f$ and \f$(\rho_2, \theta_2)\f$; in that case \f$ {\bf s} = + * (\arctan(1/\rho), 1/\rho, \alpha) \f$ with: \f[ 1/\rho = \frac{\sin(\theta_1 - \theta_2)}{\sqrt{\rho_1^2 + \rho_2^2 - + * 2 \rho_1 \rho_2 cos(\theta_1 - \theta_2)}} \f] \f[ \alpha = \frac{\rho_1 \cos \theta_2 - \rho_2 cos + * \theta_1}{\sqrt{\rho_1^2 + \rho_2^2 - 2 \rho_1 \rho_2 cos(\theta_1 - \theta_2)}} \f] The corresponding interaction + * matrices are: \f[ L_{\arctan(\frac{1}{\rho})} = \left[ \begin{array}{cccccc} 0 & 0 & 0 & - \sin \alpha & \cos \alpha & + * 0 \end{array} \right] \f] \f[ L_{\frac{1}{\rho}} = \left[ \begin{array}{cccccc} 0 & 0 & 0 & -(1 + \frac{1}{\rho^2}) + * \sin \alpha & (1 + \frac{1}{\rho^2}) \cos \alpha & 0 \end{array} \right] \f] \f[ L_{\alpha} = \left[ + * \begin{array}{cccccc} 0 & 0 & 0 & \frac{\cos \alpha}{\rho} & \frac{\sin \alpha}{\rho} & -1 \end{array} \right] \f] + */ class VISP_EXPORT vpFeatureVanishingPoint : public vpBasicFeature { public: @@ -83,19 +78,17 @@ class VISP_EXPORT vpFeatureVanishingPoint : public vpBasicFeature public: vpFeatureVanishingPoint(); - //! Destructor. 
- virtual ~vpFeatureVanishingPoint() {} void buildFrom(double x, double y); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; - vpFeatureVanishingPoint *duplicate() const; + vpFeatureVanishingPoint *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = (selectX() | selectY())); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = (selectX() | selectY())) override; double get_x() const; double get_y() const; @@ -103,10 +96,10 @@ class VISP_EXPORT vpFeatureVanishingPoint : public vpBasicFeature double getOneOverRho() const; double getAlpha() const; - void init(); - vpMatrix interaction(unsigned int select = (selectX() | selectY())); + void init() override; + vpMatrix interaction(unsigned int select = (selectX() | selectY())) override; - void print(unsigned int select = (selectX() | selectY())) const; + void print(unsigned int select = (selectX() | selectY())) const override; void set_x(double x); void set_y(double y); diff --git a/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h b/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h index 9432823071..0af2b86297 100644 --- a/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h +++ b/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Generic feature (used to create new feature not implemented in ViSP). - * -*****************************************************************************/ + */ #ifndef vpGenericFeature_hh #define vpGenericFeature_hh /*! - \file vpGenericFeature.h - \brief class that defines what is a generic feature (used to create new - feature not implemented in ViSP2 + * \file vpGenericFeature.h + * \brief class that defines what is a generic feature (used to create new + * feature not implemented in ViSP2 */ #include @@ -49,132 +47,130 @@ #include /*! - \class vpGenericFeature - \ingroup group_core_features - - \brief Class that enables to define a feature or a set of features which are -not implemented in ViSP as a specific class. It is indeed possible to create -its own features, to use the corresponding interaction matrix, and to compute -an error between the current and the desired feature. Moreover the created -features can be mixed with features already implemented. - - The following example shows how to use the vpGenericFeature class to create -and use the feature \f$ log(Z) \f$ where Z corresponds to the depth of a point -whose 2D coordinates in the camera frame are \f$ x \f$ and \f$ y \f$. The -interaction matrix corresponding to this feature is \f[ L = -\left[\begin{array}{cccccc} 0 & 0 & -1/Z & -y & x & 0 \end{array}\right]\f]. - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - //First we have to define the desired feature log(Z*) corresponding to the desired point. - double xd = 0; //The x coordinate of the desired point. 
- double yd = 0; //The y coordinate of the desired point. - double Zd = 1; //The depth of the desired point. - vpGenericFeature logZd(1); //The dimension of the feature is 1. - logZd.set_s( log(Zd) ); - - //Then we have to define the current feature log(Z) corresponding to the current point. - double x = 1; //The x coordinate of the current point. - double y = 1; //The y coordinate of the current point. - double Z = 2; //The depth of the current point. - vpGenericFeature logZ(1); //The dimension of the feature is 1. - logZ.set_s( log(Z) ); - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features sd - task.setInteractionMatrixType(vpServo::CURRENT); - - // Add the point feature to the task - task.addFeature(logZ, logZd); - - // Control loop - for ( ; ; ) { - // The new parameters x, y and Z must be computed here. - - // Update the current point visual feature - logZ.set_s( log(Z) ) ; - - // We have to compute the interaction matrix corresponding to the feature. - vpMatrix LlogZ(1,6) ; - LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ; - LlogZ[0][2] = -1/Z; - LlogZ[0][3] = -y; - LlogZ[0][4] = x; - logZ.setInteractionMatrix(LlogZ) ; - - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - -The second example shows how to create and use a feature whose specificity is -to have a desired feature fixed to zero. It is the case for the feature \f$ -log( \frac{Z}{Z^*}) \f$. - - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - //First we have to define the desired feature log(Z*) corresponding to the desired point. - double xd = 0; //The x coordinate of the desired point. - double yd = 0; //The y coordinate of the desired point. - double Zd = 1; //The depth of the desired point. - - //Then we have to define the current feature log(Z) corresponding to the current point. - double x = 1; //The x coordinate of the current point. - double y = 1; //The y coordinate of the current point. - double Z = 2; //The depth of the current point. - vpGenericFeature logZ(1); //The dimension of the feature is 1. - logZ.set_s( log(Z/Zd) ); - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features sd - task.setInteractionMatrixType(vpServo::CURRENT); - - // Add the point feature to the task - task.addFeature(logZ); - - // Control loop - for ( ; ; ) { - // The new parameters x, y and Z must be computed here. - - // Update the current point visual feature - logZ.set_s( log(Z/Zd) ) ; - - // We have to compute the interaction matrix corresponding to the feature. - vpMatrix LlogZ(1,6) ; - LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ; - LlogZ[0][2] = -1/Z; - LlogZ[0][3] = -y; - LlogZ[0][4] = x; - logZ.setInteractionMatrix(LlogZ) ; - - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - -If the feature needs to be use with other features, the example -servoSimuPoint2DhalfCamVelocity2.cpp shows how to do it. + * \class vpGenericFeature + * \ingroup group_core_features + * + * \brief Class that enables to define a feature or a set of features which are + * not implemented in ViSP as a specific class. 
It is indeed possible to create + * its own features, to use the corresponding interaction matrix, and to compute + * an error between the current and the desired feature. Moreover the created + * features can be mixed with features already implemented. + * + * The following example shows how to use the vpGenericFeature class to create + * and use the feature \f$ log(Z) \f$ where Z corresponds to the depth of a point + * whose 2D coordinates in the camera frame are \f$ x \f$ and \f$ y \f$. The + * interaction matrix corresponding to this feature is \f[ L = + * \left[\begin{array}{cccccc} 0 & 0 & -1/Z & -y & x & 0 \end{array}\right]\f]. + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * //First we have to define the desired feature log(Z*) corresponding to the desired point. + * double xd = 0; //The x coordinate of the desired point. + * double yd = 0; //The y coordinate of the desired point. + * double Zd = 1; //The depth of the desired point. + * vpGenericFeature logZd(1); //The dimension of the feature is 1. + * logZd.set_s( log(Zd) ); + * + * //Then we have to define the current feature log(Z) corresponding to the current point. + * double x = 1; //The x coordinate of the current point. + * double y = 1; //The y coordinate of the current point. + * double Z = 2; //The depth of the current point. + * vpGenericFeature logZ(1); //The dimension of the feature is 1. + * logZ.set_s( log(Z) ); + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features sd + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * // Add the point feature to the task + * task.addFeature(logZ, logZd); + * + * // Control loop + * for ( ; ; ) { + * // The new parameters x, y and Z must be computed here. + * + * // Update the current point visual feature + * logZ.set_s( log(Z) ) ; + * + * // We have to compute the interaction matrix corresponding to the feature. + * vpMatrix LlogZ(1,6) ; + * LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ; + * LlogZ[0][2] = -1/Z; + * LlogZ[0][3] = -y; + * LlogZ[0][4] = x; + * logZ.setInteractionMatrix(LlogZ) ; + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * The second example shows how to create and use a feature whose specificity is + * to have a desired feature fixed to zero. It is the case for the feature \f$ + * log( \frac{Z}{Z^*}) \f$. + * + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * //First we have to define the desired feature log(Z*) corresponding to the desired point. + * double xd = 0; //The x coordinate of the desired point. + * double yd = 0; //The y coordinate of the desired point. + * double Zd = 1; //The depth of the desired point. + * + * //Then we have to define the current feature log(Z) corresponding to the current point. + * double x = 1; //The x coordinate of the current point. + * double y = 1; //The y coordinate of the current point. + * double Z = 2; //The depth of the current point. + * vpGenericFeature logZ(1); //The dimension of the feature is 1. + * logZ.set_s( log(Z/Zd) ); + * + * // Set eye-in-hand control law. 
+ * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features sd + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * // Add the point feature to the task + * task.addFeature(logZ); + * + * // Control loop + * for ( ; ; ) { + * // The new parameters x, y and Z must be computed here. + * + * // Update the current point visual feature + * logZ.set_s( log(Z/Zd) ) ; + * + * // We have to compute the interaction matrix corresponding to the feature. + * vpMatrix LlogZ(1,6) ; + * LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ; + * LlogZ[0][2] = -1/Z; + * LlogZ[0][3] = -y; + * LlogZ[0][4] = x; + * logZ.setInteractionMatrix(LlogZ) ; + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * If the feature needs to be use with other features, the example + * servoSimuPoint2DhalfCamVelocity2.cpp shows how to do it. */ class VISP_EXPORT vpGenericFeature : public vpBasicFeature { @@ -183,16 +179,15 @@ class VISP_EXPORT vpGenericFeature : public vpBasicFeature public: explicit vpGenericFeature(unsigned int dim); - virtual ~vpGenericFeature(); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; - vpGenericFeature *duplicate() const; + vpGenericFeature *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; vpColVector error(unsigned int select = FEATURE_ALL); @@ -202,11 +197,11 @@ class VISP_EXPORT vpGenericFeature : public vpBasicFeature void get_s(double &s0, double &s1) const; void get_s(double &s0, double &s1, double &s2) const; - void init(); + void init() override; - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void setInteractionMatrix(const vpMatrix &L); void setError(const vpColVector &error_vector); void set_s(const vpColVector &s); diff --git a/modules/visual_features/src/feature-builder/vpFeatureBuilderSegment.cpp b/modules/visual_features/src/feature-builder/vpFeatureBuilderSegment.cpp index c59c667ecc..9f9d8f2e1c 100644 --- a/modules/visual_features/src/feature-builder/vpFeatureBuilderSegment.cpp +++ b/modules/visual_features/src/feature-builder/vpFeatureBuilderSegment.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Segment creation out of dots. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ /*! - \file vpFeatureBuilderSegment.cpp - \brief Segment creation out of dots. -*/ + * \file vpFeatureBuilderSegment.cpp + * \brief Segment creation out of dots. + */ #include #include @@ -47,15 +42,13 @@ #ifdef VISP_HAVE_MODULE_BLOB /*! 
- Initialize a segment feature out of vpDots and camera parameters. - - \param s : Visual feature to initialize. - \param cam : The parameters of the camera used to acquire the image - containing the point. \param d1 : The dot corresponding to the first point - of the segment. \param d2 : The dot corresponding to the second point of the - segment. - -*/ + * Initialize a segment feature out of vpDots and camera parameters. + * + * \param s : Visual feature to initialize. + * \param cam : The parameters of the camera used to acquire the image containing the point. + * \param d1 : The dot corresponding to the first point of the segment. + * \param d2 : The dot corresponding to the second point of the segment. + */ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam, const vpDot &d1, const vpDot &d2) { double x1 = 0, y1 = 0, x2 = 0, y2 = 0; @@ -71,7 +64,8 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam s.setXc(xc / l); s.setYc(yc / l); s.setL(1 / l); - } else { + } + else { s.setXc(xc); s.setYc(yc); s.setL(l); @@ -81,15 +75,13 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam } /*! - Initialize a segment feature out of vpDots and camera parameters. - - \param s : Visual feature to initialize. - \param cam : The parameters of the camera used to acquire the image - containing the point. \param d1 : The dot corresponding to the first point - of the segment. \param d2 : The dot corresponding to the second point of the - segment. - -*/ + * Initialize a segment feature out of vpDots and camera parameters. + * + * \param s : Visual feature to initialize. + * \param cam : The parameters of the camera used to acquire the image containing the point. + * \param d1 : The dot corresponding to the first point of the segment. + * \param d2 : The dot corresponding to the second point of the segment. + */ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam, const vpDot2 &d1, const vpDot2 &d2) { double x1 = 0, y1 = 0, x2 = 0, y2 = 0; @@ -105,7 +97,8 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam s.setXc(xc / l); s.setYc(yc / l); s.setL(1 / l); - } else { + } + else { s.setXc(xc); s.setYc(yc); s.setL(l); @@ -116,15 +109,13 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam #endif //#ifdef VISP_HAVE_MODULE_BLOB /*! - Initialize a segment feature out of image points and camera parameters. - - \param s : Visual feature to initialize. - \param cam : The parameters of the camera used to acquire the image - containing the point. \param ip1 : The image point corresponding to the - first point of the segment. \param ip2 : The image point corresponding to - the second point of the segment. - -*/ + * Initialize a segment feature out of image points and camera parameters. + * + * \param s : Visual feature to initialize. + * \param cam : The parameters of the camera used to acquire the image containing the point. + * \param ip1 : The image point corresponding to the first point of the segment. + * \param ip2 : The image point corresponding to the second point of the segment. 
+ */ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam, const vpImagePoint &ip1, const vpImagePoint &ip2) { @@ -141,7 +132,8 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam s.setXc(xc / l); s.setYc(yc / l); s.setL(1 / l); - } else { + } + else { s.setXc(xc); s.setYc(yc); s.setL(l); @@ -151,15 +143,13 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam } /*! - - Build a segment visual feature from two points. - - \param s : Visual feature to initialize. - \param P1, P2 : Two points defining the segment. These points must contain - the 3D coordinates in the camera frame (cP) and the projected coordinates in - the image plane (p). - -*/ + * Build a segment visual feature from two points. + * + * \param s : Visual feature to initialize. + * \param P1, P2 : Two points defining the segment. These points must contain + * the 3D coordinates in the camera frame (cP) and the projected coordinates in + * the image plane (p). + */ void vpFeatureBuilder::create(vpFeatureSegment &s, vpPoint &P1, vpPoint &P2) { double x1 = P1.get_x(); diff --git a/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp b/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp index 6d64283a03..5f7e9515af 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Base for all moment features - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -51,8 +46,8 @@ class vpBasicFeature; /*! - Initialize common parameters for moment features. -*/ + * Initialize common parameters for moment features. + */ void vpFeatureMoment::init() { // feature dimension @@ -80,8 +75,8 @@ void vpFeatureMoment::init() } /*! - Feature's dimension according to selection. -*/ + * Feature's dimension according to selection. + */ int vpFeatureMoment::getDimension(unsigned int select) const { int dim = 0; @@ -94,8 +89,8 @@ int vpFeatureMoment::getDimension(unsigned int select) const } /*! - Outputs the content of the feature: it's corresponding selected moments. -*/ + * Outputs the content of the feature: it's corresponding selected moments. + */ void vpFeatureMoment::print(unsigned int select) const { for (unsigned int i = 0; i < dim_s; ++i) { @@ -122,9 +117,8 @@ void vpFeatureMoment::display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color, unsigned int thickness) const { @@ -135,21 +129,21 @@ void vpFeatureMoment::display(const vpCameraParameters &cam, const vpImageA = A_; @@ -175,27 +169,28 @@ void vpFeatureMoment::update(double A_, double B_, double C_) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; - } else + } + else dim_s = 0; compute_interaction(); } /*! - Retrieves the interaction matrix. No computation is done. - - \param select : Feature selector. - - \return The corresponding interaction matrix. - - There is no rule about the format of the feature selector. It may be - different for different features. 
For example, for - vpFeatureMomentBasic or vpFeatureMomentCentered features, select may - refer to the \f$ (i,j) \f$ couple in the \f$ j \times order + i \f$ - format, but for vpFeatureMomentCInvariant the selector allows to - select couples \f$ (i,j,k,l...) \f$ in the following format: 1 << i - + 1 << j + 1 << k + 1 << l. -*/ + * Retrieves the interaction matrix. No computation is done. + * + * \param select : Feature selector. + * + * \return The corresponding interaction matrix. + * + * There is no rule about the format of the feature selector. It may be + * different for different features. For example, for + * vpFeatureMomentBasic or vpFeatureMomentCentered features, select may + * refer to the \f$ (i,j) \f$ couple in the \f$ j \times order + i \f$ + * format, but for vpFeatureMomentCInvariant the selector allows to + * select couples \f$ (i,j,k,l...) \f$ in the following format: 1 << i + * + 1 << j + 1 << k + 1 << l. + */ vpMatrix vpFeatureMoment::interaction(unsigned int select) { vpMatrix L(0, 0); @@ -209,14 +204,15 @@ vpMatrix vpFeatureMoment::interaction(unsigned int select) return L; } -/*! Duplicates the feature into a vpGenericFeature harbouring the - same properties. The resulting feature is of vpMomentGenericFeature - type. While it still can compute interaction matrices and has acces - to it's moment primitive, it has lost all precise information about - its precise type and therefore cannot be used in a feature database. - - \return The corresponding feature. -*/ +/*! + * Duplicates the feature into a vpGenericFeature harbouring the + * same properties. The resulting feature is of vpMomentGenericFeature + * type. While it still can compute interaction matrices and has access + * to it's moment primitive, it has lost all precise information about + * its precise type and therefore cannot be used in a feature database. + * + * \return The corresponding feature. + */ vpBasicFeature *vpFeatureMoment::duplicate() const { vpFeatureMoment *feat = new vpMomentGenericFeature(moments, A, B, C, featureMomentsDataBase, moment); @@ -236,10 +232,11 @@ vpBasicFeature *vpFeatureMoment::duplicate() const } /*! - Links the feature to the feature's database. NB: The feature's database is - different from the moment's database. \param featureMoments : database in - which the moment features are stored. - + * Links the feature to the feature's database. + * + * \note The feature's database is different from the moment's database. + * \param featureMoments : database in + * which the moment features are stored. */ void vpFeatureMoment::linkTo(vpFeatureMomentDatabase &featureMoments) { @@ -253,35 +250,33 @@ void vpFeatureMoment::linkTo(vpFeatureMomentDatabase &featureMoments) featureMoments.add(*this, _name); } -void vpFeatureMoment::compute_interaction() {} - -vpFeatureMoment::~vpFeatureMoment() {} +void vpFeatureMoment::compute_interaction() { } VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpFeatureMoment &featM) { /* - A const_cast is forced here since interaction() defined in vpBasicFeature() - is not const But introducing const in vpBasicFeature() can break a lot of - client code - */ - vpMatrix Lcomplete((unsigned int)featM.getDimension(), - 6); // 6 corresponds to 6velocities in standard interaction matrix + * - A static_cast is forced here since interaction() defined in vpBasicFeature() + * is not const. But introducing const in vpBasicFeature() can break a lot of + * client code. 
+ * - 6 corresponds to 6 velocities in standard interaction matrix + */ + vpMatrix Lcomplete(static_cast(featM.getDimension()), 6); Lcomplete = const_cast(featM).interaction(vpBasicFeature::FEATURE_ALL); Lcomplete.matlabPrint(os); return os; } /*! -Interface function to display the moments and other interaction matrices -on which a particular vpFeatureMoment is dependent upon -Not made pure to maintain compatibility -Recommended : Types inheriting from vpFeatureMoment should implement this -function -*/ + * Interface function to display the moments and other interaction matrices + * on which a particular vpFeatureMoment is dependent upon + * Not made pure to maintain compatibility + * Recommended : Types inheriting from vpFeatureMoment should implement this + * function. + */ void vpFeatureMoment::printDependencies(std::ostream &os) const { os << " WARNING : Falling back to base class version of " - "printDependencies() in vpFeatureMoment. To prevent that, this has " - "to be implemented in the derived classes!" - << std::endl; + "printDependencies() in vpFeatureMoment. To prevent that, this has " + "to be implemented in the derived classes!" + << std::endl; } diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentAlpha.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentAlpha.cpp index 0b4e278a93..22a418b852 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentAlpha.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentAlpha.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for alpha moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -49,21 +44,21 @@ #ifdef VISP_MOMENTS_COMBINE_MATRICES /*! - Computes interaction matrix for alpha moment. Called internally. - The moment primitives must be computed before calling this. - This feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered -*/ + * Computes interaction matrix for alpha moment. Called internally. + * The moment primitives must be computed before calling this. 
+ * This feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + */ void vpFeatureMomentAlpha::compute_interaction() { bool found_moment_centered; bool found_FeatureMoment_centered; const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); vpFeatureMomentCentered &featureMomentCentered = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); + featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); if (!found_moment_centered) throw vpException(vpException::notInitialized, "vpMomentCentered not found"); @@ -75,28 +70,28 @@ void vpFeatureMomentAlpha::compute_interaction() double dinv = 1 / (4 * u11 * u11 + u20_u02 * u20_u02); interaction_matrices[0].resize(1, 6); interaction_matrices[0] = - (u20_u02 * dinv) * featureMomentCentered.interaction(1, 1) + - (u11 * dinv) * (featureMomentCentered.interaction(0, 2) - featureMomentCentered.interaction(2, 0)); + (u20_u02 * dinv) * featureMomentCentered.interaction(1, 1) + + (u11 * dinv) * (featureMomentCentered.interaction(0, 2) - featureMomentCentered.interaction(2, 0)); } #else // #ifdef VISP_MOMENTS_COMBINE_MATRICES /*! - Computes interaction matrix for alpha moment. Called internally. - The moment primitives must be computed before calling this. - This feature depends on: - - vpMomentCentered - - vpMomentGravityCenter -*/ + * Computes interaction matrix for alpha moment. Called internally. + * The moment primitives must be computed before calling this. + * This feature depends on: + * - vpMomentCentered + * - vpMomentGravityCenter + */ void vpFeatureMomentAlpha::compute_interaction() { bool found_moment_centered; bool found_moment_gravity; const vpMomentCentered &momentCentered = - static_cast(moments.get("vpMomentCentered", found_moment_centered)); + static_cast(moments.get("vpMomentCentered", found_moment_centered)); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); const vpMomentObject &momentObject = moment->getObject(); if (!found_moment_centered) @@ -128,10 +123,10 @@ void vpFeatureMomentAlpha::compute_interaction() Awx = (beta * (mu12 * (mu20 - mu02) + mu11 * (mu03 - mu21)) + Xg * (mu02 * (mu20 - mu02) - 2 * mu11_2) + Yg * mu11 * (mu20 + mu02)) / - d; + d; Awy = (beta * (mu21 * (mu02 - mu20) + mu11 * (mu30 - mu12)) + Xg * mu11 * (mu20 + mu02) + Yg * (mu20 * (mu02 - mu20) - 2 * mu11_2)) / - d; + d; Avz = B * Awx - A * Awy; interaction_matrices.resize(1); diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentAreaNormalized.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentAreaNormalized.cpp index d976f58084..61c845c788 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentAreaNormalized.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentAreaNormalized.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. 
- * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #ifdef VISP_MOMENTS_COMBINE_MATRICES @@ -50,14 +45,14 @@ #include /*! - Computes interaction matrix for the normalized surface moment. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentAreaNormalized - - vpFeatureMomentBasic -*/ + * Computes interaction matrix for the normalized surface moment. Called + * internally. The moment primitives must be computed before calling this. This + * feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentAreaNormalized + * - vpFeatureMomentBasic + */ void vpFeatureMomentAreaNormalized::compute_interaction() { bool found_moment_centered; @@ -66,15 +61,15 @@ void vpFeatureMomentAreaNormalized::compute_interaction() bool found_featuremoment_basic; vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); const vpMomentCentered &momentCentered = - static_cast(moments.get("vpMomentCentered", found_moment_centered)); + static_cast(moments.get("vpMomentCentered", found_moment_centered)); const vpMomentObject &momentObject = moment->getObject(); const vpMomentAreaNormalized &momentSurfaceNormalized = static_cast( moments.get("vpMomentAreaNormalized", found_moment_surface_normalized)); vpFeatureMomentCentered &featureMomentCentered = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); + featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); if (!found_FeatureMoment_centered) throw vpException(vpException::notInitialized, "vpFeatureMomentCentered not found"); @@ -92,13 +87,14 @@ void vpFeatureMomentAreaNormalized::compute_interaction() if (momentObject.getType() == vpMomentObject::DISCRETE) { a = momentCentered.get(2, 0) + momentCentered.get(0, 2); La = featureMomentCentered.interaction(2, 0) + featureMomentCentered.interaction(0, 2); - } else { + } + else { a = momentObject.get(0, 0); La = featureMomentBasic.interaction(0, 0); } normalized_multiplier = - (-momentSurfaceNormalized.getDesiredDepth() / (2 * a)) * sqrt(momentSurfaceNormalized.getDesiredArea() / a); + (-momentSurfaceNormalized.getDesiredDepth() / (2 * a)) * sqrt(momentSurfaceNormalized.getDesiredArea() / a); interaction_matrices[0] = normalized_multiplier * La; } @@ -115,13 +111,13 @@ void vpFeatureMomentAreaNormalized::compute_interaction() #include /*! - Computes interaction matrix for the normalized surface moment. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpMomentAreaNormalized - - vpMomentGravityCenter -*/ + * Computes interaction matrix for the normalized surface moment. Called + * internally. The moment primitives must be computed before calling this. 
This + * feature depends on: + * - vpMomentCentered + * - vpMomentAreaNormalized + * - vpMomentGravityCenter + */ void vpFeatureMomentAreaNormalized::compute_interaction() { bool found_moment_centered; @@ -129,9 +125,9 @@ void vpFeatureMomentAreaNormalized::compute_interaction() bool found_moment_gravity; const vpMomentCentered &momentCentered = - static_cast(moments.get("vpMomentCentered", found_moment_centered)); + static_cast(moments.get("vpMomentCentered", found_moment_centered)); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); const vpMomentObject &momentObject = moment->getObject(); const vpMomentAreaNormalized &momentSurfaceNormalized = static_cast( moments.get("vpMomentAreaNormalized", found_moment_surface_normalized)); @@ -178,7 +174,8 @@ void vpFeatureMomentAreaNormalized::compute_interaction() Anvz = -An * C + B * Anwx - A * Anwy; - } else { + } + else { Anvx = A * An / 2.; Anvy = B * An / 2.; Anvz = -An * C - (3. / 2.) * A * Xn - (3. / 2.) * B * Yn; diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentBasic.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentBasic.cpp index 29d264472e..ff1bd231e8 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentBasic.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentBasic.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -42,24 +37,22 @@ #include #include /*! - Default constructor. - \param data_base : Database of moment primitives. - \param A_ : First plane coefficient for a plane equation of the following - type Ax+By+C=1/Z. \param B_ : Second plane coefficient for a plane equation - of the following type Ax+By+C=1/Z. \param C_ : Third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z. \param featureMoments : - Database of features. -*/ + * Default constructor. + * \param data_base : Database of moment primitives. + * \param A_ : First plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param B_ : Second plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param C_ : Third plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param featureMoments : Database of features. + */ vpFeatureMomentBasic::vpFeatureMomentBasic(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments), order(0) -{ -} +{ } /*! - Computes interaction matrix for basic moment. Called internally. - The moment primitives must be computed before calling this. -*/ + * Computes interaction matrix for basic moment. Called internally. + * The moment primitives must be computed before calling this. 
+ */ void vpFeatureMomentBasic::compute_interaction() { int delta; @@ -70,7 +63,8 @@ void vpFeatureMomentBasic::compute_interaction() i->resize(1, 6); if (m.getType() == vpMomentObject::DISCRETE) { delta = 0; - } else { + } + else { delta = 1; } @@ -85,7 +79,7 @@ void vpFeatureMomentBasic::compute_interaction() interaction_matrices[0][0][VX] = -delta * A * m.get(0, 0); interaction_matrices[0][0][VY] = -delta * B * m.get(0, 0); interaction_matrices[0][0][VZ] = - 3 * delta * (A * m.get(1, 0) + B * m.get(0, 1) + C * m.get(0, 0)) - delta * C * m.get(0, 0); + 3 * delta * (A * m.get(1, 0) + B * m.get(0, 1) + C * m.get(0, 0)) - delta * C * m.get(0, 0); interaction_matrices[0][0][WX] = 3 * delta * m.get(0, 1); interaction_matrices[0][0][WY] = -3 * delta * m.get(1, 0); @@ -99,9 +93,9 @@ void vpFeatureMomentBasic::compute_interaction() interaction_matrices[j_ * order][0][VX] = -delta * A * m.get(0, j_); interaction_matrices[j_ * order][0][VY] = - -j * (A * m.get(1, jm1_) + B * m.get(0, j_) + C * m.get(0, jm1_)) - delta * B * m.get(0, j_); + -j * (A * m.get(1, jm1_) + B * m.get(0, j_) + C * m.get(0, jm1_)) - delta * B * m.get(0, j_); interaction_matrices[j_ * order][0][VZ] = - (j + 3 * delta) * (A * m.get(1, j_) + B * m.get(0, jp1_) + C * m.get(0, j_)) - delta * C * m.get(0, j_); + (j + 3 * delta) * (A * m.get(1, j_) + B * m.get(0, jp1_) + C * m.get(0, j_)) - delta * C * m.get(0, j_); interaction_matrices[j_ * order][0][WX] = (j + 3 * delta) * m.get(0, jp1_) + j * m.get(0, jm1_); interaction_matrices[j_ * order][0][WY] = -(j + 3 * delta) * m.get(1, j_); @@ -115,10 +109,10 @@ void vpFeatureMomentBasic::compute_interaction() unsigned int ip1_ = i_ + 1; interaction_matrices[i_][0][VX] = - -i * (A * m.get(i_, 0) + B * m.get(im1_, 1) + C * m.get(im1_, 0)) - delta * A * m.get(i_, 0); + -i * (A * m.get(i_, 0) + B * m.get(im1_, 1) + C * m.get(im1_, 0)) - delta * A * m.get(i_, 0); interaction_matrices[i_][0][VY] = -delta * B * m.get(i_, 0); interaction_matrices[i_][0][VZ] = - (i + 3 * delta) * (A * m.get(ip1_, 0) + B * m.get(i_, 1) + C * m.get(i_, 0)) - delta * C * m.get(i_, 0); + (i + 3 * delta) * (A * m.get(ip1_, 0) + B * m.get(i_, 1) + C * m.get(i_, 0)) - delta * C * m.get(i_, 0); interaction_matrices[i_][0][WX] = (i + 3 * delta) * m.get(i_, 1); interaction_matrices[i_][0][WY] = -(i + 3 * delta) * m.get(ip1_, 0) - i * m.get(im1_, 0); @@ -136,12 +130,12 @@ void vpFeatureMomentBasic::compute_interaction() unsigned int ip1_ = i_ + 1; interaction_matrices[j_ * order + i_][0][VX] = - -i * (A * m.get(i_, j_) + B * m.get(im1_, jp1_) + C * m.get(im1_, j_)) - delta * A * m.get(i_, j_); + -i * (A * m.get(i_, j_) + B * m.get(im1_, jp1_) + C * m.get(im1_, j_)) - delta * A * m.get(i_, j_); interaction_matrices[j_ * order + i_][0][VY] = - -j * (A * m.get(ip1_, jm1_) + B * m.get(i_, j_) + C * m.get(i_, jm1_)) - delta * B * m.get(i_, j_); + -j * (A * m.get(ip1_, jm1_) + B * m.get(i_, j_) + C * m.get(i_, jm1_)) - delta * B * m.get(i_, j_); interaction_matrices[j_ * order + i_][0][VZ] = - (i + j + 3 * delta) * (A * m.get(ip1_, j_) + B * m.get(i_, jp1_) + C * m.get(i_, j_)) - - delta * C * m.get(i_, j_); + (i + j + 3 * delta) * (A * m.get(ip1_, j_) + B * m.get(i_, jp1_) + C * m.get(i_, j_)) - + delta * C * m.get(i_, j_); interaction_matrices[j_ * order + i_][0][WX] = (i + j + 3 * delta) * m.get(i_, jp1_) + j * m.get(i_, jm1_); interaction_matrices[j_ * order + i_][0][WY] = -(i + j + 3 * delta) * m.get(ip1_, j_) - i * m.get(im1_, j_); @@ -151,11 +145,11 @@ void vpFeatureMomentBasic::compute_interaction() } /*! 
-Interaction matrix corresponding to \f$ m_{ij} \f$ moment.
-\param select_one : first index (i).
-\param select_two : second index (j).
-\return Interaction matrix \f$ L_{m_{ij}} \f$ corresponding to the moment.
-*/
+ * Interaction matrix corresponding to \f$ m_{ij} \f$ moment.
+ * \param select_one : first index (i).
+ * \param select_two : second index (j).
+ * \return Interaction matrix \f$ L_{m_{ij}} \f$ corresponding to the moment.
+ */
 vpMatrix vpFeatureMomentBasic::interaction(unsigned int select_one, unsigned int select_two) const
 {
   if (select_one + select_two > moment->getObject().getOrder())
diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentCInvariant.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentCInvariant.cpp
index 40bc326afb..97769c5d7a 100644
--- a/modules/visual_features/src/visual-feature/vpFeatureMomentCInvariant.cpp
+++ b/modules/visual_features/src/visual-feature/vpFeatureMomentCInvariant.cpp
@@ -1,5 +1,4 @@
-/****************************************************************************
- *
+/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
@@ -30,11 +29,7 @@
 *
 * Description:
 * Implementation for all supported moment features.
- *
- * Authors:
- * Filip Novotny
- *
-*****************************************************************************/
+ */
 #include
 #ifdef VISP_MOMENTS_COMBINE_MATRICES
 #include
@@ -49,14 +44,14 @@
 #include
 /*!
-  Computes interaction matrix for space-scale-rotation invariants. Called
-  internally. The moment primitives must be computed before calling this. This
-  feature depends on:
-  - vpMomentCentered
-  - vpFeatureMomentCentered
-  - vpMomentCInvariant
-  - vpFeatureMomentBasic
-*/
+ * Computes interaction matrix for space-scale-rotation invariants. Called
+ * internally. The moment primitives must be computed before calling this.
This + * feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentCInvariant + * - vpFeatureMomentBasic + */ void vpFeatureMomentCInvariant::compute_interaction() { std::vector LI(16); @@ -67,14 +62,14 @@ void vpFeatureMomentCInvariant::compute_interaction() const vpMomentObject &momentObject = moment->getObject(); const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); const vpMomentCInvariant &momentCInvariant = - (static_cast(moments.get("vpMomentCInvariant", found_moment_cinvariant))); + (static_cast(moments.get("vpMomentCInvariant", found_moment_cinvariant))); vpFeatureMomentCentered &featureMomentCentered = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); + featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); if (!found_featuremoment_basic) throw vpException(vpException::notInitialized, "vpFeatureMomentBasic not found"); @@ -91,244 +86,244 @@ void vpFeatureMomentCInvariant::compute_interaction() zeros[0][i] = 0; LI[1] = -featureMomentCentered.interaction(2, 0) * momentCentered.get(0, 2) - - momentCentered.get(2, 0) * featureMomentCentered.interaction(0, 2) + - 2 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); + momentCentered.get(2, 0) * featureMomentCentered.interaction(0, 2) + + 2 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); LI[2] = 2 * (momentCentered.get(2, 0) - momentCentered.get(0, 2)) * - (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) + - 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); + (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) + + 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); LI[3] = 2 * (momentCentered.get(3, 0) - 3 * momentCentered.get(1, 2)) * - (featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2)) + - 2 * (3 * momentCentered.get(2, 1) - momentCentered.get(0, 3)) * - (3 * featureMomentCentered.interaction(2, 1) - featureMomentCentered.interaction(0, 3)); + (featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2)) + + 2 * (3 * momentCentered.get(2, 1) - momentCentered.get(0, 3)) * + (3 * featureMomentCentered.interaction(2, 1) - featureMomentCentered.interaction(0, 3)); LI[4] = 2 * (momentCentered.get(3, 0) + momentCentered.get(1, 2)) * - (featureMomentCentered.interaction(3, 0) + featureMomentCentered.interaction(1, 2)) + - 2 * (momentCentered.get(2, 1) + momentCentered.get(0, 3)) * - (featureMomentCentered.interaction(2, 1) + featureMomentCentered.interaction(0, 3)); + (featureMomentCentered.interaction(3, 0) + featureMomentCentered.interaction(1, 2)) + + 2 * (momentCentered.get(2, 1) + momentCentered.get(0, 3)) * + (featureMomentCentered.interaction(2, 1) + featureMomentCentered.interaction(0, 3)); LI[5] = -2 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(3, 0) - - 4 * 
pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) - - 12 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) + - 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) - - 12 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) + - 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(1, 2) - - 2 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + - 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(0, 3) - - 4 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(0, 3); + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(3, 0) - + 4 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(3, 0) + + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) - + 12 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) + + 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) - + 12 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) + + 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(1, 2) - + 2 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + + 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(0, 3) - + 4 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(0, 3); LI[6] = 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + - 4 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) - - 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(3, 0) - - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(3, 0) + - 2 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(3, 0) - - 12 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) - - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) + - 12 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(2, 1) + - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) - - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * 
featureMomentCentered.interaction(2, 1) + - 6 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) - - 6 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) - - 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) + - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) - - 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(1, 2) + - 12 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(1, 2) + - 4 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) - - 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(0, 3) + - 2 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(0, 3) + - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) - - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3); + 4 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) - + 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(3, 0) - + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(3, 0) + + 2 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(3, 0) - + 12 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) - + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) + + 12 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(2, 1) + + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) - + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(2, 1) + + 6 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) - + 6 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) - + 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) + + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) - + 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(1, 2) + + 12 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(1, 2) + + 4 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) - + 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(0, 3) + + 2 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(0, 3) + + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) - + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * 
featureMomentCentered.interaction(0, 3); LI[7] = -3 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(3, 0) - - 2 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(3, 0) - - 3 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(3, 0) + - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(3, 0) + - 3 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(3, 0) + - pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(3, 0) + - 3 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(2, 1) - - 6 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) - - 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) - - 9 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) - - 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(2, 1) + - 3 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(2, 1) - - 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(1, 2) * featureMomentCentered.interaction(2, 1) + - 3 * momentCentered.get(2, 1) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) + - 12 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) + - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) - - 3 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(1, 2) - - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(1, 2) + - 9 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(1, 2) - - 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(1, 2) + - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(1, 2) - - pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(0, 3) - - 3 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) + - 3 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) + - 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) - - 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) - - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(0, 3) + - 2 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(0, 3); + 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(3, 0) - + 2 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(3, 0) - + 3 * momentCentered.get(0, 3) * 
pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(3, 0) + + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(3, 0) + + 3 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(3, 0) + + pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(3, 0) + + 3 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(2, 1) - + 6 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) - + 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) - + 9 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) - + 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(2, 1) + + 3 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(2, 1) - + 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(1, 2) * featureMomentCentered.interaction(2, 1) + + 3 * momentCentered.get(2, 1) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) + + 12 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) + + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) - + 3 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(1, 2) - + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(1, 2) + + 9 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(1, 2) - + 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(1, 2) + + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(1, 2) - + pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(0, 3) - + 3 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) + + 3 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) + + 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) - + 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) - + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(0, 3) + + 2 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(0, 3); LI[8] = -2 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(1, 2) * - featureMomentCentered.interaction(2, 1) - - 6 * featureMomentCentered.interaction(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * - momentCentered.get(1, 2) - - 6 * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) * momentCentered.get(0, 3) * - momentCentered.get(1, 2) - - 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3) * - 
momentCentered.get(1, 2) - - 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(1, 2) - - 2 * momentCentered.get(3, 0) * momentCentered.get(1, 2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(0, 3) + - 6 * momentCentered.get(2, 1) * momentCentered.get(1, 2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(1, 2) - - pow((double)momentCentered.get(3, 0), (double)3) * featureMomentCentered.interaction(1, 2) + - 3 * featureMomentCentered.interaction(3, 0) * pow((double)momentCentered.get(1, 2), (double)3) + - 6 * pow((double)momentCentered.get(2, 1), (double)3) * featureMomentCentered.interaction(0, 3) - - featureMomentCentered.interaction(2, 1) * pow((double)momentCentered.get(0, 3), (double)3) + - 3 * featureMomentCentered.interaction(2, 1) * pow((double)momentCentered.get(1, 2), (double)2) * - momentCentered.get(0, 3) + - 18 * pow((double)momentCentered.get(2, 1), (double)2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(2, 1) - - pow((double)momentCentered.get(3, 0), (double)2) * featureMomentCentered.interaction(2, 1) * - momentCentered.get(0, 3) + - 9 * momentCentered.get(3, 0) * pow((double)momentCentered.get(1, 2), (double)2) * - featureMomentCentered.interaction(1, 2) - - 4 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(1, 2) * - featureMomentCentered.interaction(1, 2) + - 2 * pow((double)momentCentered.get(1, 2), (double)2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(0, 3) - - 4 * momentCentered.get(3, 0) * pow((double)momentCentered.get(1, 2), (double)2) * - featureMomentCentered.interaction(3, 0) + - 2 * momentCentered.get(1, 2) * pow((double)momentCentered.get(0, 3), (double)2) * - featureMomentCentered.interaction(1, 2) - - 4 * momentCentered.get(2, 1) * pow((double)momentCentered.get(0, 3), (double)2) * - featureMomentCentered.interaction(2, 1) + - 3 * momentCentered.get(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * - featureMomentCentered.interaction(1, 2) - - 3 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(1, 2) * - featureMomentCentered.interaction(3, 0) - - momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) * - pow((double)momentCentered.get(0, 3), (double)2) - - 4 * pow((double)momentCentered.get(2, 1), (double)2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(0, 3) - - 3 * momentCentered.get(2, 1) * pow((double)momentCentered.get(0, 3), (double)2) * - featureMomentCentered.interaction(0, 3) + - 2 * momentCentered.get(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * - featureMomentCentered.interaction(3, 0) + - 2 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(2, 1) + - 3 * featureMomentCentered.interaction(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * - momentCentered.get(1, 2) - - pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(0, 3) + - 3 * momentCentered.get(2, 1) * pow((double)momentCentered.get(1, 2), (double)2) * - featureMomentCentered.interaction(0, 3) - - featureMomentCentered.interaction(3, 0) * momentCentered.get(1, 2) * - pow((double)momentCentered.get(0, 3), (double)2); + featureMomentCentered.interaction(3, 0) + + 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(1, 2) * + featureMomentCentered.interaction(2, 1) - + 6 * 
featureMomentCentered.interaction(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * + momentCentered.get(1, 2) - + 6 * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) * momentCentered.get(0, 3) * + momentCentered.get(1, 2) - + 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3) * + momentCentered.get(1, 2) - + 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(1, 2) - + 2 * momentCentered.get(3, 0) * momentCentered.get(1, 2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(0, 3) + + 6 * momentCentered.get(2, 1) * momentCentered.get(1, 2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(1, 2) - + pow((double)momentCentered.get(3, 0), (double)3) * featureMomentCentered.interaction(1, 2) + + 3 * featureMomentCentered.interaction(3, 0) * pow((double)momentCentered.get(1, 2), (double)3) + + 6 * pow((double)momentCentered.get(2, 1), (double)3) * featureMomentCentered.interaction(0, 3) - + featureMomentCentered.interaction(2, 1) * pow((double)momentCentered.get(0, 3), (double)3) + + 3 * featureMomentCentered.interaction(2, 1) * pow((double)momentCentered.get(1, 2), (double)2) * + momentCentered.get(0, 3) + + 18 * pow((double)momentCentered.get(2, 1), (double)2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(2, 1) - + pow((double)momentCentered.get(3, 0), (double)2) * featureMomentCentered.interaction(2, 1) * + momentCentered.get(0, 3) + + 9 * momentCentered.get(3, 0) * pow((double)momentCentered.get(1, 2), (double)2) * + featureMomentCentered.interaction(1, 2) - + 4 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(1, 2) * + featureMomentCentered.interaction(1, 2) + + 2 * pow((double)momentCentered.get(1, 2), (double)2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(0, 3) - + 4 * momentCentered.get(3, 0) * pow((double)momentCentered.get(1, 2), (double)2) * + featureMomentCentered.interaction(3, 0) + + 2 * momentCentered.get(1, 2) * pow((double)momentCentered.get(0, 3), (double)2) * + featureMomentCentered.interaction(1, 2) - + 4 * momentCentered.get(2, 1) * pow((double)momentCentered.get(0, 3), (double)2) * + featureMomentCentered.interaction(2, 1) + + 3 * momentCentered.get(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * + featureMomentCentered.interaction(1, 2) - + 3 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(1, 2) * + featureMomentCentered.interaction(3, 0) - + momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) * + pow((double)momentCentered.get(0, 3), (double)2) - + 4 * pow((double)momentCentered.get(2, 1), (double)2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(0, 3) - + 3 * momentCentered.get(2, 1) * pow((double)momentCentered.get(0, 3), (double)2) * + featureMomentCentered.interaction(0, 3) + + 2 * momentCentered.get(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * + featureMomentCentered.interaction(3, 0) + + 2 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(2, 1) + + 3 * featureMomentCentered.interaction(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * + momentCentered.get(1, 2) - + pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(0, 3) + + 3 * momentCentered.get(2, 1) * pow((double)momentCentered.get(1, 2), (double)2) * + 
featureMomentCentered.interaction(0, 3) - + featureMomentCentered.interaction(3, 0) * momentCentered.get(1, 2) * + pow((double)momentCentered.get(0, 3), (double)2); LI[9] = 4 * pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(3, 0) + - 18 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(3, 0) + - 12 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(3, 0) + - 18 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + - 4 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + - 18 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(3, 0) + - 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(1, 2) * featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(2, 1) + - 18 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) + - 18 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(1, 2) + - 18 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) + - 18 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) + - 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) + - 6 * momentCentered.get(2, 1) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + - 4 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + - 18 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(0, 3) + - 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(0, 3) + - 18 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) + - 18 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3) + - 4 * pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(0, 3); + 18 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(3, 0) + + 12 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(3, 0) + + 18 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + + 4 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + + 18 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(3, 0) + + 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(1, 2) * featureMomentCentered.interaction(3, 0) + + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(2, 1) + + 18 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + 
featureMomentCentered.interaction(2, 1) + + 18 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(1, 2) + + 18 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) + + 18 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) + + 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) + + 6 * momentCentered.get(2, 1) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + + 4 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + + 18 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(0, 3) + + 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(0, 3) + + 18 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) + + 18 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3) + + 4 * pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(0, 3); LI[10] = featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) + - momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) - - 4 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) - - 4 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) + - 6 * momentCentered.get(2, 2) * featureMomentCentered.interaction(2, 2); + momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) - + 4 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) - + 4 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) + + 6 * momentCentered.get(2, 2) * featureMomentCentered.interaction(2, 2); LI[11] = -3 * featureMomentCentered.interaction(4, 0) * momentCentered.get(2, 2) - - 3 * momentCentered.get(4, 0) * featureMomentCentered.interaction(2, 2) - - 2 * featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) - - 2 * momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) + - 6 * momentCentered.get(3, 1) * featureMomentCentered.interaction(3, 1) + - 2 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) + - 2 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) - - 3 * featureMomentCentered.interaction(2, 2) * momentCentered.get(0, 4) - - 3 * momentCentered.get(2, 2) * featureMomentCentered.interaction(0, 4) + - 6 * momentCentered.get(1, 3) * featureMomentCentered.interaction(1, 3); + 3 * momentCentered.get(4, 0) * featureMomentCentered.interaction(2, 2) - + 2 * featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) - + 2 * momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) + + 6 * momentCentered.get(3, 1) * featureMomentCentered.interaction(3, 1) + + 2 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) + + 2 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) - + 3 * featureMomentCentered.interaction(2, 2) * momentCentered.get(0, 4) - + 3 * momentCentered.get(2, 2) * featureMomentCentered.interaction(0, 4) + + 6 * momentCentered.get(1, 3) * featureMomentCentered.interaction(1, 
3); LI[12] = 6 * momentCentered.get(4, 0) * featureMomentCentered.interaction(4, 0) + - 12 * featureMomentCentered.interaction(4, 0) * momentCentered.get(2, 2) + - 12 * momentCentered.get(4, 0) * featureMomentCentered.interaction(2, 2) + - 2 * featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) + - 2 * momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) + - 16 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) + - 16 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) + - 12 * featureMomentCentered.interaction(2, 2) * momentCentered.get(0, 4) + - 12 * momentCentered.get(2, 2) * featureMomentCentered.interaction(0, 4) + - 6 * momentCentered.get(0, 4) * featureMomentCentered.interaction(0, 4); + 12 * featureMomentCentered.interaction(4, 0) * momentCentered.get(2, 2) + + 12 * momentCentered.get(4, 0) * featureMomentCentered.interaction(2, 2) + + 2 * featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) + + 2 * momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) + + 16 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) + + 16 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) + + 12 * featureMomentCentered.interaction(2, 2) * momentCentered.get(0, 4) + + 12 * momentCentered.get(2, 2) * featureMomentCentered.interaction(0, 4) + + 6 * momentCentered.get(0, 4) * featureMomentCentered.interaction(0, 4); LI[13] = 2 * (momentCentered.get(5, 0) + 2 * momentCentered.get(3, 2) + momentCentered.get(1, 4)) * - (featureMomentCentered.interaction(5, 0) + 2 * featureMomentCentered.interaction(3, 2) + - featureMomentCentered.interaction(1, 4)) + - 2 * (momentCentered.get(0, 5) + 2 * momentCentered.get(2, 3) + momentCentered.get(4, 1)) * - (featureMomentCentered.interaction(0, 5) + 2 * featureMomentCentered.interaction(2, 3) + - featureMomentCentered.interaction(4, 1)); + (featureMomentCentered.interaction(5, 0) + 2 * featureMomentCentered.interaction(3, 2) + + featureMomentCentered.interaction(1, 4)) + + 2 * (momentCentered.get(0, 5) + 2 * momentCentered.get(2, 3) + momentCentered.get(4, 1)) * + (featureMomentCentered.interaction(0, 5) + 2 * featureMomentCentered.interaction(2, 3) + + featureMomentCentered.interaction(4, 1)); LI[14] = 2 * (momentCentered.get(5, 0) - 2 * momentCentered.get(3, 2) - 3 * momentCentered.get(1, 4)) * - (featureMomentCentered.interaction(5, 0) - 2 * featureMomentCentered.interaction(3, 2) - - 3 * featureMomentCentered.interaction(1, 4)) + - 2 * (momentCentered.get(0, 5) - 2 * momentCentered.get(2, 3) - 3 * momentCentered.get(4, 1)) * - (featureMomentCentered.interaction(0, 5) - 2 * featureMomentCentered.interaction(2, 3) - - 3 * featureMomentCentered.interaction(4, 1)); + (featureMomentCentered.interaction(5, 0) - 2 * featureMomentCentered.interaction(3, 2) - + 3 * featureMomentCentered.interaction(1, 4)) + + 2 * (momentCentered.get(0, 5) - 2 * momentCentered.get(2, 3) - 3 * momentCentered.get(4, 1)) * + (featureMomentCentered.interaction(0, 5) - 2 * featureMomentCentered.interaction(2, 3) - + 3 * featureMomentCentered.interaction(4, 1)); LI[15] = 2 * (momentCentered.get(5, 0) - 10 * momentCentered.get(3, 2) + 5 * momentCentered.get(1, 4)) * - (featureMomentCentered.interaction(5, 0) - 10 * featureMomentCentered.interaction(3, 2) + - 5 * featureMomentCentered.interaction(1, 4)) + - 2 * (momentCentered.get(0, 5) - 10 * momentCentered.get(2, 3) + 5 * momentCentered.get(4, 1)) * - (featureMomentCentered.interaction(0, 5) - 10 * 
featureMomentCentered.interaction(2, 3) + - 5 * featureMomentCentered.interaction(4, 1)); + (featureMomentCentered.interaction(5, 0) - 10 * featureMomentCentered.interaction(3, 2) + + 5 * featureMomentCentered.interaction(1, 4)) + + 2 * (momentCentered.get(0, 5) - 10 * momentCentered.get(2, 3) + 5 * momentCentered.get(4, 1)) * + (featureMomentCentered.interaction(0, 5) - 10 * featureMomentCentered.interaction(2, 3) + + 5 * featureMomentCentered.interaction(4, 1)); double s3 = momentCInvariant.getS(3); double s2 = momentCInvariant.getS(2); @@ -341,18 +336,18 @@ void vpFeatureMomentCInvariant::compute_interaction() vpMatrix Lc2 = featureMomentCentered.interaction(0, 3) - 3 * featureMomentCentered.interaction(2, 1); vpMatrix Ls2 = featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2); vpMatrix Lc3 = 2 * (momentCentered.get(2, 0) - momentCentered.get(0, 2)) * - (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) - - 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); + (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) - + 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); vpMatrix Ls3 = 4 * featureMomentCentered.interaction(1, 1) * (momentCentered.get(2, 0) - momentCentered.get(0, 2)) + - 4 * momentCentered.get(1, 1) * - (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)); + 4 * momentCentered.get(1, 1) * + (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)); vpMatrix LI1 = 2 * (momentCentered.get(2, 0) - momentCentered.get(0, 2)) * - (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) + - 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); + (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) + + 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); vpMatrix LI2 = 2 * (momentCentered.get(0, 3) - 3 * momentCentered.get(2, 1)) * - (featureMomentCentered.interaction(0, 3) - 3 * featureMomentCentered.interaction(2, 1)) + - 2 * (momentCentered.get(3, 0) - 3 * momentCentered.get(1, 2)) * - (featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2)); + (featureMomentCentered.interaction(0, 3) - 3 * featureMomentCentered.interaction(2, 1)) + + 2 * (momentCentered.get(3, 0) - 3 * momentCentered.get(1, 2)) * + (featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2)); vpMatrix LI3 = featureMomentCentered.interaction(2, 0) + featureMomentCentered.interaction(0, 2); vpMatrix La(1, 6); @@ -360,54 +355,55 @@ void vpFeatureMomentCInvariant::compute_interaction() if (momentObject.getType() == vpMomentObject::DISCRETE) { a = momentCentered.get(2, 0) + momentCentered.get(0, 2); La = (featureMomentCentered.interaction(2, 0) + featureMomentCentered.interaction(0, 2)); - } else { + } + else { a = momentObject.get(0, 0); La = featureMomentBasic.interaction(0, 0); } interaction_matrices.resize(14); interaction_matrices[0] = (1. / (momentCInvariant.getI(2) * momentCInvariant.getI(2))) * - (momentCInvariant.getI(2) * LI[1] - momentCInvariant.getI(1) * LI[2]); + (momentCInvariant.getI(2) * LI[1] - momentCInvariant.getI(1) * LI[2]); interaction_matrices[1] = (1. 
/ (momentCInvariant.getI(4) * momentCInvariant.getI(4))) * - (momentCInvariant.getI(4) * LI[3] - momentCInvariant.getI(3) * LI[4]); + (momentCInvariant.getI(4) * LI[3] - momentCInvariant.getI(3) * LI[4]); interaction_matrices[2] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[5] - momentCInvariant.getI(5) * LI[6]); + (momentCInvariant.getI(6) * LI[5] - momentCInvariant.getI(5) * LI[6]); interaction_matrices[3] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[7] - momentCInvariant.getI(7) * LI[6]); + (momentCInvariant.getI(6) * LI[7] - momentCInvariant.getI(7) * LI[6]); interaction_matrices[4] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[8] - momentCInvariant.getI(8) * LI[6]); + (momentCInvariant.getI(6) * LI[8] - momentCInvariant.getI(8) * LI[6]); interaction_matrices[5] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[9] - momentCInvariant.getI(9) * LI[6]); + (momentCInvariant.getI(6) * LI[9] - momentCInvariant.getI(9) * LI[6]); interaction_matrices[6] = (1. / (momentCInvariant.getI(10) * momentCInvariant.getI(10))) * - (momentCInvariant.getI(10) * LI[11] - momentCInvariant.getI(11) * LI[10]); + (momentCInvariant.getI(10) * LI[11] - momentCInvariant.getI(11) * LI[10]); interaction_matrices[7] = (1. / (momentCInvariant.getI(10) * momentCInvariant.getI(10))) * - (momentCInvariant.getI(10) * LI[12] - momentCInvariant.getI(12) * LI[10]); + (momentCInvariant.getI(10) * LI[12] - momentCInvariant.getI(12) * LI[10]); interaction_matrices[8] = (1. / (momentCInvariant.getI(15) * momentCInvariant.getI(15))) * - (momentCInvariant.getI(15) * LI[13] - momentCInvariant.getI(13) * LI[15]); + (momentCInvariant.getI(15) * LI[13] - momentCInvariant.getI(13) * LI[15]); interaction_matrices[9] = (1. 
/ (momentCInvariant.getI(15) * momentCInvariant.getI(15))) * - (momentCInvariant.getI(15) * LI[14] - momentCInvariant.getI(14) * LI[15]); + (momentCInvariant.getI(15) * LI[14] - momentCInvariant.getI(14) * LI[15]); interaction_matrices[10] = (Lc2 * c3 + c2 * Lc3 + Ls2 * s3 + s2 * Ls3) * sqrt(a) / I1 * pow(I3, -0.3e1 / 0.2e1) + - (c2 * c3 + s2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - - (c2 * c3 + s2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - - 0.3e1 / 0.2e1 * (c2 * c3 + s2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; + (c2 * c3 + s2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - + (c2 * c3 + s2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - + 0.3e1 / 0.2e1 * (c2 * c3 + s2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; interaction_matrices[11] = (Ls2 * c3 + s2 * Lc3 - Lc2 * s3 - c2 * Ls3) * sqrt(a) / I1 * pow(I3, -0.3e1 / 0.2e1) + - (s2 * c3 - c2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - - (s2 * c3 - c2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - - 0.3e1 / 0.2e1 * (s2 * c3 - c2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; + (s2 * c3 - c2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - + (s2 * c3 - c2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - + 0.3e1 / 0.2e1 * (s2 * c3 - c2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; interaction_matrices[12] = (1 / (I3 * I3)) * LI1 - (2 * I1 / (I3 * I3 * I3)) * LI3; interaction_matrices[13] = - (I2 / (I3 * I3 * I3)) * La + (a / (I3 * I3 * I3)) * LI2 - (3 * a * I2 / (I3 * I3 * I3 * I3)) * LI3; + (I2 / (I3 * I3 * I3)) * La + (a / (I3 * I3 * I3)) * LI2 - (3 * a * I2 / (I3 * I3 * I3 * I3)) * LI3; } #else @@ -425,17 +421,16 @@ void vpFeatureMomentCInvariant::compute_interaction() #include /*! - Computes interaction matrix for space-scale-rotation invariants. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentCInvariant - - vpFeatureMomentBasic -*/ + * Computes interaction matrix for space-scale-rotation invariants. Called + * internally. The moment primitives must be computed before calling this. 
This + * feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentCInvariant + * - vpFeatureMomentBasic + */ void vpFeatureMomentCInvariant::compute_interaction() { - // std::vector LI(16); LI.resize(16); // LI made class member @@ -446,15 +441,15 @@ void vpFeatureMomentCInvariant::compute_interaction() const vpMomentObject &momentObject = moment->getObject(); const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); const vpMomentCInvariant &momentCInvariant = - (static_cast(moments.get("vpMomentCInvariant", found_moment_cinvariant))); + (static_cast(moments.get("vpMomentCInvariant", found_moment_cinvariant))); vpFeatureMomentCentered &featureMomentCentered = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); + featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); if (!found_featuremoment_basic) throw vpException(vpException::notInitialized, "vpFeatureMomentBasic not found"); @@ -523,75 +518,75 @@ void vpFeatureMomentCInvariant::compute_interaction() LI[2] = (-2 * mu20 + 2 * mu02) * Lmu02 + 8 * mu11 * Lmu11 + (2 * mu20 - 2 * mu02) * Lmu20; LI[3] = (-6 * mu21 + 2 * mu03) * Lmu03 + (-6 * mu30 + 18 * mu12) * Lmu12 + (18 * mu21 - 6 * mu03) * Lmu21 + - (2 * mu30 - 6 * mu12) * Lmu30; + (2 * mu30 - 6 * mu12) * Lmu30; LI[4] = (2 * mu21 + 2 * mu03) * Lmu03 + (2 * mu30 + 2 * mu12) * Lmu12 + (2 * mu21 + 2 * mu03) * Lmu21 + - (2 * mu30 + 2 * mu12) * Lmu30; + (2 * mu30 + 2 * mu12) * Lmu30; LI[5] = (-2 * mu30_2 * mu03 + 6 * mu30 * mu21 * mu12 - 4 * mu21_3) * Lmu03 + - (6 * mu30 * mu21 * mu03 - 12 * mu30 * mu12_2 + 6 * mu21_2 * mu12) * Lmu12 + - (6 * mu30 * mu12 * mu03 - 12 * mu21_2 * mu03 + 6 * mu21 * mu12_2) * Lmu21 + - (-2 * mu30 * mu03_2 - 4 * mu12_3 + 6 * mu21 * mu12 * mu03) * Lmu30; + (6 * mu30 * mu21 * mu03 - 12 * mu30 * mu12_2 + 6 * mu21_2 * mu12) * Lmu12 + + (6 * mu30 * mu12 * mu03 - 12 * mu21_2 * mu03 + 6 * mu21 * mu12_2) * Lmu21 + + (-2 * mu30 * mu03_2 - 4 * mu12_3 + 6 * mu21 * mu12 * mu03) * Lmu30; LI[6] = (-6 * mu30 * mu21 * mu12 - 6 * mu21 * mu12_2 + 6 * mu21_2 * mu03 + 2 * mu21_3 + 4 * mu30_2 * mu03) * Lmu03 + - (-6 * mu30 * mu21_2 - 6 * mu30 * mu21 * mu03 + 12 * mu12_3 + 6 * mu30_2 * mu12 - 12 * mu21 * mu12 * mu03 + - 6 * mu30 * mu12_2) * - Lmu12 + - (6 * mu21 * mu03_2 + 6 * mu21_2 * mu03 - 6 * mu30 * mu12 * mu03 + 12 * mu21_3 - 12 * mu30 * mu21 * mu12 - - 6 * mu12_2 * mu03) * - Lmu21 + - (6 * mu30 * mu12_2 + 2 * mu12_3 + 4 * mu30 * mu03_2 - 6 * mu21_2 * mu12 - 6 * mu21 * mu12 * mu03) * Lmu30; + (-6 * mu30 * mu21_2 - 6 * mu30 * mu21 * mu03 + 12 * mu12_3 + 6 * mu30_2 * mu12 - 12 * mu21 * mu12 * mu03 + + 6 * mu30 * mu12_2) * + Lmu12 + + (6 * mu21 * mu03_2 + 6 * mu21_2 * mu03 - 6 * mu30 * mu12 * mu03 + 12 * mu21_3 - 12 * mu30 * mu21 * mu12 - + 6 * mu12_2 * mu03) * + Lmu21 + + (6 * mu30 * mu12_2 + 2 * mu12_3 + 4 * mu30 * mu03_2 - 6 * mu21_2 * mu12 - 6 * mu21 * mu12 * mu03) * Lmu30; LI[7] = (-6 * mu21_2 * mu12 + 3 * mu30 * mu03_2 - mu30_3 - 3 * mu30 * mu21_2 - 6 * mu21 * mu12 * mu03 + 3 * mu30 * mu12_2 + 2 * mu12_3) * - Lmu03 + - (-3 * mu21 * mu03_2 + 12 * mu30 * mu21 * mu12 + 6 * mu30 * mu12 * mu03 + 3 * mu30_2 * mu21 + - 9 * 
mu21 * mu12_2 - 6 * mu21_2 * mu03 - 3 * mu21_3 + 6 * mu12_2 * mu03) * - Lmu12 + - (3 * mu30_2 * mu12 - 9 * mu21_2 * mu12 - 12 * mu21 * mu12 * mu03 - 6 * mu30 * mu21 * mu03 - - 6 * mu30 * mu21_2 + 6 * mu30 * mu12_2 + 3 * mu12_3 - 3 * mu12 * mu03_2) * - Lmu21 + - (6 * mu21 * mu12_2 + 6 * mu30 * mu21 * mu12 - 3 * mu30_2 * mu03 + 3 * mu12_2 * mu03 - 3 * mu21_2 * mu03 - - 2 * mu21_3 + mu03_3) * - Lmu30; + Lmu03 + + (-3 * mu21 * mu03_2 + 12 * mu30 * mu21 * mu12 + 6 * mu30 * mu12 * mu03 + 3 * mu30_2 * mu21 + + 9 * mu21 * mu12_2 - 6 * mu21_2 * mu03 - 3 * mu21_3 + 6 * mu12_2 * mu03) * + Lmu12 + + (3 * mu30_2 * mu12 - 9 * mu21_2 * mu12 - 12 * mu21 * mu12 * mu03 - 6 * mu30 * mu21 * mu03 - + 6 * mu30 * mu21_2 + 6 * mu30 * mu12_2 + 3 * mu12_3 - 3 * mu12 * mu03_2) * + Lmu21 + + (6 * mu21 * mu12_2 + 6 * mu30 * mu21 * mu12 - 3 * mu30_2 * mu03 + 3 * mu12_2 * mu03 - 3 * mu21_2 * mu03 - + 2 * mu21_3 + mu03_3) * + Lmu30; LI[8] = (6 * mu21_3 - 2 * mu30 * mu12 * mu03 + 2 * mu12_2 * mu03 + 3 * mu21 * mu12_2 - 6 * mu30 * mu21 * mu12 - mu30_2 * mu21 - 4 * mu21_2 * mu03 - 3 * mu21 * mu03_2) * - Lmu03 + - (2 * mu12 * mu03_2 - 4 * mu30_2 * mu12 + 9 * mu30 * mu12_2 - mu30 * mu03_2 - 6 * mu30 * mu21 * mu03 + - 3 * mu30 * mu21_2 + 6 * mu21 * mu12 * mu03 - mu30_3) * - Lmu12 + - (18 * mu21_2 * mu03 + 6 * mu30 * mu21 * mu12 - 4 * mu21 * mu03_2 - mu03_3 - mu30_2 * mu03 - - 6 * mu30 * mu12 * mu03 + 3 * mu12_2 * mu03 + 2 * mu30_2 * mu21) * - Lmu21 + - (-6 * mu21 * mu12 * mu03 - 4 * mu30 * mu12_2 - 2 * mu30 * mu21 * mu03 + 2 * mu30 * mu21_2 + 3 * mu12_3 + - 3 * mu21_2 * mu12 - 3 * mu30_2 * mu12 - mu12 * mu03_2) * - Lmu30; + Lmu03 + + (2 * mu12 * mu03_2 - 4 * mu30_2 * mu12 + 9 * mu30 * mu12_2 - mu30 * mu03_2 - 6 * mu30 * mu21 * mu03 + + 3 * mu30 * mu21_2 + 6 * mu21 * mu12 * mu03 - mu30_3) * + Lmu12 + + (18 * mu21_2 * mu03 + 6 * mu30 * mu21 * mu12 - 4 * mu21 * mu03_2 - mu03_3 - mu30_2 * mu03 - + 6 * mu30 * mu12 * mu03 + 3 * mu12_2 * mu03 + 2 * mu30_2 * mu21) * + Lmu21 + + (-6 * mu21 * mu12 * mu03 - 4 * mu30 * mu12_2 - 2 * mu30 * mu21 * mu03 + 2 * mu30 * mu21_2 + 3 * mu12_3 + + 3 * mu21_2 * mu12 - 3 * mu30_2 * mu12 - mu12 * mu03_2) * + Lmu30; LI[9] = (2 * (2 * mu03 + 3 * mu21)) * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu03 + - 6 * mu30 * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu12 + - 6 * mu03 * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu21 + - (2 * (2 * mu30 + 3 * mu12)) * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu30; + 6 * mu30 * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu12 + + 6 * mu03 * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu21 + + (2 * (2 * mu30 + 3 * mu12)) * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu30; LI[10] = Lmu40 * mu04 + mu40 * Lmu04 - 4 * Lmu31 * mu13 - 4 * mu31 * Lmu13 + 6 * mu22 * Lmu22; LI[11] = (-2 * mu40 - 3 * mu22) * Lmu04 + (2 * mu31 + 6 * mu13) * Lmu13 + (-3 * mu04 - 3 * mu40) * Lmu22 + - (2 * mu13 + 6 * mu31) * Lmu31 + (-3 * mu22 - 2 * mu04) * Lmu40; + (2 * mu13 + 6 * mu31) * Lmu31 + (-3 * mu22 - 2 * mu04) * Lmu40; LI[12] = (2 * mu40 + 12 * mu22 + 6 * mu04) * Lmu04 + 16 * mu31 * Lmu13 + (12 * mu40 + 12 * mu04) * Lmu22 + - 16 * Lmu31 * mu13 + (6 * mu40 + 12 * mu22 + 2 * mu04) * Lmu40; + 16 * Lmu31 * mu13 + (6 * mu40 + 12 * mu22 + 2 * mu04) * Lmu40; LI[13] = (2 * mu05 + 4 * mu23 + 2 * mu41) * Lmu05 + (2 * mu50 + 4 * mu32 + 2 * mu14) * Lmu14 + - (4 * mu05 + 8 * mu23 + 4 * mu41) * Lmu23 + (4 * mu50 + 8 * mu32 + 4 * mu14) * Lmu32 + - (2 * mu05 + 4 * mu23 + 2 * mu41) * Lmu41 + (2 * mu50 + 4 * 
mu32 + 2 * mu14) * Lmu50; + (4 * mu05 + 8 * mu23 + 4 * mu41) * Lmu23 + (4 * mu50 + 8 * mu32 + 4 * mu14) * Lmu32 + + (2 * mu05 + 4 * mu23 + 2 * mu41) * Lmu41 + (2 * mu50 + 4 * mu32 + 2 * mu14) * Lmu50; LI[14] = (2 * mu05 - 4 * mu23 - 6 * mu41) * Lmu05 + (-6 * mu50 + 12 * mu32 + 18 * mu14) * Lmu14 + - (-4 * mu05 + 8 * mu23 + 12 * mu41) * Lmu23 + (-4 * mu50 + 8 * mu32 + 12 * mu14) * Lmu32 + - (-6 * mu05 + 12 * mu23 + 18 * mu41) * Lmu41 + (2 * mu50 - 4 * mu32 - 6 * mu14) * Lmu50; + (-4 * mu05 + 8 * mu23 + 12 * mu41) * Lmu23 + (-4 * mu50 + 8 * mu32 + 12 * mu14) * Lmu32 + + (-6 * mu05 + 12 * mu23 + 18 * mu41) * Lmu41 + (2 * mu50 - 4 * mu32 - 6 * mu14) * Lmu50; LI[15] = (2 * mu05 - 20 * mu23 + 10 * mu41) * Lmu05 + (10 * mu50 - 100 * mu32 + 50 * mu14) * Lmu14 + - (-20 * mu05 + 200 * mu23 - 100 * mu41) * Lmu23 + (-20 * mu50 + 200 * mu32 - 100 * mu14) * Lmu32 + - (10 * mu05 - 100 * mu23 + 50 * mu41) * Lmu41 + (2 * mu50 - 20 * mu32 + 10 * mu14) * Lmu50; + (-20 * mu05 + 200 * mu23 - 100 * mu41) * Lmu23 + (-20 * mu50 + 200 * mu32 - 100 * mu14) * Lmu32 + + (10 * mu05 - 100 * mu23 + 50 * mu41) * Lmu41 + (2 * mu50 - 20 * mu32 + 10 * mu14) * Lmu50; double s3 = momentCInvariant.getS(3); double s2 = momentCInvariant.getS(2); @@ -607,9 +602,9 @@ void vpFeatureMomentCInvariant::compute_interaction() vpMatrix Lc2 = Lmu03 - 3 * Lmu21; vpMatrix Ls2 = Lmu30 - 3 * Lmu12; vpMatrix Lc3 = 2 * (mu20__mu02) * (Lmu20__Lmu02)-8. * mu11 * Lmu11; - vpMatrix Ls3 = 4 * Lmu11 * (mu20__mu02) + 4 * mu11 * (Lmu20__Lmu02); - vpMatrix LI1 = 2 * (mu20__mu02) * (Lmu20__Lmu02) + 8 * mu11 * Lmu11; - vpMatrix LI2 = 2 * (mu03 - 3 * mu21) * (Lc2) + 2 * (mu30 - 3 * mu12) * (Ls2); + vpMatrix Ls3 = 4 * Lmu11 * (mu20__mu02)+4 * mu11 * (Lmu20__Lmu02); + vpMatrix LI1 = 2 * (mu20__mu02) * (Lmu20__Lmu02)+8 * mu11 * Lmu11; + vpMatrix LI2 = 2 * (mu03 - 3 * mu21) * (Lc2)+2 * (mu30 - 3 * mu12) * (Ls2); vpMatrix LI3 = Lmu20 + Lmu02; vpMatrix La(1, 6); @@ -617,7 +612,8 @@ void vpFeatureMomentCInvariant::compute_interaction() if (momentObject.getType() == vpMomentObject::DISCRETE) { a = momentCentered.get(2, 0) + momentCentered.get(0, 2); La = (featureMomentCentered.interaction(2, 0) + featureMomentCentered.interaction(0, 2)); - } else { + } + else { a = momentObject.get(0, 0); La = featureMomentBasic.interaction(0, 0); } @@ -630,55 +626,55 @@ void vpFeatureMomentCInvariant::compute_interaction() */ interaction_matrices[0] = (1. / (momentCInvariant.getI(2) * momentCInvariant.getI(2))) * - (momentCInvariant.getI(2) * LI[1] - momentCInvariant.getI(1) * LI[2]); + (momentCInvariant.getI(2) * LI[1] - momentCInvariant.getI(1) * LI[2]); interaction_matrices[1] = (1. / (momentCInvariant.getI(4) * momentCInvariant.getI(4))) * - (momentCInvariant.getI(4) * LI[3] - momentCInvariant.getI(3) * LI[4]); + (momentCInvariant.getI(4) * LI[3] - momentCInvariant.getI(3) * LI[4]); interaction_matrices[2] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[5] - momentCInvariant.getI(5) * LI[6]); + (momentCInvariant.getI(6) * LI[5] - momentCInvariant.getI(5) * LI[6]); interaction_matrices[3] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[7] - momentCInvariant.getI(7) * LI[6]); + (momentCInvariant.getI(6) * LI[7] - momentCInvariant.getI(7) * LI[6]); interaction_matrices[4] = (1. 
/ (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[8] - momentCInvariant.getI(8) * LI[6]); + (momentCInvariant.getI(6) * LI[8] - momentCInvariant.getI(8) * LI[6]); interaction_matrices[5] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[9] - momentCInvariant.getI(9) * LI[6]); + (momentCInvariant.getI(6) * LI[9] - momentCInvariant.getI(9) * LI[6]); interaction_matrices[6] = (1. / (momentCInvariant.getI(10) * momentCInvariant.getI(10))) * - (momentCInvariant.getI(10) * LI[11] - momentCInvariant.getI(11) * LI[10]); + (momentCInvariant.getI(10) * LI[11] - momentCInvariant.getI(11) * LI[10]); interaction_matrices[7] = (1. / (momentCInvariant.getI(10) * momentCInvariant.getI(10))) * - (momentCInvariant.getI(10) * LI[12] - momentCInvariant.getI(12) * LI[10]); + (momentCInvariant.getI(10) * LI[12] - momentCInvariant.getI(12) * LI[10]); interaction_matrices[8] = (1. / (momentCInvariant.getI(15) * momentCInvariant.getI(15))) * - (momentCInvariant.getI(15) * LI[13] - momentCInvariant.getI(13) * LI[15]); + (momentCInvariant.getI(15) * LI[13] - momentCInvariant.getI(13) * LI[15]); interaction_matrices[9] = (1. / (momentCInvariant.getI(15) * momentCInvariant.getI(15))) * - (momentCInvariant.getI(15) * LI[14] - momentCInvariant.getI(14) * LI[15]); + (momentCInvariant.getI(15) * LI[14] - momentCInvariant.getI(14) * LI[15]); interaction_matrices[10] = (Lc2 * c3 + c2 * Lc3 + Ls2 * s3 + s2 * Ls3) * sqrt(a) / I1 * pow(I3, -0.3e1 / 0.2e1) + - (c2 * c3 + s2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - - (c2 * c3 + s2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - - 0.3e1 / 0.2e1 * (c2 * c3 + s2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; + (c2 * c3 + s2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - + (c2 * c3 + s2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - + 0.3e1 / 0.2e1 * (c2 * c3 + s2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; interaction_matrices[11] = (Ls2 * c3 + s2 * Lc3 - Lc2 * s3 - c2 * Ls3) * sqrt(a) / I1 * pow(I3, -0.3e1 / 0.2e1) + - (s2 * c3 - c2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - - (s2 * c3 - c2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - - 0.3e1 / 0.2e1 * (s2 * c3 - c2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; + (s2 * c3 - c2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - + (s2 * c3 - c2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - + 0.3e1 / 0.2e1 * (s2 * c3 - c2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; interaction_matrices[12] = (1 / (I3 * I3)) * LI1 - (2 * I1 / (I3 * I3 * I3)) * LI3; interaction_matrices[13] = - (I2 / (I3 * I3 * I3)) * La + (a / (I3 * I3 * I3)) * LI2 - (3 * a * I2 / (I3 * I3 * I3 * I3)) * LI3; + (I2 / (I3 * I3 * I3)) * La + (a / (I3 * I3 * I3)) * LI2 - (3 * a * I2 / (I3 * I3 * I3 * I3)) * LI3; } /*! - Print out all invariants that were computed - There are 15 of them, as in [Point-based and region based.ITRO05] - \cite Tahri05z + * Print out all invariants that were computed + * There are 15 of them, as in [Point-based and region based.ITRO05] + * \cite Tahri05z */ void vpFeatureMomentCInvariant::printLsofInvariants(std::ostream &os) const { @@ -690,8 +686,8 @@ void vpFeatureMomentCInvariant::printLsofInvariants(std::ostream &os) const } /*! 
- \relates vpFeatureMomentCInvariant - Print all the interaction matrices of visual features + * \relates vpFeatureMomentCInvariant + * Print all the interaction matrices of visual features */ std::ostream &operator<<(std::ostream &os, const vpFeatureMomentCInvariant &featcinv) { diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp index 53d50b46fe..7373e2f280 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * Manikandan Bakthavatchalam - *****************************************************************************/ + */ #include @@ -50,26 +45,24 @@ #include /*! - Default constructor - \param moments_ : Database of moment primitives. - \param A_ : First plane coefficient for a plane equation of the following - type Ax+By+C=1/Z. \param B_ : Second plane coefficient for a plane equation - of the following type Ax+By+C=1/Z. \param C_ : Third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z. \param featureMoments : - Database of features. -*/ + * Default constructor + * \param moments_ : Database of moment primitives. + * \param A_ : First plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param B_ : Second plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param C_ : Third plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param featureMoments : Database of features. + */ vpFeatureMomentCentered::vpFeatureMomentCentered(vpMomentDatabase &moments_, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments) : vpFeatureMoment(moments_, A_, B_, C_, featureMoments), order(0) -{ -} +{ } /*! -Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment -\param select_one : first index (i) -\param select_two : second index (j) -\return Interaction matrix corresponding to the moment -*/ + * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment + * \param select_one : first index (i). + * \param select_two : second index (j). + * \return Interaction matrix corresponding to the moment. + */ vpMatrix vpFeatureMomentCentered::interaction(unsigned int select_one, unsigned int select_two) const { if (select_one + select_two > moment->getObject().getOrder()) @@ -80,9 +73,9 @@ vpMatrix vpFeatureMomentCentered::interaction(unsigned int select_one, unsigned } /*! - * Core function for the interaction matrix computation for moment m_pq - * Given its dependent moment and interaction matrices, computes the - * interaction matrix of centred moments + * Core function for the interaction matrix computation for moment m_pq + * Given its dependent moment and interaction matrices, computes the + * interaction matrix of centred moments. */ vpMatrix vpFeatureMomentCentered::compute_Lmu_pq(const unsigned int &p, const unsigned int &q, const double &xg, const double &yg, const vpMatrix &L_xg, const vpMatrix &L_yg, @@ -131,16 +124,17 @@ vpMatrix vpFeatureMomentCentered::compute_Lmu_pq(const unsigned int &p, const un } /*! 
- Interface to the interaction matrix computation for centered moments. Called -internally. Calls compute_Lmu_pq() for main computation moments (upto order-1) -Dependencies: - Moment classes - - vpMomentBasic - Interaction matrix classes - - vpMomentGravityCenter - - vpFeatureMomentBasic - - vpFeatureMomentGravityCenter -*/ + * Interface to the interaction matrix computation for centered moments. Called + * internally. Calls compute_Lmu_pq() for main computation moments (upto order-1) + * + * Dependencies to moment classes: + * - vpMomentBasic + * + * Dependencies to interaction matrix classes: + * - vpMomentGravityCenter + * - vpFeatureMomentBasic + * - vpFeatureMomentGravityCenter + */ void vpFeatureMomentCentered::compute_interaction() { #ifdef VISP_MOMENTS_COMBINE_MATRICES @@ -152,7 +146,7 @@ void vpFeatureMomentCentered::compute_interaction() bool found_moment_gravity; const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); if (!found_moment_gravity) throw vpException(vpException::notInitialized, "vpMomentGravityCenter not found"); double xg = momentGravity.get()[0]; @@ -160,7 +154,7 @@ void vpFeatureMomentCentered::compute_interaction() bool found_feature_gravity_center; vpFeatureMomentGravityCenter &featureMomentGravityCenter = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentGravityCenter", found_feature_gravity_center))); + featureMomentsDataBase->get("vpFeatureMomentGravityCenter", found_feature_gravity_center))); if (!found_feature_gravity_center) throw vpException(vpException::notInitialized, "vpFeatureMomentGravityCenter not found"); vpMatrix Lxg = featureMomentGravityCenter.interaction(1 << 0); @@ -168,13 +162,13 @@ void vpFeatureMomentCentered::compute_interaction() bool found_moment_basic; const vpMomentBasic &momentbasic = - static_cast(moments.get("vpMomentBasic", found_moment_basic)); + static_cast(moments.get("vpMomentBasic", found_moment_basic)); if (!found_moment_basic) throw vpException(vpException::notInitialized, "vpMomentBasic not found"); bool found_featuremoment_basic; vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); if (!found_featuremoment_basic) throw vpException(vpException::notInitialized, "vpFeatureMomentBasic not found"); @@ -182,7 +176,7 @@ void vpFeatureMomentCentered::compute_interaction() for (int i = 0; i < (int)order - 1; i++) { for (int j = 0; j < (int)order - 1 - i; j++) { interaction_matrices[(unsigned int)j * order + (unsigned int)i] = - compute_Lmu_pq(i, j, xg, yg, Lxg, Lyg, momentbasic, featureMomentBasic); + compute_Lmu_pq(i, j, xg, yg, Lxg, Lyg, momentbasic, featureMomentBasic); } } #else // #ifdef VISP_MOMENTS_COMBINE_MATRICES @@ -190,9 +184,9 @@ void vpFeatureMomentCentered::compute_interaction() bool found_moment_gravity; const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); if (!found_moment_centered) throw vpException(vpException::notInitialized, "vpMomentCentered not found"); @@ -209,7 +203,8 
@@ void vpFeatureMomentCentered::compute_interaction() if (momentObject.getType() == vpMomentObject::DISCRETE) { delta = 0; epsilon = 1; - } else { + } + else { delta = 1; epsilon = 4; } @@ -237,7 +232,7 @@ void vpFeatureMomentCentered::compute_interaction() interaction_matrices[0][0][WX] = (3 * delta) * Yg * mu00; interaction_matrices[0][0][WY] = -(3 * delta) * Xg * mu00; interaction_matrices[0][0][VZ] = - -A * interaction_matrices[0][0][WY] + B * interaction_matrices[0][0][WX] + (2 * delta) * C * mu00; + -A * interaction_matrices[0][0][WY] + B * interaction_matrices[0][0][WX] + (2 * delta) * C * mu00; interaction_matrices[0][0][WZ] = 0.; for (int i = 1; i < (int)order - 1; i++) { @@ -255,11 +250,11 @@ void vpFeatureMomentCentered::compute_interaction() interaction_matrices[i_][0][VY] = -(delta)*B * mu_i0; interaction_matrices[i_][0][WX] = - (i + 3 * delta) * mu_i1 + (i + 3 * delta) * Yg * mu_i0 + i * Xg * mu_im11 - i * epsilon * n11 * mu_im10; + (i + 3 * delta) * mu_i1 + (i + 3 * delta) * Yg * mu_i0 + i * Xg * mu_im11 - i * epsilon * n11 * mu_im10; interaction_matrices[i_][0][WY] = - -(i + 3 * delta) * mu_ip10 - (2 * i + 3 * delta) * Xg * mu_i0 + i * epsilon * n20 * mu_im10; + -(i + 3 * delta) * mu_ip10 - (2 * i + 3 * delta) * Xg * mu_i0 + i * epsilon * n20 * mu_im10; interaction_matrices[i_][0][VZ] = - -A * interaction_matrices[i_][0][WY] + B * interaction_matrices[i_][0][WX] + (i + 2 * delta) * C * mu_i0; + -A * interaction_matrices[i_][0][WY] + B * interaction_matrices[i_][0][WX] + (i + 2 * delta) * C * mu_i0; interaction_matrices[i_][0][WZ] = i * mu_im11; } @@ -278,11 +273,11 @@ void vpFeatureMomentCentered::compute_interaction() interaction_matrices[j_ * order][0][VY] = -j * A * mu_1jm1 - (j + delta) * B * mu_0j; interaction_matrices[j_ * order][0][WX] = - (j + 3 * delta) * mu_0jp1 + (2 * j + 3 * delta) * Yg * mu_0j - j * epsilon * n02 * mu_0jm1; + (j + 3 * delta) * mu_0jp1 + (2 * j + 3 * delta) * Yg * mu_0j - j * epsilon * n02 * mu_0jm1; interaction_matrices[j_ * order][0][WY] = - -(j + 3 * delta) * mu_1j - (j + 3 * delta) * Xg * mu_0j - j * Yg * mu_1jm1 + j * epsilon * n11 * mu_0jm1; + -(j + 3 * delta) * mu_1j - (j + 3 * delta) * Xg * mu_0j - j * Yg * mu_1jm1 + j * epsilon * n11 * mu_0jm1; interaction_matrices[j_ * order][0][VZ] = -A * interaction_matrices[j_ * order][0][WY] + - B * interaction_matrices[j_ * order][0][WX] + (j + 2 * delta) * C * mu_0j; + B * interaction_matrices[j_ * order][0][WX] + (j + 2 * delta) * C * mu_0j; interaction_matrices[j_ * order][0][WZ] = -j * mu_1jm1; } @@ -307,14 +302,14 @@ void vpFeatureMomentCentered::compute_interaction() interaction_matrices[j_ * order + i_][0][VY] = -j * A * mu_ip1jm1 - (j + delta) * B * mu_ij; interaction_matrices[j_ * order + i_][0][WX] = (i + j + 3 * delta) * mu_ijp1 + - (i + 2 * j + 3 * delta) * Yg * mu_ij + i * Xg * mu_im1jp1 - - i * epsilon * n11 * mu_im1j - j * epsilon * n02 * mu_ijm1; + (i + 2 * j + 3 * delta) * Yg * mu_ij + i * Xg * mu_im1jp1 - + i * epsilon * n11 * mu_im1j - j * epsilon * n02 * mu_ijm1; interaction_matrices[j_ * order + i_][0][WY] = -(i + j + 3 * delta) * mu_ip1j - - (2 * i + j + 3 * delta) * Xg * mu_ij - j * Yg * mu_ip1jm1 + - i * epsilon * n20 * mu_im1j + j * epsilon * n11 * mu_ijm1; + (2 * i + j + 3 * delta) * Xg * mu_ij - j * Yg * mu_ip1jm1 + + i * epsilon * n20 * mu_im1j + j * epsilon * n11 * mu_ijm1; interaction_matrices[j_ * order + i_][0][VZ] = -A * interaction_matrices[j_ * order + i_][0][WY] + - B * interaction_matrices[j_ * order + i_][0][WX] + - (i + j + 2 * delta) * C * mu_ij; + B * 
interaction_matrices[j_ * order + i_][0][WX] + + (i + j + 2 * delta) * C * mu_ij; interaction_matrices[j_ * order + i_][0][WZ] = i * mu_im1jp1 - j * mu_ip1jm1; } } @@ -322,8 +317,8 @@ void vpFeatureMomentCentered::compute_interaction() } /*! - \relates vpFeatureMomentCentered - Print all the interaction matrices of visual features + * \relates vpFeatureMomentCentered + * Print all the interaction matrices of visual features */ std::ostream &operator<<(std::ostream &os, const vpFeatureMomentCentered &mu) { diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentCommon.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentCommon.cpp index bbff94b203..3922385be5 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentCommon.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentCommon.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,28 +29,23 @@ * * Description: * Pre-filled pseudo-database used to handle dependencies between common - *moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + * moment features. + */ #include #include /*! - Constructor which initializes and links all common features in the database - \param moments : database for moment primitives - \param A : first plane coefficient for a plane equation of the following - type Ax+By+C=1/Z \param B : second plane coefficient for a plane equation of - the following type Ax+By+C=1/Z \param C : third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z -*/ + * Constructor which initializes and links all common features in the database + * \param moments : database for moment primitives + * \param A : first plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param B : second plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param C : third plane coefficient for a plane equation of the following type Ax+By+C=1/Z + */ vpFeatureMomentCommon::vpFeatureMomentCommon(vpMomentDatabase &moments, double A, double B, double C) : featureGravity(moments, A, B, C), featureGravityNormalized(moments, A, B, C), featureAn(moments, A, B, C), - featureCInvariant(moments, A, B, C), featureAlpha(moments, A, B, C), featureCentered(moments, A, B, C), - featureMomentBasic(moments, A, B, C), feature_moment_area(moments, A, B, C) + featureCInvariant(moments, A, B, C), featureAlpha(moments, A, B, C), featureCentered(moments, A, B, C), + featureMomentBasic(moments, A, B, C), feature_moment_area(moments, A, B, C) { featureGravity.linkTo(*this); @@ -65,12 +59,11 @@ vpFeatureMomentCommon::vpFeatureMomentCommon(vpMomentDatabase &moments, double A } /*! 
- Update all moment features in the database with plane coefficients - \param A : first plane coefficient for a plane equation of the following - type Ax+By+C=1/Z \param B : second plane coefficient for a plane equation of - the following type Ax+By+C=1/Z \param C : third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z -*/ + * Update all moment features in the database with plane coefficients + * \param A : first plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param B : second plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param C : third plane coefficient for a plane equation of the following type Ax+By+C=1/Z + */ void vpFeatureMomentCommon::updateAll(double A, double B, double C) { featureMomentBasic.update(A, B, C); diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentDatabase.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentDatabase.cpp index be6c1a5653..35a30a1aaf 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentDatabase.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentDatabase.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Pseudo-database used to handle dependencies between moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -43,24 +38,24 @@ #include /*! - Add a moment and it's corresponding name to the database - \param featureMoment : database for moment features - \param name : the feature's name, usually the string naming it's class. Each - name must be unique -*/ + * Add a moment and its corresponding name to the database + * \param featureMoment : database for moment features + * \param name : the feature's name, usually the string naming its class. Each + * name must be unique + */ void vpFeatureMomentDatabase::add(vpFeatureMoment &featureMoment, char *name) { featureMomentsDataBase.insert(std::pair((const char *)name, &featureMoment)); } /*! - Retrieves a moment feature from the database - \param type : the name of the feature, the one specified when using add - \param found : true if the type string is found inside the database, false - otherwise - - \return the moment feature corresponding to the type string -*/ + * Retrieves a moment feature from the database + * \param type : the name of the feature, the one specified when using add + * \param found : true if the type string is found inside the database, false + * otherwise + * + * \return the moment feature corresponding to the type string + */ vpFeatureMoment &vpFeatureMomentDatabase::get(const char *type, bool &found) { std::map::const_iterator it = @@ -71,12 +66,11 @@ vpFeatureMoment &vpFeatureMomentDatabase::get(const char *type, bool &found) } /*! 
- Update all moment features in the database with plane coefficients - \param A : first plane coefficient for a plane equation of the following - type Ax+By+C=1/Z \param B : second plane coefficient for a plane equation of - the following type Ax+By+C=1/Z \param C : third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z -*/ + * Update all moment features in the database with plane coefficients + * \param A : first plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param B : second plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param C : third plane coefficient for a plane equation of the following type Ax+By+C=1/Z + */ void vpFeatureMomentDatabase::updateAll(double A, double B, double C) { std::map::const_iterator itr; @@ -97,17 +91,3 @@ void vpFeatureMomentDatabase::updateAll(double A, double B, double C) } #endif } - -/* -std::ostream & operator<<(std::ostream & os, const vpFeatureMomentDatabase& -m){ std::map::const_iterator itr; os << -"{"; - - for(itr = m.featureMoments.begin(); itr != m.featureMoments.end(); itr++){ - os << (*itr).first << ": [" << *((*itr).second) << "],"; - } - os << "}"; - - return os; -}*/ diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenter.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenter.cpp index c0c1a7b637..a0fa07da9c 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenter.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenter.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include @@ -49,19 +44,19 @@ #include /*! - Computes interaction matrix for gravity center moment. Called internally. - The moment primitives must be computed before calling this. - This feature depends on: - - vpFeatureMomentBasic - - Minimum vpMomentObject order needed to compute this feature: 2. -*/ + * Computes interaction matrix for gravity center moment. Called internally. + * The moment primitives must be computed before calling this. + * This feature depends on: + * - vpFeatureMomentBasic + * + * Minimum vpMomentObject order needed to compute this feature: 2. 
+ */ void vpFeatureMomentGravityCenter::compute_interaction() { bool found_featuremoment_basic; vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); const vpMomentObject &momentObject = moment->getObject(); if (!found_featuremoment_basic) @@ -71,11 +66,11 @@ void vpFeatureMomentGravityCenter::compute_interaction() interaction_matrices[1].resize(1, 6); interaction_matrices[0] = - featureMomentBasic.interaction(1, 0) / momentObject.get(0, 0) - - momentObject.get(1, 0) * pow(momentObject.get(0, 0), -0.2e1) * featureMomentBasic.interaction(0, 0); + featureMomentBasic.interaction(1, 0) / momentObject.get(0, 0) - + momentObject.get(1, 0) * pow(momentObject.get(0, 0), -0.2e1) * featureMomentBasic.interaction(0, 0); interaction_matrices[1] = - featureMomentBasic.interaction(0, 1) / momentObject.get(0, 0) - - momentObject.get(0, 1) * pow(momentObject.get(0, 0), -0.2e1) * featureMomentBasic.interaction(0, 0); + featureMomentBasic.interaction(0, 1) / momentObject.get(0, 0) - + momentObject.get(0, 1) * pow(momentObject.get(0, 0), -0.2e1) * featureMomentBasic.interaction(0, 0); } #else @@ -90,23 +85,23 @@ void vpFeatureMomentGravityCenter::compute_interaction() #include /*! - Computes interaction matrix for gravity center moment. Called internally. - The moment primitives must be computed before calling this. - This feature depends on: - - vpMomentCentered - - vpMomentGravityCenter - - Minimum vpMomentObject order needed to compute this feature: 2. -*/ + * Computes interaction matrix for gravity center moment. Called internally. + * The moment primitives must be computed before calling this. + * This feature depends on: + * - vpMomentCentered + * - vpMomentGravityCenter + * + * Minimum vpMomentObject order needed to compute this feature: 2. + */ void vpFeatureMomentGravityCenter::compute_interaction() { bool found_moment_centered; bool found_moment_gravity; const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); const vpMomentObject &momentObject = moment->getObject(); @@ -120,7 +115,8 @@ void vpFeatureMomentGravityCenter::compute_interaction() int epsilon; if (momentObject.getType() == vpMomentObject::DISCRETE) { epsilon = 1; - } else { + } + else { epsilon = 4; } double n11 = momentCentered.get(1, 1) / momentObject.get(0, 0); diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenterNormalized.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenterNormalized.cpp index 97e6530300..55145e01b9 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenterNormalized.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenterNormalized.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. 
- * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include @@ -53,14 +48,14 @@ #include /*! - Computes interaction matrix for centered and normalized moment. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpFeatureMomentGravityCenter - - vpMomentGravityCenter - - vpMomentAreaNormalized - - vpFeatureMomentAreaNormalized -*/ + * Computes interaction matrix for centered and normalized moment. Called + * internally. The moment primitives must be computed before calling this. This + * feature depends on: + * - vpFeatureMomentGravityCenter + * - vpMomentGravityCenter + * - vpMomentAreaNormalized + * - vpFeatureMomentAreaNormalized + */ void vpFeatureMomentGravityCenterNormalized::compute_interaction() { bool found_moment_gravity; @@ -72,11 +67,11 @@ void vpFeatureMomentGravityCenterNormalized::compute_interaction() const vpMomentAreaNormalized &momentSurfaceNormalized = static_cast( moments.get("vpMomentAreaNormalized", found_moment_surface_normalized)); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); vpFeatureMomentGravityCenter &featureMomentGravity = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentGravityCenter", found_featuremoment_gravity))); + featureMomentsDataBase->get("vpFeatureMomentGravityCenter", found_featuremoment_gravity))); vpFeatureMomentAreaNormalized featureMomentAreaNormalized = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentAreaNormalized", found_featuremoment_surfacenormalized))); + featureMomentsDataBase->get("vpFeatureMomentAreaNormalized", found_featuremoment_surfacenormalized))); if (!found_moment_surface_normalized) throw vpException(vpException::notInitialized, "vpMomentAreaNormalized not found"); @@ -92,9 +87,9 @@ void vpFeatureMomentGravityCenterNormalized::compute_interaction() interaction_matrices[1].resize(1, 6); interaction_matrices[0] = momentGravity.get()[0] * featureMomentAreaNormalized.interaction(1) + - momentSurfaceNormalized.get()[0] * featureMomentGravity.interaction(1); + momentSurfaceNormalized.get()[0] * featureMomentGravity.interaction(1); interaction_matrices[1] = momentGravity.get()[1] * featureMomentAreaNormalized.interaction(1) + - momentSurfaceNormalized.get()[0] * featureMomentGravity.interaction(2); + momentSurfaceNormalized.get()[0] * featureMomentGravity.interaction(2); } #else @@ -110,24 +105,23 @@ void vpFeatureMomentGravityCenterNormalized::compute_interaction() #include /*! - Computes interaction matrix for centered and normalized moment. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpMomentAreaNormalized - - vpMomentGravityCenter -*/ + * Computes interaction matrix for centered and normalized moment. Called + * internally. The moment primitives must be computed before calling this. 
This + * feature depends on: + * - vpMomentCentered + * - vpMomentAreaNormalized + * - vpMomentGravityCenter + */ void vpFeatureMomentGravityCenterNormalized::compute_interaction() { - bool found_moment_surface_normalized; bool found_moment_gravity; bool found_moment_centered; const vpMomentCentered &momentCentered = - static_cast(moments.get("vpMomentCentered", found_moment_centered)); + static_cast(moments.get("vpMomentCentered", found_moment_centered)); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); const vpMomentAreaNormalized &momentSurfaceNormalized = static_cast( moments.get("vpMomentAreaNormalized", found_moment_surface_normalized)); @@ -174,19 +168,20 @@ void vpFeatureMomentGravityCenterNormalized::compute_interaction() Xnvy = A * Xn * e11 + n02 * B * Xn / NA; Xnwx = An * e11 * NA + Yn * n10 - Xn * Xg * e11 + Xn * n01 + Xn * n10 * e11 - Xn * e21 + - (-Xn * n03 + (Xn * n01 - Yn * Xg) * n02) / NA; + (-Xn * n03 + (Xn * n01 - Yn * Xg) * n02) / NA; Xnwy = -An * NA + Xn * e12 + Xn * Xg - An + e11 * Xg * Yn - Xn * n01 * e11 - 2 * Xn * n10 + Xn * e30 + n02 * An + - (-Xn * Xg + Xn * n10) * n02 / NA; + (-Xn * Xg + Xn * n10) * n02 / NA; Ynvx = (Yn - n02 * Yn / NA) * A + Yn * e11 * B; Ynvy = (-Xn + e11 * Yn) * A + (-Yn + n02 * Yn / NA) * B - An * C; Ynwx = n02 * An + Yn * n10 * e11 - e11 * Xg * Yn + An - Yn * e21 + Yn * n01 + - (-Yn * n03 + (Yn * n01 - Yn * Yg) * n02) / NA; + (-Yn * n03 + (Yn * n01 - Yn * Yg) * n02) / NA; Ynwy = -An * e11 * NA + Yn * e11 * Yg - Yn * n01 * e11 + Yn * Xg + Yn * e12 + Yn * e30 - Xn * n01 - 2 * Yn * n10 + - (Yn * n10 - Yn * Xg) * n02 / NA; + (Yn * n10 - Yn * Xg) * n02 / NA; - } else { + } + else { Xnvx = -An * C - A * Xn - Yn * B; Xnvy = (0.5) * B * Xn; diff --git a/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp b/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp index b88c03e09e..7a724ab5a5 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Segment visual feature. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -51,16 +46,13 @@ #include /*! - \file vpFeatureSegment.cpp - \brief class that defines the vpFeatureSegment visual feature -*/ + * \file vpFeatureSegment.cpp + * \brief class that defines the vpFeatureSegment visual feature + */ /*! - - Initialise the memory space requested for segment visual - feature. - -*/ + * Initialise the memory space requested for segment visual feature. + */ void vpFeatureSegment::init() { // feature dimension @@ -76,12 +68,12 @@ void vpFeatureSegment::init() } /*! - Default constructor that builds an empty segment visual feature. - - \param normalized : If true, use normalized features \f${\bf s} = (x_n, y_n, - l_n, \alpha)\f$. If false, use non normalized features \f${\bf s} = (x_c, - y_c, l_c, \alpha)\f$. -*/ + * Default constructor that builds an empty segment visual feature. + * + * \param normalized : If true, use normalized features \f${\bf s} = (x_n, y_n, + * l_n, \alpha)\f$. 
If false, use non normalized features \f${\bf s} = (x_c, + * y_c, l_c, \alpha)\f$. + */ vpFeatureSegment::vpFeatureSegment(bool normalized) : xc_(0), yc_(0), l_(0), alpha_(0), Z1_(0), Z2_(0), cos_a_(0), sin_a_(0), normalized_(normalized) { @@ -89,104 +81,102 @@ vpFeatureSegment::vpFeatureSegment(bool normalized) } /*! - Compute and return the interaction matrix \f$ L \f$ associated to a - subset of the possible features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ - or \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$. - - The interaction matrix of the non normalized feature set is of the following -form: \f[ - {\bf L} = \left[ - \begin{array}{c} - L_{x_c} \\ - L_{y_c} \\ - L_{l} \\ - L_{\alpha} - \end{array} - \right] = - \left[ - \begin{array}{cccccc} - -\lambda_2 & 0 & \lambda_2 x_c - \lambda_1 l \frac{\cos \alpha}{4} & - x_c y_c + l^2 \frac{\cos \alpha \sin \alpha}{4} & - -(1 + {x_{c}}^{2} + l^2 \frac{\cos^2\alpha}{4}) & - y_c \\ - 0 & -\lambda_2 & \lambda_2 y_c - \lambda_1 l \frac{\sin \alpha}{4} & - 1 + {y_{c}}^{2} + l^2 \frac{\sin^2 \alpha}{4} & - -x_c y_c-l^2 \frac{\cos \alpha \sin \alpha}{4} & - -x_c \\ - \lambda_1 \cos \alpha & \lambda_1 \sin \alpha & - \lambda_2 l - \lambda_1 (x_c \cos \alpha + y_c \sin \alpha) & - l (x_c \cos \alpha \sin \alpha + y_c (1 + \sin^2 \alpha)) & - -l (x_c (1 + \cos^2 \alpha)+y_c \cos \alpha \sin \alpha) & - 0 \\ - -\lambda_1 \frac{\sin \alpha}{l} & \lambda_1 \frac{\cos \alpha}{l} & - \lambda_1 \frac{x_c \sin \alpha - y_c \cos \alpha}{l} & - -x_c \sin^2 \alpha + y_c \cos \alpha \sin \alpha & - x_c \cos \alpha \sin \alpha - y_c \cos^2 \alpha & - -1 - \end{array} - \right] - \f] - - with \f$ \lambda_1 = \frac{Z_1 - Z_2}{Z_1 Z_2}\f$ and \f$ \lambda_2 = -\frac{Z_1 + Z_2}{2 Z_1 Z_2}\f$ where \f$Z_i\f$ are the depths of the points. - - - \param select : Selection of a subset of the possible segment features. - - To compute the interaction matrix for all the four - subset features \f$(x_c \f$, \f$ y_c \f$, \f$ l \f$, \f$ \alpha)\f$ or - \f$(x_n \f$, \f$ y_n \f$, \f$ l_n \f$, \f$ \alpha)\f$ use -vpBasicFeature::FEATURE_ALL. In that case the dimension of the interaction -matrix is \f$ [4 \times 6] \f$. - - To compute the interaction matrix for only one of the subset - use one of the following functions: - selectXc(), selectYc(), selectL(), selectAlpha(). In that case, the -returned interaction matrix is of dimension \f$ [1 \times 6] \f$ . - - \return The interaction matrix computed from the segment features. - - The code below shows how to compute the interaction matrix associated to - the visual feature \f${\bf s} = (x_c, y_c, l, \alpha)\f$. - \code -#include -#include - -int main() -{ - // Define two 3D points in the object frame - vpPoint p1(.1, .1, 0.), p2(.3, .2, 0.); - - // Define the camera pose wrt the object - vpHomogeneousMatrix cMo (0, 0, 1, 0, 0, 0); // Z=1 meter - // Compute the coordinates of the points in the camera frame - p1.changeFrame(cMo); - p2.changeFrame(cMo); - // Compute the coordinates of the points in the image plane by perspective projection - p1.project(); p2.project(); - - // Build the segment visual feature - vpFeatureSegment s; - s.buildFrom(p1.get_x(), p1.get_y(), p1.get_Z(), p2.get_x(), p2.get_y(), p2.get_Z()); - - // Compute the interaction matrix - vpMatrix L = s.interaction( vpBasicFeature::FEATURE_ALL ); -} - \endcode - - In this case, L is a 4 by 6 matrix. - - It is also possible to build the interaction matrix associated to - one of the possible features. 
The code below shows how to modify the -previous code to consider as visual feature \f$s = (l, \alpha)\f$. - - \code - vpMatrix L = s.interaction( vpFeatureSegment::selectL() | vpFeatureSegment::selectAlpha() ); - \endcode - - In that case, L is a 2 by 6 matrix. -*/ + * Compute and return the interaction matrix \f$ L \f$ associated to a + * subset of the possible features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ + * or \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$. + * + * The interaction matrix of the non normalized feature set is of the following + * form: \f[ + * {\bf L} = \left[ + * \begin{array}{c} + * L_{x_c} \\ + * L_{y_c} \\ + * L_{l} \\ + * L_{\alpha} + * \end{array} + * \right] = + * \left[ + * \begin{array}{cccccc} + * -\lambda_2 & 0 & \lambda_2 x_c - \lambda_1 l \frac{\cos \alpha}{4} & + * x_c y_c + l^2 \frac{\cos \alpha \sin \alpha}{4} & + * -(1 + {x_{c}}^{2} + l^2 \frac{\cos^2\alpha}{4}) & + * y_c \\ + * 0 & -\lambda_2 & \lambda_2 y_c - \lambda_1 l \frac{\sin \alpha}{4} & + * 1 + {y_{c}}^{2} + l^2 \frac{\sin^2 \alpha}{4} & + * -x_c y_c-l^2 \frac{\cos \alpha \sin \alpha}{4} & + * -x_c \\ + * \lambda_1 \cos \alpha & \lambda_1 \sin \alpha & + * \lambda_2 l - \lambda_1 (x_c \cos \alpha + y_c \sin \alpha) & + * l (x_c \cos \alpha \sin \alpha + y_c (1 + \sin^2 \alpha)) & + * -l (x_c (1 + \cos^2 \alpha)+y_c \cos \alpha \sin \alpha) & + * 0 \\ + * -\lambda_1 \frac{\sin \alpha}{l} & \lambda_1 \frac{\cos \alpha}{l} & + * \lambda_1 \frac{x_c \sin \alpha - y_c \cos \alpha}{l} & + * -x_c \sin^2 \alpha + y_c \cos \alpha \sin \alpha & + * x_c \cos \alpha \sin \alpha - y_c \cos^2 \alpha & + * -1 + * \end{array} + * \right] + * \f] + * + * with \f$ \lambda_1 = \frac{Z_1 - Z_2}{Z_1 Z_2}\f$ and \f$ \lambda_2 = + * \frac{Z_1 + Z_2}{2 Z_1 Z_2}\f$ where \f$Z_i\f$ are the depths of the points. + * + * \param select : Selection of a subset of the possible segment features. + * - To compute the interaction matrix for all the four + * subset features \f$(x_c \f$, \f$ y_c \f$, \f$ l \f$, \f$ \alpha)\f$ or + * \f$(x_n \f$, \f$ y_n \f$, \f$ l_n \f$, \f$ \alpha)\f$ use + * vpBasicFeature::FEATURE_ALL. In that case the dimension of the interaction + * matrix is \f$ [4 \times 6] \f$. + * - To compute the interaction matrix for only one of the subset + * use one of the following functions: + * selectXc(), selectYc(), selectL(), selectAlpha(). In that case, the + * returned interaction matrix is of dimension \f$ [1 \times 6] \f$ . + * + * \return The interaction matrix computed from the segment features. + * + * The code below shows how to compute the interaction matrix associated to + * the visual feature \f${\bf s} = (x_c, y_c, l, \alpha)\f$. + * \code + * #include + * #include + * + * int main() + * { + * // Define two 3D points in the object frame + * vpPoint p1(.1, .1, 0.), p2(.3, .2, 0.); + * + * // Define the camera pose wrt the object + * vpHomogeneousMatrix cMo (0, 0, 1, 0, 0, 0); // Z=1 meter + * // Compute the coordinates of the points in the camera frame + * p1.changeFrame(cMo); + * p2.changeFrame(cMo); + * // Compute the coordinates of the points in the image plane by perspective projection + * p1.project(); p2.project(); + * + * // Build the segment visual feature + * vpFeatureSegment s; + * s.buildFrom(p1.get_x(), p1.get_y(), p1.get_Z(), p2.get_x(), p2.get_y(), p2.get_Z()); + * + * // Compute the interaction matrix + * vpMatrix L = s.interaction( vpBasicFeature::FEATURE_ALL ); + * } + * \endcode + * + * In this case, L is a 4 by 6 matrix. 
+ * + * It is also possible to build the interaction matrix associated to + * one of the possible features. The code below shows how to modify the + * previous code to consider as visual feature \f$s = (l, \alpha)\f$. + * + * \code + * vpMatrix L = s.interaction( vpFeatureSegment::selectL() | vpFeatureSegment::selectAlpha() ); + * \endcode + * + * In that case, L is a 2 by 6 matrix. + */ vpMatrix vpFeatureSegment::interaction(unsigned int select) { - vpMatrix L; L.resize(0, 6); @@ -286,7 +276,8 @@ vpMatrix vpFeatureSegment::interaction(unsigned int select) Lalpha[0][5] = -1; L = vpMatrix::stack(L, Lalpha); } - } else { + } + else { if (vpFeatureSegment::selectXc() & select) { vpMatrix Lxc(1, 6); Lxc[0][0] = -lambda2; @@ -335,30 +326,29 @@ vpMatrix vpFeatureSegment::interaction(unsigned int select) } /*! - Computes the error between the current and the desired visual features from - a subset of the possible features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ or - \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$. - - For the angular component \f$\alpha\f$, we define the error as - \f$\alpha \ominus \alpha^*\f$, where \f$\ominus\f$ is modulo \f$2\pi\f$ - subtraction. - - \param s_star : Desired 2D segment feature. - - \param select : The error can be computed for a selection of a - subset of the possible segment features. - - To compute the error for all the four parameters use - vpBasicFeature::FEATURE_ALL. In that case the error vector is a 4 - dimension column vector. - - To compute the error for only one subfeature of - \f${\bf s} = (x_c, y_c, l, \alpha)\f$ or \f${\bf s} = (x_n, y_n, l_n, - \alpha)\f$ feature set use one of the following functions: selectXc(), - selectYc(), selectL(), selectAlpha(). - - \return The error between the current and the desired - visual feature. - -*/ + * Computes the error between the current and the desired visual features from + * a subset of the possible features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ or + * \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$. + * + * For the angular component \f$\alpha\f$, we define the error as + * \f$\alpha \ominus \alpha^*\f$, where \f$\ominus\f$ is modulo \f$2\pi\f$ + * subtraction. + * + * \param s_star : Desired 2D segment feature. + * + * \param select : The error can be computed for a selection of a + * subset of the possible segment features. + * - To compute the error for all the four parameters use + * vpBasicFeature::FEATURE_ALL. In that case the error vector is a 4 + * dimension column vector. + * - To compute the error for only one subfeature of + * \f${\bf s} = (x_c, y_c, l, \alpha)\f$ or \f${\bf s} = (x_n, y_n, l_n, + * \alpha)\f$ feature set use one of the following functions: selectXc(), + * selectYc(), selectL(), selectAlpha(). + * + * \return The error between the current and the desired + * visual feature. + */ vpColVector vpFeatureSegment::error(const vpBasicFeature &s_star, unsigned int select) { vpColVector e(0); @@ -394,32 +384,32 @@ vpColVector vpFeatureSegment::error(const vpBasicFeature &s_star, unsigned int s } /*! - Print to stdout the values of the current visual feature \f$ s \f$. - - \param select : Selection of a subset of the possible segement features (\f$ - x_c \f$,\f$ y_c \f$,\f$ l \f$,\f$ \alpha \f$). 
- - \code - s.print(); - \endcode - - produces the following output: - - \code - vpFeatureSegment: (xc = -0.255634; yc = -0.13311; l = 0.105005; alpha = 92.1305 deg) - \endcode - - while - \code - s.print( vpFeatureSegment::selectL() | vpFeatureSegment::selectAlpha() ); - \endcode - - produces the following output: - - \code - vpFeatureSegment: (l = 0.105005; alpha = 92.1305 deg) - \endcode -*/ + * Print to stdout the values of the current visual feature \f$ s \f$. + * + * \param select : Selection of a subset of the possible segment features (\f$ + * x_c \f$,\f$ y_c \f$,\f$ l \f$,\f$ \alpha \f$). + * + * \code + * s.print(); + * \endcode + * + * produces the following output: + * + * \code + * vpFeatureSegment: (xc = -0.255634; yc = -0.13311; l = 0.105005; alpha = 92.1305 deg) + * \endcode + * + * while + * \code + * s.print( vpFeatureSegment::selectL() | vpFeatureSegment::selectAlpha() ); + * \endcode + * + * produces the following output: + * + * \code + * vpFeatureSegment: (l = 0.105005; alpha = 92.1305 deg) + * \endcode + */ void vpFeatureSegment::print(unsigned int select) const { std::cout << "vpFeatureSegment: ("; @@ -451,15 +441,14 @@ void vpFeatureSegment::print(unsigned int select) const } /*! - Create an object with the same type. - - \code - vpBasicFeature *s_star; - vpFeatureSegment s; - s_star = s.duplicate(); // s_star is now a vpFeatureSegment - \endcode - -*/ + * Create an object with the same type. + * + * \code + * vpBasicFeature *s_star; + * vpFeatureSegment s; + * s_star = s.duplicate(); // s_star is now a vpFeatureSegment + * \endcode + */ vpFeatureSegment *vpFeatureSegment::duplicate() const { vpFeatureSegment *feature; @@ -469,16 +458,14 @@ vpFeatureSegment *vpFeatureSegment::duplicate() const } /*! - - Displays a segment representing the feature on a greyscale image. - The two limiting points are displayed in cyan and yellow. - - \param cam : Camera parameters. - \param I : Image. - \param color : Color to use for the segment. - \param thickness : Thickness of the feature representation. - -*/ + * Displays a segment representing the feature on a grayscale image. + * The two limiting points are displayed in cyan and yellow. + * + * \param cam : Camera parameters. + * \param I : Image. + * \param color : Color to use for the segment. + * \param thickness : Thickness of the feature representation. + */ void vpFeatureSegment::display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color, unsigned int thickness) const { @@ -487,7 +474,8 @@ void vpFeatureSegment::display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color, unsigned int thickness) const { @@ -525,7 +512,8 @@ void vpFeatureSegment::display(const vpCameraParameters &cam, const vpImage -/*! - \file vpGenericFeature.cpp - Class that defines what is a generic feature. This class could be used to - create new features not implemented in ViSP. -*/ - -vpGenericFeature::~vpGenericFeature() {} - void vpGenericFeature::init() { s = 0; } /*! 
@@ -102,7 +94,7 @@ void vpGenericFeature::setError(const vpColVector &error_vector) vpERROR_TRACE("size mismatch between error dimension" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between error dimension" - "and feature dimension")); + "and feature dimension")); } errorStatus = errorInitialized; err = error_vector; @@ -169,7 +161,7 @@ vpColVector vpGenericFeature::error(const vpBasicFeature &s_star, unsigned int s vpERROR_TRACE("size mismatch between s* dimension " "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between s* dimension " - "and feature dimension")); + "and feature dimension")); } vpColVector e(0); @@ -181,7 +173,8 @@ vpColVector vpGenericFeature::error(const vpBasicFeature &s_star, unsigned int s "in you visual servoing loop"); throw(vpFeatureException(vpFeatureException::badErrorVectorError, "Error has no been updated since last iteration")); - } else if (errorStatus == errorInitialized) { + } + else if (errorStatus == errorInitialized) { vpDEBUG_TRACE(25, "Error init: e=e."); errorStatus = errorHasToBeUpdated; for (unsigned int i = 0; i < dim_s; i++) @@ -191,7 +184,8 @@ vpColVector vpGenericFeature::error(const vpBasicFeature &s_star, unsigned int s e = vpColVector::stack(e, ex); } - } else { + } + else { vpDEBUG_TRACE(25, "Error not init: e=s-s*."); for (unsigned int i = 0; i < dim_s; i++) @@ -202,7 +196,8 @@ vpColVector vpGenericFeature::error(const vpBasicFeature &s_star, unsigned int s e = vpColVector::stack(e, ex); } } - } catch (...) { + } + catch (...) { throw; } return e; @@ -259,7 +254,8 @@ vpColVector vpGenericFeature::error(unsigned int select) "in you visual servoing loop"); throw(vpFeatureException(vpFeatureException::badErrorVectorError, "Error has no been updated since last iteration")); - } else if (errorStatus == errorInitialized) { + } + else if (errorStatus == errorInitialized) { errorStatus = errorHasToBeUpdated; for (unsigned int i = 0; i < dim_s; i++) if (FEATURE_LINE[i] & select) { @@ -268,7 +264,8 @@ vpColVector vpGenericFeature::error(unsigned int select) e = vpColVector::stack(e, ex); } - } else { + } + else { for (unsigned int i = 0; i < dim_s; i++) if (FEATURE_LINE[i] & select) { @@ -278,7 +275,8 @@ vpColVector vpGenericFeature::error(unsigned int select) e = vpColVector::stack(e, ex); } } - } catch (...) { + } + catch (...) 
{ throw; } @@ -345,7 +343,7 @@ vpMatrix vpGenericFeature::interaction(unsigned int select) std::cout << "with Ls=s* (default) or vice versa" << std::endl; throw(vpFeatureException(vpFeatureException::notInitializedError, "size mismatch between s* dimension " - "and feature dimension")); + "and feature dimension")); } vpMatrix Ls; @@ -382,7 +380,7 @@ void vpGenericFeature::setInteractionMatrix(const vpMatrix &L_) vpERROR_TRACE("size mismatch between interaction matrix size " "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between interaction matrix size " - "and feature dimension")); + "and feature dimension")); } this->L = L_; @@ -405,7 +403,7 @@ void vpGenericFeature::set_s(const vpColVector &s_vector) vpERROR_TRACE("size mismatch between s dimension" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between s dimension" - "and feature dimension")); + "and feature dimension")); } this->s = s_vector; } @@ -426,7 +424,7 @@ void vpGenericFeature::get_s(vpColVector &s_vector) const vpERROR_TRACE("size mismatch between s dimension" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between s dimension" - "and feature dimension")); + "and feature dimension")); } s_vector = this->s; } @@ -452,7 +450,7 @@ void vpGenericFeature::set_s(double s0, double s1, double s2) vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s[0] = s0; s[1] = s1; @@ -480,7 +478,7 @@ void vpGenericFeature::get_s(double &s0, double &s1, double &s2) const vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s0 = s[0]; s1 = s[1]; @@ -505,7 +503,7 @@ void vpGenericFeature::set_s(double s0, double s1) vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s[0] = s0; s[1] = s1; @@ -529,7 +527,7 @@ void vpGenericFeature::get_s(double &s0, double &s1) const vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s0 = s[0]; s1 = s[1]; @@ -551,7 +549,7 @@ void vpGenericFeature::set_s(double s0) vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s[0] = s0; } @@ -572,7 +570,7 @@ void vpGenericFeature::get_s(double &s0) const vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s0 = s[0]; } From 6cc1fa26d4573d918b63e438bade287cc1c7f92e Mon Sep 17 00:00:00 2001 From: Samuel Felton Date: Tue, 
24 Oct 2023 20:17:38 +0200 Subject: [PATCH 16/43] remove unnecessary includes in test --- modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp index fbb3ddb5cf..2a8297ce4a 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp @@ -48,11 +48,6 @@ #include #include -#include -#include -#include -#include -#include From 7dd377cb2e7236d940b80b507c623c8386d0cc26 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Tue, 24 Oct 2023 21:58:49 +0200 Subject: [PATCH 17/43] Fix previous commit when OpenCV is used --- .../include/visp3/mbt/vpMbDepthDenseTracker.h | 6 +- .../visp3/mbt/vpMbDepthNormalTracker.h | 6 +- .../include/visp3/mbt/vpMbEdgeKltTracker.h | 363 ++++++++------- .../include/visp3/mbt/vpMbGenericTracker.h | 308 ++++++------- .../mbt/include/visp3/mbt/vpMbKltTracker.h | 425 +++++++++--------- 5 files changed, 552 insertions(+), 556 deletions(-) diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h index b42f849550..0dd98709d4 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Model-based tracker using depth dense features. - * -*****************************************************************************/ + */ #ifndef _vpMbDepthDenseTracker_h_ #define _vpMbDepthDenseTracker_h_ diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h index 72678f823e..c48d2fd5f4 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Model-based tracker using depth normal features. - * -*****************************************************************************/ + */ #ifndef _vpMbDepthNormalTracker_h_ #define _vpMbDepthNormalTracker_h_ diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h index 73152356c9..6d6a6be8fd 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,13 +29,12 @@ * * Description: * Hybrid tracker based on edges (vpMbt) and points of interests (KLT) - * -*****************************************************************************/ + */ /*! 
- \file vpMbEdgeKltTracker.h - \brief Hybrid tracker based on edges (vpMbt) and points of interests (KLT) -*/ + * \file vpMbEdgeKltTracker.h + * \brief Hybrid tracker based on edges (vpMbt) and points of interests (KLT) + */ #ifndef _vpMbEdgeKltTracker_h_ #define _vpMbEdgeKltTracker_h_ @@ -55,156 +53,156 @@ #include /*! - \class vpMbEdgeKltTracker - \ingroup group_mbt_trackers - \warning This class is deprecated for user usage. You should rather use the high level - vpMbGenericTracker class. - \warning This class is only available if OpenCV is installed, and used. - - \brief Hybrid tracker based on moving-edges and keypoints tracked using KLT - tracker. - - The \ref tutorial-tracking-mb-deprecated is a good starting point to use this class. - - The tracker requires the knowledge of the 3D model that could be provided in - a vrml or in a cao file. The cao format is described in loadCAOModel(). It may - also use an xml file used to tune the behavior of the tracker and an init file - used to compute the pose at the very first image. - - The following code shows the simplest way to use the tracker. The \ref - tutorial-tracking-mb-deprecated is also a good starting point to use this class. - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose computed using the tracker. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Hybrid Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - // Load the 3d model in cao format. No 3rd party library is required - tracker.loadModel("cube.cao"); - // Get the camera parameters used by the tracker (from the configuration file). - tracker.getCameraParameters(cam); - // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the - // cube.init file. - tracker.initClick(I, "cube.init"); - - while(true){ - // Acquire a new image - vpDisplay::display(I); - tracker.track(I); // Track the object on this image - tracker.getPose(cMo); // Get the pose - - tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -#endif -} -\endcode - - The tracker can also be used without display, in that case the initial pose - must be known (object always at the same initial pose for example) or -computed using another method: - -\code -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. - - //acquire an image - vpImageIo::read(I, "cube.pgm"); // Example of acquisition - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - tracker.initFromPose(I, cMo); // initialise the tracker with the given pose. 
- - while(true){ - // acquire a new image - tracker.track(I); // track the object on this image - tracker.getPose(cMo); // get the pose - } - - return 0; -#endif -} -\endcode - - Finally it can be used not to track an object but just to display a model at -a given pose: - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used to display the model. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Hybrid Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - - while(true){ - // acquire a new image - // Get the pose using any method - vpDisplay::display(I); - tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. - vpDisplay::flush(I); - } - -#endif - - return 0; -} -\endcode -*/ + * \class vpMbEdgeKltTracker + * \ingroup group_mbt_trackers + * \warning This class is deprecated for user usage. You should rather use the high level + * vpMbGenericTracker class. + * \warning This class is only available if OpenCV is installed, and used. + * + * \brief Hybrid tracker based on moving-edges and keypoints tracked using KLT + * tracker. + * + * The \ref tutorial-tracking-mb-deprecated is a good starting point to use this class. + * + * The tracker requires the knowledge of the 3D model that could be provided in + * a vrml or in a cao file. The cao format is described in loadCAOModel(). It may + * also use an xml file used to tune the behavior of the tracker and an init file + * used to compute the pose at the very first image. + * + * The following code shows the simplest way to use the tracker. The \ref + * tutorial-tracking-mb-deprecated is also a good starting point to use this class. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose computed using the tracker. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Hybrid Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * // Load the 3d model in cao format. No 3rd party library is required + * tracker.loadModel("cube.cao"); + * // Get the camera parameters used by the tracker (from the configuration file). + * tracker.getCameraParameters(cam); + * // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the + * // cube.init file. + * tracker.initClick(I, "cube.init"); + * + * while(true){ + * // Acquire a new image + * vpDisplay::display(I); + * tracker.track(I); // Track the object on this image + * tracker.getPose(cMo); // Get the pose + * + * tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. 
+ * vpDisplay::flush(I); + * } + * + * return 0; + * #endif + * } + * \endcode + * + * The tracker can also be used without display, in that case the initial pose + * must be known (object always at the same initial pose for example) or + * computed using another method: + * + * \code + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. + * + * //acquire an image + * vpImageIo::read(I, "cube.pgm"); // Example of acquisition + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * tracker.initFromPose(I, cMo); // initialise the tracker with the given pose. + * + * while(true){ + * // acquire a new image + * tracker.track(I); // track the object on this image + * tracker.getPose(cMo); // get the pose + * } + * + * return 0; + * #endif + * } + * \endcode + * + * Finally it can be used not to track an object but just to display a model at + * a given pose: + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used to display the model. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Hybrid Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * + * while(true){ + * // acquire a new image + * // Get the pose using any method + * vpDisplay::display(I); + * tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. + * vpDisplay::flush(I); + * } + * + * #endif + * + * return 0; + * } + * \endcode + */ class VISP_EXPORT vpMbEdgeKltTracker : #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) public vpMbKltTracker, @@ -246,11 +244,11 @@ class VISP_EXPORT vpMbEdgeKltTracker : virtual inline vpColVector getRobustWeights() const override { return m_w_hybrid; } /*! - Get the near distance for clipping. - - \return Near clipping value. + * Get the near distance for clipping. + * + * \return Near clipping value. */ - virtual inline double getNearClippingDistance() const { return vpMbKltTracker::getNearClippingDistance(); } + virtual inline double getNearClippingDistance() const override { return vpMbKltTracker::getNearClippingDistance(); } virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; @@ -261,35 +259,35 @@ class VISP_EXPORT vpMbEdgeKltTracker : virtual void setCameraParameters(const vpCameraParameters &cam) override; /*! - Specify which clipping to use. - - \sa vpMbtPolygonClipping - - \param flags : New clipping flags. + * Specify which clipping to use. 
+ * + * \sa vpMbtPolygonClipping + * + * \param flags : New clipping flags. */ virtual void setClipping(const unsigned int &flags) override { vpMbEdgeTracker::setClipping(flags); } /*! - Set the far distance for clipping. - - \param dist : Far clipping value. + * Set the far distance for clipping. + * + * \param dist : Far clipping value. */ virtual void setFarClippingDistance(const double &dist) override { vpMbEdgeTracker::setFarClippingDistance(dist); } /*! - Set the near distance for clipping. - - \param dist : Near clipping value. + * Set the near distance for clipping. + * + * \param dist : Near clipping value. */ virtual void setNearClippingDistance(const double &dist) override { vpMbEdgeTracker::setNearClippingDistance(dist); } /*! - Use Ogre3D for visibility tests - - \warning This function has to be called before the initialization of the - tracker. - - \param v : True to use it, False otherwise + * Use Ogre3D for visibility tests + * + * \warning This function has to be called before the initialization of the + * tracker. + * + * \param v : True to use it, False otherwise */ virtual void setOgreVisibilityTest(const bool &v) override { @@ -300,10 +298,10 @@ class VISP_EXPORT vpMbEdgeKltTracker : } /*! - Use Scanline algorithm for visibility tests - - \param v : True to use it, False otherwise - */ + * Use Scanline algorithm for visibility tests + * + * \param v : True to use it, False otherwise + */ virtual void setScanLineVisibilityTest(const bool &v) override { vpMbEdgeTracker::setScanLineVisibilityTest(v); @@ -312,6 +310,7 @@ class VISP_EXPORT vpMbEdgeKltTracker : virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; + /*! * Set if the projection error criteria has to be computed. * diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index fd8e63dc8d..ca3b07483b 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,12 +29,12 @@ * * Description: * Generic model-based tracker. - * -*****************************************************************************/ + */ + /*! - \file vpMbGenericTracker.h - \brief Generic model-based tracker -*/ + * \file vpMbGenericTracker.h + *\brief Generic model-based tracker + */ #ifndef _vpMbGenericTracker_h_ #define _vpMbGenericTracker_h_ @@ -51,151 +50,150 @@ #endif /*! - \class vpMbGenericTracker - \ingroup group_mbt_trackers - \brief Real-time 6D object pose tracking using its CAD model. - - The tracker requires the knowledge of the 3D model that could be provided in - a vrml or in a cao file. The cao format is described in loadCAOModel(). It may - also use an xml file used to tune the behavior of the tracker and an init file - used to compute the pose at the very first image. - - This class allows tracking an object or a scene given its 3D model. More information in \cite Trinh18a. - A lot of videos can be found on YouTube VispTeam channel. - - \htmlonly - - - - - \endhtmlonly - - The \ref tutorial-tracking-mb-generic is a good starting point to use this - class. 
If you want to track an object with a stereo camera refer to - \ref tutorial-tracking-mb-generic-stereo. If you want rather use a RGB-D camera and exploit - the depth information, you may see \ref tutorial-tracking-mb-generic-rgbd. - There is also \ref tutorial-detection-object that shows how to initialize the tracker from - an initial pose provided by a detection algorithm. - - JSON serialization - - Since ViSP 3.6.0, if ViSP is build with \ref soft_tool_json 3rd-party we introduce JSON serialization capabilities for vpMbGenericTracker. - The following sample code shows how to save a model-based tracker settings in a file named `mbt.json` - and reload the values from this JSON file. - \code - #include - - int main() - { - #if defined(VISP_HAVE_NLOHMANN_JSON) - std::string filename = "mbt-generic.json"; - { - vpMbGenericTracker mbt; - mbt.saveConfigFile(filename); - } - { - vpMbGenericTracker mbt; - bool verbose = false; - std::cout << "Read model-based tracker settings from " << filename << std::endl; - mbt.loadConfigFile(filename, verbose); - } - #endif - } - \endcode - If you build and execute the sample code, it will produce the following output: - \code{.unparsed} - Read model-based tracker settings from mbt-generic.json - \endcode - - The content of the `mbt.json` file is the following: - \code{.unparsed} - $ cat mbt-generic.json - { - "referenceCameraName": "Camera", - "trackers": { - "Camera": { - "angleAppear": 89.0, - "angleDisappear": 89.0, - "camTref": { - "cols": 4, - "data": [ - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 1.0 - ], - "rows": 4, - "type": "vpHomogeneousMatrix" - }, - "camera": { - "model": "perspectiveWithoutDistortion", - "px": 600.0, - "py": 600.0, - "u0": 192.0, - "v0": 144.0 - }, - "clipping": { - "far": 100.0, - "flags": [ - "none" - ], - "near": 0.001 - }, - "display": { - "features": false, - "projectionError": false - }, - "edge": { - "maskSign": 0, - "maskSize": 5, - "minSampleStep": 4.0, - "mu": [ - 0.5, - 0.5 - ], - "nMask": 180, - "ntotalSample": 0, - "pointsToTrack": 500, - "range": 4, - "sampleStep": 10.0, - "strip": 2, - "threshold": 1500.0 - }, - "lod": { - "minLineLengthThresholdGeneral": 50.0, - "minPolygonAreaThresholdGeneral": 2500.0, - "useLod": false - }, - "type": [ - "edge" - ], - "visibilityTest": { - "ogre": false, - "scanline": false - } - } - }, - "version": "1.0" - } - \endcode - -*/ + * \class vpMbGenericTracker + * \ingroup group_mbt_trackers + * \brief Real-time 6D object pose tracking using its CAD model. + * + * The tracker requires the knowledge of the 3D model that could be provided in + * a vrml or in a cao file. The cao format is described in loadCAOModel(). It may + * also use an xml file used to tune the behavior of the tracker and an init file + * used to compute the pose at the very first image. + * + * This class allows tracking an object or a scene given its 3D model. More information in \cite Trinh18a. + * A lot of videos can be found on YouTube VispTeam channel. + * + * \htmlonly + * + * + * + * + * \endhtmlonly + * + * The \ref tutorial-tracking-mb-generic is a good starting point to use this + * class. If you want to track an object with a stereo camera refer to + * \ref tutorial-tracking-mb-generic-stereo. If you want rather use a RGB-D camera and exploit + * the depth information, you may see \ref tutorial-tracking-mb-generic-rgbd. 
+ * There is also \ref tutorial-detection-object that shows how to initialize the tracker from + * an initial pose provided by a detection algorithm. + * + * JSON serialization + * + * Since ViSP 3.6.0, if ViSP is build with \ref soft_tool_json 3rd-party we introduce JSON serialization capabilities for vpMbGenericTracker. + * The following sample code shows how to save a model-based tracker settings in a file named `mbt.json` + * and reload the values from this JSON file. + * \code + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_NLOHMANN_JSON) + * std::string filename = "mbt-generic.json"; + * { + * vpMbGenericTracker mbt; + * mbt.saveConfigFile(filename); + * } + * { + * vpMbGenericTracker mbt; + * bool verbose = false; + * std::cout << "Read model-based tracker settings from " << filename << std::endl; + * mbt.loadConfigFile(filename, verbose); + * } + * #endif + * } + * \endcode + * If you build and execute the sample code, it will produce the following output: + * \code{.unparsed} + * Read model-based tracker settings from mbt-generic.json + * \endcode + * + * The content of the `mbt.json` file is the following: + * \code{.unparsed} + * $ cat mbt-generic.json + * { + * "referenceCameraName": "Camera", + * "trackers": { + * "Camera": { + * "angleAppear": 89.0, + * "angleDisappear": 89.0, + * "camTref": { + * "cols": 4, + * "data": [ + * 1.0, + * 0.0, + * 0.0, + * 0.0, + * 0.0, + * 1.0, + * 0.0, + * 0.0, + * 0.0, + * 0.0, + * 1.0, + * 0.0, + * 0.0, + * 0.0, + * 0.0, + * 1.0 + * ], + * "rows": 4, + * "type": "vpHomogeneousMatrix" + * }, + * "camera": { + * "model": "perspectiveWithoutDistortion", + * "px": 600.0, + * "py": 600.0, + * "u0": 192.0, + * "v0": 144.0 + * }, + * "clipping": { + * "far": 100.0, + * "flags": [ + * "none" + * ], + * "near": 0.001 + * }, + * "display": { + * "features": false, + * "projectionError": false + * }, + * "edge": { + * "maskSign": 0, + * "maskSize": 5, + * "minSampleStep": 4.0, + * "mu": [ + * 0.5, + * 0.5 + * ], + * "nMask": 180, + * "ntotalSample": 0, + * "pointsToTrack": 500, + * "range": 4, + * "sampleStep": 10.0, + * "strip": 2, + * "threshold": 1500.0 + * }, + * "lod": { + * "minLineLengthThresholdGeneral": 50.0, + * "minPolygonAreaThresholdGeneral": 2500.0, + * "useLod": false + * }, + * "type": [ + * "edge" + * ], + * "visibilityTest": { + * "ogre": false, + * "scanline": false + * } + * } + * }, + * "version": "1.0" + * } + * \endcode + */ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker { public: @@ -319,16 +317,19 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker * Return the number of depth dense features taken into account in the virtual visual-servoing scheme. */ virtual inline unsigned int getNbFeaturesDepthDense() const { return m_nb_feat_depthDense; } + /*! * Return the number of depth normal features features taken into account in the virtual visual-servoing scheme. */ virtual inline unsigned int getNbFeaturesDepthNormal() const { return m_nb_feat_depthNormal; } + /*! * Return the number of moving-edges features taken into account in the virtual visual-servoing scheme. * * This function is similar to getNbPoints(). */ virtual inline unsigned int getNbFeaturesEdge() const { return m_nb_feat_edge; } + /*! * Return the number of klt keypoints features taken into account in the virtual visual-servoing scheme. */ @@ -849,7 +850,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(vpMbGenericTracker::vpTrackerType, { * @brief Serialize a tracker wrapper's settings into a JSON representation. 
* \sa from_json for more details on what is serialized * @param j The modified json object. -* @param t The tracker to serialize. +* @param t The tracker to serialize. */ inline void to_json(nlohmann::json &j, const vpMbGenericTracker::TrackerWrapper &t) { @@ -1047,5 +1048,4 @@ inline void from_json(const nlohmann::json &j, vpMbGenericTracker::TrackerWrappe #endif - #endif diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h index 87b99217b2..448f7b4bc3 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,12 +29,12 @@ * * Description: * Model based tracker using only KLT - * -*****************************************************************************/ + */ + /*! - \file vpMbKltTracker.h - \brief Model based tracker using only KLT -*/ + * \file vpMbKltTracker.h + * \brief Model based tracker using only KLT + */ #ifndef _vpMbKltTracker_h_ #define _vpMbKltTracker_h_ @@ -57,152 +56,152 @@ #include /*! - \class vpMbKltTracker - \ingroup group_mbt_trackers - \warning This class is deprecated for user usage. You should rather use the high level - vpMbGenericTracker class. - \warning This class is only available if OpenCV is installed, and used. - - \brief Model based tracker using only KLT. - - The \ref tutorial-tracking-mb-deprecated is a good starting point to use this class. - - The tracker requires the knowledge of the 3D model that could be provided in - a vrml or in a cao file. The cao format is described in loadCAOModel(). It may - also use an xml file used to tune the behavior of the tracker and an init file - used to compute the pose at the very first image. - - The following code shows the simplest way to use the tracker. The \ref - tutorial-tracking-mb-deprecated is also a good starting point to use this class. - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbKltTracker tracker; // Create a model based tracker via KLT points. - vpImage I; - vpHomogeneousMatrix cMo; // Pose computed using the tracker. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Klt Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required - // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the - // cube.init file. - tracker.initClick(I, "cube.init"); - - while(true){ - // Acquire a new image - vpDisplay::display(I); - tracker.track(I); // Track the object on this image - tracker.getPose(cMo); // Get the pose - - tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. 
- vpDisplay::flush(I); - } - - return 0; -#endif -} -\endcode - - The tracker can also be used without display, in that case the initial pose - must be known (object always at the same initial pose for example) or -computed using another method: - -\code -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbKltTracker tracker; // Create a model based tracker via Klt Points. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. - - //acquire an image - vpImageIo::read(I, "cube.pgm"); // Example of acquisition - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - tracker.initFromPose(I, cMo); // initialize the tracker with the given pose. - - while(true){ - // acquire a new image - tracker.track(I); // track the object on this image - tracker.getPose(cMo); // get the pose - } - - return 0; -#endif -} -\endcode - - Finally it can be used not to track an object but just to display a model at -a given pose: - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbKltTracker tracker; // Create a model based tracker via Klt Points. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used to display the model. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Klt Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - - while(true){ - // acquire a new image - // Get the pose using any method - vpDisplay::display(I); - tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -#endif -} -\endcode -*/ + * \class vpMbKltTracker + * \ingroup group_mbt_trackers + * \warning This class is deprecated for user usage. You should rather use the high level + * vpMbGenericTracker class. + * \warning This class is only available if OpenCV is installed, and used. + * + * \brief Model based tracker using only KLT. + * + * The \ref tutorial-tracking-mb-deprecated is a good starting point to use this class. + * + * The tracker requires the knowledge of the 3D model that could be provided in + * a vrml or in a cao file. The cao format is described in loadCAOModel(). It may + * also use an xml file used to tune the behavior of the tracker and an init file + * used to compute the pose at the very first image. + * + * The following code shows the simplest way to use the tracker. The \ref + * tutorial-tracking-mb-deprecated is also a good starting point to use this class. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbKltTracker tracker; // Create a model based tracker via KLT points. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose computed using the tracker. 
+ * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Klt Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required + * // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the + * // cube.init file. + * tracker.initClick(I, "cube.init"); + * + * while(true){ + * // Acquire a new image + * vpDisplay::display(I); + * tracker.track(I); // Track the object on this image + * tracker.getPose(cMo); // Get the pose + * + * tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. + * vpDisplay::flush(I); + * } + * + * return 0; + * #endif + * } + * \endcode + * + * The tracker can also be used without display, in that case the initial pose + * must be known (object always at the same initial pose for example) or + * computed using another method: + * + * \code + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbKltTracker tracker; // Create a model based tracker via Klt Points. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. + * + * //acquire an image + * vpImageIo::read(I, "cube.pgm"); // Example of acquisition + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * tracker.initFromPose(I, cMo); // initialize the tracker with the given pose. + * + * while(true){ + * // acquire a new image + * tracker.track(I); // track the object on this image + * tracker.getPose(cMo); // get the pose + * } + * + * return 0; + * #endif + * } + * \endcode + * + * Finally it can be used not to track an object but just to display a model at + * a given pose: + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbKltTracker tracker; // Create a model based tracker via Klt Points. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used to display the model. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Klt Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * + * while(true){ + * // acquire a new image + * // Get the pose using any method + * vpDisplay::display(I); + * tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. 
+ * vpDisplay::flush(I); + * } + * + * return 0; + * #endif + * } + * \endcode + */ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker { protected: @@ -269,11 +268,11 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker /*! Return the address of the Klt feature list. */ virtual std::list &getFeaturesKlt() { return kltPolygons; } -/*! - Get the current list of KLT points. - - \return the list of KLT points through vpKltOpencv. - */ + /*! + * Get the current list of KLT points. + * + * \return the list of KLT points through vpKltOpencv. + */ inline std::vector getKltPoints() const { return tracker.getFeatures(); } std::vector getKltImagePoints() const; @@ -281,31 +280,31 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker std::map getKltImagePointsWithId() const; /*! - Get the klt tracker at the current state. - - \return klt tracker. + * Get the klt tracker at the current state. + * + * \return klt tracker. */ inline vpKltOpencv getKltOpencv() const { return tracker; } /*! - Get the erosion of the mask used on the Model faces. - - \return The erosion. + * Get the erosion of the mask used on the Model faces. + * + * \return The erosion. */ inline unsigned int getKltMaskBorder() const { return maskBorder; } /*! - Get the current number of klt points. - - \return the number of features + * Get the current number of klt points. + * + * \return the number of features */ inline int getKltNbPoints() const { return tracker.getNbFeatures(); } /*! - Get the threshold for the acceptation of a point. - - \return threshold_outlier : Threshold for the weight below which a point - is rejected. + * Get the threshold for the acceptation of a point. + * + * \return threshold_outlier : Threshold for the weight below which a point + * is rejected. */ inline double getKltThresholdAcceptation() const { return threshold_outlier; } @@ -321,15 +320,15 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, - bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; + bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); void resetTracker() override; void setCameraParameters(const vpCameraParameters &cam) override; /*! - Set the erosion of the mask used on the Model faces. - - \param e : The desired erosion. + * Set the erosion of the mask used on the Model faces. + * + * \param e : The desired erosion. */ inline void setKltMaskBorder(const unsigned int &e) { @@ -341,19 +340,19 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker virtual void setKltOpencv(const vpKltOpencv &t); /*! - Set the threshold for the acceptation of a point. - - \param th : Threshold for the weight below which a point is rejected. + * Set the threshold for the acceptation of a point. + * + * \param th : Threshold for the weight below which a point is rejected. */ inline void setKltThresholdAcceptation(double th) { threshold_outlier = th; } /*! - Use Ogre3D for visibility tests - - \warning This function has to be called before the initialization of the - tracker. - - \param v : True to use it, False otherwise + * Use Ogre3D for visibility tests + * + * \warning This function has to be called before the initialization of the + * tracker. 
+ * + * \param v : True to use it, False otherwise */ virtual void setOgreVisibilityTest(const bool &v) override { @@ -364,9 +363,9 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker } /*! - Use Scanline algorithm for visibility tests - - \param v : True to use it, False otherwise + * Use Scanline algorithm for visibility tests + * + * \param v : True to use it, False otherwise */ virtual void setScanLineVisibilityTest(const bool &v) override { @@ -380,11 +379,11 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; /*! - Set if the projection error criteria has to be computed. - - \param flag : True if the projection error criteria has to be computed, - false otherwise - */ + * Set if the projection error criteria has to be computed. + * + * \param flag : True if the projection error criteria has to be computed, + * false otherwise + */ virtual void setProjectionErrorComputation(const bool &flag) override { if (flag) @@ -405,32 +404,34 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker //@{ /*! - Get the erosion of the mask used on the Model faces. - \deprecated Use rather getkltMaskBorder() - - \return The erosion. + * Get the erosion of the mask used on the Model faces. + * \deprecated Use rather getkltMaskBorder() + * + * \return The erosion. */ /* vp_deprecated */ inline unsigned int getMaskBorder() const { return maskBorder; } - /*! - Get the current number of klt points. - \deprecated Use rather getKltNbPoints() - \return the number of features + /*! + * Get the current number of klt points. + * \deprecated Use rather getKltNbPoints() + * + * \return the number of features */ /* vp_deprecated */ inline int getNbKltPoints() const { return tracker.getNbFeatures(); } /*! - Get the threshold for the acceptation of a point. - \deprecated Use rather getKltThresholdAcceptation() - - \return threshold_outlier : Threshold for the weight below which a point - is rejected. + * Get the threshold for the acceptation of a point. + * \deprecated Use rather getKltThresholdAcceptation() + * + * \return threshold_outlier : Threshold for the weight below which a point + * is rejected. */ /* vp_deprecated */ inline double getThresholdAcceptation() const { return threshold_outlier; } - /*! - Set the erosion of the mask used on the Model faces. - \param e : The desired erosion. + /*! + * Set the erosion of the mask used on the Model faces. + * + * \param e : The desired erosion. */ /* vp_deprecated */ inline void setMaskBorder(const unsigned int &e) { @@ -440,10 +441,10 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker } /*! - Set the threshold for the acceptation of a point. - \deprecated Use rather setKltThresholdAcceptation() - - \param th : Threshold for the weight below which a point is rejected. + * Set the threshold for the acceptation of a point. + * \deprecated Use rather setKltThresholdAcceptation() + * + * \param th : Threshold for the weight below which a point is rejected. 
*/ /* vp_deprecated */ inline void setThresholdAcceptation(double th) { threshold_outlier = th; } From 0e5611bb0e19d0190c33877045410f79bf8c3ee6 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Tue, 24 Oct 2023 22:23:32 +0200 Subject: [PATCH 18/43] Fix doxygen documentation warnings --- .../include/visp3/mbt/vpMbGenericTracker.h | 2 +- .../vision/include/visp3/vision/vpKeyPoint.h | 1042 ++++++++--------- modules/vision/src/key-point/vpKeyPoint.cpp | 6 +- .../visual_features/vpFeatureMomentCentered.h | 13 +- .../vpFeatureMomentCentered.cpp | 4 +- 5 files changed, 531 insertions(+), 536 deletions(-) diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index ca3b07483b..b576899404 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -783,7 +783,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker using vpMbDepthDenseTracker::setPose; #endif virtual void setPose(const vpImage *const I, const vpImage *const I_color, - const vpHomogeneousMatrix &cdMo); + const vpHomogeneousMatrix &cdMo) override; }; #ifdef VISP_HAVE_NLOHMANN_JSON friend void to_json(nlohmann::json &j, const TrackerWrapper &t); diff --git a/modules/vision/include/visp3/vision/vpKeyPoint.h b/modules/vision/include/visp3/vision/vpKeyPoint.h index 68c907dd6f..e92de87162 100644 --- a/modules/vision/include/visp3/vision/vpKeyPoint.h +++ b/modules/vision/include/visp3/vision/vpKeyPoint.h @@ -369,40 +369,40 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint unsigned int width); /*! - Build the reference keypoints list in a region of interest in the image. + * Build the reference keypoints list in a region of interest in the image. * - \param I : Input image. - \param rectangle : Rectangle of the region of interest. - \return The number of detected keypoints in the current image I. + * \param I : Input image. + * \param rectangle : Rectangle of the region of interest. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I, const vpRect &rectangle); /*! - Build the reference keypoints list and compute the 3D position - corresponding of the keypoints locations. + * Build the reference keypoints list and compute the 3D position + * corresponding of the keypoints locations. * - \param I : Input image. - \param trainKeyPoints : List of the train keypoints. - \param points3f : Output list of the 3D position corresponding of the keypoints locations. - \param append : If true, append the supply train keypoints with those already present. - \param class_id : The class id to be set to the input cv::KeyPoint if != -1. - \return The number of detected keypoints in the current image I. + * \param I : Input image. + * \param trainKeyPoints : List of the train keypoints. + * \param points3f : Output list of the 3D position corresponding of the keypoints locations. + * \param append : If true, append the supply train keypoints with those already present. + * \param class_id : The class id to be set to the input cv::KeyPoint if != -1. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I, std::vector &trainKeyPoints, std::vector &points3f, bool append = false, int class_id = -1); /*! - Build the reference keypoints list and compute the 3D position - corresponding of the keypoints locations. 
+ * Build the reference keypoints list and compute the 3D position + * corresponding of the keypoints locations. * - \param I : Input image. - \param trainKeyPoints : List of the train keypoints. - \param points3f : List of the 3D position corresponding of the keypoints locations. - \param trainDescriptors : List of the train descriptors. - \param append : If true, append the supply train keypoints with those already present. - \param class_id : The class id to be set to the input cv::KeyPoint if != -1. + * \param I : Input image. + * \param trainKeyPoints : List of the train keypoints. + * \param points3f : List of the 3D position corresponding of the keypoints locations. + * \param trainDescriptors : List of the train descriptors. + * \param append : If true, append the supply train keypoints with those already present. + * \param class_id : The class id to be set to the input cv::KeyPoint if != -1. * - \return The number of keypoints in the current image I. + * \return The number of keypoints in the current image I. */ unsigned int buildReference(const vpImage &I, const std::vector &trainKeyPoints, const cv::Mat &trainDescriptors, const std::vector &points3f, @@ -417,107 +417,107 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint unsigned int buildReference(const vpImage &I_color); /*! - Build the reference keypoints list in a region of interest in the image. + * Build the reference keypoints list in a region of interest in the image. * - \param I_color : Input reference image. - \param iP : Position of the top-left corner of the region of interest. - \param height : Height of the region of interest. - \param width : Width of the region of interest. - \return The number of detected keypoints in the current image I. + * \param I_color : Input reference image. + * \param iP : Position of the top-left corner of the region of interest. + * \param height : Height of the region of interest. + * \param width : Width of the region of interest. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I_color, const vpImagePoint &iP, unsigned int height, unsigned int width); /*! - Build the reference keypoints list in a region of interest in the image. + * Build the reference keypoints list in a region of interest in the image. * - \param I_color : Input image. - \param rectangle : Rectangle of the region of interest. - \return The number of detected keypoints in the current image I. + * \param I_color : Input image. + * \param rectangle : Rectangle of the region of interest. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I_color, const vpRect &rectangle); /*! - Build the reference keypoints list and compute the 3D position - corresponding of the keypoints locations. + * Build the reference keypoints list and compute the 3D position + * corresponding of the keypoints locations. * - \param I_color : Input image. - \param trainKeyPoints : List of the train keypoints. - \param points3f : Output list of the 3D position corresponding of the keypoints locations. - \param append : If true, append the supply train keypoints with those already present. - \param class_id : The class id to be set to the input cv::KeyPoint if != -1. - \return The number of detected keypoints in the current image I. + * \param I_color : Input image. + * \param trainKeyPoints : List of the train keypoints. 
+ * \param points3f : Output list of the 3D position corresponding of the keypoints locations. + * \param append : If true, append the supply train keypoints with those already present. + * \param class_id : The class id to be set to the input cv::KeyPoint if != -1. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I_color, std::vector &trainKeyPoints, std::vector &points3f, bool append = false, int class_id = -1); /*! - Build the reference keypoints list and compute the 3D position - corresponding of the keypoints locations. + * Build the reference keypoints list and compute the 3D position + * corresponding of the keypoints locations. * - \param I_color : Input image. - \param trainKeyPoints : List of the train keypoints. - \param points3f : List of the 3D position corresponding of the keypoints locations. - \param trainDescriptors : List of the train descriptors. - \param append : If true, append the supply train keypoints with those already present. - \param class_id : The class id to be set to the input cv::KeyPoint if != -1. - \return The number of detected keypoints in the current image I. + * \param I_color : Input image. + * \param trainKeyPoints : List of the train keypoints. + * \param points3f : List of the 3D position corresponding of the keypoints locations. + * \param trainDescriptors : List of the train descriptors. + * \param append : If true, append the supply train keypoints with those already present. + * \param class_id : The class id to be set to the input cv::KeyPoint if != -1. + * \return The number of detected keypoints in the current image I. */ - unsigned int buildReference(const vpImage &I, const std::vector &trainKeyPoints, + unsigned int buildReference(const vpImage &I_color, const std::vector &trainKeyPoints, const cv::Mat &trainDescriptors, const std::vector &points3f, bool append = false, int class_id = -1); /*! - Compute the 3D coordinate in the world/object frame given the 2D image - coordinate and under the assumption that the point is located on a plane - whose the plane equation is known in the camera frame. - The Z-coordinate is retrieved according to the proportional relationship - between the plane equation expressed in the normalized camera frame - (derived from the image coordinate) and the same plane equation expressed - in the camera frame. + * Compute the 3D coordinate in the world/object frame given the 2D image + * coordinate and under the assumption that the point is located on a plane + * whose the plane equation is known in the camera frame. + * The Z-coordinate is retrieved according to the proportional relationship + * between the plane equation expressed in the normalized camera frame + * (derived from the image coordinate) and the same plane equation expressed + * in the camera frame. * - \param candidate : Keypoint we want to compute the 3D coordinate. - \param roi : List of 3D points in the camera frame representing a planar face. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the world and the camera frames. - \param point : 3D coordinate in the world/object frame computed. + * \param candidate : Keypoint we want to compute the 3D coordinate. + * \param roi : List of 3D points in the camera frame representing a planar face. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param point : 3D coordinate in the world/object frame computed. 
*/ static void compute3D(const cv::KeyPoint &candidate, const std::vector &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point); /*! - Compute the 3D coordinate in the world/object frame given the 2D image - coordinate and under the assumption that the point is located on a plane - whose the plane equation is known in the camera frame. - The Z-coordinate is retrieved according to the proportional relationship - between the plane equation expressed in the normalized camera frame - (derived from the image coordinate) and the same plane equation expressed - in the camera frame. + * Compute the 3D coordinate in the world/object frame given the 2D image + * coordinate and under the assumption that the point is located on a plane + * whose the plane equation is known in the camera frame. + * The Z-coordinate is retrieved according to the proportional relationship + * between the plane equation expressed in the normalized camera frame + * (derived from the image coordinate) and the same plane equation expressed + * in the camera frame. * - \param candidate : vpImagePoint we want to compute the 3D coordinate. - \param roi : List of 3D points in the camera frame representing a planar face. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the world and the camera frames. - \param point : 3D coordinate in the world/object frame computed. + * \param candidate : vpImagePoint we want to compute the 3D coordinate. + * \param roi : List of 3D points in the camera frame representing a planar face. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param point : 3D coordinate in the world/object frame computed. */ static void compute3D(const vpImagePoint &candidate, const std::vector &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point); /*! - Keep only keypoints located on faces and compute for those keypoints the 3D - coordinate in the world/object frame given the 2D image coordinate and - under the assumption that the point is located on a plane. + * Keep only keypoints located on faces and compute for those keypoints the 3D + * coordinate in the world/object frame given the 2D image coordinate and + * under the assumption that the point is located on a plane. * - \param cMo : Homogeneous matrix between the world and the camera frames. - \param cam : Camera parameters. - \param candidates : In input, list of keypoints detected in the whole - image, in output, list of keypoints only located on planes. - \param polygons : List of 2D polygons representing the projection of the faces in - the image plane. - \param roisPt : List of faces, with the 3D coordinates known in the camera frame. - \param points : Output list of computed 3D coordinates (in - the world/object frame) of keypoints located only on faces. - \param descriptors : Optional parameter, pointer to the descriptors to filter. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param cam : Camera parameters. + * \param candidates : In input, list of keypoints detected in the whole + * image, in output, list of keypoints only located on planes. + * \param polygons : List of 2D polygons representing the projection of the faces in + * the image plane. + * \param roisPt : List of faces, with the 3D coordinates known in the camera frame. + * \param points : Output list of computed 3D coordinates (in + * the world/object frame) of keypoints located only on faces. 
+ * \param descriptors : Optional parameter, pointer to the descriptors to filter. */ static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector &candidates, @@ -526,20 +526,20 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &points, cv::Mat *descriptors = NULL); /*! - Keep only keypoints located on faces and compute for those keypoints the 3D - coordinate in the world/object frame given the 2D image coordinate and - under the assumption that the point is located on a plane. + * Keep only keypoints located on faces and compute for those keypoints the 3D + * coordinate in the world/object frame given the 2D image coordinate and + * under the assumption that the point is located on a plane. * - \param cMo : Homogeneous matrix between the world and the camera frames. - \param cam : Camera parameters. - \param candidates : In input, list of vpImagePoint located in the whole - image, in output, list of vpImagePoint only located on planes. - \param polygons : List of 2D polygons representing the projection of the faces in - the image plane. - \param roisPt : List of faces, with the 3D coordinates known in the camera frame. - \param points : Output list of computed 3D coordinates (in the world/object frame) - of vpImagePoint located only on faces. - \param descriptors : Optional parameter, pointer to the descriptors to filter. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param cam : Camera parameters. + * \param candidates : In input, list of vpImagePoint located in the whole + * image, in output, list of vpImagePoint only located on planes. + * \param polygons : List of 2D polygons representing the projection of the faces in + * the image plane. + * \param roisPt : List of faces, with the 3D coordinates known in the camera frame. + * \param points : Output list of computed 3D coordinates (in the world/object frame) + * of vpImagePoint located only on faces. + * \param descriptors : Optional parameter, pointer to the descriptors to filter. */ static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector &candidates, @@ -548,19 +548,19 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &points, cv::Mat *descriptors = NULL); /*! - Keep only keypoints located on cylinders and compute the 3D coordinates in - the world/object frame given the 2D image coordinates. + * Keep only keypoints located on cylinders and compute the 3D coordinates in + * the world/object frame given the 2D image coordinates. * - \param cMo : Homogeneous matrix between the world and the camera frames. - \param cam : Camera parameters. - \param candidates : In input, list of keypoints detected in the whole - image, in output, list of keypoints only located on cylinders. - \param cylinders : List of vpCylinder corresponding of the cylinder objects in the - scene, projected in the camera frame. - \param vectorOfCylinderRois : For each cylinder, the corresponding list of bounding box. - \param points : Output list of computed 3D coordinates in the world/object frame for each - keypoint located on a cylinder. - \param descriptors : Optional parameter, pointer to the descriptors to filter. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param cam : Camera parameters. + * \param candidates : In input, list of keypoints detected in the whole + * image, in output, list of keypoints only located on cylinders. 
+ * \param cylinders : List of vpCylinder corresponding of the cylinder objects in the + * scene, projected in the camera frame. + * \param vectorOfCylinderRois : For each cylinder, the corresponding list of bounding box. + * \param points : Output list of computed 3D coordinates in the world/object frame for each + * keypoint located on a cylinder. + * \param descriptors : Optional parameter, pointer to the descriptors to filter. */ static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, @@ -569,19 +569,19 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &points, cv::Mat *descriptors = NULL); /*! - Keep only vpImagePoint located on cylinders and compute the 3D coordinates - in the world/object frame given the 2D image coordinates. + * Keep only vpImagePoint located on cylinders and compute the 3D coordinates + * in the world/object frame given the 2D image coordinates. * - \param cMo : Homogeneous matrix between the world and the camera frames. - \param cam : Camera parameters. - \param candidates : In input, list of vpImagePoint located in the image, in - output, list of vpImagePoint only located on cylinders. - \param cylinders : List of vpCylinder corresponding of the cylinder objects in the scene, - projected in the camera frame. - \param vectorOfCylinderRois : For each cylinder, the corresponding list of bounding box. - \param points : Output list of computed 3D coordinates in the world/object frame for each - vpImagePoint located on a cylinder. - \param descriptors : Optional parameter, pointer to the descriptors to filter. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param cam : Camera parameters. + * \param candidates : In input, list of vpImagePoint located in the image, in + * output, list of vpImagePoint only located on cylinders. + * \param cylinders : List of vpCylinder corresponding of the cylinder objects in the scene, + * projected in the camera frame. + * \param vectorOfCylinderRois : For each cylinder, the corresponding list of bounding box. + * \param points : Output list of computed 3D coordinates in the world/object frame for each + * vpImagePoint located on a cylinder. + * \param descriptors : Optional parameter, pointer to the descriptors to filter. */ static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, @@ -590,361 +590,361 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &points, cv::Mat *descriptors = NULL); /*! - Compute the pose using the correspondence between 2D points and 3D points - using OpenCV function with RANSAC method. + * Compute the pose using the correspondence between 2D points and 3D points + * using OpenCV function with RANSAC method. * - \param imagePoints : List of 2D points corresponding to the location of the detected keypoints. - \param objectPoints : List of the 3D points in the object frame matched. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param inlierIndex : List of indexes of inliers. - \param elapsedTime : Elapsed time. - \param func : Function pointer to filter the final pose returned by OpenCV pose estimation method. - \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). - */ + * \param imagePoints : List of 2D points corresponding to the location of the detected keypoints. 
+ * \param objectPoints : List of the 3D points in the object frame matched. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param inlierIndex : List of indexes of inliers. + * \param elapsedTime : Elapsed time. + * \param func : Function pointer to filter the final pose returned by OpenCV pose estimation method. + * \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). + */ bool computePose(const std::vector &imagePoints, const std::vector &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector &inlierIndex, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL); /*! - Compute the pose using the correspondence between 2D points and 3D points - using ViSP function with RANSAC method. + * Compute the pose using the correspondence between 2D points and 3D points + * using ViSP function with RANSAC method. * - \param objectVpPoints : List of vpPoint with coordinates expressed in the object and in the camera frame. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param inliers : List of inliers. - \param elapsedTime : Elapsed time. - \param func : Function pointer to filter the pose in Ransac pose estimation, if we want - to eliminate the poses which do not respect some criterion - \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). + * \param objectVpPoints : List of vpPoint with coordinates expressed in the object and in the camera frame. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param inliers : List of inliers. + * \param elapsedTime : Elapsed time. + * \param func : Function pointer to filter the pose in Ransac pose estimation, if we want + * to eliminate the poses which do not respect some criterion + * \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). */ bool computePose(const std::vector &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector &inliers, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL); /*! - Compute the pose using the correspondence between 2D points and 3D points - using ViSP function with RANSAC method. + * Compute the pose using the correspondence between 2D points and 3D points + * using ViSP function with RANSAC method. * - \param objectVpPoints : List of vpPoint with coordinates expressed in the object and in the camera frame. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param inliers : List of inlier points. - \param inlierIndex : List of inlier index. - \param elapsedTime : Elapsed time. - \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). - \param func : Function pointer to filter the pose in Ransac pose estimation, if we want to eliminate the poses which - do not respect some criterion + * \param objectVpPoints : List of vpPoint with coordinates expressed in the object and in the camera frame. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param inliers : List of inlier points. + * \param inlierIndex : List of inlier index. + * \param elapsedTime : Elapsed time. + * \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). 
+ * \param func : Function pointer to filter the pose in Ransac pose estimation, if we want to eliminate the poses which + * do not respect some criterion */ bool computePose(const std::vector &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector &inliers, std::vector &inlierIndex, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL); /*! - Initialize the size of the matching image (case with a matching side by - side between IRef and ICurrent). + * Initialize the size of the matching image (case with a matching side by + * side between IRef and ICurrent). * - \param IRef : Reference image. - \param ICurrent : Current image. - \param IMatching : Image matching. + * \param IRef : Reference image. + * \param ICurrent : Current image. + * \param IMatching : Image matching. */ void createImageMatching(vpImage &IRef, vpImage &ICurrent, vpImage &IMatching); /*! - Initialize the size of the matching image with appropriate size according - to the number of training images. Used to display the matching of keypoints - detected in the current image with those detected in multiple training - images. + * Initialize the size of the matching image with appropriate size according + * to the number of training images. Used to display the matching of keypoints + * detected in the current image with those detected in multiple training + * images. * - \param ICurrent : Current image. - \param IMatching : Image initialized with appropriate size. + * \param ICurrent : Current image. + * \param IMatching : Image initialized with appropriate size. */ void createImageMatching(vpImage &ICurrent, vpImage &IMatching); /*! - Initialize the size of the matching image (case with a matching side by - side between IRef and ICurrent). + * Initialize the size of the matching image (case with a matching side by + * side between IRef and ICurrent). * - \param IRef : Reference image. - \param ICurrent : Current image. - \param IMatching : Image matching. + * \param IRef : Reference image. + * \param ICurrent : Current image. + * \param IMatching : Image matching. */ void createImageMatching(vpImage &IRef, vpImage &ICurrent, vpImage &IMatching); /*! - Initialize the size of the matching image with appropriate size according - to the number of training images. Used to display the matching of keypoints - detected in the current image with those detected in multiple training - images. + * Initialize the size of the matching image with appropriate size according + * to the number of training images. Used to display the matching of keypoints + * detected in the current image with those detected in multiple training + * images. * - \param ICurrent : Current image. - \param IMatching : Image initialized with appropriate size. + * \param ICurrent : Current image. + * \param IMatching : Image initialized with appropriate size. */ void createImageMatching(vpImage &ICurrent, vpImage &IMatching); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param I : Input image. - \param keyPoints : Output list of the detected keypoints. - \param rectangle : Optional rectangle of the region of interest. + * \param I : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param rectangle : Optional rectangle of the region of interest. */ void detect(const vpImage &I, std::vector &keyPoints, const vpRect &rectangle = vpRect()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param I_color : Input image. 
- \param keyPoints : Output list of the detected keypoints. - \param rectangle : Optional rectangle of the region of interest. + * \param I_color : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param rectangle : Optional rectangle of the region of interest. */ void detect(const vpImage &I_color, std::vector &keyPoints, const vpRect &rectangle = vpRect()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param matImg : Input image. - \param keyPoints : Output list of the detected keypoints. - \param mask : Optional 8-bit integer mask to detect only where mask[i][j] != 0. + * \param matImg : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param mask : Optional 8-bit integer mask to detect only where mask[i][j] != 0. */ void detect(const cv::Mat &matImg, std::vector &keyPoints, const cv::Mat &mask = cv::Mat()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param I : Input image. - \param keyPoints : Output list of the detected keypoints. - \param elapsedTime : Elapsed time. - \param rectangle : Optional rectangle of the region of interest. + * \param I : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param elapsedTime : Elapsed time. + * \param rectangle : Optional rectangle of the region of interest. */ void detect(const vpImage &I, std::vector &keyPoints, double &elapsedTime, const vpRect &rectangle = vpRect()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param I_color : Input image. - \param keyPoints : Output list of the detected keypoints. - \param elapsedTime : Elapsed time. - \param rectangle : Optional rectangle of the region of interest. + * \param I_color : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param elapsedTime : Elapsed time. + * \param rectangle : Optional rectangle of the region of interest. */ void detect(const vpImage &I_color, std::vector &keyPoints, double &elapsedTime, const vpRect &rectangle = vpRect()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param matImg : Input image. - \param keyPoints : Output list of the detected keypoints. - \param elapsedTime : Elapsed time. - \param mask : Optional 8-bit integer mask to detect only where mask[i][j] != 0. + * \param matImg : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param elapsedTime : Elapsed time. + * \param mask : Optional 8-bit integer mask to detect only where mask[i][j] != 0. */ void detect(const cv::Mat &matImg, std::vector &keyPoints, double &elapsedTime, - const cv::Mat &mask = cv::Mat()); - - /*! - Apply a set of affine transformations to the image, detect keypoints and - reproject them into initial image coordinates. - See http://www.ipol.im/pub/algo/my_affine_sift/ for the details. - See https://github.com/Itseez/opencv/blob/master/samples/python2/asift.py - for the Python implementation by Itseez and Matt Sheckells for the current - implementation in C++. - \param I : Input image. - \param listOfKeypoints : List of detected keypoints in the multiple images after - affine transformations. - \param listOfDescriptors : Corresponding list of descriptors. - \param listOfAffineI : Optional parameter, list of images after affine - transformations. - */ + const cv::Mat &mask = cv::Mat()); + + /*! + * Apply a set of affine transformations to the image, detect keypoints and + * reproject them into initial image coordinates. 
+ * See http://www.ipol.im/pub/algo/my_affine_sift/ for the details. + * See https://github.com/Itseez/opencv/blob/master/samples/python2/asift.py + * for the Python implementation by Itseez and Matt Sheckells for the current + * implementation in C++. + * \param I : Input image. + * \param listOfKeypoints : List of detected keypoints in the multiple images after + * affine transformations. + * \param listOfDescriptors : Corresponding list of descriptors. + * \param listOfAffineI : Optional parameter, list of images after affine + * transformations. + */ void detectExtractAffine(const vpImage &I, std::vector > &listOfKeypoints, std::vector &listOfDescriptors, std::vector > *listOfAffineI = NULL); /*! - Display the reference and the detected keypoints in the images. + * Display the reference and the detected keypoints in the images. * - \param IRef : Input reference image. - \param ICurrent : Input current image. - \param size : Size of the displayed cross. + * \param IRef : Input reference image. + * \param ICurrent : Input current image. + * \param size : Size of the displayed cross. */ void display(const vpImage &IRef, const vpImage &ICurrent, unsigned int size = 3); /*! - Display the reference keypoints. + * Display the reference keypoints. * - \param ICurrent : Input current image. - \param size : Size of the displayed crosses. - \param color : Color of the crosses. + * \param ICurrent : Input current image. + * \param size : Size of the displayed crosses. + * \param color : Color of the crosses. */ void display(const vpImage &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green); /*! - Display the reference and the detected keypoints in the images. + * Display the reference and the detected keypoints in the images. * - \param IRef : Input reference image. - \param ICurrent : Input current image. - \param size : Size of the displayed cross. + * \param IRef : Input reference image. + * \param ICurrent : Input current image. + * \param size : Size of the displayed cross. */ void display(const vpImage &IRef, const vpImage &ICurrent, unsigned int size = 3); /*! - Display the reference keypoints. + * Display the reference keypoints. * - \param ICurrent : Input current image. - \param size : Size of the displayed crosses. - \param color : Color of the crosses. + * \param ICurrent : Input current image. + * \param size : Size of the displayed crosses. + * \param color : Color of the crosses. */ void display(const vpImage &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green); /*! - Display the matching lines between the detected keypoints with those - detected in one training image. + * Display the matching lines between the detected keypoints with those + * detected in one training image. * - \param IRef : Reference image, used to have the x-offset. - \param IMatching : Resulting image matching. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed lines. - \param color : Color to use, if none, we pick randomly a color for each pair - of matching. + * \param IRef : Reference image, used to have the x-offset. + * \param IMatching : Resulting image matching. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed lines. + * \param color : Color to use, if none, we pick randomly a color for each pair + * of matching. 
*/ void displayMatching(const vpImage &IRef, vpImage &IMatching, unsigned int crossSize, unsigned int lineThickness = 1, const vpColor &color = vpColor::green); /*! - Display matching between keypoints detected in the current image and with - those detected in the multiple training images. Display also RANSAC inliers - if the list is supplied. + * Display matching between keypoints detected in the current image and with + * those detected in the multiple training images. Display also RANSAC inliers + * if the list is supplied. * - \param ICurrent : Current image. - \param IMatching : Resulting matching image. - \param ransacInliers : List of Ransac inliers or empty list if not available. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed line. + * \param ICurrent : Current image. + * \param IMatching : Resulting matching image. + * \param ransacInliers : List of Ransac inliers or empty list if not available. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed line. */ void displayMatching(const vpImage &ICurrent, vpImage &IMatching, const std::vector &ransacInliers = std::vector(), unsigned int crossSize = 3, unsigned int lineThickness = 1); /*! - Display the matching lines between the detected keypoints with those - detected in one training image. + * Display the matching lines between the detected keypoints with those + * detected in one training image. * - \param IRef : Reference image, used to have the x-offset. - \param IMatching : Resulting image matching. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed lines. - \param color : Color to use, if none, we pick randomly a color for each pair - of matching. + * \param IRef : Reference image, used to have the x-offset. + * \param IMatching : Resulting image matching. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed lines. + * \param color : Color to use, if none, we pick randomly a color for each pair + * of matching. */ void displayMatching(const vpImage &IRef, vpImage &IMatching, unsigned int crossSize, unsigned int lineThickness = 1, const vpColor &color = vpColor::green); /*! - Display the matching lines between the detected keypoints with those - detected in one training image. + * Display the matching lines between the detected keypoints with those + * detected in one training image. * - \param IRef : Reference image, used to have the x-offset. - \param IMatching : Resulting image matching. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed lines. - \param color : Color to use, if none, we pick randomly a color for each pair - of matching. + * \param IRef : Reference image, used to have the x-offset. + * \param IMatching : Resulting image matching. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed lines. + * \param color : Color to use, if none, we pick randomly a color for each pair + * of matching. */ void displayMatching(const vpImage &IRef, vpImage &IMatching, unsigned int crossSize, unsigned int lineThickness = 1, const vpColor &color = vpColor::green); /*! - Display matching between keypoints detected in the current image and with - those detected in the multiple training images. Display also RANSAC inliers - if the list is supplied. 
+ * Display matching between keypoints detected in the current image and with + * those detected in the multiple training images. Display also RANSAC inliers + * if the list is supplied. * - \param ICurrent : Current image. - \param IMatching : Resulting matching image. - \param ransacInliers : List of Ransac inliers or empty list if not available. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed line. + * \param ICurrent : Current image. + * \param IMatching : Resulting matching image. + * \param ransacInliers : List of Ransac inliers or empty list if not available. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed line. */ void displayMatching(const vpImage &ICurrent, vpImage &IMatching, const std::vector &ransacInliers = std::vector(), unsigned int crossSize = 3, unsigned int lineThickness = 1); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param I : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param I : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I, std::vector &keyPoints, cv::Mat &descriptors, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param I_color : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param I_color : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I_color, std::vector &keyPoints, cv::Mat &descriptors, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param matImg : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint cannot - be extracted, we need to remove the corresponding 3D point. + * \param matImg : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. 
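// A minimal, hypothetical sketch of the lower-level detect()/extract()/match()
// calls documented in this header: detect keypoints in a reference and a
// current image, extract their descriptors, then match the two descriptor
// sets. File names are placeholders; only the vpKeyPoint calls come from this class.
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>

void lowLevelMatchingSketch()
{
  vpImage<unsigned char> IRef, ICur;
  vpImageIo::read(IRef, "reference.png"); // placeholder images
  vpImageIo::read(ICur, "current.png");

  vpKeyPoint keypoint;
  std::vector<cv::KeyPoint> trainKeyPoints, queryKeyPoints;
  cv::Mat trainDescriptors, queryDescriptors;

  keypoint.detect(IRef, trainKeyPoints);                    // keypoint detection only
  keypoint.extract(IRef, trainKeyPoints, trainDescriptors); // descriptors for those keypoints
  keypoint.detect(ICur, queryKeyPoints);
  keypoint.extract(ICur, queryKeyPoints, queryDescriptors);

  std::vector<cv::DMatch> matches;
  double elapsedTime = 0.;
  keypoint.match(trainDescriptors, queryDescriptors, matches, elapsedTime);
}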
+ * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint cannot + * be extracted, we need to remove the corresponding 3D point. */ void extract(const cv::Mat &matImg, std::vector &keyPoints, cv::Mat &descriptors, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param I : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param elapsedTime : Elapsed time. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param I : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param elapsedTime : Elapsed time. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I, std::vector &keyPoints, cv::Mat &descriptors, double &elapsedTime, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param I_color : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param elapsedTime : Elapsed time. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param I_color : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param elapsedTime : Elapsed time. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I_color, std::vector &keyPoints, cv::Mat &descriptors, double &elapsedTime, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param matImg : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param elapsedTime : Elapsed time. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param matImg : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param elapsedTime : Elapsed time. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. 
*/ void extract(const cv::Mat &matImg, std::vector &keyPoints, cv::Mat &descriptors, double &elapsedTime, std::vector *trainPoints = NULL); @@ -1148,20 +1148,20 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint inline unsigned int getNbImages() const { return static_cast(m_mapOfImages.size()); } /*! - Get the 3D coordinates of the object points matched (the corresponding 3D - coordinates in the object frame of the keypoints detected in the current - image after the matching). + * Get the 3D coordinates of the object points matched (the corresponding 3D + * coordinates in the object frame of the keypoints detected in the current + * image after the matching). * - \param objectPoints : List of 3D coordinates in the object frame. + * \param objectPoints : List of 3D coordinates in the object frame. */ void getObjectPoints(std::vector &objectPoints) const; /*! - Get the 3D coordinates of the object points matched (the corresponding 3D - coordinates in the object frame of the keypoints detected in the current - image after the matching). + * Get the 3D coordinates of the object points matched (the corresponding 3D + * coordinates in the object frame of the keypoints detected in the current + * image after the matching). * - \param objectPoints : List of 3D coordinates in the object frame. + * \param objectPoints : List of 3D coordinates in the object frame. */ void getObjectPoints(std::vector &objectPoints) const; @@ -1181,22 +1181,22 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; } /*! - Get the query keypoints list in OpenCV type. + * Get the query keypoints list in OpenCV type. * - \param matches : If false return the list of all query keypoints extracted in the current image. - If true, return only the query keypoints list that have matches. - \param keyPoints : List of query keypoints (or keypoints detected in the - current image). + * \param matches : If false return the list of all query keypoints extracted in the current image. + * If true, return only the query keypoints list that have matches. + * \param keyPoints : List of query keypoints (or keypoints detected in the + * current image). */ void getQueryKeyPoints(std::vector &keyPoints, bool matches = true) const; /*! - Get the query keypoints list in ViSP type. + * Get the query keypoints list in ViSP type. * - \param keyPoints : List of query keypoints (or keypoints detected in the - current image). - \param matches : If false return the list of all query keypoints extracted in the current image. - If true, return only the query keypoints list that have matches. + * \param keyPoints : List of query keypoints (or keypoints detected in the + * current image). + * \param matches : If false return the list of all query keypoints extracted in the current image. + * If true, return only the query keypoints list that have matches. */ void getQueryKeyPoints(std::vector &keyPoints, bool matches = true) const; @@ -1223,204 +1223,204 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; } /*! - Get the train keypoints list in OpenCV type. + * Get the train keypoints list in OpenCV type. * - \param keyPoints : List of train keypoints (or reference keypoints). + * \param keyPoints : List of train keypoints (or reference keypoints). */ void getTrainKeyPoints(std::vector &keyPoints) const; /*! - Get the train keypoints list in ViSP type. + * Get the train keypoints list in ViSP type. 
* - \param keyPoints : List of train keypoints (or reference keypoints). + * \param keyPoints : List of train keypoints (or reference keypoints). */ void getTrainKeyPoints(std::vector &keyPoints) const; /*! - Get the train points (the 3D coordinates in the object frame) list in - OpenCV type. + * Get the train points (the 3D coordinates in the object frame) list in + * OpenCV type. * - \param points : List of train points (or reference points). + * \param points : List of train points (or reference points). */ void getTrainPoints(std::vector &points) const; /*! - Get the train points (the 3D coordinates in the object frame) list in ViSP - type. + * Get the train points (the 3D coordinates in the object frame) list in ViSP + * type. * - \param points : List of train points (or reference points). + * \param points : List of train points (or reference points). */ void getTrainPoints(std::vector &points) const; /*! - Initialize a matcher based on its name. + * Initialize a matcher based on its name. * - \param matcherName : Name of the matcher (e.g BruteForce, FlannBased). + * \param matcherName : Name of the matcher (e.g BruteForce, FlannBased). */ void initMatcher(const std::string &matcherName); /*! - Insert a reference image and a current image side-by-side. + * Insert a reference image and a current image side-by-side. * - \param IRef : Reference image. - \param ICurrent : Current image. - \param IMatching : Matching image for displaying all the matching between - the query keypoints and those detected in the training images. + * \param IRef : Reference image. + * \param ICurrent : Current image. + * \param IMatching : Matching image for displaying all the matching between + * the query keypoints and those detected in the training images. */ void insertImageMatching(const vpImage &IRef, const vpImage &ICurrent, vpImage &IMatching); /*! - Insert the different training images in the matching image. + * Insert the different training images in the matching image. * - \param ICurrent : Current image. - \param IMatching : Matching image for displaying all the matching between - the query keypoints and those detected in the training images + * \param ICurrent : Current image. + * \param IMatching : Matching image for displaying all the matching between + * the query keypoints and those detected in the training images */ void insertImageMatching(const vpImage &ICurrent, vpImage &IMatching); /*! - Insert a reference image and a current image side-by-side. + * Insert a reference image and a current image side-by-side. * - \param IRef : Reference image. - \param ICurrent : Current image. - \param IMatching : Matching image for displaying all the matching between - the query keypoints and those detected in the training images. + * \param IRef : Reference image. + * \param ICurrent : Current image. + * \param IMatching : Matching image for displaying all the matching between + * the query keypoints and those detected in the training images. */ void insertImageMatching(const vpImage &IRef, const vpImage &ICurrent, vpImage &IMatching); /*! - Insert the different training images in the matching image. + * Insert the different training images in the matching image. * - \param ICurrent : Current image. - \param IMatching : Matching image for displaying all the matching between - the query keypoints and those detected in the training images + * \param ICurrent : Current image. 
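// A hypothetical sketch of the side-by-side visualisation helpers documented
// around here (createImageMatching / insertImageMatching / displayMatching),
// meant to be called after buildReference() and matchPoint() so that matches
// exist. The vpDisplayX display class is an assumption; any vpDisplay works.
#include <visp3/gui/vpDisplayX.h>
#include <visp3/vision/vpKeyPoint.h>

void showMatchesSketch(vpKeyPoint &keypoint, vpImage<unsigned char> &IRef,
                       vpImage<unsigned char> &ICur)
{
  vpImage<unsigned char> IMatching;
  keypoint.createImageMatching(IRef, ICur, IMatching); // allocate the side-by-side image
  keypoint.insertImageMatching(IRef, ICur, IMatching); // copy both images into it

  vpDisplayX d(IMatching);
  vpDisplay::display(IMatching);
  keypoint.displayMatching(IRef, IMatching, 3); // crosses of size 3 + matching lines
  vpDisplay::flush(IMatching);
  vpDisplay::getClick(IMatching);
}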
+ * \param IMatching : Matching image for displaying all the matching between + * the query keypoints and those detected in the training images */ void insertImageMatching(const vpImage &ICurrent, vpImage &IMatching); /*! - Load configuration parameters from an XML config file. + * Load configuration parameters from an XML config file. * - \param configFile : Path to the XML config file. + * \param configFile : Path to the XML config file. */ void loadConfigFile(const std::string &configFile); /*! - Load learning data saved on disk. + * Load learning data saved on disk. * - \param filename : Path of the learning file. - \param binaryMode : If true, the learning file is in a binary mode, - otherwise it is in XML mode. - \param append : If true, concatenate the learning data, otherwise reset the variables. + * \param filename : Path of the learning file. + * \param binaryMode : If true, the learning file is in a binary mode, + * otherwise it is in XML mode. + * \param append : If true, concatenate the learning data, otherwise reset the variables. */ void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false); /*! - Match keypoints based on distance between their descriptors. + * Match keypoints based on distance between their descriptors. * - \param trainDescriptors : Train descriptors (or reference descriptors). - \param queryDescriptors : Query descriptors. - \param matches : Output list of matches. - \param elapsedTime : Elapsed time. + * \param trainDescriptors : Train descriptors (or reference descriptors). + * \param queryDescriptors : Query descriptors. + * \param matches : Output list of matches. + * \param elapsedTime : Elapsed time. */ void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector &matches, double &elapsedTime); /*! - Match keypoints detected in the image with those built in the reference - list. + * Match keypoints detected in the image with those built in the reference + * list. * - \param I : Input current image. - \return The number of matched keypoints. + * \param I : Input current image. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I); /*! - Match keypoints detected in a region of interest of the image with those - built in the reference list. + * Match keypoints detected in a region of interest of the image with those + * built in the reference list. * - \param I : Input image. - \param iP : Coordinate of the top-left corner of the region of interest. - \param height : Height of the region of interest. - \param width : Width of the region of interest. - \return The number of matched keypoints. + * \param I : Input image. + * \param iP : Coordinate of the top-left corner of the region of interest. + * \param height : Height of the region of interest. + * \param width : Width of the region of interest. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I, const vpImagePoint &iP, unsigned int height, unsigned int width); /*! - Match keypoints detected in a region of interest of the image with those - built in the reference list. + * Match keypoints detected in a region of interest of the image with those + * built in the reference list. * - \param I : Input image. - \param rectangle : Rectangle of the region of interest. - \return The number of matched keypoints. + * \param I : Input image. + * \param rectangle : Rectangle of the region of interest. + * \return The number of matched keypoints. 
*/ unsigned int matchPoint(const vpImage &I, const vpRect &rectangle); /*! - Match query keypoints with those built in the reference list using buildReference(). + * Match query keypoints with those built in the reference list using buildReference(). * - \param queryKeyPoints : List of the query keypoints. - \param queryDescriptors : List of the query descriptors. - - \return The number of matched keypoints. + * \param queryKeyPoints : List of the query keypoints. + * \param queryDescriptors : List of the query descriptors. + * + * \return The number of matched keypoints. */ unsigned int matchPoint(const std::vector &queryKeyPoints, const cv::Mat &queryDescriptors); /*! - Match keypoints detected in the image with those built in the reference - list and compute the pose. + * Match keypoints detected in the image with those built in the reference + * list and compute the pose. * - \param I : Input image. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param func : Function pointer to filter the pose in Ransac pose - estimation, if we want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. + * \param I : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param func : Function pointer to filter the pose in Ransac pose + * estimation, if we want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list and compute the pose. + * Match keypoints detected in the image with those built in the reference + * list and compute the pose. * - \param I : Input image. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param error : Reprojection mean square error (in pixel) between the - 2D points and the projection of the 3D points with the estimated pose. - \param elapsedTime : Time to detect, extract, match and compute the pose. - \param func : Function pointer to filter the pose in Ransac pose - estimation, if we want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. + * \param I : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param error : Reprojection mean square error (in pixel) between the + * 2D points and the projection of the 3D points with the estimated pose. + * \param elapsedTime : Time to detect, extract, match and compute the pose. + * \param func : Function pointer to filter the pose in Ransac pose + * estimation, if we want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. 
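// A hypothetical quick-start for the high-level matching/pose API documented
// here: build the reference once, then call matchPoint() on each new image.
// Detector/extractor/matcher names, file names and intrinsics are arbitrary
// example values, not prescribed by this header.
#include <iostream>
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>

int main()
{
  vpImage<unsigned char> IRef, ICur;
  vpImageIo::read(IRef, "reference.png"); // placeholder images
  vpImageIo::read(ICur, "current.png");

  vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
  keypoint.buildReference(IRef);

  unsigned int nbMatches = keypoint.matchPoint(ICur); // 2D-2D matching only
  std::cout << nbMatches << " matches" << std::endl;

  // Matching + RANSAC pose estimation; this assumes 3D information was
  // associated to the reference keypoints beforehand (e.g. via a learning file).
  vpCameraParameters cam(600., 600., 320., 240.);
  vpHomogeneousMatrix cMo;
  if (keypoint.matchPoint(ICur, cam, cMo)) {
    std::cout << "cMo:\n" << cMo << std::endl;
  }
  return 0;
}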
+ * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list and return the bounding box and the center of gravity. - * - \param I : Input image. - \param boundingBox : Bounding box that contains the good matches. - \param centerOfGravity : Center of gravity computed from the location of - the good matches (could differ of the center of the bounding box). - \param isPlanarObject : If the object is planar, the homography matrix is - estimated to eliminate outliers, otherwise it is the fundamental matrix - which is estimated. - \param imPts1 : Pointer to the list of reference keypoints if not null. - \param imPts2 : Pointer to the list of current keypoints if not null. - \param meanDescriptorDistance : Pointer to the value - of the average distance of the descriptors if not null. - \param detection_score : Pointer to the value of the detection score if not null. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) - to consider. - \return True if the object is present, false otherwise. + * Match keypoints detected in the image with those built in the reference + * list and return the bounding box and the center of gravity. + * + * \param I : Input image. + * \param boundingBox : Bounding box that contains the good matches. + * \param centerOfGravity : Center of gravity computed from the location of + * the good matches (could differ of the center of the bounding box). + * \param isPlanarObject : If the object is planar, the homography matrix is + * estimated to eliminate outliers, otherwise it is the fundamental matrix + * which is estimated. + * \param imPts1 : Pointer to the list of reference keypoints if not null. + * \param imPts2 : Pointer to the list of current keypoints if not null. + * \param meanDescriptorDistance : Pointer to the value + * of the average distance of the descriptors if not null. + * \param detectionScore : Pointer to the value of the detection score if not null. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) + * to consider. + * \return True if the object is present, false otherwise. */ bool matchPointAndDetect(const vpImage &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject = true, std::vector *imPts1 = NULL, @@ -1428,89 +1428,89 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint double *detectionScore = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list, compute the pose and return also the bounding box and the center of - gravity. - * - \param I : Input image. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param error : Reprojection mean square error (in pixel) between the - 2D points and the projection of the 3D points with the estimated pose. - \param elapsedTime : Time to detect, extract, match and compute the pose. - \param boundingBox : Bounding box that contains the good matches. - \param centerOfGravity : Center of gravity computed from the location of - the good matches (could differ of the center of the bounding box). 
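// A hypothetical sketch for matchPointAndDetect(), declared just above, which
// only reports whether the learned object is present together with the
// bounding box and centre of gravity of the good matches (no pose estimation).
#include <visp3/vision/vpKeyPoint.h>

void detectObjectSketch(vpKeyPoint &keypoint, const vpImage<unsigned char> &ICur)
{
  vpRect boundingBox;
  vpImagePoint centerOfGravity;
  if (keypoint.matchPointAndDetect(ICur, boundingBox, centerOfGravity)) {
    // The object is considered present; boundingBox encloses the good matches.
  }
}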
- \param func : Function pointer to filter the pose in Ransac pose estimation, if we - want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. - */ + * Match keypoints detected in the image with those built in the reference + * list, compute the pose and return also the bounding box and the center of + * gravity. + * + * \param I : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param error : Reprojection mean square error (in pixel) between the + * 2D points and the projection of the 3D points with the estimated pose. + * \param elapsedTime : Time to detect, extract, match and compute the pose. + * \param boundingBox : Bounding box that contains the good matches. + * \param centerOfGravity : Center of gravity computed from the location of + * the good matches (could differ of the center of the bounding box). + * \param func : Function pointer to filter the pose in Ransac pose estimation, if we + * want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. + */ bool matchPointAndDetect(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint ¢erOfGravity, bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list. + * Match keypoints detected in the image with those built in the reference + * list. * - \param I_color : Input current image. - \return The number of matched keypoints. + * \param I_color : Input current image. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I_color); /*! - Match keypoints detected in a region of interest of the image with those - built in the reference list. + * Match keypoints detected in a region of interest of the image with those + * built in the reference list. * - \param I_color : Input image. - \param iP : Coordinate of the top-left corner of the region of interest. - \param height : Height of the region of interest. - \param width : Width of the region of interest. - \return The number of matched keypoints. + * \param I_color : Input image. + * \param iP : Coordinate of the top-left corner of the region of interest. + * \param height : Height of the region of interest. + * \param width : Width of the region of interest. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I_color, const vpImagePoint &iP, unsigned int height, unsigned int width); /*! - Match keypoints detected in a region of interest of the image with those - built in the reference list. + * Match keypoints detected in a region of interest of the image with those + * built in the reference list. * - \param I_color : Input image. - \param rectangle : Rectangle of the region of interest. - \return The number of matched keypoints. + * \param I_color : Input image. + * \param rectangle : Rectangle of the region of interest. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I_color, const vpRect &rectangle); /*! 
- Match keypoints detected in the image with those built in the reference - list and compute the pose. + * Match keypoints detected in the image with those built in the reference + * list and compute the pose. * - \param I_color : Input image. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param func : Function pointer to filter the pose in Ransac pose - estimation, if we want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. + * \param I_color : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param func : Function pointer to filter the pose in Ransac pose + * estimation, if we want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list and compute the pose. + * Match keypoints detected in the image with those built in the reference + * list and compute the pose. * - \param I_color : Input image. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param error : Reprojection mean square error (in pixel) between the - 2D points and the projection of the 3D points with the estimated pose. - \param elapsedTime : Time to detect, extract, match and compute the pose. - \param func : Function pointer to filter the pose in Ransac pose - estimation, if we want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. + * \param I_color : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param error : Reprojection mean square error (in pixel) between the + * 2D points and the projection of the 3D points with the estimated pose. + * \param elapsedTime : Time to detect, extract, match and compute the pose. + * \param func : Function pointer to filter the pose in Ransac pose + * estimation, if we want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL, @@ -1522,12 +1522,12 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint void reset(); /*! - Save the learning data in a file in XML or binary mode. + * Save the learning data in a file in XML or binary mode. * - \param filename : Path of the save file. - \param binaryMode : If true, the data are saved in binary mode, otherwise - in XML mode. 
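// A hypothetical sketch of the learning-file workflow documented around here
// (saveLearningData / loadLearningData): persist the reference data once, then
// restore it instead of rebuilding it. The file name and the binary flag are
// arbitrary example values.
#include <visp3/vision/vpKeyPoint.h>

void learningFileSketch(vpKeyPoint &trained, vpKeyPoint &fresh)
{
  trained.saveLearningData("learning_data.bin", true); // binary mode
  fresh.loadLearningData("learning_data.bin", true);   // restore into another instance
}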
- \param saveTrainingImages : If true, save also the training images on disk. + * \param filename : Path of the save file. + * \param binaryMode : If true, the data are saved in binary mode, otherwise + * in XML mode. + * \param saveTrainingImages : If true, save also the training images on disk. */ void saveLearningData(const std::string &filename, bool binaryMode = false, bool saveTrainingImages = true); @@ -2107,18 +2107,18 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai); /*! - Compute the pose estimation error, the mean square error (in pixel) between - the location of the detected keypoints and the location of the projection - of the 3D model with the estimated pose. + * Compute the pose estimation error, the mean square error (in pixel) between + * the location of the detected keypoints and the location of the projection + * of the 3D model with the estimated pose. * - \param matchKeyPoints : List of pairs between the detected keypoints and - the corresponding 3D points. - \param cam : Camera parameters. - \param cMo_est : Estimated pose of the object. + * \param matchKeyPoints : List of pairs between the detected keypoints and + * the corresponding 3D points. + * \param cam : Camera parameters. + * \param cMo_est : Estimated pose of the object. * - \return The mean square error (in pixel) between the location of the - detected keypoints and the location of the projection of the 3D model with - the estimated pose. + * \return The mean square error (in pixel) between the location of the + * detected keypoints and the location of the projection of the 3D model with + * the estimated pose. */ double computePoseEstimationError(const std::vector > &matchKeyPoints, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est); @@ -2129,38 +2129,38 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint void filterMatches(); /*! - Initialize method for RANSAC parameters and for detectors, extractors and - matcher, and for others parameters. + * Initialize method for RANSAC parameters and for detectors, extractors and + * matcher, and for others parameters. */ void init(); /*! - Initialize a keypoint detector based on its name. + * Initialize a keypoint detector based on its name. * - \param detectorName : Name of the detector (e.g FAST, SIFT, SURF, etc.). + * \param detectorName : Name of the detector (e.g FAST, SIFT, SURF, etc.). */ void initDetector(const std::string &detectorNames); /*! - Initialize a list of keypoints detectors if we want to concatenate multiple - detectors. + * Initialize a list of keypoints detectors if we want to concatenate multiple + * detectors. * - \param detectorNames : List of detector names. + * \param detectorNames : List of detector names. */ void initDetectors(const std::vector &detectorNames); /*! - Initialize a descriptor extractor based on its name. + * Initialize a descriptor extractor based on its name. * - \param extractorName : Name of the extractor (e.g SIFT, SURF, ORB, etc.). + * \param extractorName : Name of the extractor (e.g SIFT, SURF, ORB, etc.). */ void initExtractor(const std::string &extractorName); /*! - Initialize a list of descriptor extractors if we want to concatenate - multiple extractors. + * Initialize a list of descriptor extractors if we want to concatenate + * multiple extractors. * - \param extractorNames : List of extractor names. + * \param extractorNames : List of extractor names. 
*/ void initExtractors(const std::vector &extractorNames); diff --git a/modules/vision/src/key-point/vpKeyPoint.cpp b/modules/vision/src/key-point/vpKeyPoint.cpp index c459d95982..ce49956d51 100644 --- a/modules/vision/src/key-point/vpKeyPoint.cpp +++ b/modules/vision/src/key-point/vpKeyPoint.cpp @@ -3266,7 +3266,7 @@ bool vpKeyPoint::matchPoint(const vpImage &I_color, const vpCameraParame bool vpKeyPoint::matchPointAndDetect(const vpImage &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject, std::vector *imPts1, std::vector *imPts2, - double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle) + double *meanDescriptorDistance, double *detectionScore, const vpRect &rectangle) { if (imPts1 != NULL && imPts2 != NULL) { imPts1->clear(); @@ -3286,8 +3286,8 @@ bool vpKeyPoint::matchPointAndDetect(const vpImage &I, vpRect &bo if (meanDescriptorDistance != NULL) { *meanDescriptorDistance = meanDescriptorDistanceTmp; } - if (detection_score != NULL) { - *detection_score = score; + if (detectionScore != NULL) { + *detectionScore = score; } if (m_filteredMatches.size() >= 4) { diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h index 2733ea9d0b..d48712d45d 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h @@ -58,8 +58,9 @@ class vpMomentDatabase; * (i,j). * * vpFeatureMomentCentered computes interaction matrices all interaction - * matrices up to vpMomentObject::getOrder()-1. \attention The maximum order - * reached by vpFeatureMomentBasic is NOT the maximum order of the + * matrices up to vpMomentObject::getOrder()-1. + * + * \attention The maximum order reached by vpFeatureMomentBasic is NOT the maximum order of the * vpMomentObject, it is one unit smaller. For example if you define your * vpMomentObject up to order n then vpFeatureMomentBasic will be able to * compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with @@ -72,7 +73,6 @@ class vpMomentDatabase; */ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment { - protected: unsigned int order; /*! @@ -94,12 +94,7 @@ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment throw vpException(vpException::functionNotImplementedError, "Not implemented!"); } #endif - /*! - * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment - * \param select_one : first index (i) - * \param select_two : second index (j) - * \return Interaction matrix corresponding to the moment - */ + vpMatrix interaction(unsigned int select_one, unsigned int select_two) const; /*! diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp index 7373e2f280..a6642f887f 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp @@ -45,7 +45,7 @@ #include /*! - * Default constructor + * Default constructor. * \param moments_ : Database of moment primitives. * \param A_ : First plane coefficient for a plane equation of the following type Ax+By+C=1/Z. * \param B_ : Second plane coefficient for a plane equation of the following type Ax+By+C=1/Z. 
@@ -58,7 +58,7 @@ vpFeatureMomentCentered::vpFeatureMomentCentered(vpMomentDatabase &moments_, dou { } /*! - * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment + * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment. * \param select_one : first index (i). * \param select_two : second index (j). * \return Interaction matrix corresponding to the moment. From 6f09e697002e054a4dc7fb03ac0671cf61723961 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Wed, 25 Oct 2023 10:47:56 +0200 Subject: [PATCH 19/43] Fix override --- .../gui/include/visp3/gui/vpDisplayWin32.h | 15 +- .../mbt/include/visp3/mbt/vpMbHiddenFaces.h | 451 +++++++++--------- 2 files changed, 222 insertions(+), 244 deletions(-) diff --git a/modules/gui/include/visp3/gui/vpDisplayWin32.h b/modules/gui/include/visp3/gui/vpDisplayWin32.h index 3d3c31f0fc..47a8d8611a 100644 --- a/modules/gui/include/visp3/gui/vpDisplayWin32.h +++ b/modules/gui/include/visp3/gui/vpDisplayWin32.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Windows 32 display base class - * - * Authors: - * Bruno Renier - * -*****************************************************************************/ + */ #include @@ -165,10 +160,10 @@ class VISP_EXPORT vpDisplayWin32 : public vpDisplay void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, unsigned int thickness = 1) override; - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1); override + void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; - void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1) override; + void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, + unsigned int thickness = 1) override; void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h index b5a5cea431..8edb9db333 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,13 +29,8 @@ * * Description: * Generic model based tracker. This class declares the methods to implement - *in order to have a model based tracker. - * - * Authors: - * Romain Tallonneau - * Aurelien Yol - * -*****************************************************************************/ + * in order to have a model based tracker. + */ #pragma once #ifndef vpMbHiddenFaces_HH @@ -60,12 +54,12 @@ template class vpMbHiddenFaces; template void swap(vpMbHiddenFaces &first, vpMbHiddenFaces &second); /*! - \class vpMbHiddenFaces - - \brief Implementation of the polygons management for the model-based - trackers. - - \ingroup group_mbt_faces + * \class vpMbHiddenFaces + * + * \brief Implementation of the polygons management for the model-based + * trackers. 
+ * + * \ingroup group_mbt_faces */ template class vpMbHiddenFaces { @@ -118,10 +112,10 @@ template class vpMbHiddenFaces #endif /*! - Get the list of polygons. - - \return Mbt Klt polygons list. - */ + * Get the list of polygons. + * + * \return Mbt Klt polygons list. + */ std::vector &getPolygon() { return Lpol; } #ifdef VISP_HAVE_OGRE @@ -129,40 +123,40 @@ template class vpMbHiddenFaces #endif /*! - get the number of visible polygons. - - \return number of visible polygons. - */ + * Get the number of visible polygons. + * + * \return number of visible polygons. + */ unsigned int getNbVisiblePolygon() const { return nbVisiblePolygon; } #ifdef VISP_HAVE_OGRE /*! - Get the number of rays that will be sent toward each polygon for - visibility test. Each ray will go from the optic center of the camera to a - random point inside the considered polygon. - - \sa getGoodNbRayCastingAttemptsRatio() - - \return Number of rays sent. - */ + * Get the number of rays that will be sent toward each polygon for + * visibility test. Each ray will go from the optic center of the camera to a + * random point inside the considered polygon. + * + * \sa getGoodNbRayCastingAttemptsRatio() + * + * \return Number of rays sent. + */ unsigned int getNbRayCastingAttemptsForVisibility() { return nbRayAttempts; } /*! - Get the Ogre3D Context. - - \return A pointer on a vpAROgre instance. - */ + * Get the Ogre3D Context. + * + * \return A pointer on a vpAROgre instance. + */ vpAROgre *getOgreContext() { return ogre; } /*! - Get the ratio of visibility attempts that has to be successful to consider - a polygon as visible. - - \sa getNbRayCastingAttemptsForVisibility() - - \return Ratio of successful attempts that has to be considered. Value will - be between 0.0 (0%) and 1.0 (100%). - */ + * Get the ratio of visibility attempts that has to be successful to consider + * a polygon as visible. + * + * \sa getNbRayCastingAttemptsForVisibility() + * + * \return Ratio of successful attempts that has to be considered. Value will + * be between 0.0 (0%) and 1.0 (100%). + */ double getGoodNbRayCastingAttemptsRatio() { return ratioVisibleRay; } #endif @@ -170,69 +164,69 @@ template class vpMbHiddenFaces #ifdef VISP_HAVE_OGRE /*! - Tell whether if Ogre Context is initialised or not. - - \return True if it does, false otherwise. - */ + * Tell whether if Ogre Context is initialised or not. + * + * \return True if it does, false otherwise. + */ bool isOgreInitialised() { return ogreInitialised; } #endif /*! - Check if the polygon at position i in the list is visible. - - \param i : TPosition in the list. - - \return Return true if the polygon is visible. -*/ + * Check if the polygon at position i in the list is visible. + * + * \param i : TPosition in the list. + * + * \return Return true if the polygon is visible. + */ bool isVisible(unsigned int i) { return Lpol[i]->isVisible(); } #ifdef VISP_HAVE_OGRE bool isVisibleOgre(const vpTranslationVector &cameraPos, const unsigned int &index); #endif - //! operator[] as modifier. + //! Operator[] as modifier. inline PolygonType *operator[](unsigned int i) { return Lpol[i]; } - //! operator[] as reader. + //! Operator[] as reader. inline const PolygonType *operator[](unsigned int i) const { return Lpol[i]; } void reset(); #ifdef VISP_HAVE_OGRE /*! - Set the background size (by default it is 640x480). - The background size has to match with the size of the image that you are - using for the traking. - - \warning This function has to be called before initOgre(). 
- - \param h : Height of the background - \param w : Width of the background - */ + * Set the background size (by default it is 640x480). + * The background size has to match with the size of the image that you are + * using for the tracking. + * + * \warning This function has to be called before initOgre(). + * + * \param h : Height of the background + * \param w : Width of the background + */ void setBackgroundSizeOgre(const unsigned int &h, const unsigned int &w) { ogreBackground = vpImage(h, w, 0); } /*! - Set the number of rays that will be sent toward each polygon for - visibility test. Each ray will go from the optic center of the camera to a - random point inside the considered polygon. - - \sa setGoodNbRayCastingAttemptsRatio(const double &) - - \param attempts Number of rays to be sent. - */ + * Set the number of rays that will be sent toward each polygon for + * visibility test. Each ray will go from the optic center of the camera to a + * random point inside the considered polygon. + * + * \sa setGoodNbRayCastingAttemptsRatio(const double &) + * + * \param attempts Number of rays to be sent. + */ void setNbRayCastingAttemptsForVisibility(const unsigned int &attempts) { nbRayAttempts = attempts; } /*! - Set the ratio of visibility attempts that has to be successful to consider - a polygon as visible. - - \sa setNbRayCastingAttemptsForVisibility(const unsigned int &) - - \param ratio : Ratio of successful attempts that has to be considered. - Value has to be between 0.0 (0%) and 1.0 (100%). - */ + * Set the ratio of visibility attempts that has to be successful to consider + * a polygon as visible. + * + * \sa setNbRayCastingAttemptsForVisibility(const unsigned int &) + * + * \param ratio : Ratio of successful attempts that has to be considered. + * Value has to be between 0.0 (0%) and 1.0 (100%). + */ void setGoodNbRayCastingAttemptsRatio(const double &ratio) { ratioVisibleRay = ratio; @@ -242,16 +236,16 @@ template class vpMbHiddenFaces ratioVisibleRay = 0.0; } /*! - Enable/Disable the appearance of Ogre config dialog on startup. - - \warning This method has only effect when Ogre is used and Ogre visibility - test is enabled using setOgreVisibilityTest() with true parameter. - - \param showConfigDialog : if true, shows Ogre dialog window (used to set - Ogre rendering options) when Ogre visibility is enabled. By default, this - functionality is turned off. - */ - inline void setOgreShowConfigDialog(bool showConfigDialog) override { ogreShowConfigDialog = showConfigDialog; } + * Enable/Disable the appearance of Ogre config dialog on startup. + * + * \warning This method has only effect when Ogre is used and Ogre visibility + * test is enabled using setOgreVisibilityTest() with true parameter. + * + * \param showConfigDialog : if true, shows Ogre dialog window (used to set + * Ogre rendering options) when Ogre visibility is enabled. By default, this + * functionality is turned off. + */ + inline void setOgreShowConfigDialog(bool showConfigDialog) { ogreShowConfigDialog = showConfigDialog; } #endif unsigned int setVisible(unsigned int width, unsigned int height, const vpCameraParameters &cam, @@ -270,16 +264,16 @@ template class vpMbHiddenFaces bool &changed); #endif /*! - Get the number of polygons. - - \return Size of the list. - */ + * Get the number of polygons. + * + * \return Size of the list. + */ inline unsigned int size() const { return (unsigned int)Lpol.size(); } }; /*! - Basic constructor. -*/ + * Basic constructor. 
+ */ template vpMbHiddenFaces::vpMbHiddenFaces() : Lpol(), nbVisiblePolygon(0), scanlineRender() { @@ -294,8 +288,8 @@ vpMbHiddenFaces::vpMbHiddenFaces() : Lpol(), nbVisiblePolygon(0), s } /*! - Basic destructor. -*/ + * Basic destructor. + */ template vpMbHiddenFaces::~vpMbHiddenFaces() { for (unsigned int i = 0; i < Lpol.size(); i++) { @@ -325,8 +319,8 @@ template vpMbHiddenFaces::~vpMbHiddenFaces() } /*! - \relates vpMbHiddenFaces -*/ + * \relates vpMbHiddenFaces + */ template vpMbHiddenFaces::vpMbHiddenFaces(const vpMbHiddenFaces ©) : Lpol(), nbVisiblePolygon(copy.nbVisiblePolygon), scanlineRender(copy.scanlineRender) @@ -360,8 +354,8 @@ template void swap(vpMbHiddenFaces &first, vpMb } /*! - Copy assignment operator. -*/ + * Copy assignment operator. + */ template vpMbHiddenFaces &vpMbHiddenFaces::operator=(vpMbHiddenFaces other) { @@ -371,10 +365,10 @@ vpMbHiddenFaces &vpMbHiddenFaces::operator=(vpMbHidden } /*! - Add a polygon to the list of polygons. - - \param p : The polygon to add. -*/ + * Add a polygon to the list of polygons. + * + * \param p : The polygon to add. + */ template void vpMbHiddenFaces::addPolygon(PolygonType *p) { PolygonType *p_new = new PolygonType; @@ -393,8 +387,8 @@ template void vpMbHiddenFaces::addPolygon(Polyg } /*! - Reset the Hidden faces (remove the list of PolygonType) -*/ + * Reset the hidden faces (remove the list of PolygonType). + */ template void vpMbHiddenFaces::reset() { nbVisiblePolygon = 0; @@ -431,12 +425,12 @@ template void vpMbHiddenFaces::reset() } /*! - Compute the clipped points of the polygons that have been added via - addPolygon(). - - \param cMo : Pose that will be used to clip the polygons. - \param cam : Camera parameters that will be used to clip the polygons. -*/ + * Compute the clipped points of the polygons that have been added via + * addPolygon(). + * + * \param cMo : Pose that will be used to clip the polygons. + * \param cam : Camera parameters that will be used to clip the polygons. + */ template void vpMbHiddenFaces::computeClippedPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam) { @@ -453,13 +447,13 @@ void vpMbHiddenFaces::computeClippedPolygons(const vpHomogeneousMat } /*! - Render the scene in order to perform, later via computeScanLineQuery(), - visibility tests. - - \param cam : Camera parameters that will be used to render the scene. - \param w : Width of the render window. - \param h : Height of the render window. -*/ + * Render the scene in order to perform, later via computeScanLineQuery(), + * visibility tests. + * + * \param cam : Camera parameters that will be used to render the scene. + * \param w : Width of the render window. + * \param h : Height of the render window. + */ template void vpMbHiddenFaces::computeScanLineRender(const vpCameraParameters &cam, const unsigned int &w, const unsigned int &h) @@ -487,16 +481,15 @@ void vpMbHiddenFaces::computeScanLineRender(const vpCameraParameter } /*! - Compute Scanline visibility results for a line. - - \warning computeScanLineRender() function has to be called before - - \param a : First point of the line. - \param b : Second point of the line. - \param lines : Result of the scanline visibility. List of the visible parts - of the line. \param displayResults : True if the results have to be - displayed. False otherwise -*/ + * Compute scanline visibility results for a line. + * + * \warning computeScanLineRender() function has to be called before + * + * \param a : First point of the line. + * \param b : Second point of the line. 
+ * \param lines : Result of the scanline visibility. List of the visible parts of the line. + * \param displayResults : True if the results have to be displayed. False otherwise. + */ template void vpMbHiddenFaces::computeScanLineQuery(const vpPoint &a, const vpPoint &b, std::vector > &lines, @@ -506,19 +499,19 @@ void vpMbHiddenFaces::computeScanLineQuery(const vpPoint &a, const } /*! - Compute the number of visible polygons. - - \param cMo : The pose of the camera - \param angleAppears : Angle used to test the appearance of a face - \param angleDisappears : Angle used to test the disappearance of a face - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise \param useOgre : True if a Ogre is used to - test the visibility, False otherwise \param not_used : Unused parameter. - \param I : Image used to test if a face is entirely projected in the image. - \param cam : Camera parameters. - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons. + * + * \param cMo : The pose of the camera + * \param angleAppears : Angle used to test the appearance of a face + * \param angleDisappears : Angle used to test the disappearance of a face + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise + * \param useOgre : True if a Ogre is used to test the visibility, False otherwise + * \param not_used : Unused parameter. + * \param I : Image used to test if a face is entirely projected in the image. + * \param cam : Camera parameters. + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisiblePrivate(const vpHomogeneousMatrix &cMo, const double &angleAppears, const double &angleDisappears, bool &changed, bool useOgre, @@ -549,23 +542,21 @@ unsigned int vpMbHiddenFaces::setVisiblePrivate(const vpHomogeneous } /*! - Compute the visibility of a given face index. - - \param cMo : The pose of the camera - \param angleAppears : Angle used to test the appearance of a face - \param angleDisappears : Angle used to test the disappearance of a face - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise - \param useOgre : True if a Ogre is used to test the visibility, False otherwise. - \param not_used : Unused parameter. - \param width, height Image size. - \param cam : Camera parameters. - \param cameraPos : Position of the camera. Used only when Ogre is used as - 3rd party. - \param index : Index of the face to consider. - - \return Return true if the face is visible. -*/ + * Compute the visibility of a given face index. + * + * \param cMo : The pose of the camera + * \param angleAppears : Angle used to test the appearance of a face + * \param angleDisappears : Angle used to test the disappearance of a face + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise. + * \param useOgre : True if a Ogre is used to test the visibility, False otherwise. + * \param not_used : Unused parameter. + * \param width, height Image size. + * \param cam : Camera parameters. + * \param cameraPos : Position of the camera. Used only when Ogre is used as 3rd party. + * \param index : Index of the face to consider. + * + * \return Return true if the face is visible. 
+ */ template bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix &cMo, const double &angleAppears, const double &angleDisappears, bool &changed, bool useOgre, @@ -653,18 +644,16 @@ bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix & } /*! - Compute the number of visible polygons. - - \param width, height : Image size used to check if the region of interest is inside the - image. - \param cam : Camera parameters. - \param cMo : The pose of the camera. - \param angle : Angle used to test the appearance and disappearance of a face. - \param changed : True if a face appeared, disappeared or too many - points have been lost. False otherwise. - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons. + * + * \param width, height : Image size used to check if the region of interest is inside the image. + * \param cam : Camera parameters. + * \param cMo : The pose of the camera. + * \param angle : Angle used to test the appearance and disappearance of a face. + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise. + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisible(unsigned int width, unsigned int height, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, @@ -674,19 +663,17 @@ unsigned int vpMbHiddenFaces::setVisible(unsigned int width, unsign } /*! - Compute the number of visible polygons. - - \param width, height : Image size used to check if the region of interest is inside the - image. - \param cam : Camera parameters. - \param cMo : The pose of the camera. - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise. - \param angleAppears : Angle used to test the appearance of a face. - \param angleDisappears : Angle used to test the disappearance of a face. - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons. + * + * \param width, height : Image size used to check if the region of interest is inside the image. + * \param cam : Camera parameters. + * \param cMo : The pose of the camera. + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise. + * \param angleAppears : Angle used to test the appearance of a face. + * \param angleDisappears : Angle used to test the disappearance of a face. + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisible(unsigned int width, unsigned int height, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, @@ -697,16 +684,15 @@ unsigned int vpMbHiddenFaces::setVisible(unsigned int width, unsign } /*! - Compute the number of visible polygons. - - \param cMo : The pose of the camera - \param angleAppears : Angle used to test the appearance of a face - \param angleDisappears : Angle used to test the disappearance of a face - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons. + * + * \param cMo : The pose of the camera + * \param angleAppears : Angle used to test the appearance of a face + * \param angleDisappears : Angle used to test the disappearance of a face + * \param changed : True if a face appeared, disappeared or too many points have been lost. 
False otherwise + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisible(const vpHomogeneousMatrix &cMo, const double &angleAppears, const double &angleDisappears, bool &changed) @@ -716,10 +702,10 @@ unsigned int vpMbHiddenFaces::setVisible(const vpHomogeneousMatrix #ifdef VISP_HAVE_OGRE /*! - Initialise the ogre context for face visibility tests. - - \param cam : Camera parameters. -*/ + * Initialise the ogre context for face visibility tests. + * + * \param cam : Camera parameters. + */ template void vpMbHiddenFaces::initOgre(const vpCameraParameters &cam) { ogreInitialised = true; @@ -748,10 +734,10 @@ template void vpMbHiddenFaces::initOgre(const v } /*! - Update the display in Ogre Window. - - \param cMo : Pose used to display. -*/ + * Update the display in Ogre Window. + * + * \param cMo : Pose used to display. + */ template void vpMbHiddenFaces::displayOgre(const vpHomogeneousMatrix &cMo) { if (ogreInitialised && !ogre->isWindowHidden()) { @@ -767,19 +753,17 @@ template void vpMbHiddenFaces::displayOgre(cons } /*! - Compute the number of visible polygons through Ogre3D. - - \param width, height : Image size used to check if the region of interest is inside the - image. - \param cam : Camera parameters. - \param cMo : The pose of the camera. - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise. - \param angleAppears : Angle used to test the appearance of a face. - \param angleDisappears : Angle used to test the disappearance of a face. - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons through Ogre3D. + * + * \param width, height : Image size used to check if the region of interest is inside the image. + * \param cam : Camera parameters. + * \param cMo : The pose of the camera. + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise. + * \param angleAppears : Angle used to test the appearance of a face. + * \param angleDisappears : Angle used to test the disappearance of a face. + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisibleOgre(unsigned int width, unsigned int height, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, @@ -790,16 +774,15 @@ unsigned int vpMbHiddenFaces::setVisibleOgre(unsigned int width, un } /*! - Compute the number of visible polygons through Ogre3D. - - \param cMo : The pose of the camera - \param angleAppears : Angle used to test the appearance of a face - \param angleDisappears : Angle used to test the disappearance of a face - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons through Ogre3D. + * + * \param cMo : The pose of the camera + * \param angleAppears : Angle used to test the appearance of a face + * \param angleDisappears : Angle used to test the disappearance of a face + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisibleOgre(const vpHomogeneousMatrix &cMo, const double &angleAppears, const double &angleDisappears, bool &changed) @@ -808,13 +791,13 @@ unsigned int vpMbHiddenFaces::setVisibleOgre(const vpHomogeneousMat } /*! 
- Test the visibility of a polygon through Ogre3D via RayCasting. - - \param cameraPos : Position of the camera in the 3D world. - \param index : Index of the polygon. - - \return Return true if the polygon is visible, False otherwise. -*/ + * Test the visibility of a polygon through Ogre3D via RayCasting. + * + * \param cameraPos : Position of the camera in the 3D world. + * \param index : Index of the polygon. + * + * \return Return true if the polygon is visible, False otherwise. + */ template bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &cameraPos, const unsigned int &index) { From 643bbb3374ca6c78c457dbcc0e94e6c52b0ee638 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Wed, 25 Oct 2023 11:33:54 +0200 Subject: [PATCH 20/43] [CORPS] Iterates both in the positive and negative direction of the gradient to find the peak --- .../core/src/image/vpCannyEdgeDetection.cpp | 99 ++++++++++++------- 1 file changed, 64 insertions(+), 35 deletions(-) diff --git a/modules/core/src/image/vpCannyEdgeDetection.cpp b/modules/core/src/image/vpCannyEdgeDetection.cpp index 2f3c3fec6e..d93b0a636a 100644 --- a/modules/core/src/image/vpCannyEdgeDetection.cpp +++ b/modules/core/src/image/vpCannyEdgeDetection.cpp @@ -332,6 +332,64 @@ getAbsoluteTheta(const vpImage &dIx, const vpImage &dIy, const int } return absoluteTheta; } + +/** + * \brief Search in the direction of the gradient for the highest value of the gradient. + * + * \param[in] dIx The gradient image along the x-axis. + * \param[in] dIy The gradient image along the y-axis. + * \param[in] row The row of the initial point that is considered. + * \param[in] col The column of the initial point that is considered. + * \param[in] thetaQuadrant The gradient orientation quadrant of the initial point. + * \param[in] dRowGrad The direction of the gradient for the vertical direction. + * \param[in] dColGrad The direction of the gradient for the horizontal direction. + * \param[out] pixelsSeen The list of pixels that are of same gradient orientation quadrant. + * \param[out] bestPixel The pixel having the highest absolute value of gradient. + * \param[out] bestGrad The highest absolute value of gradient. 
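+ *
+ * The search starts at (row + dRowGrad, col + dColGrad) and keeps stepping by (dRowGrad, dColGrad)
+ * as long as the Manhattan gradient of the candidate pixel is non-zero and its gradient orientation
+ * belongs to the same quadrant \b thetaQuadrant as the initial point. Whenever a candidate has a
+ * higher gradient than the current best, it becomes the new \b bestPixel and the previous best is
+ * appended to \b pixelsSeen; otherwise the candidate itself is appended to \b pixelsSeen.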
+ */ +void +searchForBestGradientInGradientDirection(const vpImage &dIx, const vpImage &dIy, +const int &row, const int &col, const int &thetaQuadrant, const int &dRowGrad, const int &dColGrad, +std::vector > &pixelsSeen, std::pair &bestPixel, float &bestGrad) +{ + bool isGradientInTheSameDirection = true; + int rowCandidate = row + dRowGrad; + int colCandidate = col + dColGrad; + + while (isGradientInTheSameDirection) { + // Getting the gradients around the edge point + float gradPlus = getManhattanGradient(dIx, dIy, rowCandidate, colCandidate); + if (std::abs(gradPlus) < std::numeric_limits::epsilon()) { + // The gradient is almost null => ignoring the point + isGradientInTheSameDirection = false; + break; + } + int dRowGradPlusCandidate = 0, dRowGradMinusCandidate = 0; + int dColGradPlusCandidate = 0, dColGradMinusCandidate = 0; + float absThetaPlus = getAbsoluteTheta(dIx, dIy, rowCandidate, colCandidate); + int thetaQuadrantCandidate = getThetaQuadrant(absThetaPlus, dRowGradPlusCandidate, dRowGradMinusCandidate, dColGradPlusCandidate, dColGradMinusCandidate); + if (thetaQuadrantCandidate != thetaQuadrant) { + isGradientInTheSameDirection = false; + break; + } + + std::pair pixelCandidate(rowCandidate, colCandidate); + if (gradPlus > bestGrad) { + // The gradient is higher with the next pixel candidate + // Saving it + bestGrad = gradPlus; + pixelsSeen.push_back(bestPixel); + bestPixel = pixelCandidate; + } + else { + // Best pixel is still the best + pixelsSeen.push_back(pixelCandidate); + } + rowCandidate += dRowGrad; + colCandidate += dColGrad; + } +} + void vpCannyEdgeDetection::performEdgeThining() { @@ -358,45 +416,16 @@ vpCannyEdgeDetection::performEdgeThining() int dColGradPlus = 0, dColGradMinus = 0; int thetaQuadrant = getThetaQuadrant(absoluteTheta, dRowGradPlus, dRowGradMinus, dColGradPlus, dColGradMinus); - bool isGradientInTheSameDirection = true; std::vector > pixelsSeen; std::pair bestPixel(row, col); float bestGrad = grad; - int rowCandidate = row + dRowGradPlus; - int colCandidate = col + dColGradPlus; - - while (isGradientInTheSameDirection) { - // Getting the gradients around the edge point - float gradPlus = getManhattanGradient(dIx, dIy, rowCandidate, colCandidate); - if (std::abs(gradPlus) < std::numeric_limits::epsilon()) { - // The gradient is almost null => ignoring the point - isGradientInTheSameDirection = false; - break; - } - int dRowGradPlusCandidate = 0, dRowGradMinusCandidate = 0; - int dColGradPlusCandidate = 0, dColGradMinusCandidate = 0; - float absThetaPlus = getAbsoluteTheta(dIx, dIy, rowCandidate, colCandidate); - int thetaQuadrantCandidate = getThetaQuadrant(absThetaPlus, dRowGradPlusCandidate, dRowGradMinusCandidate, dColGradPlusCandidate, dColGradMinusCandidate); - if (thetaQuadrantCandidate != thetaQuadrant) { - isGradientInTheSameDirection = false; - break; - } - std::pair pixelCandidate(rowCandidate, colCandidate); - if (gradPlus > bestGrad) { - // The gradient is higher with the nex pixel candidate - // Saving it - bestGrad = gradPlus; - pixelsSeen.push_back(bestPixel); - bestPixel = pixelCandidate; - } - else { - // Best pixel is still the best - pixelsSeen.push_back(pixelCandidate); - } - rowCandidate += dRowGradPlus; - colCandidate += dColGradPlus; - } + // iterate over all the pixels having the same gradient orientation quadrant + searchForBestGradientInGradientDirection(dIx, dIy, row, col, thetaQuadrant, dRowGradPlus, dColGradPlus, + pixelsSeen, bestPixel, bestGrad); + + searchForBestGradientInGradientDirection(dIx, dIy, row, col, 
thetaQuadrant, dRowGradMinus, dColGradMinus, + pixelsSeen, bestPixel, bestGrad); // Keeping the edge point that has the highest gradient m_edgeCandidateAndGradient[bestPixel] = bestGrad; From bbe3cb837340aacd6ac60afa46e8903adbfd3534 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Wed, 25 Oct 2023 13:59:22 +0200 Subject: [PATCH 21/43] New override fixes --- .../tracking/forward-projection/vpPoint.cpp | 94 +++++++++---------- .../include/visp3/mbt/vpMbGenericTracker.h | 7 +- .../mbt/include/visp3/mbt/vpMbKltTracker.h | 2 +- 3 files changed, 49 insertions(+), 54 deletions(-) diff --git a/modules/core/src/tracking/forward-projection/vpPoint.cpp b/modules/core/src/tracking/forward-projection/vpPoint.cpp index 75e7949761..efbec9be21 100644 --- a/modules/core/src/tracking/forward-projection/vpPoint.cpp +++ b/modules/core/src/tracking/forward-projection/vpPoint.cpp @@ -132,13 +132,15 @@ void vpPoint::setWorldCoordinates(const vpColVector &oP_) oP[1] = oP_[1]; oP[2] = oP_[2]; oP[3] = 1.; - } else if (oP_.size() == 4) { + } + else if (oP_.size() == 4) { oP[0] = oP_[0]; oP[1] = oP_[1]; oP[2] = oP_[2]; oP[3] = oP_[3]; oP /= oP[3]; - } else { + } + else { throw(vpException(vpException::dimensionError, "Cannot initialize vpPoint from vector with size %d", oP_.size())); } } @@ -160,13 +162,15 @@ void vpPoint::setWorldCoordinates(const std::vector &oP_) oP[1] = oP_[1]; oP[2] = oP_[2]; oP[3] = 1.; - } else if (oP_.size() == 4) { + } + else if (oP_.size() == 4) { oP[0] = oP_[0]; oP[1] = oP_[1]; oP[2] = oP_[2]; oP[3] = oP_[3]; oP /= oP[3]; - } else { + } + else { throw(vpException(vpException::dimensionError, "Cannot initialize vpPoint from vector with size %d", oP_.size())); } } @@ -283,36 +287,36 @@ void vpPoint::changeFrame(const vpHomogeneousMatrix &cMo) frame are set to the same coordinates than the one in the camera frame. */ const vpPoint -operator*(const vpHomogeneousMatrix &aMb, const vpPoint& bP) +operator*(const vpHomogeneousMatrix &aMb, const vpPoint &bP) { - vpPoint aP ; + vpPoint aP; - vpColVector v(4),v1(4) ; + vpColVector v(4), v1(4); - v[0] = bP.get_X() ; - v[1] = bP.get_Y() ; - v[2] = bP.get_Z() ; - v[3] = bP.get_W() ; + v[0] = bP.get_X(); + v[1] = bP.get_Y(); + v[2] = bP.get_Z(); + v[3] = bP.get_W(); - v1[0] = aMb[0][0]*v[0] + aMb[0][1]*v[1]+ aMb[0][2]*v[2]+ aMb[0][3]*v[3] ; - v1[1] = aMb[1][0]*v[0] + aMb[1][1]*v[1]+ aMb[1][2]*v[2]+ aMb[1][3]*v[3] ; - v1[2] = aMb[2][0]*v[0] + aMb[2][1]*v[1]+ aMb[2][2]*v[2]+ aMb[2][3]*v[3] ; - v1[3] = aMb[3][0]*v[0] + aMb[3][1]*v[1]+ aMb[3][2]*v[2]+ aMb[3][3]*v[3] ; + v1[0] = aMb[0][0]*v[0] + aMb[0][1]*v[1]+ aMb[0][2]*v[2]+ aMb[0][3]*v[3]; + v1[1] = aMb[1][0]*v[0] + aMb[1][1]*v[1]+ aMb[1][2]*v[2]+ aMb[1][3]*v[3]; + v1[2] = aMb[2][0]*v[0] + aMb[2][1]*v[1]+ aMb[2][2]*v[2]+ aMb[2][3]*v[3]; + v1[3] = aMb[3][0]*v[0] + aMb[3][1]*v[1]+ aMb[3][2]*v[2]+ aMb[3][3]*v[3]; - v1 /= v1[3] ; + v1 /= v1[3]; // v1 = M*v ; - aP.set_X(v1[0]) ; - aP.set_Y(v1[1]) ; - aP.set_Z(v1[2]) ; - aP.set_W(v1[3]) ; + aP.set_X(v1[0]); + aP.set_Y(v1[1]); + aP.set_Z(v1[2]); + aP.set_W(v1[3]); - aP.set_oX(v1[0]) ; - aP.set_oY(v1[1]) ; - aP.set_oZ(v1[2]) ; - aP.set_oW(v1[3]) ; + aP.set_oX(v1[0]); + aP.set_oY(v1[1]); + aP.set_oZ(v1[2]); + aP.set_oW(v1[3]); - return aP ; + return aP; } /*! @@ -325,25 +329,25 @@ operator*(const vpHomogeneousMatrix &aMb, const vpPoint& bP) \return A point with 2D coordinates in the image plane a. 
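  The computation corresponds to the homogeneous relation \f$ {}^a{\bf p} = {}^a{\bf H}_b \, {}^b{\bf p} \f$
  with \f$ {\bf p} = (x, y, w)^T \f$ the homogeneous image-plane coordinates of the point; the resulting
  vector is not divided by its third component.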
*/ const vpPoint -operator*(const vpHomography &aHb, const vpPoint& bP) +operator*(const vpHomography &aHb, const vpPoint &bP) { - vpPoint aP ; - vpColVector v(3),v1(3) ; + vpPoint aP; + vpColVector v(3), v1(3); - v[0] = bP.get_x() ; - v[1] = bP.get_y() ; - v[2] = bP.get_w() ; + v[0] = bP.get_x(); + v[1] = bP.get_y(); + v[2] = bP.get_w(); - v1[0] = aHb[0][0]*v[0] + aHb[0][1]*v[1]+ aHb[0][2]*v[2] ; - v1[1] = aHb[1][0]*v[0] + aHb[1][1]*v[1]+ aHb[1][2]*v[2] ; - v1[2] = aHb[2][0]*v[0] + aHb[2][1]*v[1]+ aHb[2][2]*v[2] ; + v1[0] = aHb[0][0]*v[0] + aHb[0][1]*v[1]+ aHb[0][2]*v[2]; + v1[1] = aHb[1][0]*v[0] + aHb[1][1]*v[1]+ aHb[1][2]*v[2]; + v1[2] = aHb[2][0]*v[0] + aHb[2][1]*v[1]+ aHb[2][2]*v[2]; // v1 = M*v ; - aP.set_x(v1[0]) ; - aP.set_y(v1[1]) ; - aP.set_w(v1[2]) ; + aP.set_x(v1[0]); + aP.set_y(v1[1]); + aP.set_w(v1[2]); - return aP ; + return aP; } #endif //! For memory issue (used by the vpServo class only). @@ -402,20 +406,6 @@ void vpPoint::display(const vpImage &I, const vpHomogeneousMatrix &cMo, vpFeatureDisplay::displayPoint(_p[0], _p[1], cam, I, color, thickness); } -VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpPoint & /* vpp */) { return (os << "vpPoint"); } - -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) -vpPoint &vpPoint::operator=(const vpPoint &vpp) -{ - p = vpp.p; - cP = vpp.cP; - oP = vpp.oP; - cPAvailable = vpp.cPAvailable; - - return *this; -} -#endif - /*! * Display the projection of a 3D point in image \e I. * diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index b576899404..bdd129c628 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -782,8 +782,13 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker #endif using vpMbDepthDenseTracker::setPose; #endif - virtual void setPose(const vpImage *const I, const vpImage *const I_color, +#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) + virtual void setPose(const vpImage *I, const vpImage *I_color, const vpHomogeneousMatrix &cdMo) override; +#else + virtual void setPose(const vpImage *I, const vpImage *I_color, + const vpHomogeneousMatrix &cdMo); +#endif }; #ifdef VISP_HAVE_NLOHMANN_JSON friend void to_json(nlohmann::json &j, const TrackerWrapper &t); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h index 448f7b4bc3..6bb6556938 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h @@ -468,7 +468,7 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker void preTracking(const vpImage &I); bool postTracking(const vpImage &I, vpColVector &w); virtual void reinit(const vpImage &I); - virtual void setPose(const vpImage *const I, const vpImage *const I_color, + virtual void setPose(const vpImage *I, const vpImage *I_color, const vpHomogeneousMatrix &cdMo); //@} }; From 2512b21f47d25f11e0e923114bcfad1e30dbd166 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Wed, 25 Oct 2023 14:12:13 +0200 Subject: [PATCH 22/43] [CORPS] Avoids to look at points that have already been explored and rejected --- modules/core/src/image/vpCannyEdgeDetection.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/modules/core/src/image/vpCannyEdgeDetection.cpp b/modules/core/src/image/vpCannyEdgeDetection.cpp index 
d93b0a636a..274ba7f6b6 100644 --- a/modules/core/src/image/vpCannyEdgeDetection.cpp +++ b/modules/core/src/image/vpCannyEdgeDetection.cpp @@ -464,8 +464,10 @@ vpCannyEdgeDetection::performEdgeTracking() if (it->second == STRONG_EDGE) { m_edgeMap[it->first.first][it->first.second] = 255; } - else if (recursiveSearchForStrongEdge(it->first)) { - m_edgeMap[it->first.first][it->first.second] = 255; + else if (it->second == WEAK_EDGE) { + if (recursiveSearchForStrongEdge(it->first)) { + m_edgeMap[it->first.first][it->first.second] = 255; + } } } } From bf36aa624d13b10bed4b1bfdbdd5d16be9d33f84 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Wed, 25 Oct 2023 18:36:22 +0200 Subject: [PATCH 23/43] Revert back changes in testImageCircle.cpp to see if test pass on ci --- .../test/tools/geometry/testImageCircle.cpp | 308 +++++++++--------- 1 file changed, 153 insertions(+), 155 deletions(-) diff --git a/modules/core/test/tools/geometry/testImageCircle.cpp b/modules/core/test/tools/geometry/testImageCircle.cpp index 66da386f36..b029a3a212 100644 --- a/modules/core/test/tools/geometry/testImageCircle.cpp +++ b/modules/core/test/tools/geometry/testImageCircle.cpp @@ -1,4 +1,5 @@ -/* +/**************************************************************************** + * * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -28,8 +29,9 @@ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Description: - * Test vpImageCircle. - */ + * Test vpRect. + * +*****************************************************************************/ #include #include @@ -45,12 +47,11 @@ bool compareAngles(const float &actualVal, const float &theoreticalVal) float ensureIsBetweenMinPiAndPi(const float &theta) { float theta1 = theta; - float pi = static_cast(M_PI); - if (theta1 > pi) { - theta1 -= 2.0 * pi; + if (theta1 > M_PI) { + theta1 -= 2.0 * M_PI; } - else if (theta1 < -pi) { - theta1 += 2.0 * pi; + else if (theta1 < -M_PI) { + theta1 += 2.0 * M_PI; } return theta1; } @@ -66,15 +67,12 @@ int main() const float HEIGHT_SWITCHED = WIDTH; // The RoI must be inverted in order to cross left and right axes while crossing only the top axis vpRect switchedRoI(OFFSET, OFFSET, WIDTH_SWITCHED, HEIGHT_SWITCHED); bool hasSucceeded = true; - float pi = static_cast(M_PI); - float pi_2 = static_cast(M_PI_2); - float pi_4 = static_cast(M_PI_4); // Test with no intersections { vpImageCircle circle(vpImagePoint(HEIGHT / 2.f, WIDTH / 2.f), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -96,7 +94,7 @@ int main() vpRect roiSquare(OFFSET, OFFSET, HEIGHT, HEIGHT); vpImageCircle circle(vpImagePoint(OFFSET + HEIGHT / 2.f, OFFSET + HEIGHT / 2.f), HEIGHT / 2.f); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * HEIGHT / 2.f; + float theoreticalValue = 2.f * M_PI * HEIGHT / 2.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -121,7 +119,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * pi * RADIUS /3.f; + float theoreticalValue = 4.f * M_PI * RADIUS /3.f; bool isValueOK = 
compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -146,7 +144,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS /3.f; + float theoreticalValue = 2.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -171,7 +169,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -196,7 +194,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * pi * RADIUS /3.f; + float theoreticalValue = 4.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -221,7 +219,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS /3.f; + float theoreticalValue = 2.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -246,7 +244,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -267,12 +265,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = pi / 3.f; + float theta = M_PI / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * pi * RADIUS /3.f; + float theoreticalValue = 5.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -293,12 +291,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = -2.f * pi/3.f; + float theta = -2.f * M_PI/3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = pi * RADIUS /3.f; + float theoreticalValue = M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -319,12 +317,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = pi_2; + float theta = M_PI_2; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, 
theoreticalValue); std::string statusTest; if (isValueOK) { @@ -345,12 +343,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = -pi / 3.f; + float theta = -M_PI / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * pi * RADIUS /3.f; + float theoreticalValue = 5.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -371,12 +369,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = pi / 3.f; + float theta = M_PI / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = pi * RADIUS /3.f; + float theoreticalValue = M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -400,7 +398,7 @@ int main() float vc = OFFSET + HEIGHT - RADIUS; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -427,7 +425,7 @@ int main() float vc = OFFSET; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = pi_2 * RADIUS; + float theoreticalValue = M_PI_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -453,13 +451,13 @@ int main() // (4): umin = uc + r cos(theta_v_max) ; v_cross_max = vc - r sin(theta_v_max) >= vmin && <= vmin + height // (3) & (4) => uc = umin - r cos(theta_v_min) = umin - r cos(theta_v_max) <=> theta_v_min = - theta_v_max // (3) & (4) => vc >= vmin + r sin(theta_v_min) && vc >= vmin + r sin (theta_v_max) - float theta_v_min = pi / 4.f; + float theta_v_min = M_PI / 4.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + RADIUS * std::sin(theta_v_min) + 1.f; vc = std::max(vc, OFFSET + RADIUS * std::sin(-theta_v_min) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = pi_2 * RADIUS; + float theoreticalValue = M_PI_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -487,13 +485,13 @@ int main() // (1) => uc + r cos(theta_u_top_min) >= umin <=> uc >= umin - r cos(theta_u_top_min) // (2) => uc + r cos(theta_u_top_max) >= umin <=> uc >= umin - r cos(theta_u_top_max) - float theta_u_top_min = -1.1f * pi_2; + float theta_u_top_min = -1.1f * M_PI_2; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos(pi - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 0.2f * pi_2 * RADIUS; + float 
theoreticalValue = 0.2f * M_PI_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -523,10 +521,10 @@ int main() // (3) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (3) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = M_PI - theta_u_top_min; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos(pi - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET - uc)/RADIUS); theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); @@ -566,13 +564,13 @@ int main() // (1) => vc = vmin + r sin(theta_u_top_min) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_top_min = 2.f * pi / 3.f; - float theta_v_max = -pi_2; + float theta_u_top_min = 2.f * M_PI / 3.f; + float theta_v_max = -M_PI_2; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; + float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -601,13 +599,13 @@ int main() // (1) <=> asin((vc - vmin)/r) >= acos[(umin + width - uc)/r] <=> vc >= r sin(acos[(umin + width - uc)/r]) + vmin // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_max = -7.f * pi / 8.f; + float theta_v_max = -7.f * M_PI / 8.f; float theta_v_min = -theta_v_max; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = RADIUS * std::sin(std::acos((OFFSET + WIDTH - uc)/RADIUS)) + OFFSET + 1.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * RADIUS; + float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -636,8 +634,8 @@ int main() // Choice: theta_u_top_min = -0.9 * PI / 2 // (1) => vc = vmin + r sin(theta_u_top_min) // (2) vc - r sin(theta_v_min) <= vmin => asin((vc - vmin)/r) <= theta_v_min - float theta_u_top_min = -0.9f * pi_2; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = -0.9f * M_PI_2; + float theta_u_top_max = M_PI - theta_u_top_min; theta_u_top_max = ensureIsBetweenMinPiAndPi(theta_u_top_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::asin((vc - OFFSET)/RADIUS) + 1.f; @@ -674,10 +672,10 @@ int main() // (2) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (2) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = M_PI - theta_u_top_min; float uc = OFFSET + WIDTH - RADIUS * 
std::cos(theta_u_top_min) - 1.f; - uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos(pi - theta_u_top_min) - 1.f); + uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI - theta_u_top_min) - 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET + WIDTH - uc)/RADIUS); theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); @@ -689,7 +687,7 @@ int main() } vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -717,13 +715,13 @@ int main() // (3) => vc = vmin + height + r sin(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = pi_2; - float theta_u_bot_max = -pi / 3.f; + float theta_v_min = M_PI_2; + float theta_u_bot_max = -M_PI / 3.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_max);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; + float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -753,7 +751,7 @@ int main() // (4) => vc <= vmin + height + r sin(theta_v_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = pi_4 / 2.f; + float theta_v_min = M_PI_4 / 2.f; float theta_v_max = -theta_v_min; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(theta_v_max) - 1.f); @@ -789,8 +787,8 @@ int main() // (1) => uc >= umin - r cos(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 5.f * pi_4 / 2.f; - float theta_u_bot_max = pi - theta_u_bot_min; + float theta_u_bot_min = 5.f * M_PI_4 / 2.f; + float theta_u_bot_max = M_PI - theta_u_bot_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = std::max(OFFSET - RADIUS * std::cos(theta_u_bot_min) + 1.f, OFFSET - RADIUS * std::cos(theta_u_bot_max) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); @@ -825,10 +823,10 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -5.f * pi / 8.f; - float theta_u_bot_max = pi - theta_u_bot_min; + float theta_u_bot_min = -5.f * M_PI / 8.f; + float theta_u_bot_max = M_PI - theta_u_bot_min; theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_min = 7.f * pi / 8.f; + float theta_v_min = 7.f * M_PI / 8.f; float theta_v_max = -theta_v_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); @@ -862,13 +860,13 @@ int main() // (1) => vc = vmin + height + r sin(theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) 
theta_v_min = - theta_v_max - float theta_u_bot_min = -2.f * pi / 3.f; - float theta_v_min = pi_2; + float theta_u_bot_min = -2.f * M_PI / 3.f; + float theta_v_min = M_PI_2; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; + float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -897,12 +895,12 @@ int main() // (2) & (4) => vc <= vmin + height + r sin(theta_v_min) & vc <= vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = 5.f * pi / 6.f; + float theta_v_min = 5.f * M_PI / 6.f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(-theta_v_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f + float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -931,12 +929,12 @@ int main() // (1) & (3) => uc < umin + width - r cos(theta_u_bot_min) & uc <= umin + width - r cos(PI - theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 4.f * pi / 6.f; + float theta_u_bot_min = 4.f * M_PI / 6.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); - float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)pi -theta_u_bot_min) - 1.f); + float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI -theta_u_bot_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f + float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -966,16 +964,16 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -7.f * pi / 8.f; - float theta_u_bot_max = pi - theta_u_bot_min; + float theta_u_bot_min = -7.f * M_PI / 8.f; + float theta_u_bot_max = M_PI - theta_u_bot_min; theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_max = -3.f * pi / 8.f; + float theta_v_max = -3.f * M_PI / 8.f; float theta_v_min = -theta_v_max; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; + float theoreticalValue = (2.f * M_PI - ((theta_v_min - 
theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1004,12 +1002,12 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = 3.f * pi / 8.f; - float theta_v_min = 7.f * pi / 8.f; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = 3.f * M_PI / 8.f; + float theta_v_min = 7.f * M_PI / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * pi / 8.f; - float theta_u_bottom_max = -3.f * pi / 8.f; + float theta_u_bottom_min = -5.f * M_PI / 8.f; + float theta_u_bottom_max = -3.f * M_PI / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET - radius * std::cos(theta_v_min); @@ -1044,9 +1042,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = pi / 6.f; - float theta_u_top_min = pi - theta_u_top_max; - float theta_v_min = pi / 3.f; + float theta_u_top_max = M_PI / 6.f; + float theta_u_top_min = M_PI - theta_u_top_max; + float theta_v_min = M_PI / 3.f; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1082,9 +1080,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * pi / 6.f; - float theta_u_top_max = pi - theta_u_top_min; - float theta_v_min = pi; + float theta_u_top_min = 4.f * M_PI / 6.f; + float theta_u_top_max = M_PI - theta_u_top_min; + float theta_v_min = M_PI; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT / (2.f * std::sin(theta_u_top_min)); // vmin + h - vmin = (vc - r sin(-theta_u_top_min)) - (vc - r sin(theta_top_min)) @@ -1092,7 +1090,7 @@ int main() float uc = OFFSET - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1121,8 +1119,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi_2; - float theta_v_min = pi_4; + float theta_u_top_min = M_PI_2; + float theta_v_min = M_PI_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1158,8 +1156,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi_2; - float theta_v_min = 3.f * pi_4; + float theta_u_top_min = M_PI_2; + float theta_v_min = 3.f * M_PI_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; 
float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1196,8 +1194,8 @@ int main() // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max float theta_u_top_max = 0.f; - float theta_u_bot_max = -pi / 3.f; - float theta_v_max = -pi / 6.f; + float theta_u_bot_max = -M_PI / 3.f; + float theta_v_max = -M_PI / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1232,9 +1230,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = pi / 3.f; + float theta_u_top_max = M_PI / 3.f; float theta_u_bot_max = 0.f; - float theta_v_min = pi / 6.f; + float theta_v_min = M_PI / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1269,18 +1267,18 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = 3.f * pi / 8.f; - float theta_v_min = 1.f * pi / 8.f; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = 3.f * M_PI / 8.f; + float theta_v_min = 1.f * M_PI / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * pi / 8.f; - float theta_u_bottom_max = -3.f * pi / 8.f; + float theta_u_bottom_min = -5.f * M_PI / 8.f; + float theta_u_bottom_max = -3.f * M_PI / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1309,15 +1307,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * pi / 6.f; - float theta_v_min = 2.f * pi / 3.f; + float theta_u_top_min = 5.f * M_PI / 6.f; + float theta_v_min = 2.f * M_PI / 3.f; float theta_u_bottom_min = -theta_u_top_min; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_u_top_min - theta_u_bottom_min)) * radius; + float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_u_bottom_min)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1346,8 +1344,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - 
theta_u_bottom_max - float theta_u_top_min = 4.f * pi / 6.f; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = 4.f * M_PI / 6.f; + float theta_u_top_max = M_PI - theta_u_top_min; float theta_v_min = 0; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; @@ -1356,7 +1354,7 @@ int main() float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1385,15 +1383,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi_2; - float theta_v_min = 3.f * pi_4; + float theta_u_top_min = M_PI_2; + float theta_v_min = 3.f * M_PI_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1422,15 +1420,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi_2; - float theta_v_min = pi_4; + float theta_u_top_min = M_PI_2; + float theta_v_min = M_PI_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1459,15 +1457,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi; - float theta_u_bot_min = -2.f * pi / 3.f; - float theta_v_max = -5.f * pi / 6.f; + float theta_u_top_min = M_PI; + float theta_u_bot_min = -2.f * M_PI / 3.f; + float theta_v_max = -5.f * M_PI / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_u_top_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_v_max)) * radius; bool isValueOK = 
compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1496,9 +1494,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * pi / 3.f; - float theta_u_bot_min = pi; - float theta_v_min = 5.f * pi / 6.f; + float theta_u_top_min = 2.f * M_PI / 3.f; + float theta_u_bot_min = M_PI; + float theta_v_min = 5.f * M_PI / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1535,12 +1533,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * pi / 8.f; + float theta_v_left_min = 7.f * M_PI / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = pi / 8.f; + float theta_v_right_min = M_PI / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = M_PI - theta_u_top_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1576,12 +1574,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * pi / 3.f; + float theta_u_top_min = -2.f * M_PI / 3.f; float uc = OFFSET + WIDTH_SWITCHED/2.f; float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); - float theoreticalValue = (pi/3.f) * RADIUS; + float theoreticalValue = (M_PI/3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1612,9 +1610,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_max = -5.f * pi / 8.f; - float theta_v_right_max = -3.f *pi / 8.f; - float theta_u_top_min = -7.f * pi / 8.f; + float theta_v_left_max = -5.f * M_PI / 8.f; + float theta_v_right_max = -3.f *M_PI / 8.f; + float theta_u_top_min = -7.f * M_PI / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET - radius * std::cos(theta_v_left_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1651,9 +1649,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_max = -pi / 3.f; + float theta_u_top_max = -M_PI / 3.f; float theta_v_right_max = 0.f; - float theta_v_left_max = -pi_2; + float theta_v_left_max = -M_PI_2; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET; float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1690,9 +1688,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * pi / 3.f; - float theta_v_left_max = pi; - float theta_v_right_max = -pi_2; + float theta_u_top_min = -2.f * M_PI / 3.f; + float theta_v_left_max = 
M_PI; + float theta_v_right_max = -M_PI_2; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1729,12 +1727,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * pi / 8.f; + float theta_v_left_min = 7.f * M_PI / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = pi / 8.f; + float theta_v_right_min = M_PI / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_bot_min = -5.f * pi / 8.f; - float theta_u_bot_max = -pi - theta_u_bot_min; + float theta_u_bot_min = -5.f * M_PI / 8.f; + float theta_u_bot_max = -M_PI - theta_u_bot_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1770,10 +1768,10 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * pi / 3.f; - float theta_u_bot_max = pi - theta_u_bot_min; - float theta_v_left_min = 5.f * pi / 6.f; - float theta_v_right_min = pi / 6.f; + float theta_u_bot_min = 2.f * M_PI / 3.f; + float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_v_left_min = 5.f * M_PI / 6.f; + float theta_v_right_min = M_PI / 6.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1809,9 +1807,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 7.f * pi / 8.f; - float theta_v_left_min = 5.f * pi / 8.f; - float theta_v_right_min = 3.f * pi / 8.f; + float theta_u_bot_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 5.f * M_PI / 8.f; + float theta_v_right_min = 3.f * M_PI / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1847,8 +1845,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_max = pi / 3.f; - float theta_v_left_min = pi_2; + float theta_u_bot_max = M_PI / 3.f; + float theta_v_left_min = M_PI_2; float theta_v_right_min = 0.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET; @@ -1885,9 +1883,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * pi / 3.f; - float theta_v_right_min = pi_2; - float theta_v_left_min = pi; + float theta_u_bot_min = 2.f * M_PI / 3.f; + float theta_v_right_min = M_PI_2; + float theta_v_left_min = M_PI; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET 
+ HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1919,17 +1917,17 @@ int main() // (6): u_cross_bot_max = uc + r cos(theta_u_bottom_max) <= umin_roi + width ; vmin_roi + height = vc - r sin(theta_u_bottom_max) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * pi / 3.f; - float theta_u_top_max = pi / 3.f; - float theta_u_bottom_min = -2.f * pi / 3.f; - float theta_u_bottom_max = -pi / 3.f; + float theta_u_top_min = 2.f * M_PI / 3.f; + float theta_u_top_max = M_PI / 3.f; + float theta_u_bottom_min = -2.f * M_PI / 3.f; + float theta_u_bottom_max = -M_PI / 3.f; float uc = OFFSET + WIDTH / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1955,9 +1953,9 @@ int main() // (6): u_min + width = uc + r cos(theta_v_right_max); v_cross_right_max = vc - r sin(theta_v_right_max) // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 5.f * pi / 6.f; + float theta_v_left_min = 5.f * M_PI / 6.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = pi / 6.f; + float theta_v_right_min = M_PI / 6.f; float theta_v_right_max = -theta_v_right_min; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + WIDTH / 2.f; @@ -1987,14 +1985,14 @@ int main() // Choosing theta_v_left_min = 7 PI / 8 and circle at the center of the RoI // umin = uc + r cos(theta_v_left_min) => r = (umin - uc) / cos(theta_v_left_min) vpRect squareRoI(OFFSET, OFFSET, HEIGHT, HEIGHT); - float theta_v_left_min = 7.f * pi / 8.f; + float theta_v_left_min = 7.f * M_PI / 8.f; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = (OFFSET - uc) / std::cos(theta_v_left_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(squareRoI); - float theoreticalValue = pi * radius; + float theoreticalValue = M_PI * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { From 7f239f9c0a18c2cb2ee8ced41b7fb54a880d95f3 Mon Sep 17 00:00:00 2001 From: Souriya Trinh Date: Fri, 27 Oct 2023 00:06:52 +0200 Subject: [PATCH 24/43] Fix variables assignment. 
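Each of the testImageCircle.cpp cases above follows the same pattern: place a vpImageCircle so that it crosses a known set of RoI borders, call computeArcLengthInRoI() on that RoI, and compare the result against a hand-derived arc length with a one-pixel tolerance. A minimal standalone sketch of that pattern is given below; the numeric values (offset, RoI size, radius) are illustrative and not taken from a specific test case.

#include <visp3/core/vpImageCircle.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/core/vpRect.h>
#include <cmath>
#include <iostream>

int main()
{
  const float OFFSET = 5.f, WIDTH = 640.f, HEIGHT = 480.f, RADIUS = 50.f;
  const float PI = 3.14159265358979323846f;
  vpRect roi(OFFSET, OFFSET, WIDTH, HEIGHT);

  // Circle fully contained in the RoI: the whole perimeter must be counted
  vpImageCircle circle(vpImagePoint(OFFSET + HEIGHT / 2.f, OFFSET + WIDTH / 2.f), RADIUS);
  float arcLengthCircle = circle.computeArcLengthInRoI(roi);
  float theoreticalValue = 2.f * PI * RADIUS;

  // Allow up to 1 pixel of difference, due to rounding effects
  bool isValueOK = std::abs(theoreticalValue - arcLengthCircle) < 1.f;
  std::cout << (isValueOK ? "SUCCESS" : "FAILED") << std::endl;
  return isValueOK ? 0 : 1;
}
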
--- .../mbt/test/generic-with-dataset/testGenericTracker.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp b/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp index 9c0677ef5e..709128e746 100644 --- a/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp +++ b/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp @@ -497,9 +497,9 @@ bool run(const std::string &input_directory, bool opt_click_allowed, bool opt_di std::map > > mapOfModels; std::map mapOfW; mapOfW["Camera1"] = I.getWidth(); - mapOfW["Camera2"] = I.getHeight(); + mapOfW["Camera2"] = I_depth.getWidth(); std::map mapOfH; - mapOfH["Camera1"] = I_depth.getWidth(); + mapOfH["Camera1"] = I.getHeight(); mapOfH["Camera2"] = I_depth.getHeight(); std::map mapOfcMos; mapOfcMos["Camera1"] = cMo; From 2e3a5ab906146983f6d2c4a7b83961a629b996ca Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 12:19:46 +0200 Subject: [PATCH 25/43] Fix testImageCircle.cpp since introduction of (float)M_PI using M_PIf macro - The arc angle corresponding to the part of the circle that is inside a roi could be negative - Fix consists in adding 4*pi when negative --- .../core/include/visp3/core/vpImageCircle.h | 25 +- modules/core/include/visp3/core/vpMath.h | 60 ++- modules/core/src/image/vpImageCircle.cpp | 117 ++--- .../test/tools/geometry/testImageCircle.cpp | 439 +++++++++--------- 4 files changed, 324 insertions(+), 317 deletions(-) diff --git a/modules/core/include/visp3/core/vpImageCircle.h b/modules/core/include/visp3/core/vpImageCircle.h index 33343e1f75..17fefa8b03 100644 --- a/modules/core/include/visp3/core/vpImageCircle.h +++ b/modules/core/include/visp3/core/vpImageCircle.h @@ -32,9 +32,9 @@ */ /*! - \file vpImageCircle.h - \brief Image circle, i.e. circle in the image space. -*/ + * \file vpImageCircle.h + * \brief Image circle, i.e. circle in the image space. + */ #ifndef _vpImageCircle_h_ #define _vpImageCircle_h_ @@ -50,8 +50,8 @@ #endif /** - * \brief Class that defines a 2D circle in an image. - */ + * \brief Class that defines a 2D circle in an image. + */ class VISP_EXPORT vpImageCircle { public: @@ -72,28 +72,25 @@ class VISP_EXPORT vpImageCircle vpImageCircle(const cv::Vec3f &vec); #endif - /*! - * Default destructor. - */ - virtual ~vpImageCircle(); - /*! * Compute the angular coverage, in terms of radians, that is contained in the Region of Interest (RoI). - * \sa \ref vpImageCircle::computeArcLengthInRoI() "vpImageCircle::computeArcLengthInRoI(const vpRect &roi)" + * \sa computeArcLengthInRoI(), computeArcLengthInRoI(const vpRect &roi) * \param[in] roi The rectangular RoI in which we want to know the number of pixels of the circle that are contained. - * \return Returns 2.f * M_PI for a circle that is fully visible in the RoI, or the sum of the angles of the arc(s) that is(are) visible in the RoI. + * \return Returns angular coverage of a circle in a ROI as an angle value in radians. + * More precisely, it returns 2.f * M_PI for a circle that is fully visible in the RoI, or the sum of the angles + * of the arc(s) that is(are) visible in the RoI. */ float computeAngularCoverageInRoI(const vpRect &roi) const; /*! * Compute the arc length, in terms of number of pixels, that is contained in the Region of Interest (RoI). 
- * \sa \ref vpImageCircle::computeAngularCoverageInRoI() "vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi)" + * \sa computeAngularCoverageInRoI(), computeAngularCoverageInRoI(const vpRect &roi) * \param[in] roi The rectangular RoI in which we want to know the number of pixels of the circle that are contained. * \return The number of pixels of the circle that are contained in the RoI. */ float computeArcLengthInRoI(const vpRect &roi) const; - /*! + /*! * Get the center of the image (2D) circle * \return The center of the image (2D) circle. */ diff --git a/modules/core/include/visp3/core/vpMath.h b/modules/core/include/visp3/core/vpMath.h index 237b2decfb..2b56d70061 100644 --- a/modules/core/include/visp3/core/vpMath.h +++ b/modules/core/include/visp3/core/vpMath.h @@ -77,6 +77,19 @@ #endif +#endif + +#ifndef M_PIf +#define M_PIf 3.14159265358979323846f +#endif + +#ifndef M_PI_2f +#define M_PI_2f (M_PIf / 2.0f) +#endif + +#ifndef M_PI_4f +#define M_PI_4f (M_PIf / 4.0f) + #include #include @@ -88,12 +101,11 @@ class vpRxyzVector; class vpTranslationVector; /*! - \class vpMath - \ingroup group_core_math_tools - \brief Provides simple mathematics computation tools that are not - available in the C mathematics library (math.h) - -*/ + * \class vpMath + * \ingroup group_core_math_tools + * \brief Provides simple mathematics computation tools that are not + * available in the C mathematics library (math.h) + */ class VISP_EXPORT vpMath { public: @@ -117,6 +129,42 @@ class VISP_EXPORT vpMath static vpColVector rad(const vpColVector &r); + /*! + * Convert angle between \f$-\pi\f$ and \f$\pi\f$. + * + * \param[in] theta The input angle we want to ensure it is in the interval \f$[-\pi ; \pi]\f$. + * \return The corresponding angle in the interval \f$[-\pi ; \pi]\f$. + */ + static float getAngleBetweenMinPiAndPi(const float &theta) + { + float theta1 = theta; + if (theta1 > M_PIf) { + theta1 -= 2.0f * M_PIf; + } + else if (theta1 < -M_PIf) { + theta1 += 2.0f * M_PIf; + } + return theta1; + } + + /*! + * Convert angle between \f$-\pi\f$ and \f$\pi\f$. + * + * \param[in] theta The input angle we want to ensure it is in the interval \f$[-\pi ; \pi]\f$. + * \return The corresponding angle in the interval \f$[-\pi ; \pi]\f$. + */ + static double getAngleBetweenMinPiAndPi(const double &theta) + { + double theta1 = theta; + if (theta1 > M_PI) { + theta1 -= 2.0 * M_PI; + } + else if (theta1 < -M_PI) { + theta1 += 2.0 * M_PI; + } + return theta1; + } + /*! Compute x square value. \return Square value \f$ x^2 \f$. diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index b0508f9b37..f6b30c3bac 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -32,6 +32,7 @@ */ #include +#include vpImageCircle::vpImageCircle() : m_center() @@ -51,31 +52,10 @@ vpImageCircle::vpImageCircle(const vpImagePoint ¢er, const float &radius) vpImageCircle::vpImageCircle(const cv::Vec3f &vec) : m_center(vec[1], vec[0]) , m_radius(vec[2]) -{ } -#endif - -vpImageCircle::~vpImageCircle() -{ } - -/*! - * \brief Express \b theta between \f$-\pi\f$ and \f$\pi\f$. - * - * \param[in] theta The input angle we want to ensure it is in the interval \f$[-\pi ; \pi]\f$. - * \return The input angle in the interval \f$[-\pi ; \pi]\f$. 
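The vpMath.h hunk above adds single-precision M_PIf, M_PI_2f and M_PI_4f macros together with the two vpMath::getAngleBetweenMinPiAndPi() overloads that wrap an angle into [-pi ; pi]. A short usage sketch, assuming a ViSP build that already contains this patch; vpMath::rad() is the pre-existing degree-to-radian helper.

#include <visp3/core/vpMath.h>
#include <iostream>

int main()
{
  // 3*pi/2 lies outside [-pi ; pi] and wraps to -pi/2
  float wrappedF = vpMath::getAngleBetweenMinPiAndPi(3.f * M_PI_2f);
  std::cout << "wrappedF = " << wrappedF << " (expected " << -M_PI_2f << ")" << std::endl;

  // The double overload behaves the same way: 270 deg wraps to -90 deg
  double wrappedD = vpMath::getAngleBetweenMinPiAndPi(vpMath::rad(270.0));
  std::cout << "wrappedD = " << wrappedD << " (expected " << vpMath::rad(-90.0) << ")" << std::endl;
  return 0;
}
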
- */ -float getAngleBetweenMinPiAndPi(const float &theta) { - float theta1 = theta; - float pi = static_cast(M_PI); - if (theta1 > pi) { - theta1 -= 2.0f * pi; - } - else if (theta1 < -pi) { - theta1 += 2.0f * pi; - } - return theta1; } +#endif /*! * \brief Compute the length of the angular interval of the circle when it intersects @@ -92,7 +72,7 @@ void computeIntersectionsLeftBorderOnly(const float &u_c, const float &umin_roi, // umin_roi = u_c + r cos(theta) // theta = acos((umin_roi - u_c) / r) float theta1 = std::acos((umin_roi - u_c)/ radius); - theta1 = getAngleBetweenMinPiAndPi(theta1); + theta1 = vpMath::getAngleBetweenMinPiAndPi(theta1); float theta2 = -1.f * theta1; float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); @@ -114,12 +94,11 @@ void computeIntersectionsRightBorderOnly(const float &u_c, const float &umax_roi // u = u_c + r cos(theta) // theta = acos((u - u_c) / r) float theta1 = std::acos((umax_roi - u_c) / radius); - theta1 = getAngleBetweenMinPiAndPi(theta1); + theta1 = vpMath::getAngleBetweenMinPiAndPi(theta1); float theta2 = -1.f * theta1; float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); - float pi = static_cast(M_PI); - delta_theta = 2.f * pi - (theta_max - theta_min); + delta_theta = 2.f * M_PIf - (theta_max - theta_min); } /*! @@ -137,25 +116,24 @@ void computeIntersectionsTopBorderOnly(const float &v_c, const float &vmin_roi, // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta1 = std::asin((v_c - vmin_roi) / radius); - float pi = static_cast(M_PI); - theta1 = getAngleBetweenMinPiAndPi(theta1); + theta1 = vpMath::getAngleBetweenMinPiAndPi(theta1); float theta2 = 0.f; if (theta1 >= 0.f) { - theta2 = pi - theta1; + theta2 = M_PIf - theta1; } else { - theta2 = -theta1 - pi; + theta2 = -theta1 - M_PIf; } float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); if (std::abs(theta_max - theta_min) * radius < 1.f) { // Between the maximum and minimum theta there is less than 1 pixel of difference // It meens that the full circle is visible - delta_theta = 2.f * pi; + delta_theta = 2.f * M_PIf; } else if (theta1 > 0.f) { - delta_theta = 2.f * pi - (theta_max - theta_min); + delta_theta = 2.f * M_PIf - (theta_max - theta_min); } else { delta_theta = theta_max - theta_min; @@ -177,28 +155,27 @@ void computeIntersectionsBottomBorderOnly(const float &v_c, const float &vmax_ro // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta1 = std::asin((v_c - vmax_roi) / radius); - float pi = static_cast(M_PI); - theta1 = getAngleBetweenMinPiAndPi(theta1); + theta1 = vpMath::getAngleBetweenMinPiAndPi(theta1); float theta2 = 0.f; if (theta1 >= 0.f) { - theta2 = pi - theta1; + theta2 = M_PIf - theta1; } else { - theta2 = -theta1 - pi; + theta2 = -theta1 - M_PIf; } float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); if (std::abs(theta_max - theta_min) * radius < 1.f) { // Between the maximum and minimum theta there is less than 1 pixel of difference - // It means that the full circle is visible - delta_theta = 2.f * pi; + // It meens that the full circle is visible + delta_theta = 2.f * M_PIf; } else if (theta1 > 0.f) { delta_theta = theta_max - theta_min; } else { - delta_theta = 2.f * pi - (theta_max - theta_min); + delta_theta = 2.f * M_PIf - (theta_max - theta_min); } } @@ -225,14 +202,13 @@ void computePerpendicularAxesIntersections(const float &u_c, const float 
&v_c, c // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta_u_cross = std::asin((v_c - crossing_u)/radius); - float pi = static_cast(M_PI); - theta_u_cross = getAngleBetweenMinPiAndPi(theta_u_cross); + theta_u_cross = vpMath::getAngleBetweenMinPiAndPi(theta_u_cross); float theta_u_cross_2 = 0.f; if (theta_u_cross > 0) { - theta_u_cross_2 = pi - theta_u_cross; + theta_u_cross_2 = M_PIf - theta_u_cross; } else { - theta_u_cross_2 = -pi - theta_u_cross; + theta_u_cross_2 = -M_PIf - theta_u_cross; } // Computing the corresponding u-coordinates at which the u-axis is crossed float u_ucross = u_c + radius * std::cos(theta_u_cross); @@ -255,7 +231,7 @@ void computePerpendicularAxesIntersections(const float &u_c, const float &v_c, c // u = u_c + r cos(theta) // theta = acos((u - u_c) / r) float theta_v_cross = std::acos((crossing_v - u_c)/radius); - theta_v_cross = getAngleBetweenMinPiAndPi(theta_v_cross); + theta_v_cross = vpMath::getAngleBetweenMinPiAndPi(theta_v_cross); float theta_v_cross_2 = -theta_v_cross; // Computing the corresponding v-coordinates at which the v-axis is crossed // v = v_c - radius sin(theta) because the v-axis is oriented towards the bottom @@ -352,20 +328,19 @@ void computeIntersectionsTopRight(const float &u_c, const float &v_c, const floa float u_umax = crossing_theta_u_max.second; float v_vmin = crossing_theta_v_min.second; float v_vmax = crossing_theta_v_max.second; - float pi = static_cast(M_PI); if (u_umin <= umax_roi && v_vmin < vmin_roi && u_umax >= umax_roi && v_vmax >= vmin_roi) { // The circle crosses only once each axis and the center is below the top border //Case crossing once delta_theta = theta_v_max - theta_u_min; if (delta_theta < 0) { // The arc cannot be negative - delta_theta += 2.f * pi; + delta_theta += 2.f * M_PIf; } } else if (u_umin <= umax_roi && v_vmin >= vmin_roi && u_umax <= umax_roi && v_vmax >= vmin_roi) { // The circle crosses twice each axis //Case crossing twice - delta_theta = 2.f * pi - ((theta_u_min - theta_u_max)+(theta_v_min - theta_v_max)); + delta_theta = 2 * M_PIf - ((theta_u_min - theta_u_max)+(theta_v_min - theta_v_max)); } else if (u_umin >= umax_roi && v_vmin >= vmin_roi && u_umax >= umax_roi && v_vmax >= vmin_roi) { // The circle crosses the u-axis outside the roi @@ -459,20 +434,19 @@ void computeIntersectionsBottomRight(const float &u_c, const float &v_c, const f float u_umax = crossing_theta_u_max.second; float v_vmin = crossing_theta_v_min.second; float v_vmax = crossing_theta_v_max.second; - float pi = static_cast(M_PI); if (u_umin <= umax_roi && u_umax > umax_roi && v_vmin <= vmax_roi && v_vmax > vmax_roi) { // The circle crosses only once each axis //Case crossing once delta_theta = theta_u_min - theta_v_min; if (delta_theta < 0) { // An arc length cannot be negative it means that theta_u_max was comprise in the bottom left quadrant of the circle - delta_theta += 2.f * pi; + delta_theta += 2.f * M_PIf; } } else if (u_umin <= umax_roi && u_umax <= umax_roi && v_vmin <= vmax_roi && v_vmax <= vmax_roi) { // The circle crosses twice each axis //Case crossing twice - delta_theta = 2.f * pi - ((theta_v_min - theta_v_max) + (theta_u_max - theta_u_min)); + delta_theta = 2.f * M_PIf - ((theta_v_min - theta_v_max) + (theta_u_max - theta_u_min)); } else if (u_umin > umax_roi && u_umax > umax_roi && v_vmin <= vmax_roi && v_vmax <= vmax_roi) { // The circle crosses the u-axis outside the roi @@ -528,23 +502,23 @@ void computeIntersectionsTopLeftBottom(const float &u_c, const float &v_c, 
const float u_umin_bottom = crossing_theta_u_min.second; float u_umax_bottom = crossing_theta_u_max.second; if (u_umin_top >= umin_roi && u_umin_bottom >= umin_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { - // case intersection top + left + bottom twice + // case intersection top + left + bottom twice delta_theta = (theta_v_min - theta_u_min_top) + (theta_u_max_top - theta_u_max_bottom) + (theta_u_min_bottom - theta_v_max); } else if (u_umin_top <= umin_roi && v_vmin <= vmin_roi && u_umin_bottom <= umin_roi && v_vmax >= vmax_roi) { - // case intersection top and bottom + // case intersection top and bottom delta_theta = (theta_u_max_top - theta_u_max_bottom); } else if (u_umax_top <= umin_roi && u_umax_bottom <= umin_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { - // case left only + // case left only computeIntersectionsLeftBorderOnly(u_c, umin_roi, radius, delta_theta); } else if (u_umax_bottom > umin_roi && v_vmin >= vmin_roi) { - // case bottom/left corner + // case bottom/left corner computeIntersectionsBottomLeft(u_c, v_c, umin_roi, vmax_roi, radius, delta_theta); } else if (u_umax_top > umin_roi && v_vmax <= vmax_roi) { - // case top/left corner + // case top/left corner computeIntersectionsTopLeft(u_c, v_c, umin_roi, vmin_roi, radius, delta_theta); } } @@ -588,10 +562,9 @@ void computeIntersectionsTopRightBottom(const float &u_c, const float &v_c, cons float theta_u_max_bottom = crossing_theta_u_max.first; float u_umin_bottom = crossing_theta_u_min.second; float u_umax_bottom = crossing_theta_u_max.second; - float pi = static_cast(M_PI); if (u_umax_top <= umax_roi && u_umax_bottom <= umax_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { // case intersection top + right + bottom twice - delta_theta = 2.f * pi - ((theta_u_min_top - theta_u_max_top) + (theta_v_min - theta_v_max) + (theta_u_max_bottom - theta_u_min_bottom)); + delta_theta = 2.f * M_PIf - ((theta_u_min_top - theta_u_max_top) + (theta_v_min - theta_v_max) + (theta_u_max_bottom - theta_u_min_bottom)); } else if (u_umin_top <= umax_roi && u_umax_top > umax_roi && v_vmin <= vmin_roi && u_umin_bottom <= umax_roi && u_umax_bottom > umax_roi && v_vmax >= vmax_roi) { // case intersection top and bottom @@ -629,14 +602,13 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - vmin_roi)/r) float theta_u_cross_top = std::asin((v_c - vmin_roi)/radius); - theta_u_cross_top = getAngleBetweenMinPiAndPi(theta_u_cross_top); + theta_u_cross_top = vpMath::getAngleBetweenMinPiAndPi(theta_u_cross_top); float theta_u_cross_top_2 = 0.f; - float pi = static_cast(M_PI); if (theta_u_cross_top > 0) { - theta_u_cross_top_2 = pi - theta_u_cross_top; + theta_u_cross_top_2 = M_PIf - theta_u_cross_top; } else { - theta_u_cross_top_2 = -pi - theta_u_cross_top; + theta_u_cross_top_2 = -M_PIf - theta_u_cross_top; } // Computing the corresponding u-coordinates at which the u-axis is crossed @@ -657,13 +629,13 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - vmax_roi)/r) float theta_u_cross_bottom = std::asin((v_c - vmax_roi)/radius); - theta_u_cross_bottom = getAngleBetweenMinPiAndPi(theta_u_cross_bottom); + theta_u_cross_bottom = vpMath::getAngleBetweenMinPiAndPi(theta_u_cross_bottom); float theta_u_cross_bottom_2 = 0.f; if (theta_u_cross_bottom > 0) { - theta_u_cross_bottom_2 = pi - theta_u_cross_bottom; + theta_u_cross_bottom_2 
= M_PIf - theta_u_cross_bottom; } else { - theta_u_cross_bottom_2 = -pi - theta_u_cross_bottom; + theta_u_cross_bottom_2 = -M_PIf - theta_u_cross_bottom; } // Computing the corresponding u-coordinates at which the u-axis is crossed @@ -683,7 +655,7 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const // Computing the the length of the angular interval of the circle when it intersects // only with the top and bottom borders of the Region of Interest (RoI) - delta_theta = 2.f * pi - ((theta_u_cross_top_min - theta_u_cross_top_max) + (theta_u_cross_bottom_max - theta_u_cross_bottom_min)); + delta_theta = 2.f * M_PIf - ((theta_u_cross_top_min - theta_u_cross_top_max) + (theta_u_cross_bottom_max - theta_u_cross_bottom_min)); } /*! @@ -833,7 +805,7 @@ void computeIntersectionsLeftRightOnly(const float &u_c, const float &v_c, const // theta = acos((umin_roi - u_c)/r) // theta_min = -theta_max float theta_v_cross_left = std::acos((umin_roi - u_c)/radius); - theta_v_cross_left = getAngleBetweenMinPiAndPi(theta_v_cross_left); + theta_v_cross_left = vpMath::getAngleBetweenMinPiAndPi(theta_v_cross_left); float theta_v_cross_left_2 = -theta_v_cross_left; // Computing the corresponding v-coordinates at which the v-axis is crossed @@ -855,7 +827,7 @@ void computeIntersectionsLeftRightOnly(const float &u_c, const float &v_c, const // theta = acos((umin_roi - u_c)/r) // theta_min = -theta_max float theta_v_cross_right = std::acos((umax_roi - u_c)/radius); - theta_v_cross_right = getAngleBetweenMinPiAndPi(theta_v_cross_right); + theta_v_cross_right = vpMath::getAngleBetweenMinPiAndPi(theta_v_cross_right); float theta_v_cross_right_2 = -theta_v_cross_right; // Computing the corresponding v-coordinates at which the v-axis is crossed @@ -942,12 +914,11 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const bool touchBottomBorder = (v_c + radius) >= vmax_roi; bool isHorizontallyOK = (!touchLeftBorder && !touchRightBorder); bool isVerticallyOK = (!touchTopBorder && !touchBottomBorder); - float pi = static_cast(M_PI); if (isHorizontallyOK && isVerticallyOK && roi.isInside(m_center)) { // Easy case // The circle has its center in the image and its radius is not too great // to make it fully contained in the RoI - delta_theta = 2.f * pi; + delta_theta = 2.f * M_PIf; } else if (touchBottomBorder && !touchLeftBorder && !touchRightBorder && !touchTopBorder) { // Touches/intersects only the bottom border of the RoI @@ -982,6 +953,7 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const computeIntersectionsTopRight(u_c, v_c, vmin_roi, umax_roi, radius, delta_theta); } else if (touchBottomBorder && touchTopBorder && touchLeftBorder && !touchRightBorder) { + std::cout << "DEBUG ici" << std::endl; // Touches/intersects the top, left and bottom borders of the RoI computeIntersectionsTopLeftBottom(u_c, v_c, umin_roi, vmin_roi, vmax_roi, radius, delta_theta); } @@ -1023,8 +995,11 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const float vpImageCircle::computeArcLengthInRoI(const vpRect &roi) const { float delta_theta = computeAngularCoverageInRoI(roi); - float arcLength = delta_theta * m_radius; - return arcLength; + if (delta_theta < 0) { // Needed since M_PIf is used + delta_theta += 4 * M_PIf; + } + + return delta_theta * m_radius; } vpImagePoint vpImageCircle::getCenter() const diff --git a/modules/core/test/tools/geometry/testImageCircle.cpp b/modules/core/test/tools/geometry/testImageCircle.cpp index 
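With the change above, computeArcLengthInRoI() wraps a negative angular coverage (adding 4*pi, as stated in the commit message) before scaling by the radius, so the returned arc length is never negative. A minimal sketch of how the two methods relate on a simple configuration, a circle centred on the left border of the RoI so that exactly half of it is visible; the numeric values are illustrative.

#include <visp3/core/vpImageCircle.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpRect.h>
#include <iostream>

int main()
{
  const float OFFSET = 5.f, WIDTH = 640.f, HEIGHT = 480.f, RADIUS = 50.f;
  vpRect roi(OFFSET, OFFSET, WIDTH, HEIGHT);

  // Circle centred on the left border of the RoI: only its right half is visible
  vpImageCircle circle(vpImagePoint(OFFSET + 100.f, OFFSET), RADIUS);

  float coverage = circle.computeAngularCoverageInRoI(roi); // expected: pi (about 3.14159 rad)
  float arcLength = circle.computeArcLengthInRoI(roi);      // expected: pi * RADIUS (about 157.08 px)

  std::cout << "coverage  = " << coverage << " rad" << std::endl;
  std::cout << "arcLength = " << arcLength << " px (upper bound "
            << 2.f * M_PIf * RADIUS << " px)" << std::endl;
  return 0;
}
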
b029a3a212..bc4cd432b1 100644 --- a/modules/core/test/tools/geometry/testImageCircle.cpp +++ b/modules/core/test/tools/geometry/testImageCircle.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -29,33 +28,21 @@ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Description: - * Test vpRect. - * -*****************************************************************************/ + * Test vpRect and vpImagePoint. + */ #include #include #include +#include #include -bool compareAngles(const float &actualVal, const float &theoreticalVal) +bool equal(const float &actualVal, const float &theoreticalVal) { // Allow up to 1 pixel of difference, due to rounding effects return (std::abs(theoreticalVal - actualVal) < 1.f); } -float ensureIsBetweenMinPiAndPi(const float &theta) -{ - float theta1 = theta; - if (theta1 > M_PI) { - theta1 -= 2.0 * M_PI; - } - else if (theta1 < -M_PI) { - theta1 += 2.0 * M_PI; - } - return theta1; -} - int main() { const float OFFSET = 5.f; @@ -72,8 +59,8 @@ int main() { vpImageCircle circle(vpImagePoint(HEIGHT / 2.f, WIDTH / 2.f), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -94,8 +81,8 @@ int main() vpRect roiSquare(OFFSET, OFFSET, HEIGHT, HEIGHT); vpImageCircle circle(vpImagePoint(OFFSET + HEIGHT / 2.f, OFFSET + HEIGHT / 2.f), HEIGHT / 2.f); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * HEIGHT / 2.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * HEIGHT / 2.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -119,8 +106,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 4.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -144,8 +131,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -169,8 +156,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = 
"SUCCESS"; @@ -194,8 +181,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 4.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -219,8 +206,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -244,8 +231,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -265,13 +252,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = M_PI / 3.f; + float theta = M_PIf / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 5.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -291,13 +278,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = -2.f * M_PI/3.f; + float theta = -2.f * M_PIf/3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -317,13 +304,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = M_PI_2; + float theta = M_PI_2f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -343,13 +330,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = -M_PI / 3.f; + float theta = -M_PIf / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + 
HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 5.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -369,13 +356,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = M_PI / 3.f; + float theta = M_PIf / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -398,8 +385,8 @@ int main() float vc = OFFSET + HEIGHT - RADIUS; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -425,8 +412,8 @@ int main() float vc = OFFSET; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI_2 * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PI_2f * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -451,14 +438,14 @@ int main() // (4): umin = uc + r cos(theta_v_max) ; v_cross_max = vc - r sin(theta_v_max) >= vmin && <= vmin + height // (3) & (4) => uc = umin - r cos(theta_v_min) = umin - r cos(theta_v_max) <=> theta_v_min = - theta_v_max // (3) & (4) => vc >= vmin + r sin(theta_v_min) && vc >= vmin + r sin (theta_v_max) - float theta_v_min = M_PI / 4.f; + float theta_v_min = M_PIf / 4.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + RADIUS * std::sin(theta_v_min) + 1.f; vc = std::max(vc, OFFSET + RADIUS * std::sin(-theta_v_min) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI_2 * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PI_2f * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -485,14 +472,14 @@ int main() // (1) => uc + r cos(theta_u_top_min) >= umin <=> uc >= umin - r cos(theta_u_top_min) // (2) => uc + r cos(theta_u_top_max) >= umin <=> uc >= umin - r cos(theta_u_top_max) - float theta_u_top_min = -1.1f * M_PI_2; + float theta_u_top_min = -1.1f * M_PI_2f; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos(M_PIf - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * 
std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 0.2f * M_PI_2 * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 0.2f * M_PI_2f * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -521,13 +508,13 @@ int main() // (3) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (3) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = M_PIf - theta_u_top_min; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos(M_PIf - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET - uc)/RADIUS); - theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); + theta_v_min = vpMath::getAngleBetweenMinPiAndPi(theta_v_min); float theta_v_max = -theta_v_min; if (theta_v_max < 0) { float temp = theta_v_max; @@ -537,7 +524,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = ((theta_v_max - theta_u_top_min) + (theta_u_top_max - theta_v_min)) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -564,14 +551,14 @@ int main() // (1) => vc = vmin + r sin(theta_u_top_min) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_v_max = -M_PI_2; + float theta_u_top_min = 2.f * M_PIf / 3.f; + float theta_v_max = -M_PI_2f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PI_2f + M_PIf / 3.f) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -599,14 +586,14 @@ int main() // (1) <=> asin((vc - vmin)/r) >= acos[(umin + width - uc)/r] <=> vc >= r sin(acos[(umin + width - uc)/r]) + vmin // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_max = -7.f * M_PI / 8.f; + float theta_v_max = -7.f * M_PIf / 8.f; float theta_v_min = -theta_v_max; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = RADIUS * std::sin(std::acos((OFFSET + WIDTH - uc)/RADIUS)) + OFFSET + 1.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_v_min - theta_v_max)) * RADIUS; + bool isValueOK = 
equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -634,16 +621,16 @@ int main() // Choice: theta_u_top_min = -0.9 * PI / 2 // (1) => vc = vmin + r sin(theta_u_top_min) // (2) vc - r sin(theta_v_min) <= vmin => asin((vc - vmin)/r) <= theta_v_min - float theta_u_top_min = -0.9f * M_PI_2; - float theta_u_top_max = M_PI - theta_u_top_min; - theta_u_top_max = ensureIsBetweenMinPiAndPi(theta_u_top_max); + float theta_u_top_min = -0.9f * M_PI_2f; + float theta_u_top_max = M_PIf - theta_u_top_min; + theta_u_top_max = vpMath::getAngleBetweenMinPiAndPi(theta_u_top_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::asin((vc - OFFSET)/RADIUS) + 1.f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = std::abs(theta_u_top_min - theta_u_top_max) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -672,13 +659,13 @@ int main() // (2) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (2) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = M_PIf - theta_u_top_min; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_u_top_min) - 1.f; - uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI - theta_u_top_min) - 1.f); + uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos(M_PIf - theta_u_top_min) - 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET + WIDTH - uc)/RADIUS); - theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); + theta_v_min = vpMath::getAngleBetweenMinPiAndPi(theta_v_min); float theta_v_max = -theta_v_min; if (theta_v_min < 0) { float temp = theta_v_min; @@ -687,8 +674,8 @@ int main() } vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -715,14 +702,14 @@ int main() // (3) => vc = vmin + height + r sin(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = M_PI_2; - float theta_u_bot_max = -M_PI / 3.f; + float theta_v_min = M_PI_2f; + float theta_u_bot_max = -M_PIf / 3.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_max);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PI_2f + M_PIf / 3.f) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; 
if (isValueOK) { statusTest = "SUCCESS"; @@ -751,14 +738,14 @@ int main() // (4) => vc <= vmin + height + r sin(theta_v_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = M_PI_4 / 2.f; + float theta_v_min = M_PI_4f / 2.f; float theta_v_max = -theta_v_min; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(theta_v_max) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (2.f * theta_v_min) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -787,14 +774,14 @@ int main() // (1) => uc >= umin - r cos(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 5.f * M_PI_4 / 2.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_u_bot_min = 5.f * M_PI_4f / 2.f; + float theta_u_bot_max = M_PIf - theta_u_bot_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = std::max(OFFSET - RADIUS * std::cos(theta_u_bot_min) + 1.f, OFFSET - RADIUS * std::cos(theta_u_bot_max) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_u_bot_min - theta_u_bot_max) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -823,17 +810,17 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -5.f * M_PI / 8.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; - theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_min = 7.f * M_PI / 8.f; + float theta_u_bot_min = -5.f * M_PIf / 8.f; + float theta_u_bot_max = M_PIf - theta_u_bot_min; + theta_u_bot_max = vpMath::getAngleBetweenMinPiAndPi(theta_u_bot_max); + float theta_v_min = 7.f * M_PIf / 8.f; float theta_v_max = -theta_v_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = ((theta_v_min - theta_u_bot_max) + (theta_u_bot_min - theta_v_max)) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -860,14 +847,14 @@ int main() // (1) => vc = vmin + height + r sin(theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -2.f * M_PI / 3.f; - float theta_v_min = M_PI_2; + float theta_u_bot_min = -2.f * M_PIf / 3.f; + float theta_v_min = M_PI_2f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = 
circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PI_2f + M_PIf / 3.f) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -895,13 +882,13 @@ int main() // (2) & (4) => vc <= vmin + height + r sin(theta_v_min) & vc <= vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = 5.f * M_PI / 6.f; + float theta_v_min = 5.f * M_PIf / 6.f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(-theta_v_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PIf / 3.f) * RADIUS; // <=> 2.f * M_PIf / 6.f + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -929,13 +916,13 @@ int main() // (1) & (3) => uc < umin + width - r cos(theta_u_bot_min) & uc <= umin + width - r cos(PI - theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 4.f * M_PI / 6.f; + float theta_u_bot_min = 4.f * M_PIf / 6.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); - float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI -theta_u_bot_min) - 1.f); + float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos(M_PIf -theta_u_bot_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PIf / 3.f) * RADIUS; // <=> 2.f * M_PIf / 6.f + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -964,17 +951,17 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -7.f * M_PI / 8.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; - theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_max = -3.f * M_PI / 8.f; + float theta_u_bot_min = -7.f * M_PIf / 8.f; + float theta_u_bot_max = M_PIf - theta_u_bot_min; + theta_u_bot_max = vpMath::getAngleBetweenMinPiAndPi(theta_u_bot_max); + float theta_v_max = -3.f * M_PIf / 8.f; float theta_v_min = -theta_v_max; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float 
theoreticalValue = (2.f * M_PIf - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1002,19 +989,19 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = 3.f * M_PI / 8.f; - float theta_v_min = 7.f * M_PI / 8.f; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = 3.f * M_PIf / 8.f; + float theta_v_min = 7.f * M_PIf / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * M_PI / 8.f; - float theta_u_bottom_max = -3.f * M_PI / 8.f; + float theta_u_bottom_min = -5.f * M_PIf / 8.f; + float theta_u_bottom_max = -3.f * M_PIf / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = ((theta_v_min - theta_u_top_min) + (theta_u_top_max - theta_u_bottom_max) + (theta_u_bottom_min - theta_v_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1042,9 +1029,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = M_PI / 6.f; - float theta_u_top_min = M_PI - theta_u_top_max; - float theta_v_min = M_PI / 3.f; + float theta_u_top_max = M_PIf / 6.f; + float theta_u_top_min = M_PIf - theta_u_top_max; + float theta_v_min = M_PIf / 3.f; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1052,7 +1039,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_u_top_max - theta_u_bottom_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1080,9 +1067,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * M_PI / 6.f; - float theta_u_top_max = M_PI - theta_u_top_min; - float theta_v_min = M_PI; + float theta_u_top_min = 4.f * M_PIf / 6.f; + float theta_u_top_max = M_PIf - theta_u_top_min; + float theta_v_min = M_PIf; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT / (2.f * std::sin(theta_u_top_min)); // vmin + h - vmin = (vc - r sin(-theta_u_top_min)) - (vc - r sin(theta_top_min)) @@ -1090,8 +1077,8 @@ int main() float uc = OFFSET - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float 
theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1119,8 +1106,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = M_PI_4; + float theta_u_top_min = M_PI_2f; + float theta_v_min = M_PI_4f; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1128,7 +1115,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_v_min - theta_v_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1156,8 +1143,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = 3.f * M_PI_4; + float theta_u_top_min = M_PI_2f; + float theta_v_min = 3.f * M_PI_4f; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1165,7 +1152,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_v_min - theta_v_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1194,15 +1181,15 @@ int main() // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max float theta_u_top_max = 0.f; - float theta_u_bot_max = -M_PI / 3.f; - float theta_v_max = -M_PI / 6.f; + float theta_u_bot_max = -M_PIf / 3.f; + float theta_v_max = -M_PIf / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_max); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_u_top_max - theta_v_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1230,16 +1217,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = M_PI / 3.f; + float theta_u_top_max = M_PIf / 3.f; float theta_u_bot_max = 0.f; - float theta_v_min = M_PI / 6.f; + float theta_v_min = M_PIf / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_max); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_v_min - theta_u_bot_max) * radius; - bool isValueOK 
= compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1267,19 +1254,19 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = 3.f * M_PI / 8.f; - float theta_v_min = 1.f * M_PI / 8.f; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = 3.f * M_PIf / 8.f; + float theta_v_min = 1.f * M_PIf / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * M_PI / 8.f; - float theta_u_bottom_max = -3.f * M_PI / 8.f; + float theta_u_bottom_min = -5.f * M_PIf / 8.f; + float theta_u_bottom_max = -3.f * M_PIf / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1307,16 +1294,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 6.f; - float theta_v_min = 2.f * M_PI / 3.f; + float theta_u_top_min = 5.f * M_PIf / 6.f; + float theta_v_min = 2.f * M_PIf / 3.f; float theta_u_bottom_min = -theta_u_top_min; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_u_bottom_min)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_u_top_min - theta_u_bottom_min)) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1344,8 +1331,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * M_PI / 6.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 4.f * M_PIf / 6.f; + float theta_u_top_max = M_PIf - theta_u_top_min; float theta_v_min = 0; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; @@ -1354,8 +1341,8 @@ int main() float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; - bool isValueOK = compareAngles(arcLengthCircle, 
theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1383,16 +1370,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = 3.f * M_PI_4; + float theta_u_top_min = M_PI_2f; + float theta_v_min = 3.f * M_PI_4f; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_v_min - theta_v_max)) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1420,16 +1407,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = M_PI_4; + float theta_u_top_min = M_PI_2f; + float theta_v_min = M_PI_4f; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_v_min - theta_v_max)) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1457,16 +1444,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI; - float theta_u_bot_min = -2.f * M_PI / 3.f; - float theta_v_max = -5.f * M_PI / 6.f; + float theta_u_top_min = M_PIf; + float theta_u_bot_min = -2.f * M_PIf / 3.f; + float theta_v_max = -5.f * M_PIf / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_v_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_u_top_min - theta_v_max)) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1494,16 +1481,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float 
theta_u_bot_min = M_PI; - float theta_v_min = 5.f * M_PI / 6.f; + float theta_u_top_min = 2.f * M_PIf / 3.f; + float theta_u_bot_min = M_PIf; + float theta_v_min = 5.f * M_PIf / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_u_bot_min - theta_v_min) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1533,19 +1520,19 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * M_PIf / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 8.f; + float theta_v_right_min = M_PIf / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = M_PIf - theta_u_top_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_u_top_min) + (theta_u_top_max - theta_v_right_min) + (theta_v_right_max - theta_v_left_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1574,13 +1561,13 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * M_PI / 3.f; + float theta_u_top_min = -2.f * M_PIf / 3.f; float uc = OFFSET + WIDTH_SWITCHED/2.f; float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); - float theoreticalValue = (M_PI/3.f) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PIf/3.f) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1610,16 +1597,16 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_max = -5.f * M_PI / 8.f; - float theta_v_right_max = -3.f *M_PI / 8.f; - float theta_u_top_min = -7.f * M_PI / 8.f; + float theta_v_left_max = -5.f * M_PIf / 8.f; + float theta_v_right_max = -3.f *M_PIf / 8.f; + float theta_u_top_min = -7.f * M_PIf / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET - radius * std::cos(theta_v_left_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = 
(theta_v_right_max - theta_v_left_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1649,16 +1636,16 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_max = -M_PI / 3.f; + float theta_u_top_max = -M_PIf / 3.f; float theta_v_right_max = 0.f; - float theta_v_left_max = -M_PI_2; + float theta_v_left_max = -M_PI_2f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET; float vc = OFFSET + radius * std::sin(theta_u_top_max); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = (theta_u_top_max - theta_v_left_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1688,16 +1675,16 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * M_PI / 3.f; - float theta_v_left_max = M_PI; - float theta_v_right_max = -M_PI_2; + float theta_u_top_min = -2.f * M_PIf / 3.f; + float theta_v_left_max = M_PIf; + float theta_v_right_max = -M_PI_2f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = (theta_v_right_max - theta_u_top_min) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1727,19 +1714,19 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * M_PIf / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 8.f; + float theta_v_right_min = M_PIf / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_bot_min = -5.f * M_PI / 8.f; - float theta_u_bot_max = -M_PI - theta_u_bot_min; + float theta_u_bot_min = -5.f * M_PIf / 8.f; + float theta_u_bot_max = -M_PIf - theta_u_bot_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_v_right_min) + (theta_v_right_max - theta_u_bot_max) + (theta_u_bot_min - theta_v_left_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1768,17 +1755,17 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - 
theta_v_right_max - float theta_u_bot_min = 2.f * M_PI / 3.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; - float theta_v_left_min = 5.f * M_PI / 6.f; - float theta_v_right_min = M_PI / 6.f; + float theta_u_bot_min = 2.f * M_PIf / 3.f; + float theta_u_bot_max = M_PIf - theta_u_bot_min; + float theta_v_left_min = 5.f * M_PIf / 6.f; + float theta_v_right_min = M_PIf / 6.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_u_bot_min - theta_u_bot_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1807,16 +1794,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 7.f * M_PI / 8.f; - float theta_v_left_min = 5.f * M_PI / 8.f; - float theta_v_right_min = 3.f * M_PI / 8.f; + float theta_u_bot_min = 7.f * M_PIf / 8.f; + float theta_v_left_min = 5.f * M_PIf / 8.f; + float theta_v_right_min = 3.f * M_PIf / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_v_right_min)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1845,8 +1832,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_max = M_PI / 3.f; - float theta_v_left_min = M_PI_2; + float theta_u_bot_max = M_PIf / 3.f; + float theta_v_left_min = M_PI_2f; float theta_v_right_min = 0.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET; @@ -1854,7 +1841,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_u_bot_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1883,16 +1870,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * M_PI / 3.f; - float theta_v_right_min = M_PI_2; - float theta_v_left_min = M_PI; + float theta_u_bot_min = 2.f * M_PIf / 3.f; + float theta_v_right_min = M_PI_2f; + float theta_v_left_min = M_PIf; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED; 
float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_u_bot_min - theta_v_right_min)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1917,18 +1904,18 @@ int main() // (6): u_cross_bot_max = uc + r cos(theta_u_bottom_max) <= umin_roi + width ; vmin_roi + height = vc - r sin(theta_u_bottom_max) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_u_top_max = M_PI / 3.f; - float theta_u_bottom_min = -2.f * M_PI / 3.f; - float theta_u_bottom_max = -M_PI / 3.f; + float theta_u_top_min = 2.f * M_PIf / 3.f; + float theta_u_top_max = M_PIf / 3.f; + float theta_u_bottom_min = -2.f * M_PIf / 3.f; + float theta_u_bottom_max = -M_PIf / 3.f; float uc = OFFSET + WIDTH / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1953,9 +1940,9 @@ int main() // (6): u_min + width = uc + r cos(theta_v_right_max); v_cross_right_max = vc - r sin(theta_v_right_max) // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 5.f * M_PI / 6.f; + float theta_v_left_min = 5.f * M_PIf / 6.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 6.f; + float theta_v_right_min = M_PIf / 6.f; float theta_v_right_max = -theta_v_right_min; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + WIDTH / 2.f; @@ -1964,7 +1951,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_v_right_min) + (theta_v_right_max - theta_v_left_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1985,15 +1972,15 @@ int main() // Choosing theta_v_left_min = 7 PI / 8 and circle at the center of the RoI // umin = uc + r cos(theta_v_left_min) => r = (umin - uc) / cos(theta_v_left_min) vpRect squareRoI(OFFSET, OFFSET, HEIGHT, HEIGHT); - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * M_PIf / 8.f; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = (OFFSET - uc) / std::cos(theta_v_left_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(squareRoI); - float theoreticalValue = M_PI * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PIf * radius; + 
bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -2010,9 +1997,9 @@ int main() } if (hasSucceeded) { - std::cout << "testImageCircle overall result: SUCCESS"; + std::cout << "testImageCircle overall result: SUCCESS" << std::endl; return EXIT_SUCCESS; } - std::cout << "testImageCircle overall result: FAILED"; + std::cout << "testImageCircle overall result: FAILED" << std::endl; return EXIT_FAILURE; } From 4c26fd7466b354241d25c938522d5fee11f1264b Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 13:53:19 +0200 Subject: [PATCH 26/43] Add missing vpMath.h include --- modules/core/src/camera/vpCameraParameters.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/core/src/camera/vpCameraParameters.cpp b/modules/core/src/camera/vpCameraParameters.cpp index 8993f054d2..9abf1488f2 100644 --- a/modules/core/src/camera/vpCameraParameters.cpp +++ b/modules/core/src/camera/vpCameraParameters.cpp @@ -46,6 +46,7 @@ #include #include #include +#include const double vpCameraParameters::DEFAULT_PX_PARAMETER = 600.0; const double vpCameraParameters::DEFAULT_PY_PARAMETER = 600.0; From 9730575f2341ed75176a2b47ce822f67de1c7d89 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 14:11:43 +0200 Subject: [PATCH 27/43] Fix wrong changes in vpMath.h --- modules/core/include/visp3/core/vpMath.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/modules/core/include/visp3/core/vpMath.h b/modules/core/include/visp3/core/vpMath.h index 2b56d70061..db8bf2e9c7 100644 --- a/modules/core/include/visp3/core/vpMath.h +++ b/modules/core/include/visp3/core/vpMath.h @@ -77,8 +77,6 @@ #endif -#endif - #ifndef M_PIf #define M_PIf 3.14159265358979323846f #endif @@ -89,6 +87,7 @@ #ifndef M_PI_4f #define M_PI_4f (M_PIf / 4.0f) +#endif #include #include @@ -194,9 +193,9 @@ class VISP_EXPORT vpMath } return (v < lower) ? lower : (upper < v) ? upper : v; #endif - } + } - // round x to the nearest integer + // round x to the nearest integer static inline int round(double x); // return the sign of x (+-1) @@ -331,14 +330,14 @@ class VISP_EXPORT vpMath private: static const double ang_min_sinc; static const double ang_min_mc; -}; + }; -// Begining of the inline functions definition + // Begining of the inline functions definition -/*! - Computes and returns x! - \param x : parameter of factorial function. -*/ + /*! + Computes and returns x! + \param x : parameter of factorial function. 
+ */ double vpMath::fact(unsigned int x) { if ((x == 1) || (x == 0)) From ae3f1ad0d369a211b5a865e2329668c48b9b3c43 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 15:07:50 +0200 Subject: [PATCH 28/43] Improve helper message in tutorial-canny.cpp --- tutorial/image/tutorial-canny.cpp | 67 ++++++++++++++++++------------- 1 file changed, 39 insertions(+), 28 deletions(-) diff --git a/tutorial/image/tutorial-canny.cpp b/tutorial/image/tutorial-canny.cpp index e75e6c16cb..2e74f9ac36 100644 --- a/tutorial/image/tutorial-canny.cpp +++ b/tutorial/image/tutorial-canny.cpp @@ -89,7 +89,9 @@ void setGradientOutsideClass(const vpImage &I, const int &gaussia drawingHelpers::display(dIy, title, true); } -void usage(const std::string &softName) +void usage(const std::string &softName, int gaussianKernelSize, float gaussianStdev, float lowerThresh, float upperThresh, + int apertureSize, vpImageFilter::vpCannyFilteringAndGradientType filteringType, + float lowerThreshRatio, float upperThreshRatio, vpImageFilter::vpCannyBackendType backend) { std::cout << "NAME" << std::endl; std::cout << softName << ": software to test the vpCannyEdgeComputation class and vpImageFilter::canny method" << std::endl; @@ -100,43 +102,51 @@ void usage(const std::string &softName) << " [-t, --thresh ]" << " [-a, --aperture ]" << " [-f, --filter ]" - << " [-r, --ratio ]" + << " [-r, --ratio ]" << " [-b, --backend ]" - << " [-h, --help]" + << " [-h, --help]" << std::endl << std::endl; std::cout << "DESCRIPTION" << std::endl; - std::cout << "\t-i, --image" << std::endl - << "\t\tPermits to load an image on which will be tested the vpCanny class." + std::cout << "\t-i, --image " << std::endl + << "\t\tPermits to load an image on which will be tested the vpCanny class." << std::endl + << "\t\tWhen empty uses a simulated image." << std::endl << std::endl; - std::cout << "\t-g, --gradient" << std::endl - << "\t\tPermits to compute the gradients of the image outside the vpCanny class." - << "\t\tFirst parameter is the size of the Gaussian kernel used to compute the gradients." - << "\t\tSecond parameter is the standard deviation of the Gaussian kernel used to compute the gradients." + std::cout << "\t-g, --gradient " << std::endl + << "\t\tPermits to compute the gradients of the image outside the vpCanny class." << std::endl + << "\t\tFirst parameter is the size of the Gaussian kernel used to compute the gradients." << std::endl + << "\t\tSecond parameter is the standard deviation of the Gaussian kernel used to compute the gradients." << std::endl + << "\t\tDefault: " << gaussianKernelSize << " " << gaussianStdev << std::endl << std::endl; - std::cout << "\t-t, --thresh" << std::endl - << "\t\tPermits to set the lower and upper thresholds of the vpCanny class." - << "\t\tFirst parameter is the lower threshold." - << "\t\tSecond parameter is the upper threshold." + std::cout << "\t-t, --thresh " << std::endl + << "\t\tPermits to set the lower and upper thresholds of the vpCanny class." << std::endl + << "\t\tFirst parameter is the lower threshold." << std::endl + << "\t\tSecond parameter is the upper threshold." << std::endl + << "\t\tWhen set to -1 thresholds are computed automatically." << std::endl + << "\t\tDefault: " << lowerThresh << " " << upperThresh << std::endl << std::endl; - std::cout << "\t-a, --aperture" << std::endl - << "\t\tPermits to set the size of the gradient filter kernel." - << "\t\tParameter must be odd and positive." 
+ std::cout << "\t-a, --aperture " << std::endl + << "\t\tPermits to set the size of the gradient filter kernel." << std::endl + << "\t\tParameter must be odd and positive." << std::endl + << "\t\tDefault: " << apertureSize << std::endl << std::endl; - std::cout << "\t-f, --filter" << std::endl - << "\t\tPermits to choose the type of filter to apply to compute the gradient." - << "\t\tAvailable values = " << vpImageFilter::vpCannyFilteringAndGradientTypeList() << std::endl + std::cout << "\t-f, --filter " << std::endl + << "\t\tPermits to choose the type of filter to apply to compute the gradient." << std::endl + << "\t\tAvailable values: " << vpImageFilter::vpCannyFilteringAndGradientTypeList("<", " | ", ">") << std::endl + << "\t\tDefault: " << vpImageFilter::vpCannyFilteringAndGradientTypeToString(filteringType) << std::endl << std::endl; - std::cout << "\t-r, --ratio" << std::endl - << "\t\tPermits to set the lower and upper thresholds ratio of the vpCanny class." - << "\t\tFirst parameter is the lower threshold ratio." - << "\t\tSecond parameter is the upper threshold ratio." + std::cout << "\t-r, --ratio " << std::endl + << "\t\tPermits to set the lower and upper thresholds ratio of the vpCanny class." << std::endl + << "\t\tFirst parameter is the lower threshold ratio." << std::endl + << "\t\tSecond parameter is the upper threshold ratio." << std::endl + << "\t\tDefault: " << lowerThreshRatio << " " << upperThreshRatio << std::endl << std::endl; - std::cout << "\t-b, --backend" << std::endl - << "\t\tPermits to use the vpImageFilter::canny method for comparison." - << "\t\tAvailable values = " << vpImageFilter::vpCannyBackendTypeList() << std::endl + std::cout << "\t-b, --backend " << std::endl + << "\t\tPermits to use the vpImageFilter::canny method for comparison." << std::endl + << "\t\tAvailable values: " << vpImageFilter::vpCannyBackendTypeList("<", " | ", ">") << std::endl + << "\t\tDefault: " << vpImageFilter::vpCannyBackendTypeToString(backend) << std::endl << std::endl; std::cout << "\t-h, --help" << std::endl - << "\t\tPermits to display the different arguments this software handles." + << "\t\tPermits to display the different arguments this software handles." << std::endl << std::endl; } @@ -190,7 +200,8 @@ int main(int argc, const char *argv[]) i++; } else if (argv_str == "-h" || argv_str == "--help") { - usage(std::string(argv[0])); + usage(std::string(argv[0]), opt_gaussianKernelSize, opt_gaussianStdev, opt_lowerThresh, opt_upperThresh, + opt_apertureSize, opt_filteringType, opt_lowerThreshRatio, opt_upperThreshRatio, opt_backend); return EXIT_SUCCESS; } else { From ee4622376dc61060cb69095ea153997659c4cd98 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:19:34 +0200 Subject: [PATCH 29/43] Revert changes to re-introduce std::ostream &operator<<(...) 
in vpPoint.cpp - fix for visp_java build --- modules/core/src/tracking/forward-projection/vpPoint.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/core/src/tracking/forward-projection/vpPoint.cpp b/modules/core/src/tracking/forward-projection/vpPoint.cpp index efbec9be21..50e439619b 100644 --- a/modules/core/src/tracking/forward-projection/vpPoint.cpp +++ b/modules/core/src/tracking/forward-projection/vpPoint.cpp @@ -406,6 +406,8 @@ void vpPoint::display(const vpImage &I, const vpHomogeneousMatrix &cMo, vpFeatureDisplay::displayPoint(_p[0], _p[1], cam, I, color, thickness); } +VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpPoint & /* vpp */) { return (os << "vpPoint"); } + /*! * Display the projection of a 3D point in image \e I. * From e05808391c5667a4e0e8b780ec80387894738d72 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:22:04 +0200 Subject: [PATCH 30/43] Update java generator to avoid warning around removal finalize() make visp_java produced the following warning: [removal] finalize() in Object has been deprecated and marked for removal --- modules/java/generator/gen_java.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index 907655c6d5..d7e980ac1f 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -1145,13 +1145,15 @@ def gen_class(self, ci): if ci.name != 'VpImgproc' and ci.name != self.Module or ci.base: # finalize() - ci.j_code.write( - """ - @Override - protected void finalize() throws Throwable { - delete(nativeObj); - } - """) + # Note 2023.10.27 warning: [removal] finalize() in Object has been deprecated and marked for removal + # Comment for now +# ci.j_code.write( +# """ +# @Override +# protected void finalize() throws Throwable { +# delete(nativeObj); +# } +# """) ci.jn_code.write( """ From 26bba30c66b88584739ae1e1032999a429da0857 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:24:01 +0200 Subject: [PATCH 31/43] Update java installation tutorial for macos --- doc/tutorial/java/tutorial-install-java.dox | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/doc/tutorial/java/tutorial-install-java.dox b/doc/tutorial/java/tutorial-install-java.dox index 23e29d1583..65ba0a7018 100644 --- a/doc/tutorial/java/tutorial-install-java.dox +++ b/doc/tutorial/java/tutorial-install-java.dox @@ -40,7 +40,10 @@ java 17.0.2 2022-01-18 LTS \subsection java_install_jdk_osx On Mac OSX platform -\warning On macOS Catalina 10.15.7 with Xcode 12.4, the installation of JDK 11 or 15 from the [Oracle](http://www.oracle.com/technetwork/java/javase/downloads/index.html) website by downloading and installing `jdk-11.0.10_osx-x64_bin.dmg` or `jdk-15.0.2_osx-x64_bin.dmg` doesn't allow the detection of JNI necessary to build `visp_java.jar`. That's why we recommend to install JDK using brew. +\warning On macOS Catalina 10.15.7 with Xcode 12.4, the installation of JDK 11 or 15 from the +[Oracle](http://www.oracle.com/technetwork/java/javase/downloads/index.html) website by downloading and installing +`jdk-11.0.10_osx-x64_bin.dmg` or `jdk-15.0.2_osx-x64_bin.dmg` doesn't allow the detection of JNI necessary to build +`visp_java.jar`. That's why we recommend to install JDK using brew. \verbatim $ more ViSP-third-party.txt ... 
@@ -71,13 +74,20 @@ openjdk: stable 17.0.1 (bottled) [keg-only] - Now for the system Java wrappers to find this JDK, symlink it with: \verbatim -$ sudo ln -sfn /usr/local/opt/openjdk/libexec/openjdk.jdk /Library/Java/JavaVirtualMachines/openjdk.jdk +$ sudo ln -sfn /opt/homebrew/opt/openjdk/libexec/openjdk.jdk /Library/Java/JavaVirtualMachines/openjdk.jdk +\endverbatim +\note Depending on the OpenJDK version, the symlink instruction can differ. To know which one is to use, follow +instructions provided by `brew info openjdk`. At the time this tutorial was updated, we got: +\verbatim +$ brew info openjdk +For the system Java wrappers to find this JDK, symlink it with + sudo ln -sfn /opt/homebrew/opt/openjdk/libexec/openjdk.jdk /Library/Java/JavaVirtualMachines/openjdk.jdk \endverbatim - Set `JAVA_HOME` env var to help JNI headers and libraries detection \verbatim -$ echo 'export JAVA_HOME=$(/usr/libexec/java_home)' >> ~/.bashrc -$ source ~/.bashrc +$ echo 'export JAVA_HOME=$(/usr/libexec/java_home)' >> ~/.zshrc +$ source ~/.zshrc \endverbatim - After installation check JDK version: From 44ad9cf923d5b9f7190bcc057313677f9b8a812b Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:24:35 +0200 Subject: [PATCH 32/43] Fix typo --- modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h index 6bb6556938..0cf2f5a162 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h @@ -205,7 +205,7 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker { protected: -//! Temporary OpenCV image for fast conversion. + //! Temporary OpenCV image for fast conversion. cv::Mat cur; //! Initial pose. 
vpHomogeneousMatrix c0Mo; From 11ca9fc89833c02b00ced6212ee251f28be1735d Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:46:46 +0200 Subject: [PATCH 33/43] Fix build when no display gui available - error reported by ros2 jenkins buildfarm --- tutorial/image/drawingHelpers.cpp | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/tutorial/image/drawingHelpers.cpp b/tutorial/image/drawingHelpers.cpp index 9345b1b6aa..98602dc10c 100644 --- a/tutorial/image/drawingHelpers.cpp +++ b/tutorial/image/drawingHelpers.cpp @@ -33,25 +33,31 @@ #include #if defined(VISP_HAVE_X11) - vpDisplayX drawingHelpers::d; +vpDisplayX drawingHelpers::d; #elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV drawingHelpers::d; +vpDisplayOpenCV drawingHelpers::d; #elif defined(VISP_HAVE_GTK) - vpDisplayGTK drawingHelpers::d; +vpDisplayGTK drawingHelpers::d; #elif defined(VISP_HAVE_GDI) - vpDisplayGDI drawingHelpers::d; +vpDisplayGDI drawingHelpers::d; #elif defined(VISP_HAVE_D3D9) - vpDisplayD3D drawingHelpers::d; +vpDisplayD3D drawingHelpers::d; #endif vpImage drawingHelpers::I_disp; bool drawingHelpers::display(vpImage &I, const std::string &title, const bool &blockingMode) -{ +{ I_disp = I; - d.init(I_disp); - vpDisplay::setTitle(I_disp, title.c_str()); - +#if defined(VISP_HAVE_DISPLAY) + if (!d.isInitialised()) { + d.init(I_disp); + vpDisplay::setTitle(I_disp, title); + } +#else + (void)title; +#endif + vpDisplay::display(I_disp); vpDisplay::displayText(I_disp, 15, 15, "Left click to continue...", vpColor::red); vpDisplay::displayText(I_disp, 35, 15, "Right click to stop...", vpColor::red); @@ -59,21 +65,20 @@ bool drawingHelpers::display(vpImage &I, const std::string &title, const vpMouseButton::vpMouseButtonType button; vpDisplay::getClick(I_disp, button, blockingMode); bool hasToContinue = true; - if (button == vpMouseButton::button3) - { + if (button == vpMouseButton::button3) { // Right click => stop the program hasToContinue = false; } return hasToContinue; } - + bool drawingHelpers::display(vpImage &D, const std::string &title, const bool &blockingMode) { vpImage I; // Image to display vpImageConvert::convert(D, I); return display(I, title, blockingMode); -} +} bool drawingHelpers::display(vpImage &D, const std::string &title, const bool &blockingMode) { From 66fab3a13c1904948cb743115662606ab5153548 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Sat, 28 Oct 2023 16:58:29 +0200 Subject: [PATCH 34/43] Set cxx11 standard as minimal required standard - remove all the code that was specialized for cxx98 that is no more supported --- .github/workflows/ubuntu-dep-apt.yml | 4 +- .github/workflows/ubuntu-dep-src.yml | 2 +- .github/workflows/ubuntu-ustk.yml | 2 +- CMakeLists.txt | 43 +- ChangeLog.txt | 2 + .../visp-acquire-franka-calib-data.cpp | 5 +- ...sp-acquire-universal-robots-calib-data.cpp | 5 +- .../visp-compute-hand-eye-calibration.cpp | 6 +- cmake/VISPDetectCXXStandard.cmake | 36 +- cmake/templates/VISPConfig.cmake.in | 3 - cmake/templates/vpConfig.h.in | 8 +- doc/config-doxygen.in | 3 - .../tutorial-detection-object-deprecated.dox | 4 +- .../detection/tutorial-detection-object.dox | 4 +- .../device/framegrabber/getRealSense2Info.cpp | 15 +- example/device/framegrabber/grabRealSense.cpp | 9 +- .../device/framegrabber/grabRealSense2.cpp | 32 +- .../framegrabber/grabRealSense2_T265.cpp | 18 +- .../framegrabber/grabV4l2MultiCpp11Thread.cpp | 3 +- .../device/framegrabber/readRealSenseData.cpp | 4 +- 
.../device/framegrabber/saveRealSenseData.cpp | 2 +- example/math/quadprog.cpp | 10 +- example/math/quadprog_eq.cpp | 10 +- .../servo-afma6/servoAfma6AprilTagIBVS.cpp | 63 +- .../servo-afma6/servoAfma6AprilTagPBVS.cpp | 61 +- example/servo-bebop2/servoBebop2.cpp | 123 +-- example/servo-franka/servoFrankaIBVS.cpp | 79 +- example/servo-franka/servoFrankaPBVS.cpp | 77 +- .../servoPtu46Point2DArtVelocity.cpp | 2 +- .../UniversalRobotsMoveToPosition.cpp | 21 +- .../UniversalRobotsSavePosition.cpp | 19 +- .../servoUniversalRobotsIBVS.cpp | 78 +- .../servoUniversalRobotsPBVS.cpp | 76 +- modules/core/include/visp3/core/vpArray2D.h | 31 +- .../include/visp3/core/vpCameraParameters.h | 2 +- modules/core/include/visp3/core/vpColVector.h | 9 +- modules/core/include/visp3/core/vpColormap.h | 24 +- .../include/visp3/core/vpHomogeneousMatrix.h | 22 +- modules/core/include/visp3/core/vpImage.h | 4 - .../core/include/visp3/core/vpImageFilter.h | 786 +++++++++--------- .../core/include/visp3/core/vpImagePoint.h | 30 +- modules/core/include/visp3/core/vpLinProg.h | 179 ++-- modules/core/include/visp3/core/vpMatrix.h | 10 - .../core/include/visp3/core/vpPoseVector.h | 3 - modules/core/include/visp3/core/vpQuadProg.h | 104 +-- .../include/visp3/core/vpQuaternionVector.h | 2 - modules/core/include/visp3/core/vpRGBa.h | 11 +- modules/core/include/visp3/core/vpRGBf.h | 11 +- .../core/include/visp3/core/vpRectOriented.h | 8 - modules/core/include/visp3/core/vpRobust.h | 11 +- .../include/visp3/core/vpRotationMatrix.h | 6 - modules/core/include/visp3/core/vpRowVector.h | 8 - .../core/include/visp3/core/vpRxyzVector.h | 4 - .../core/include/visp3/core/vpRzyxVector.h | 4 - .../core/include/visp3/core/vpRzyzVector.h | 4 - .../core/include/visp3/core/vpThetaUVector.h | 4 - modules/core/include/visp3/core/vpTime.h | 5 +- .../include/visp3/core/vpTranslationVector.h | 4 - modules/core/include/visp3/core/vpUniRand.h | 8 - .../src/camera/vpColorDepthConversion.cpp | 121 +-- .../src/image/private/vpImageConvert_impl.h | 22 +- modules/core/src/image/vpColormap.cpp | 4 - modules/core/src/image/vpFont.cpp | 59 +- modules/core/src/image/vpImageFilter.cpp | 170 ++-- modules/core/src/image/vpRGBa.cpp | 2 - modules/core/src/image/vpRGBf.cpp | 8 +- modules/core/src/math/matrix/vpColVector.cpp | 5 - modules/core/src/math/matrix/vpMatrix.cpp | 15 +- modules/core/src/math/matrix/vpRowVector.cpp | 6 +- modules/core/src/math/misc/vpMath.cpp | 9 +- modules/core/src/math/robust/vpRobust.cpp | 41 +- .../transformation/vpHomogeneousMatrix.cpp | 4 - .../transformation/vpQuaternionVector.cpp | 35 +- .../math/transformation/vpRotationMatrix.cpp | 8 - .../src/math/transformation/vpRxyzVector.cpp | 12 +- .../src/math/transformation/vpRzyxVector.cpp | 12 +- .../src/math/transformation/vpRzyzVector.cpp | 12 +- .../math/transformation/vpThetaUVector.cpp | 25 +- .../transformation/vpTranslationVector.cpp | 2 - modules/core/src/tools/file/vpIoTools.cpp | 21 +- modules/core/src/tools/geometry/vpPolygon.cpp | 56 +- .../src/tools/geometry/vpRectOriented.cpp | 25 +- .../core/src/tools/optimization/vpLinProg.cpp | 5 - .../src/tools/optimization/vpQuadProg.cpp | 39 +- modules/core/src/tools/time/vpTime.cpp | 3 +- .../core/test/image/testImageOwnership.cpp | 5 +- .../test/math/testJsonArrayConversion.cpp | 2 +- modules/core/test/math/testMath.cpp | 17 +- modules/core/test/math/testMatrix.cpp | 17 +- .../test/math/testMatrixInitialization.cpp | 76 +- .../core/test/tools/geometry/testPolygon2.cpp | 25 +- .../visp3/gui/vpColorBlindFriendlyPalette.h | 2 - 
.../gui/include/visp3/gui/vpDisplayOpenCV.h | 4 - modules/gui/include/visp3/gui/vpPclViewer.h | 2 +- modules/gui/src/display/vpDisplayOpenCV.cpp | 288 ++++--- .../vpColorBlindFriendlyPalette.cpp | 3 - modules/gui/src/pointcloud/vpPclViewer.cpp | 2 +- .../visp3/imgproc/vpCircleHoughTransform.h | 5 - .../imgproc/src/vpCircleHoughTransform.cpp | 2 - modules/imgproc/src/vpRetinex.cpp | 5 +- modules/io/include/visp3/io/vpImageQueue.h | 3 - .../include/visp3/io/vpImageStorageWorker.h | 3 - .../io/src/image/private/vpImageIoBackend.h | 4 - .../io/src/image/private/vpImageIoTinyEXR.cpp | 24 +- modules/io/src/image/vpImageIo.cpp | 202 +++-- .../visp3/robot/vpRobotUniversalRobots.h | 2 +- .../vpRobotUniversalRobots.cpp | 36 +- .../test/servo-afma6/testRobotAfma6Pose.cpp | 2 +- .../testUniversalRobotsCartPosition.cpp | 24 +- .../testUniversalRobotsCartVelocity.cpp | 24 +- .../testUniversalRobotsGetData.cpp | 36 +- .../testUniversalRobotsJointPosition.cpp | 24 +- .../testUniversalRobotsJointVelocity.cpp | 24 +- .../servo-viper/testRobotViper850Pose.cpp | 2 +- .../sensor/include/visp3/sensor/vpLaserScan.h | 11 +- .../visp3/sensor/vpOccipitalStructure.h | 2 +- .../sensor/include/visp3/sensor/vpRealSense.h | 13 +- .../include/visp3/sensor/vpRealSense2.h | 2 +- .../sensor/include/visp3/sensor/vpScanPoint.h | 6 +- .../framegrabber/1394/vp1394CMUGrabber.cpp | 6 +- .../vpOccipitalStructure.cpp | 70 +- .../src/rgb-depth/realsense/vpRealSense.cpp | 37 +- .../src/rgb-depth/realsense/vpRealSense2.cpp | 192 +++-- .../sensor/test/mocap/testMocapQualisys.cpp | 84 +- modules/sensor/test/mocap/testMocapVicon.cpp | 84 +- .../testOccipitalStructure_Core_images.cpp | 17 +- .../testOccipitalStructure_Core_imu.cpp | 15 +- .../testOccipitalStructure_Core_pcl.cpp | 15 +- .../test/rgb-depth/testRealSense2_D435.cpp | 12 +- .../rgb-depth/testRealSense2_D435_align.cpp | 32 +- .../rgb-depth/testRealSense2_D435_opencv.cpp | 22 +- .../rgb-depth/testRealSense2_D435_pcl.cpp | 30 +- .../test/rgb-depth/testRealSense2_SR300.cpp | 88 +- .../rgb-depth/testRealSense2_T265_images.cpp | 12 +- .../testRealSense2_T265_images_odometry.cpp | 14 +- ...tRealSense2_T265_images_odometry_async.cpp | 23 +- .../rgb-depth/testRealSense2_T265_imu.cpp | 16 +- .../testRealSense2_T265_odometry.cpp | 14 +- .../testRealSense2_T265_undistort.cpp | 14 +- .../test/rgb-depth/testRealSense_R200.cpp | 63 +- .../test/rgb-depth/testRealSense_SR300.cpp | 64 +- .../include/visp3/mbt/vpMbtTukeyEstimator.h | 11 +- .../mbt/src/depth/vpMbtFaceDepthDense.cpp | 85 +- .../mbt/src/depth/vpMbtFaceDepthNormal.cpp | 159 ++-- .../mbt/src/edge/vpMbtDistanceCircle.cpp | 52 +- .../mbt/src/edge/vpMbtDistanceCylinder.cpp | 141 ++-- .../mbt/src/edge/vpMbtDistanceLine.cpp | 100 +-- .../mbt/src/klt/vpMbtDistanceKltCylinder.cpp | 71 +- .../mbt/src/klt/vpMbtDistanceKltPoints.cpp | 56 +- .../tracker/mbt/src/vpMbGenericTracker.cpp | 4 +- modules/tracker/mbt/src/vpMbTracker.cpp | 266 +++--- .../perfGenericTracker.cpp | 32 +- .../testGenericTracker.cpp | 10 +- .../testGenericTrackerDepth.cpp | 65 +- modules/tracker/me/include/visp3/me/vpMe.h | 2 - .../tracker/me/include/visp3/me/vpMeEllipse.h | 4 - .../tracker/me/include/visp3/me/vpMeLine.h | 4 - modules/tracker/me/src/moving-edges/vpMe.cpp | 2 - modules/tracker/me/test/testJsonMe.cpp | 10 +- modules/vision/include/visp3/vision/vpPose.h | 4 +- .../include/visp3/vision/vpPoseFeatures.h | 14 - .../src/pose-estimation/vpPoseFeatures.cpp | 4 - .../src/pose-estimation/vpPoseRansac.cpp | 10 - modules/vision/test/pose/testFindMatch.cpp | 2 +- 
modules/vision/test/pose/testPoseFeatures.cpp | 11 +- modules/vision/test/pose/testPoseRansac.cpp | 2 +- .../opencv/tutorial-bridge-opencv-matrix.cpp | 4 +- ...torial-pose-from-points-realsense-T265.cpp | 38 +- tutorial/grabber/tutorial-grabber-1394.cpp | 110 +-- .../grabber/tutorial-grabber-basler-pylon.cpp | 131 +-- tutorial/grabber/tutorial-grabber-bebop2.cpp | 127 +-- .../grabber/tutorial-grabber-flycapture.cpp | 112 +-- .../grabber/tutorial-grabber-ids-ueye.cpp | 235 +++--- .../tutorial-grabber-multiple-realsense.cpp | 38 +- tutorial/grabber/tutorial-grabber-opencv.cpp | 9 +- .../tutorial-grabber-realsense-T265.cpp | 91 +- .../grabber/tutorial-grabber-realsense.cpp | 135 +-- ...torial-grabber-rgbd-D435-structurecore.cpp | 6 +- .../tutorial-grabber-structure-core.cpp | 131 +-- tutorial/grabber/tutorial-grabber-v4l2.cpp | 129 +-- .../pcl-visualizer/ClassUsingPclViewer.cpp | 2 +- .../gui/pcl-visualizer/ClassUsingPclViewer.h | 2 +- .../pcl-visualizer/tutorial-pcl-viewer.cpp | 4 +- tutorial/image/drawingHelpers.h | 112 +-- tutorial/image/tutorial-image-colormap.cpp | 38 +- .../hough-transform/tutorial-circle-hough.cpp | 9 - 186 files changed, 3458 insertions(+), 3793 deletions(-) diff --git a/.github/workflows/ubuntu-dep-apt.yml b/.github/workflows/ubuntu-dep-apt.yml index 48f47dfcf2..bb77160c2c 100644 --- a/.github/workflows/ubuntu-dep-apt.yml +++ b/.github/workflows/ubuntu-dep-apt.yml @@ -15,7 +15,7 @@ jobs: matrix: os: [ubuntu-20.04, ubuntu-22.04] compiler: [ {CC: /usr/bin/gcc-9, CXX: /usr/bin/g++-9}, {CC: /usr/bin/gcc-10, CXX: /usr/bin/g++-10}, {CC: /usr/bin/clang, CXX: /usr/bin/clang++} ] - standard: [ 98, 11, 14, 17 ] + standard: [ 11, 14, 17 ] steps: # https://github.com/marketplace/actions/cancel-workflow-action @@ -36,7 +36,7 @@ jobs: - name: Print compiler information run: dpkg --list | grep compiler - - name: Install dependencies for ubuntu 18.04 and 20.04 + - name: Install dependencies for ubuntu 20.04 if: matrix.os != 'ubuntu-22.04' run: sudo apt-get update && sudo apt-get install -y libx11-dev libdc1394-22-dev libv4l-dev liblapack-dev libopenblas-dev libeigen3-dev libopencv-dev nlohmann-json3-dev diff --git a/.github/workflows/ubuntu-dep-src.yml b/.github/workflows/ubuntu-dep-src.yml index dff5233a84..c481c39a38 100644 --- a/.github/workflows/ubuntu-dep-src.yml +++ b/.github/workflows/ubuntu-dep-src.yml @@ -31,7 +31,7 @@ jobs: - name: Print OS information run: lsb_release -a - - name: Install dependencies for ubuntu 18.04 and 20.04 + - name: Install dependencies for ubuntu 20.04 if: matrix.os != 'ubuntu-22.04' run: | sudo apt-get update && sudo apt-get install -y libdc1394-22-dev diff --git a/.github/workflows/ubuntu-ustk.yml b/.github/workflows/ubuntu-ustk.yml index 23eea4f098..fab02f8d6b 100644 --- a/.github/workflows/ubuntu-ustk.yml +++ b/.github/workflows/ubuntu-ustk.yml @@ -34,7 +34,7 @@ jobs: - name: Print compiler information run: dpkg --list | grep compiler - - name: Install dependencies for ubuntu 18.04 and 20.04 + - name: Install dependencies for ubuntu 20.04 if: matrix.os != 'ubuntu-22.04' run: sudo apt-get update && sudo apt-get install -y libx11-dev libdc1394-22-dev libv4l-dev liblapack-dev libopenblas-dev libeigen3-dev libopencv-dev nlohmann-json3-dev diff --git a/CMakeLists.txt b/CMakeLists.txt index d8110b8b79..b15c567720 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -551,8 +551,8 @@ VP_OPTION(USE_SOXT SOXT "" "Include Coin/SoXt support" "" OF set(THREADS_PREFER_PTHREAD_FLAG ON) VP_OPTION(USE_THREADS Threads "" "Include std::thread support" "" ON IF NOT 
(WIN32 OR MINGW)) -# We need threads with c++11 -if((VISP_CXX_STANDARD GREATER VISP_CXX_STANDARD_98) AND NOT USE_THREADS) +# We need threads. To be changed to make threads optional +if(NOT USE_THREADS) if(Threads_FOUND) message(WARNING "We need std::thread. We turn USE_THREADS=ON.") unset(USE_THREADS) @@ -589,38 +589,7 @@ VP_OPTION(USE_NLOHMANN_JSON nlohmann_json QUIET "Include nlohmann json support" # ---------------------------------------------------------------------------- # Handle cxx standard depending on specific 3rd parties. Should be before module parsing and VISP3rdParty.cmake include # ---------------------------------------------------------------------------- -# if c++ standard is not at leat c++11, force 3rd parties that require at least c++11 to OFF -if(VISP_CXX_STANDARD LESS VISP_CXX_STANDARD_11) - if(USE_REALSENSE) - message(WARNING "librealsense 3rd party was detected and needs at least c++11 standard compiler flag while you have set c++${USE_CXX_STANDARD}. Thus we disable librealsense usage turning USE_REALSENSE=OFF.") - unset(USE_REALSENSE) - set(USE_REALSENSE OFF CACHE BOOL "Include RealSense SDK support" FORCE) - endif() - if(USE_REALSENSE2) - message(WARNING "librealsense2 3rd party was detected and needs at least c++11 standard compiler flag while you have set c++${USE_CXX_STANDARD}. Thus we disable librealsense2 usage turning USE_REALSENSE2=OFF.") - unset(USE_REALSENSE2) - set(USE_REALSENSE2 OFF CACHE BOOL "Include RealSense2 SDK support" FORCE) - endif() - if(USE_FRANKA) - message(WARNING "libfranka 3rd party was detected and needs at least c++11 standard compiler flag while you have set c++${USE_CXX_STANDARD}. Thus we disable libfranka usage turning USE_FRANKA=OFF.") - unset(USE_FRANKA) - set(USE_FRANKA OFF CACHE BOOL "Include libfranka SDK support for Franka Emika robots" FORCE) - endif() - if(USE_UR_RTDE) - message(WARNING "ur_rtde 3rd party was detected and needs at least c++11 standard compiler flag while you have set c++${USE_CXX_STANDARD}. Thus we disable ur_rtde usage turning USE_UR_RTDE=OFF.") - unset(USE_UR_RTDE) - set(USE_UR_RTDE OFF CACHE BOOL "Include ur_rtde SDK support for Universal Robots robots" FORCE) - endif() - if((OpenCV_VERSION VERSION_GREATER 4.0.0) OR (OpenCV_VERSION VERSION_EQUAL 4.0.0)) - # if c++ standard is not at least 11: disable opencv 4.x usage - message(WARNING "OpenCV ${OpenCV_VERSION} 3rd party was detected and needs c++11 or higher compiler flag while you have set c++${USE_CXX_STANDARD}. Thus we disable OpenCV usage turning USE_OPENCV=OFF") - unset(USE_OPENCV) - set(USE_OPENCV OFF CACHE BOOL "Include OpenCV support" FORCE) - endif() - # Downgrade to cxx98 to completely disable cxx11 - unset(CMAKE_CXX_STANDARD) -endif() - +# if c++ standard is not at leat c++17, force 3rd parties that require at least c++17 to OFF if(VISP_CXX_STANDARD LESS VISP_CXX_STANDARD_17) if(USE_MAVSDK) message(WARNING "mavsdk 3rd party was detected and needs at least c++17 standard compiler flag while you have set c++${USE_CXX_STANDARD}. 
Thus we disable MAVSDK usage turning USE_MAVSDK=OFF.") @@ -665,9 +634,9 @@ VP_OPTION(WITH_APRILTAG_BIG_FAMILY "" "" "Build AprilTag big family (41h12, VP_OPTION(WITH_ATIDAQ "" "" "Build atidaq-c as built-in library" "" ON IF USE_COMEDI AND NOT WINRT) VP_OPTION(WITH_CLIPPER "" "" "Build clipper as built-in library" "" ON IF USE_OPENCV) VP_OPTION(WITH_LAPACK "" "" "Build lapack as built-in library" "" ON IF NOT USE_LAPACK) -VP_OPTION(WITH_QBDEVICE "" "" "Build qbdevice-api as built-in library" "" ON IF (VISP_CXX_STANDARD GREATER VISP_CXX_STANDARD_98) AND (NOT WINRT) AND (NOT IOS)) -VP_OPTION(WITH_TAKKTILE2 "" "" "Build Right Hand takktile2 driver as built-in library" "" ON IF (VISP_CXX_STANDARD GREATER VISP_CXX_STANDARD_98) AND (NOT WIN32) AND (NOT WINRT) AND (NOT IOS) AND (NOT ANDROID)) -VP_OPTION(WITH_CATCH2 "" "" "Use catch2" "" ON IF (VISP_CXX_STANDARD GREATER VISP_CXX_STANDARD_98)) +VP_OPTION(WITH_QBDEVICE "" "" "Build qbdevice-api as built-in library" "" ON IF (NOT WINRT) AND (NOT IOS)) +VP_OPTION(WITH_TAKKTILE2 "" "" "Build Right Hand takktile2 driver as built-in library" "" ON IF (NOT WIN32) AND (NOT WINRT) AND (NOT IOS) AND (NOT ANDROID)) +VP_OPTION(WITH_CATCH2 "" "" "Use catch2 for testing" "" ON) # ---------------------------------------------------------------------------- # Check for specific functions. Should be after cxx standard detection in VISPDetectCXXStandard.cmake and potential modification depending on pcl, realsense2, libfranka diff --git a/ChangeLog.txt b/ChangeLog.txt index d19f2e9c7c..55ca45d97c 100644 --- a/ChangeLog.txt +++ b/ChangeLog.txt @@ -12,9 +12,11 @@ ViSP 3.x.x (Version in development) . - Deprecated removed . vpPlanarObjectDetector, vpFernClassifier classes are removed + . End of support for c++98 standard. As a consequence, ViSP is no longer compatible with Ubuntu 12.04 - New features and improvements . Introduce applications in apps folder, a collection of useful tools that have a dependency to the install target + . Bump minimal c++ standard to c++11 - Applications . Migrate eye-to-hand tutorials in apps - Tutorials diff --git a/apps/calibration/visp-acquire-franka-calib-data.cpp b/apps/calibration/visp-acquire-franka-calib-data.cpp index f1bfcdfaca..3498622c4b 100644 --- a/apps/calibration/visp-acquire-franka-calib-data.cpp +++ b/apps/calibration/visp-acquire-franka-calib-data.cpp @@ -42,7 +42,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_FRANKA) && \ defined(VISP_HAVE_MODULE_GUI) && defined(VISP_HAVE_MODULE_ROBOT) && defined(VISP_HAVE_MODULE_SENSOR) // optional @@ -169,9 +169,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense-2.x." << std::endl; #endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif #if !defined(VISP_HAVE_FRANKA) std::cout << "Install libfranka."
<< std::endl; #endif diff --git a/apps/calibration/visp-acquire-universal-robots-calib-data.cpp b/apps/calibration/visp-acquire-universal-robots-calib-data.cpp index 6bb11a27e3..7ecadd446d 100644 --- a/apps/calibration/visp-acquire-universal-robots-calib-data.cpp +++ b/apps/calibration/visp-acquire-universal-robots-calib-data.cpp @@ -42,7 +42,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_UR_RTDE) && \ defined(VISP_HAVE_MODULE_GUI) && defined(VISP_HAVE_MODULE_ROBOT) && defined(VISP_HAVE_MODULE_SENSOR) // optional @@ -171,9 +171,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense-2.x." << std::endl; #endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif #if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." << std::endl; diff --git a/apps/calibration/visp-compute-hand-eye-calibration.cpp b/apps/calibration/visp-compute-hand-eye-calibration.cpp index e72024e146..1ed28ed68c 100644 --- a/apps/calibration/visp-compute-hand-eye-calibration.cpp +++ b/apps/calibration/visp-compute-hand-eye-calibration.cpp @@ -187,11 +187,9 @@ int main(int argc, const char *argv[]) // save eMc std::string name_we = vpIoTools::createFilePath(vpIoTools::getParent(opt_eMc_file), vpIoTools::getNameWE(opt_eMc_file)) + ".txt"; std::cout << std::endl << "Save transformation matrix eMc as an homogeneous matrix in: " << name_we << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) + std::ofstream file_eMc(name_we); -#else - std::ofstream file_eMc(name_we.c_str()); -#endif + eMc.save(file_eMc); vpPoseVector pose_vec(eMc); diff --git a/cmake/VISPDetectCXXStandard.cmake b/cmake/VISPDetectCXXStandard.cmake index 53c1d0f1e1..ab040d821c 100644 --- a/cmake/VISPDetectCXXStandard.cmake +++ b/cmake/VISPDetectCXXStandard.cmake @@ -1,22 +1,19 @@ -set(VISP_CXX_STANDARD_98 199711L) +set(VISP_CXX_STANDARD_98 199711L) # Keep for compat with previous releases set(VISP_CXX_STANDARD_11 201103L) set(VISP_CXX_STANDARD_14 201402L) set(VISP_CXX_STANDARD_17 201703L) if(DEFINED USE_CXX_STANDARD) - if(USE_CXX_STANDARD STREQUAL "98") - set(CMAKE_CXX_STANDARD 98) - set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_98}) - elseif(USE_CXX_STANDARD STREQUAL "11") - set(CMAKE_CXX_STANDARD 11) - set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_11}) + if(USE_CXX_STANDARD STREQUAL "11") + set(CMAKE_CXX_STANDARD 11) + set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_11}) elseif(USE_CXX_STANDARD STREQUAL "14") - set(CMAKE_CXX_STANDARD 14) - set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_14}) + set(CMAKE_CXX_STANDARD 14) + set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_14}) elseif(USE_CXX_STANDARD STREQUAL "17") - set(CMAKE_CXX_STANDARD 17) - set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_17}) + set(CMAKE_CXX_STANDARD 17) + set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_17}) endif() set(CMAKE_CXX_EXTENSIONS OFF) # use -std=c++11 instead of -std=gnu++11 @@ -24,15 +21,10 @@ if(DEFINED USE_CXX_STANDARD) else() set(AVAILABLE_CXX_STANDARD TRUE) - set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_98}) + set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_11}) set(CMAKE_CXX_STANDARD_REQUIRED FALSE) - if(CMAKE_CXX98_COMPILE_FEATURES) - set(CXX98_STANDARD_FOUND ON CACHE STRING "cxx98 standard") - 
mark_as_advanced(CXX98_STANDARD_FOUND) - endif() - if(CMAKE_CXX11_COMPILE_FEATURES) # cxx11 implementation maybe incomplete especially with g++ 4.6.x. # That's why we check more in depth for cxx_override that is not available with g++ 4.6.3 @@ -60,8 +52,6 @@ else() set(USE_CXX_STANDARD "14" CACHE STRING "Selected cxx standard") elseif(CXX11_STANDARD_FOUND) set(USE_CXX_STANDARD "11" CACHE STRING "Selected cxx standard") - elseif(CXX98_STANDARD_FOUND) - set(USE_CXX_STANDARD "98" CACHE STRING "Selected cxx standard") else() set(AVAILABLE_CXX_STANDARD FALSE) endif() @@ -72,9 +62,6 @@ else() endif() if(AVAILABLE_CXX_STANDARD) - if(CXX98_STANDARD_FOUND) - set_property(CACHE USE_CXX_STANDARD APPEND_STRING PROPERTY STRINGS "98") - endif() if(CXX11_STANDARD_FOUND) set_property(CACHE USE_CXX_STANDARD APPEND_STRING PROPERTY STRINGS ";11") endif() @@ -85,10 +72,7 @@ else() set_property(CACHE USE_CXX_STANDARD APPEND_STRING PROPERTY STRINGS ";17") endif() - if(USE_CXX_STANDARD STREQUAL "98") - set(CMAKE_CXX_STANDARD 98) - set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_98}) - elseif(USE_CXX_STANDARD STREQUAL "11") + if(USE_CXX_STANDARD STREQUAL "11") set(CMAKE_CXX_STANDARD 11) set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_11}) elseif(USE_CXX_STANDARD STREQUAL "14") diff --git a/cmake/templates/VISPConfig.cmake.in b/cmake/templates/VISPConfig.cmake.in index 0ed3aaaf2e..fd4b9b6bdb 100644 --- a/cmake/templates/VISPConfig.cmake.in +++ b/cmake/templates/VISPConfig.cmake.in @@ -201,9 +201,6 @@ set(VISP_HAVE_CMU1394 "@VISP_HAVE_CMU1394@") set(VISP_HAVE_COIN3D "@VISP_HAVE_COIN3D@") set(VISP_HAVE_COIN3D_AND_GUI "@VISP_HAVE_COIN3D_AND_GUI@") set(VISP_HAVE_COMEDI "@VISP_HAVE_COMEDI@") -set(VISP_HAVE_CPP11_COMPATIBILITY "@VISP_HAVE_CXX11@") # to keep compat with previous releases -set(VISP_HAVE_CXX11 "@VISP_HAVE_CXX11@") -set(VISP_CXX_STANDARD_98 "@VISP_CXX_STANDARD_98@") set(VISP_CXX_STANDARD_11 "@VISP_CXX_STANDARD_11@") set(VISP_CXX_STANDARD_14 "@VISP_CXX_STANDARD_14@") set(VISP_CXX_STANDARD_17 "@VISP_CXX_STANDARD_17@") diff --git a/cmake/templates/vpConfig.h.in b/cmake/templates/vpConfig.h.in index 26c9ef50d4..ebdd634d43 100644 --- a/cmake/templates/vpConfig.h.in +++ b/cmake/templates/vpConfig.h.in @@ -485,19 +485,13 @@ #cmakedefine VISP_HAVE_NLOHMANN_JSON // Define c++ standard values also available in __cplusplus when gcc is used -#define VISP_CXX_STANDARD_98 ${VISP_CXX_STANDARD_98} +#define VISP_CXX_STANDARD_98 ${VISP_CXX_STANDARD_98} // Keep for compat with previous releases #define VISP_CXX_STANDARD_11 ${VISP_CXX_STANDARD_11} #define VISP_CXX_STANDARD_14 ${VISP_CXX_STANDARD_14} #define VISP_CXX_STANDARD_17 ${VISP_CXX_STANDARD_17} #define VISP_CXX_STANDARD ${VISP_CXX_STANDARD} -// Defined to keep compat with previous releases when c++ 11 available -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) -# define VISP_HAVE_CXX11 -# define VISP_HAVE_CPP11_COMPATIBILITY -#endif - // Defined if isnan macro is available #cmakedefine VISP_HAVE_FUNC_ISNAN diff --git a/doc/config-doxygen.in b/doc/config-doxygen.in index 144e0aac01..fc2f3cc9fa 100644 --- a/doc/config-doxygen.in +++ b/doc/config-doxygen.in @@ -2153,9 +2153,6 @@ PREDEFINED = @DOXYGEN_SHOULD_SKIP_THIS@ \ VISP_HAVE_COIN3D \ VISP_HAVE_COIN3D_AND_GUI \ VISP_HAVE_COMEDI \ - VISP_HAVE_CPP11_COMPATIBILITY \ - VISP_HAVE_CXX11 \ - VISP_CXX_STANDARD_98=@VISP_CXX_STANDARD_98@ \ VISP_CXX_STANDARD_11=@VISP_CXX_STANDARD_11@ \ VISP_CXX_STANDARD_14=@VISP_CXX_STANDARD_14@ \ VISP_CXX_STANDARD_17=@VISP_CXX_STANDARD_17@ \ diff --git
a/doc/tutorial/detection/tutorial-detection-object-deprecated.dox b/doc/tutorial/detection/tutorial-detection-object-deprecated.dox index 46e7a39bec..4611967283 100644 --- a/doc/tutorial/detection/tutorial-detection-object-deprecated.dox +++ b/doc/tutorial/detection/tutorial-detection-object-deprecated.dox @@ -77,7 +77,7 @@ Otherwise, the configuration must be made in the code. \snippet tutorial-detection-object-mbt-deprecated.cpp Keypoint code config -We then detect keypoints in the reference image with the object we want to learn : +We then detect keypoints in the reference image with the object we want to learn : \snippet tutorial-detection-object-mbt-deprecated.cpp Keypoints reference detection @@ -173,7 +173,7 @@ And finally, we build the reference keypoints and we set the flag append to true \subsection dep_detection_object_display_multiple_images How to display the matching when the learning is done on multiple images -In this section we will explain how to display the matching between keypoints detected in the current image and their correspondances in the reference images that are used during the learning stage, as given in the next video: +In this section we will explain how to display the matching between keypoints detected in the current image and their correspondences in the reference images that are used during the learning stage, as given in the next video: \htmlonly diff --git a/doc/tutorial/detection/tutorial-detection-object.dox b/doc/tutorial/detection/tutorial-detection-object.dox index c48b094d39..74bd74e6fd 100644 --- a/doc/tutorial/detection/tutorial-detection-object.dox +++ b/doc/tutorial/detection/tutorial-detection-object.dox @@ -75,7 +75,7 @@ Otherwise, the configuration must be made in the code. \snippet tutorial-detection-object-mbt.cpp Keypoint code config -We then detect keypoints in the reference image with the object we want to learn : +We then detect keypoints in the reference image with the object we want to learn : \snippet tutorial-detection-object-mbt.cpp Keypoints reference detection @@ -171,7 +171,7 @@ And finally, we build the reference keypoints and we set the flag append to true \subsection detection_object_display_multiple_images How to display the matching when the learning is done on multiple images -In this section we will explain how to display the matching between keypoints detected in the current image and their correspondances in the reference images that are used during the learning stage, as given in the next video: +In this section we will explain how to display the matching between keypoints detected in the current image and their correspondences in the reference images that are used during the learning stage, as given in the next video: \htmlonly diff --git a/example/device/framegrabber/getRealSense2Info.cpp b/example/device/framegrabber/getRealSense2Info.cpp index f56b0cc209..3346e2ef2e 100644 --- a/example/device/framegrabber/getRealSense2Info.cpp +++ b/example/device/framegrabber/getRealSense2Info.cpp @@ -42,7 +42,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE2) int main() { @@ -57,11 +57,13 @@ int main() if (rs.getDepthScale() != 0) // If it has depth sensor std::cout << "Depth scale: " << std::setprecision(std::numeric_limits::max_digits10) << rs.getDepthScale() - << std::endl; + << std::endl; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const 
std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -75,11 +77,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #endif return EXIT_SUCCESS; } diff --git a/example/device/framegrabber/grabRealSense.cpp b/example/device/framegrabber/grabRealSense.cpp index 0aff9af270..517d2b3379 100644 --- a/example/device/framegrabber/grabRealSense.cpp +++ b/example/device/framegrabber/grabRealSense.cpp @@ -48,7 +48,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE) // Using a thread to display the pointcloud with PCL produces a segfault on // OSX @@ -113,7 +113,7 @@ vpThread::Return displayPointcloudFunction(vpThread::Args args) int main() { -#if defined(VISP_HAVE_REALSENSE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE) try { vpRealSense rs; rs.setStreamSettings(rs::stream::color, vpRealSense::vpRsStreamParams(640, 480, rs::format::rgba8, 30)); @@ -254,11 +254,6 @@ int main() #elif !defined(VISP_HAVE_REALSENSE) std::cout << "This deprecated example is only working with librealsense 1.x" << std::endl; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; - << std::endl; #endif return EXIT_SUCCESS; } diff --git a/example/device/framegrabber/grabRealSense2.cpp b/example/device/framegrabber/grabRealSense2.cpp index b6f16da5a9..18f5bcd345 100644 --- a/example/device/framegrabber/grabRealSense2.cpp +++ b/example/device/framegrabber/grabRealSense2.cpp @@ -44,8 +44,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #ifdef VISP_HAVE_PCL #include @@ -64,7 +63,7 @@ bool cancelled = false, update_pointcloud = false; class ViewerWorker { public: - explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) {} + explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) { } void run() { @@ -94,7 +93,8 @@ class ViewerWorker if (local_update) { if (m_colorMode) { local_pointcloud_color = pointcloud_color->makeShared(); - } else { + } + else { local_pointcloud = pointcloud->makeShared(); } } @@ -109,15 +109,18 @@ class ViewerWorker viewer->addPointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "RGB sample cloud"); - } else { + } + else { viewer->addPointCloud(local_pointcloud, "sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud"); } init = false; - } else { + } + else { if (m_colorMode) { viewer->updatePointCloud(local_pointcloud_color, rgb, "RGB 
sample cloud"); - } else { + } + else { viewer->updatePointCloud(local_pointcloud, "sample cloud"); } } @@ -160,9 +163,9 @@ int main() rs.open(config); std::cout << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion) - << std::endl; + << std::endl; std::cout << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion) - << std::endl; + << std::endl; std::cout << "Extrinsics cMd: \n" << rs.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH) << std::endl; std::cout << "Extrinsics dMc: \n" << rs.getTransformation(RS2_STREAM_DEPTH, RS2_STREAM_COLOR) << std::endl; std::cout << "Extrinsics cMi: \n" << rs.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_INFRARED) << std::endl; @@ -235,9 +238,11 @@ int main() viewer_thread.join(); #endif - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -251,11 +256,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #endif return EXIT_SUCCESS; } diff --git a/example/device/framegrabber/grabRealSense2_T265.cpp b/example/device/framegrabber/grabRealSense2_T265.cpp index 4f8a22ec3b..959fe60067 100644 --- a/example/device/framegrabber/grabRealSense2_T265.cpp +++ b/example/device/framegrabber/grabRealSense2_T265.cpp @@ -50,7 +50,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) int main() @@ -62,7 +62,7 @@ int main() double ts; vpImagePoint frame_origin; std::list > - frame_origins; // Frame origin's history for trajectory visualization + frame_origins; // Frame origin's history for trajectory visualization unsigned int display_scale = 2; try { @@ -160,15 +160,17 @@ int main() std::cout << "Loop time: " << std::setw(8) << vpTime::measureTimeMs() - t << "ms. 
Confidence = " << confidence; std::cout << " Gyro vel: x = " << std::setw(10) << std::setprecision(3) << imu_vel[0] << " y = " << std::setw(10) - << std::setprecision(3) << imu_vel[1] << " z = " << std::setw(8) << imu_vel[2]; + << std::setprecision(3) << imu_vel[1] << " z = " << std::setw(8) << imu_vel[2]; std::cout << " Accel: x = " << std::setw(10) << std::setprecision(3) << imu_acc[0] << " y = " << std::setw(10) - << std::setprecision(3) << imu_acc[1] << " z = " << std::setw(8) << imu_acc[2]; + << std::setprecision(3) << imu_acc[1] << " z = " << std::setw(8) << imu_acc[2]; std::cout << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -182,10 +184,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) std::cout << "You don't have X11 or GDI display capabilities" << std::endl; #elif !(RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) diff --git a/example/device/framegrabber/grabV4l2MultiCpp11Thread.cpp b/example/device/framegrabber/grabV4l2MultiCpp11Thread.cpp index 898de9e557..ea1a2dc508 100644 --- a/example/device/framegrabber/grabV4l2MultiCpp11Thread.cpp +++ b/example/device/framegrabber/grabV4l2MultiCpp11Thread.cpp @@ -45,8 +45,7 @@ #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && defined(VISP_HAVE_V4L2) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GTK)) +#if defined(VISP_HAVE_V4L2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GTK)) #include #include diff --git a/example/device/framegrabber/readRealSenseData.cpp b/example/device/framegrabber/readRealSenseData.cpp index f63a4989a4..43b160a9da 100644 --- a/example/device/framegrabber/readRealSenseData.cpp +++ b/example/device/framegrabber/readRealSenseData.cpp @@ -40,7 +40,7 @@ #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) +#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #include #include #include @@ -51,9 +51,7 @@ #include #include #include -#endif -#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #define USE_PCL_VIEWER #endif diff --git a/example/device/framegrabber/saveRealSenseData.cpp b/example/device/framegrabber/saveRealSenseData.cpp index 4b8155b29f..7548872130 100644 --- a/example/device/framegrabber/saveRealSenseData.cpp +++ b/example/device/framegrabber/saveRealSenseData.cpp @@ -39,7 +39,7 @@ #include #include -#if (defined(VISP_HAVE_REALSENSE) || defined(VISP_HAVE_REALSENSE2)) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if (defined(VISP_HAVE_REALSENSE) || defined(VISP_HAVE_REALSENSE2)) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #include diff --git a/example/math/quadprog.cpp b/example/math/quadprog.cpp index 19609c80df..116b570804 100644 --- a/example/math/quadprog.cpp +++ b/example/math/quadprog.cpp @@ -47,7 +47,7 @@ #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && 
defined(VISP_HAVE_LAPACK) +#if defined(VISP_HAVE_LAPACK) #include #include @@ -177,14 +177,6 @@ int main(int argc, char **argv) #endif return EXIT_SUCCESS; } -#elif !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) -int main() -{ - std::cout << "You did not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; - return EXIT_SUCCESS; -} #else int main() { diff --git a/example/math/quadprog_eq.cpp b/example/math/quadprog_eq.cpp index cd4ba09fa2..49c20b23de 100644 --- a/example/math/quadprog_eq.cpp +++ b/example/math/quadprog_eq.cpp @@ -47,7 +47,7 @@ #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && defined(VISP_HAVE_LAPACK) +#if defined(VISP_HAVE_LAPACK) #include "qp_plot.h" #include @@ -204,14 +204,6 @@ int main(int argc, char **argv) } #endif } -#elif !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) -int main() -{ - std::cout << "You did not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; - return EXIT_SUCCESS; -} #else int main() { diff --git a/example/servo-afma6/servoAfma6AprilTagIBVS.cpp b/example/servo-afma6/servoAfma6AprilTagIBVS.cpp index c600d48cd2..f20cfed7b3 100644 --- a/example/servo-afma6/servoAfma6AprilTagIBVS.cpp +++ b/example/servo-afma6/servoAfma6AprilTagIBVS.cpp @@ -66,8 +66,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_AFMA6) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_AFMA6) void display_point_trajectory(const vpImage &I, const std::vector &vip, std::vector *traj_vip) @@ -78,7 +77,8 @@ void display_point_trajectory(const vpImage &I, const std::vector if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) 
{ traj_vip[i].push_back(vip[i]); } - } else { + } + else { traj_vip[i].push_back(vip[i]); } } @@ -103,24 +103,31 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) { opt_tagSize = std::stod(argv[i + 1]); - } else if (std::string(argv[i]) == "--verbose") { + } + else if (std::string(argv[i]) == "--verbose") { opt_verbose = true; - } else if (std::string(argv[i]) == "--plot") { + } + else if (std::string(argv[i]) == "--plot") { opt_plot = true; - } else if (std::string(argv[i]) == "--adaptive_gain") { + } + else if (std::string(argv[i]) == "--adaptive_gain") { opt_adaptive_gain = true; - } else if (std::string(argv[i]) == "--task_sequencing") { + } + else if (std::string(argv[i]) == "--task_sequencing") { opt_task_sequencing = true; - } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { opt_quad_decimate = std::stoi(argv[i + 1]); - } else if (std::string(argv[i]) == "--no-convergence-threshold") { + } + else if (std::string(argv[i]) == "--no-convergence-threshold") { convergence_threshold = 0.; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout - << argv[0] << " --tag_size ] " - << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" - << "\n"; + << argv[0] << " --tag_size ] " + << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" + << "\n"; return EXIT_SUCCESS; } } @@ -129,8 +136,8 @@ int main(int argc, char **argv) try { std::cout << "WARNING: This example will move the robot! " - << "Please make sure to have the user stop button at hand!" << std::endl - << "Press Enter to continue..." << std::endl; + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." 
<< std::endl; std::cin.ignore(); /* @@ -158,7 +165,7 @@ int main(int argc, char **argv) // Get camera intrinsics vpCameraParameters cam = - rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); + rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); std::cout << "cam:\n" << cam << "\n"; vpImage I(height, width); @@ -182,7 +189,7 @@ int main(int argc, char **argv) // Desired pose used to compute the desired features vpHomogeneousMatrix cdMo(vpTranslationVector(0, 0, opt_tagSize * 3), // 3 times tag with along camera z axis - vpRotationMatrix({1, 0, 0, 0, -1, 0, 0, 0, -1})); + vpRotationMatrix({ 1, 0, 0, 0, -1, 0, 0, 0, -1 })); // Create visual features std::vector p(4), pd(4); // We use 4 points @@ -205,7 +212,8 @@ int main(int argc, char **argv) if (opt_adaptive_gain) { vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 task.setLambda(lambda); - } else { + } + else { task.setLambda(0.5); } @@ -277,7 +285,8 @@ int main(int argc, char **argv) } if (std::fabs(v_cdMc[0].getThetaUVector().getTheta()) < std::fabs(v_cdMc[1].getThetaUVector().getTheta())) { oMo = v_oMo[0]; - } else { + } + else { std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl; oMo = v_oMo[1]; // Introduce PI rotation } @@ -316,7 +325,8 @@ int main(int argc, char **argv) t_init_servo = vpTime::measureTimeMs(); } v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.); - } else { + } + else { v_c = task.computeControlLaw(); } @@ -359,7 +369,7 @@ int main(int argc, char **argv) if (error < convergence_threshold) { has_converged = true; std::cout << "Servo task has converged" - << "\n"; + << "\n"; vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red); } if (first_time) { @@ -427,12 +437,14 @@ int main(int argc, char **argv) if (traj_corners) { delete[] traj_corners; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; std::cout << "Stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -445,9 +457,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense-2.x" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif #if !defined(VISP_HAVE_AFMA6) std::cout << "ViSP is not build with Afma6 robot support..." 
<< std::endl; #endif diff --git a/example/servo-afma6/servoAfma6AprilTagPBVS.cpp b/example/servo-afma6/servoAfma6AprilTagPBVS.cpp index 992798bc1d..cb847dcb01 100644 --- a/example/servo-afma6/servoAfma6AprilTagPBVS.cpp +++ b/example/servo-afma6/servoAfma6AprilTagPBVS.cpp @@ -63,8 +63,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_AFMA6) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_AFMA6) void display_point_trajectory(const vpImage &I, const std::vector &vip, std::vector *traj_vip) @@ -75,7 +74,8 @@ void display_point_trajectory(const vpImage &I, const std::vector if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) { traj_vip[i].push_back(vip[i]); } - } else { + } + else { traj_vip[i].push_back(vip[i]); } } @@ -101,25 +101,32 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) { opt_tagSize = std::stod(argv[i + 1]); - } else if (std::string(argv[i]) == "--verbose") { + } + else if (std::string(argv[i]) == "--verbose") { opt_verbose = true; - } else if (std::string(argv[i]) == "--plot") { + } + else if (std::string(argv[i]) == "--plot") { opt_plot = true; - } else if (std::string(argv[i]) == "--adaptive_gain") { + } + else if (std::string(argv[i]) == "--adaptive_gain") { opt_adaptive_gain = true; - } else if (std::string(argv[i]) == "--task_sequencing") { + } + else if (std::string(argv[i]) == "--task_sequencing") { opt_task_sequencing = true; - } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { opt_quad_decimate = std::stoi(argv[i + 1]); - } else if (std::string(argv[i]) == "--no-convergence-threshold") { + } + else if (std::string(argv[i]) == "--no-convergence-threshold") { convergence_threshold_t = 0.; convergence_threshold_tu = 0.; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout - << argv[0] << " [--tag_size ] " - << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" - << "\n"; + << argv[0] << " [--tag_size ] " + << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" + << "\n"; return EXIT_SUCCESS; } } @@ -128,8 +135,8 @@ int main(int argc, char **argv) try { std::cout << "WARNING: This example will move the robot! " - << "Please make sure to have the user stop button at hand!" << std::endl - << "Press Enter to continue..." << std::endl; + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." 
<< std::endl; std::cin.ignore(); /* @@ -156,7 +163,7 @@ int main(int argc, char **argv) // Get camera intrinsics vpCameraParameters cam = - rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); + rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); std::cout << "cam:\n" << cam << "\n"; vpImage I(height, width); @@ -180,7 +187,7 @@ int main(int argc, char **argv) // Desired pose to reach vpHomogeneousMatrix cdMo(vpTranslationVector(0, 0, opt_tagSize * 3), // 3 times tag with along camera z axis - vpRotationMatrix({1, 0, 0, 0, -1, 0, 0, 0, -1})); + vpRotationMatrix({ 1, 0, 0, 0, -1, 0, 0, 0, -1 })); cdMc = cdMo * cMo.inverse(); vpFeatureTranslation t(vpFeatureTranslation::cdMc); @@ -200,7 +207,8 @@ int main(int argc, char **argv) if (opt_adaptive_gain) { vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 task.setLambda(lambda); - } else { + } + else { task.setLambda(0.5); } @@ -268,7 +276,8 @@ int main(int argc, char **argv) } if (std::fabs(v_cdMc[0].getThetaUVector().getTheta()) < std::fabs(v_cdMc[1].getThetaUVector().getTheta())) { oMo = v_oMo[0]; - } else { + } + else { std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl; oMo = v_oMo[1]; // Introduce PI rotation } @@ -287,7 +296,8 @@ int main(int argc, char **argv) t_init_servo = vpTime::measureTimeMs(); } v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.); - } else { + } + else { v_c = task.computeControlLaw(); } @@ -400,12 +410,14 @@ int main(int argc, char **argv) if (traj_vip) { delete[] traj_vip; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; std::cout << "Stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -418,9 +430,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense-2.x" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif #if !defined(VISP_HAVE_AFMA6) std::cout << "ViSP is not build with Afma-6 robot support..." << std::endl; #endif diff --git a/example/servo-bebop2/servoBebop2.cpp b/example/servo-bebop2/servoBebop2.cpp index f95f8d053e..98bd516911 100644 --- a/example/servo-bebop2/servoBebop2.cpp +++ b/example/servo-bebop2/servoBebop2.cpp @@ -73,13 +73,7 @@ int main() std::cout << "\nThis example requires ffmpeg library. You should install it.\n" << std::endl; return EXIT_SUCCESS; } -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) -int main() -{ - std::cout << "\nThis example requires cxx11 standard or higher. Turn it on using cmake -DUSE_CXX_STANDARD=11.\n" - << std::endl; - return EXIT_SUCCESS; -} + #else bool compareImagePoint(std::pair p1, std::pair p2) @@ -125,7 +119,8 @@ int main(int argc, char **argv) if (std::string(argv[i]) == "--ip" && i + 1 < argc) { ip_address = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--distance_to_tag" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--distance_to_tag" && i + 1 < argc) { opt_distance_to_tag = std::atof(argv[i + 1]); if (opt_distance_to_tag <= 0) { std::cout << "Error : invalid distance to tag." 
<< std::endl << "See " << argv[0] << " --help" << std::endl; @@ -133,65 +128,71 @@ int main(int argc, char **argv) } opt_has_distance_to_tag = true; i++; - } else if (std::string(argv[i]) == "--intrinsic") { + } + else if (std::string(argv[i]) == "--intrinsic") { opt_cam_parameters = std::string(argv[i + 1]); opt_has_cam_parameters = true; i++; - } else if (std::string(argv[i]) == "--hd_stream") { + } + else if (std::string(argv[i]) == "--hd_stream") { stream_res = 1; - } else if (std::string(argv[i]) == "--verbose" || std::string(argv[i]) == "-v") { + } + else if (std::string(argv[i]) == "--verbose" || std::string(argv[i]) == "-v") { verbose = true; - } else { + } + else { std::cout << "Error : unknown parameter " << argv[i] << std::endl - << "See " << argv[0] << " --help" << std::endl; + << "See " << argv[0] << " --help" << std::endl; return EXIT_FAILURE; } } - } else if (argc >= 2 && (std::string(argv[1]) == "--help" || std::string(argv[1]) == "-h")) { + } + else if (argc >= 2 && (std::string(argv[1]) == "--help" || std::string(argv[1]) == "-h")) { std::cout << "\nUsage:\n" - << " " << argv[0] - << " [--tag_size ] [--ip ] [--distance_to_tag ] [--intrinsic ] " - << "[--hd_stream] [--verbose] [-v] [--help] [-h]\n" - << std::endl - << "Description:\n" - << " --tag_size \n" - << " The size of the tag to detect in meters, required.\n\n" - << " --ip \n" - << " Ip address of the drone to which you want to connect (default : 192.168.42.1).\n\n" - << " --distance_to_tag \n" - << " The desired distance to the tag in meters (default: 1 meter).\n\n" - << " --intrinsic \n" - << " XML file containing computed intrinsic camera parameters (default: empty).\n\n" - << " --hd_stream\n" - << " Enables HD 720p streaming instead of default 480p.\n" - << " Allows to increase range and accuracy of the tag detection,\n" - << " but increases latency and computation time.\n" - << " Caution: camera calibration settings are different for the two resolutions.\n" - << " Make sure that if you pass custom intrinsic camera parameters,\n" - << " they were obtained with the correct resolution.\n\n" - << " --verbose, -v\n" - << " Enables verbose (drone information messages and velocity commands\n" - << " are then displayed).\n\n" - << " --help, -h\n" - << " Print help message.\n" - << std::endl; + << " " << argv[0] + << " [--tag_size ] [--ip ] [--distance_to_tag ] [--intrinsic ] " + << "[--hd_stream] [--verbose] [-v] [--help] [-h]\n" + << std::endl + << "Description:\n" + << " --tag_size \n" + << " The size of the tag to detect in meters, required.\n\n" + << " --ip \n" + << " Ip address of the drone to which you want to connect (default : 192.168.42.1).\n\n" + << " --distance_to_tag \n" + << " The desired distance to the tag in meters (default: 1 meter).\n\n" + << " --intrinsic \n" + << " XML file containing computed intrinsic camera parameters (default: empty).\n\n" + << " --hd_stream\n" + << " Enables HD 720p streaming instead of default 480p.\n" + << " Allows to increase range and accuracy of the tag detection,\n" + << " but increases latency and computation time.\n" + << " Caution: camera calibration settings are different for the two resolutions.\n" + << " Make sure that if you pass custom intrinsic camera parameters,\n" + << " they were obtained with the correct resolution.\n\n" + << " --verbose, -v\n" + << " Enables verbose (drone information messages and velocity commands\n" + << " are then displayed).\n\n" + << " --help, -h\n" + << " Print help message.\n" + << std::endl; return EXIT_SUCCESS; - } else { + } + else 
{ std::cout << "Error : tag size parameter required." << std::endl << "See " << argv[0] << " --help" << std::endl; return EXIT_FAILURE; } std::cout - << "\nWARNING: \n - This program does no sensing or avoiding of " - "obstacles, \n" - "the drone WILL collide with any objects in the way! Make sure " - "the \n" - "drone has approximately 3 meters of free space on all sides.\n" - " - The drone uses a downward-facing camera for horizontal speed estimation,\n make sure the drone flies " - "above a non-uniform flooring,\n or its movement will be inacurate and dangerous !\n" + << "\nWARNING: \n - This program does no sensing or avoiding of " + "obstacles, \n" + "the drone WILL collide with any objects in the way! Make sure " + "the \n" + "drone has approximately 3 meters of free space on all sides.\n" + " - The drone uses a downward-facing camera for horizontal speed estimation,\n make sure the drone flies " + "above a non-uniform flooring,\n or its movement will be inacurate and dangerous !\n" - << std::endl; + << std::endl; vpRobotBebop2 drone( verbose, true, ip_address); // Create the drone with desired verbose level, settings reset, and corresponding IP @@ -254,15 +255,18 @@ int main(int argc, char **argv) std::cout << "Cannot find parameters in XML file " << opt_cam_parameters << std::endl; if (drone.getVideoHeight() == 720) { // 720p streaming cam.initPersProjWithoutDistortion(785.6412585, 785.3322447, 637.9049857, 359.7524531); - } else { // 480p streaming + } + else { // 480p streaming cam.initPersProjWithoutDistortion(531.9213063, 520.8495788, 429.133986, 240.9464457); } } - } else { + } + else { std::cout << "Setting default camera parameters ... " << std::endl; if (drone.getVideoHeight() == 720) { // 720p streaming cam.initPersProjWithoutDistortion(785.6412585, 785.3322447, 637.9049857, 359.7524531); - } else { // 480p streaming + } + else { // 480p streaming cam.initPersProjWithoutDistortion(531.9213063, 520.8495788, 429.133986, 240.9464457); } } @@ -291,7 +295,7 @@ int main(int argc, char **argv) vpRotationMatrix c1Rc2(c1_rxyz_c2); // Rotation between camera 1 and 2 vpHomogeneousMatrix c1Mc2(vpTranslationVector(), c1Rc2); // Homogeneous matrix between c1 and c2 - vpRotationMatrix c1Re{0, 1, 0, 0, 0, 1, 1, 0, 0}; // Rotation between camera 1 and E + vpRotationMatrix c1Re { 0, 1, 0, 0, 0, 1, 1, 0, 0 }; // Rotation between camera 1 and E vpTranslationVector c1te(0, 0, -0.09); // Translation between camera 1 and E vpHomogeneousMatrix c1Me(c1te, c1Re); // Homogeneous matrix between c1 and E @@ -311,8 +315,8 @@ int main(int argc, char **argv) double Z_d = (opt_has_distance_to_tag ? opt_distance_to_tag : 1.); // Define the desired polygon corresponding the the AprilTag CLOCKWISE - double X[4] = {tagSize / 2., tagSize / 2., -tagSize / 2., -tagSize / 2.}; - double Y[4] = {tagSize / 2., -tagSize / 2., -tagSize / 2., tagSize / 2.}; + double X[4] = { tagSize / 2., tagSize / 2., -tagSize / 2., -tagSize / 2. }; + double Y[4] = { tagSize / 2., -tagSize / 2., -tagSize / 2., tagSize / 2. 
}; std::vector vec_P, vec_P_d; for (int i = 0; i < 4; i++) { @@ -499,7 +503,8 @@ int main(int argc, char **argv) vpDisplay::displayLine(I, vec_ip[vec_ip_sorted[2].first], vec_ip[vec_ip_sorted[3].first], vpColor::red, 1, false); - } else { + } + else { std::stringstream sserr; sserr << "Failed to detect an Apriltag, or detected multiple ones"; vpDisplay::displayText(I, 120, 20, sserr.str(), vpColor::red); @@ -528,11 +533,13 @@ int main(int argc, char **argv) return EXIT_SUCCESS; - } else { + } + else { std::cout << "ERROR : failed to setup drone control." << std::endl; return EXIT_FAILURE; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Caught an exception: " << e << std::endl; return EXIT_FAILURE; } diff --git a/example/servo-franka/servoFrankaIBVS.cpp b/example/servo-franka/servoFrankaIBVS.cpp index 6e78536aaa..d2cee5acd0 100644 --- a/example/servo-franka/servoFrankaIBVS.cpp +++ b/example/servo-franka/servoFrankaIBVS.cpp @@ -74,8 +74,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_FRANKA) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_FRANKA) void display_point_trajectory(const vpImage &I, const std::vector &vip, std::vector *traj_vip) @@ -86,7 +85,8 @@ void display_point_trajectory(const vpImage &I, const std::vector if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) { traj_vip[i].push_back(vip[i]); } - } else { + } + else { traj_vip[i].push_back(vip[i]); } } @@ -113,29 +113,38 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) { opt_tagSize = std::stod(argv[i + 1]); - } else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { opt_robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--eMc" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--eMc" && i + 1 < argc) { opt_eMc_filename = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--verbose") { + } + else if (std::string(argv[i]) == "--verbose") { opt_verbose = true; - } else if (std::string(argv[i]) == "--plot") { + } + else if (std::string(argv[i]) == "--plot") { opt_plot = true; - } else if (std::string(argv[i]) == "--adaptive_gain") { + } + else if (std::string(argv[i]) == "--adaptive_gain") { opt_adaptive_gain = true; - } else if (std::string(argv[i]) == "--task_sequencing") { + } + else if (std::string(argv[i]) == "--task_sequencing") { opt_task_sequencing = true; - } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { opt_quad_decimate = std::stoi(argv[i + 1]); - } else if (std::string(argv[i]) == "--no-convergence-threshold") { + } + else if (std::string(argv[i]) == "--no-convergence-threshold") { convergence_threshold = 0.; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout - << argv[0] << " [--ip ] [--tag_size ] [--eMc ] " - << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" - << "\n"; + << argv[0] << " [--ip ] [--tag_size ] [--eMc ] " + << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] 
[--no-convergence-threshold] [--verbose] [--help] [-h]" + << "\n"; return EXIT_SUCCESS; } } @@ -166,16 +175,17 @@ int main(int argc, char **argv) // If provided, read camera extrinsics from --eMc if (!opt_eMc_filename.empty()) { ePc.loadYAML(opt_eMc_filename, ePc); - } else { + } + else { std::cout << "Warning, opt_eMc_filename is empty! Use hard coded values." - << "\n"; + << "\n"; } vpHomogeneousMatrix eMc(ePc); std::cout << "eMc:\n" << eMc << "\n"; // Get camera intrinsics vpCameraParameters cam = - rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); + rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); std::cout << "cam:\n" << cam << "\n"; vpImage I(height, width); @@ -199,7 +209,7 @@ int main(int argc, char **argv) // Desired pose used to compute the desired features vpHomogeneousMatrix cdMo(vpTranslationVector(0, 0, opt_tagSize * 3), // 3 times tag with along camera z axis - vpRotationMatrix({1, 0, 0, 0, -1, 0, 0, 0, -1})); + vpRotationMatrix({ 1, 0, 0, 0, -1, 0, 0, 0, -1 })); // Create visual features std::vector p(4), pd(4); // We use 4 points @@ -222,7 +232,8 @@ int main(int argc, char **argv) if (opt_adaptive_gain) { vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 task.setLambda(lambda); - } else { + } + else { task.setLambda(0.5); } @@ -295,7 +306,8 @@ int main(int argc, char **argv) } if (std::fabs(v_cdMc[0].getThetaUVector().getTheta()) < std::fabs(v_cdMc[1].getThetaUVector().getTheta())) { oMo = v_oMo[0]; - } else { + } + else { std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl; oMo = v_oMo[1]; // Introduce PI rotation } @@ -334,7 +346,8 @@ int main(int argc, char **argv) t_init_servo = vpTime::measureTimeMs(); } v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.); - } else { + } + else { v_c = task.computeControlLaw(); } @@ -377,7 +390,7 @@ int main(int argc, char **argv) if (error < convergence_threshold) { has_converged = true; std::cout << "Servo task has converged" - << "\n"; + << "\n"; vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red); } if (first_time) { @@ -445,18 +458,21 @@ int main(int argc, char **argv) if (traj_corners) { delete[] traj_corners; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; std::cout << "Stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; - } catch (const franka::NetworkException &e) { + } + catch (const franka::NetworkException &e) { std::cout << "Franka network exception: " << e.what() << std::endl; std::cout << "Check if you are connected to the Franka robot" - << " or if you specified the right IP using --ip command line option set by default to 192.168.1.1. " - << std::endl; + << " or if you specified the right IP using --ip command line option set by default to 192.168.1.1. " + << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "Franka exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -469,9 +485,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense-2.x" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." 
<< std::endl; -#endif #if !defined(VISP_HAVE_FRANKA) std::cout << "Install libfranka." << std::endl; #endif diff --git a/example/servo-franka/servoFrankaPBVS.cpp b/example/servo-franka/servoFrankaPBVS.cpp index 3925f45c82..df6caa27a0 100644 --- a/example/servo-franka/servoFrankaPBVS.cpp +++ b/example/servo-franka/servoFrankaPBVS.cpp @@ -71,8 +71,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_FRANKA) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_FRANKA) void display_point_trajectory(const vpImage &I, const std::vector &vip, std::vector *traj_vip) @@ -83,7 +82,8 @@ void display_point_trajectory(const vpImage &I, const std::vector if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) { traj_vip[i].push_back(vip[i]); } - } else { + } + else { traj_vip[i].push_back(vip[i]); } } @@ -111,30 +111,39 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) { opt_tagSize = std::stod(argv[i + 1]); - } else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { opt_robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--eMc" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--eMc" && i + 1 < argc) { opt_eMc_filename = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--verbose") { + } + else if (std::string(argv[i]) == "--verbose") { opt_verbose = true; - } else if (std::string(argv[i]) == "--plot") { + } + else if (std::string(argv[i]) == "--plot") { opt_plot = true; - } else if (std::string(argv[i]) == "--adaptive_gain") { + } + else if (std::string(argv[i]) == "--adaptive_gain") { opt_adaptive_gain = true; - } else if (std::string(argv[i]) == "--task_sequencing") { + } + else if (std::string(argv[i]) == "--task_sequencing") { opt_task_sequencing = true; - } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { opt_quad_decimate = std::stoi(argv[i + 1]); - } else if (std::string(argv[i]) == "--no-convergence-threshold") { + } + else if (std::string(argv[i]) == "--no-convergence-threshold") { convergence_threshold_t = 0.; convergence_threshold_tu = 0.; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout - << argv[0] << " [--ip ] [--tag_size ] [--eMc ] " - << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" - << "\n"; + << argv[0] << " [--ip ] [--tag_size ] [--eMc ] " + << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" + << "\n"; return EXIT_SUCCESS; } } @@ -165,16 +174,17 @@ int main(int argc, char **argv) // If provided, read camera extrinsics from --eMc if (!opt_eMc_filename.empty()) { ePc.loadYAML(opt_eMc_filename, ePc); - } else { + } + else { std::cout << "Warning, opt_eMc_filename is empty! Use hard coded values." 
- << "\n"; + << "\n"; } vpHomogeneousMatrix eMc(ePc); std::cout << "eMc:\n" << eMc << "\n"; // Get camera intrinsics vpCameraParameters cam = - rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); + rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); std::cout << "cam:\n" << cam << "\n"; vpImage I(height, width); @@ -198,7 +208,7 @@ int main(int argc, char **argv) // Desired pose to reach vpHomogeneousMatrix cdMo(vpTranslationVector(0, 0, opt_tagSize * 3), // 3 times tag with along camera z axis - vpRotationMatrix({1, 0, 0, 0, -1, 0, 0, 0, -1})); + vpRotationMatrix({ 1, 0, 0, 0, -1, 0, 0, 0, -1 })); cdMc = cdMo * cMo.inverse(); vpFeatureTranslation t(vpFeatureTranslation::cdMc); @@ -218,7 +228,8 @@ int main(int argc, char **argv) if (opt_adaptive_gain) { vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 task.setLambda(lambda); - } else { + } + else { task.setLambda(0.5); } @@ -287,7 +298,8 @@ int main(int argc, char **argv) } if (std::fabs(v_cdMc[0].getThetaUVector().getTheta()) < std::fabs(v_cdMc[1].getThetaUVector().getTheta())) { oMo = v_oMo[0]; - } else { + } + else { std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl; oMo = v_oMo[1]; // Introduce PI rotation } @@ -306,7 +318,8 @@ int main(int argc, char **argv) t_init_servo = vpTime::measureTimeMs(); } v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.); - } else { + } + else { v_c = task.computeControlLaw(); } @@ -419,18 +432,21 @@ int main(int argc, char **argv) if (traj_vip) { delete[] traj_vip; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; std::cout << "Stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; - } catch (const franka::NetworkException &e) { + } + catch (const franka::NetworkException &e) { std::cout << "Franka network exception: " << e.what() << std::endl; std::cout << "Check if you are connected to the Franka robot" - << " or if you specified the right IP using --ip command line option set by default to 192.168.1.1. " - << std::endl; + << " or if you specified the right IP using --ip command line option set by default to 192.168.1.1. " + << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "Franka exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -443,9 +459,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense-2.x" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif #if !defined(VISP_HAVE_FRANKA) std::cout << "Install libfranka." 
<< std::endl; #endif diff --git a/example/servo-ptu46/servoPtu46Point2DArtVelocity.cpp b/example/servo-ptu46/servoPtu46Point2DArtVelocity.cpp index 7b5011991f..8fa7735c17 100644 --- a/example/servo-ptu46/servoPtu46Point2DArtVelocity.cpp +++ b/example/servo-ptu46/servoPtu46Point2DArtVelocity.cpp @@ -59,7 +59,7 @@ #endif #include -#if (defined(VISP_HAVE_PTU46) & defined(VISP_HAVE_DC1394) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)) +#if defined(VISP_HAVE_PTU46) && defined(VISP_HAVE_DC1394) #include diff --git a/example/servo-universal-robots/UniversalRobotsMoveToPosition.cpp b/example/servo-universal-robots/UniversalRobotsMoveToPosition.cpp index f6038c801f..4cb3808e1f 100644 --- a/example/servo-universal-robots/UniversalRobotsMoveToPosition.cpp +++ b/example/servo-universal-robots/UniversalRobotsMoveToPosition.cpp @@ -55,12 +55,14 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--ip" && i + 1 < argc) { opt_robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--read" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--read" && i + 1 < argc) { opt_position_filename = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << "Move UR robot to a position specified from a file." << std::endl; std::cout << argv[0] << " [--ip ] [--read ] [--help] [-h]\n" - << std::endl; + << std::endl; std::cout << "Example:\n" << argv[0] << " --ip 192.168.0.100 --read position.pos\n" << std::endl; return EXIT_SUCCESS; @@ -74,7 +76,7 @@ int main(int argc, char **argv) } if (!vpIoTools::checkFilename(opt_position_filename)) { std::cout << "\nError: position filename \"" << opt_position_filename << "\" given as input doesn't exist. " - << std::endl; + << std::endl; std::cout << "Call \"" << argv[0] << " --help\" to get usage" << std::endl; return EXIT_FAILURE; } @@ -85,12 +87,14 @@ int main(int argc, char **argv) robot.connect(opt_robot_ip); robot.move(opt_position_filename); - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; std::cout << "Stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -102,10 +106,7 @@ int main() { #if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." - << std::endl; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." 
<< std::endl; + << std::endl; #endif return EXIT_SUCCESS; } diff --git a/example/servo-universal-robots/UniversalRobotsSavePosition.cpp b/example/servo-universal-robots/UniversalRobotsSavePosition.cpp index e8a7413d16..851e82f532 100644 --- a/example/servo-universal-robots/UniversalRobotsSavePosition.cpp +++ b/example/servo-universal-robots/UniversalRobotsSavePosition.cpp @@ -54,13 +54,15 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--ip" && i + 1 < argc) { opt_robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--save" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--save" && i + 1 < argc) { opt_position_filename = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << "Save UR robot position in a file." << std::endl; std::cout << "Usage:\n" << std::endl; std::cout << argv[0] << " [--ip ] [--save ] [--help] [-h]\n" - << std::endl; + << std::endl; std::cout << "Example:\n" << argv[0] << " --ip 192.168.0.100 --save position.pos\n" << std::endl; return EXIT_SUCCESS; @@ -77,12 +79,14 @@ int main(int argc, char **argv) robot.savePosFile(opt_position_filename, q); std::cout << "Robot position saved in \"" << opt_position_filename << "\"" << std::endl; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; std::cout << "Stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -94,10 +98,7 @@ int main() { #if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." - << std::endl; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; + << std::endl; #endif return EXIT_SUCCESS; } diff --git a/example/servo-universal-robots/servoUniversalRobotsIBVS.cpp b/example/servo-universal-robots/servoUniversalRobotsIBVS.cpp index 65cfca7f2a..6d049fb8db 100644 --- a/example/servo-universal-robots/servoUniversalRobotsIBVS.cpp +++ b/example/servo-universal-robots/servoUniversalRobotsIBVS.cpp @@ -71,8 +71,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_UR_RTDE) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_UR_RTDE) void display_point_trajectory(const vpImage &I, const std::vector &vip, std::vector *traj_vip) @@ -83,7 +82,8 @@ void display_point_trajectory(const vpImage &I, const std::vector if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) 
{ traj_vip[i].push_back(vip[i]); } - } else { + } + else { traj_vip[i].push_back(vip[i]); } } @@ -110,29 +110,38 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) { opt_tagSize = std::stod(argv[i + 1]); - } else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { opt_robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--eMc" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--eMc" && i + 1 < argc) { opt_eMc_filename = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--verbose") { + } + else if (std::string(argv[i]) == "--verbose") { opt_verbose = true; - } else if (std::string(argv[i]) == "--plot") { + } + else if (std::string(argv[i]) == "--plot") { opt_plot = true; - } else if (std::string(argv[i]) == "--adaptive_gain") { + } + else if (std::string(argv[i]) == "--adaptive_gain") { opt_adaptive_gain = true; - } else if (std::string(argv[i]) == "--task_sequencing") { + } + else if (std::string(argv[i]) == "--task_sequencing") { opt_task_sequencing = true; - } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { opt_quad_decimate = std::stoi(argv[i + 1]); - } else if (std::string(argv[i]) == "--no-convergence-threshold") { + } + else if (std::string(argv[i]) == "--no-convergence-threshold") { convergence_threshold = 0.; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout - << argv[0] << " [--ip ] [--tag_size ] [--eMc ] " - << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" - << "\n"; + << argv[0] << " [--ip ] [--tag_size ] [--eMc ] " + << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" + << "\n"; return EXIT_SUCCESS; } } @@ -143,8 +152,8 @@ int main(int argc, char **argv) robot.connect(opt_robot_ip); std::cout << "WARNING: This example will move the robot! " - << "Please make sure to have the user stop button at hand!" << std::endl - << "Press Enter to continue..." << std::endl; + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." << std::endl; std::cin.ignore(); /* @@ -182,16 +191,17 @@ int main(int argc, char **argv) // If provided, read camera extrinsics from --eMc if (!opt_eMc_filename.empty()) { ePc.loadYAML(opt_eMc_filename, ePc); - } else { + } + else { std::cout << "Warning, opt_eMc_filename is empty! Use hard coded values." 
- << "\n"; + << "\n"; } vpHomogeneousMatrix eMc(ePc); std::cout << "eMc:\n" << eMc << "\n"; // Get camera intrinsics vpCameraParameters cam = - rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); + rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); std::cout << "cam:\n" << cam << "\n"; vpImage I(height, width); @@ -215,7 +225,7 @@ int main(int argc, char **argv) // Desired pose used to compute the desired features vpHomogeneousMatrix cdMo(vpTranslationVector(0, 0, opt_tagSize * 3), // 3 times tag with along camera z axis - vpRotationMatrix({1, 0, 0, 0, -1, 0, 0, 0, -1})); + vpRotationMatrix({ 1, 0, 0, 0, -1, 0, 0, 0, -1 })); // Create visual features std::vector p(4), pd(4); // We use 4 points @@ -238,7 +248,8 @@ int main(int argc, char **argv) if (opt_adaptive_gain) { vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 task.setLambda(lambda); - } else { + } + else { task.setLambda(0.5); } @@ -311,7 +322,8 @@ int main(int argc, char **argv) } if (std::fabs(v_cdMc[0].getThetaUVector().getTheta()) < std::fabs(v_cdMc[1].getThetaUVector().getTheta())) { oMo = v_oMo[0]; - } else { + } + else { std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl; oMo = v_oMo[1]; // Introduce PI rotation } @@ -350,7 +362,8 @@ int main(int argc, char **argv) t_init_servo = vpTime::measureTimeMs(); } v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.); - } else { + } + else { v_c = task.computeControlLaw(); } @@ -393,7 +406,7 @@ int main(int argc, char **argv) if (error < convergence_threshold) { has_converged = true; std::cout << "Servo task has converged" - << "\n"; + << "\n"; vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red); } if (first_time) { @@ -461,12 +474,14 @@ int main(int argc, char **argv) if (traj_corners) { delete[] traj_corners; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; std::cout << "Stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -479,12 +494,9 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense-2.x" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif #if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." 
- << std::endl; + << std::endl; #endif return EXIT_SUCCESS; } diff --git a/example/servo-universal-robots/servoUniversalRobotsPBVS.cpp b/example/servo-universal-robots/servoUniversalRobotsPBVS.cpp index db3a9aef03..af7861ef76 100644 --- a/example/servo-universal-robots/servoUniversalRobotsPBVS.cpp +++ b/example/servo-universal-robots/servoUniversalRobotsPBVS.cpp @@ -68,8 +68,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_UR_RTDE) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_UR_RTDE) void display_point_trajectory(const vpImage &I, const std::vector &vip, std::vector *traj_vip) @@ -80,7 +79,8 @@ void display_point_trajectory(const vpImage &I, const std::vector if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) { traj_vip[i].push_back(vip[i]); } - } else { + } + else { traj_vip[i].push_back(vip[i]); } } @@ -108,30 +108,39 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) { opt_tagSize = std::stod(argv[i + 1]); - } else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { opt_robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--eMc" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--eMc" && i + 1 < argc) { opt_eMc_filename = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--verbose") { + } + else if (std::string(argv[i]) == "--verbose") { opt_verbose = true; - } else if (std::string(argv[i]) == "--plot") { + } + else if (std::string(argv[i]) == "--plot") { opt_plot = true; - } else if (std::string(argv[i]) == "--adaptive_gain") { + } + else if (std::string(argv[i]) == "--adaptive_gain") { opt_adaptive_gain = true; - } else if (std::string(argv[i]) == "--task_sequencing") { + } + else if (std::string(argv[i]) == "--task_sequencing") { opt_task_sequencing = true; - } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { opt_quad_decimate = std::stoi(argv[i + 1]); - } else if (std::string(argv[i]) == "--no-convergence-threshold") { + } + else if (std::string(argv[i]) == "--no-convergence-threshold") { convergence_threshold_t = 0.; convergence_threshold_tu = 0.; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout - << argv[0] << " [--ip ] [--tag_size ] [--eMc ] " - << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" - << "\n"; + << argv[0] << " [--ip ] [--tag_size ] [--eMc ] " + << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" + << "\n"; return EXIT_SUCCESS; } } @@ -142,8 +151,8 @@ int main(int argc, char **argv) robot.connect(opt_robot_ip); std::cout << "WARNING: This example will move the robot! " - << "Please make sure to have the user stop button at hand!" << std::endl - << "Press Enter to continue..." << std::endl; + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." 
<< std::endl; std::cin.ignore(); /* @@ -181,16 +190,17 @@ int main(int argc, char **argv) // If provided, read camera extrinsics from --eMc if (!opt_eMc_filename.empty()) { ePc.loadYAML(opt_eMc_filename, ePc); - } else { + } + else { std::cout << "Warning, opt_eMc_filename is empty! Use hard coded values." - << "\n"; + << "\n"; } vpHomogeneousMatrix eMc(ePc); std::cout << "eMc:\n" << eMc << "\n"; // Get camera intrinsics vpCameraParameters cam = - rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); + rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); std::cout << "cam:\n" << cam << "\n"; vpImage I(height, width); @@ -214,7 +224,7 @@ int main(int argc, char **argv) // Desired pose to reach vpHomogeneousMatrix cdMo(vpTranslationVector(0, 0, opt_tagSize * 3), // 3 times tag with along camera z axis - vpRotationMatrix({1, 0, 0, 0, -1, 0, 0, 0, -1})); + vpRotationMatrix({ 1, 0, 0, 0, -1, 0, 0, 0, -1 })); cdMc = cdMo * cMo.inverse(); vpFeatureTranslation t(vpFeatureTranslation::cdMc); @@ -234,7 +244,8 @@ int main(int argc, char **argv) if (opt_adaptive_gain) { vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 task.setLambda(lambda); - } else { + } + else { task.setLambda(0.5); } @@ -303,7 +314,8 @@ int main(int argc, char **argv) } if (std::fabs(v_cdMc[0].getThetaUVector().getTheta()) < std::fabs(v_cdMc[1].getThetaUVector().getTheta())) { oMo = v_oMo[0]; - } else { + } + else { std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl; oMo = v_oMo[1]; // Introduce PI rotation } @@ -322,7 +334,8 @@ int main(int argc, char **argv) t_init_servo = vpTime::measureTimeMs(); } v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.); - } else { + } + else { v_c = task.computeControlLaw(); } @@ -435,12 +448,14 @@ int main(int argc, char **argv) if (traj_vip) { delete[] traj_vip; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; std::cout << "Stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -453,12 +468,9 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense-2.x" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif #if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." 
- << std::endl; + << std::endl; #endif return EXIT_SUCCESS; } diff --git a/modules/core/include/visp3/core/vpArray2D.h b/modules/core/include/visp3/core/vpArray2D.h index b3248c656f..595ade35ca 100644 --- a/modules/core/include/visp3/core/vpArray2D.h +++ b/modules/core/include/visp3/core/vpArray2D.h @@ -97,20 +97,16 @@ int main() int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpArray2D a{ {-1, -2, -3}, {4, 5.5, 6.0f} }; std::cout << "a:\n" << a << std::endl; -#endif } \endcode The array could also be initialized using operator=(const std::initializer_list< std::initializer_list< Type > > &) \code int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpArray2D a; a = { {-1, -2, -3}, {4, 5.5, 6.0f} }; -#endif } \endcode @@ -120,10 +116,8 @@ int main() int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpArray2D a{ -1, -2, -3, 4, 5.5, 6.0f }; a.reshape(2, 3); -#endif } \endcode */ @@ -154,12 +148,7 @@ template class vpArray2D Copy constructor of a 2D array. */ vpArray2D(const vpArray2D &A) - : -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - vpArray2D() -#else - rowNum(0), colNum(0), rowPtrs(NULL), dsize(0), data(NULL) -#endif + : vpArray2D() { resize(A.rowNum, A.colNum, false, false); memcpy(data, A.data, (size_t)rowNum * (size_t)colNum * sizeof(Type)); @@ -172,12 +161,7 @@ template class vpArray2D \param c : Array number of columns. */ vpArray2D(unsigned int r, unsigned int c) - : -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - vpArray2D() -#else - rowNum(0), colNum(0), rowPtrs(NULL), dsize(0), data(NULL) -#endif + : vpArray2D() { resize(r, c); } @@ -190,18 +174,12 @@ template class vpArray2D \param val : Each element of the array is set to \e val. */ vpArray2D(unsigned int r, unsigned int c, Type val) - : -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - vpArray2D() -#else - rowNum(0), colNum(0), rowPtrs(NULL), dsize(0), data(NULL) -#endif + : vpArray2D() { resize(r, c, false, false); *this = val; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpArray2D(vpArray2D &&A) noexcept { rowNum = A.rowNum; @@ -251,7 +229,6 @@ template class vpArray2D std::copy(it->begin(), it->end(), rowPtrs[i]); } } -#endif /*! Destructor that deallocate memory. @@ -460,7 +437,6 @@ template class vpArray2D return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpArray2D &operator=(vpArray2D &&other) noexcept { if (this != &other) { @@ -513,7 +489,6 @@ template class vpArray2D #ifdef VISP_HAVE_NLOHMANN_JSON vpArray2D &operator=(const nlohmann::json &j) = delete; -#endif #endif //! Set element \f$A_{ij} = x\f$ using A[i][j] = x diff --git a/modules/core/include/visp3/core/vpCameraParameters.h b/modules/core/include/visp3/core/vpCameraParameters.h index ada6c7074d..1a9f2b283d 100644 --- a/modules/core/include/visp3/core/vpCameraParameters.h +++ b/modules/core/include/visp3/core/vpCameraParameters.h @@ -159,7 +159,7 @@ to estimate the parameters corresponding to the model implemented in this class. - \note Note also that \ref tutorial-bridge-opencv gives the correspondance + \note Note also that \ref tutorial-bridge-opencv gives the correspondence between ViSP and OpenCV camera modelization. 
\note The conversion from pixel coordinates \f$(u,v)\f$ in the normalized diff --git a/modules/core/include/visp3/core/vpColVector.h b/modules/core/include/visp3/core/vpColVector.h index 0236d46bc1..6f0bc5ef96 100644 --- a/modules/core/include/visp3/core/vpColVector.h +++ b/modules/core/include/visp3/core/vpColVector.h @@ -104,20 +104,16 @@ class vpPoseVector; * * int main() * { - * #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) * vpColVector v({-1, -2.1, -3}); * std::cout << "v:\n" << v << std::endl; - * #endif * } * \endcode * The vector could also be initialized using operator=(const std::initializer_list< double > &) * \code * int main() * { - * #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) * vpColVector v; * v = {-1, -2.1, -3}; - * #endif * } * \endcode * @@ -244,7 +240,7 @@ class VISP_EXPORT vpColVector : public vpArray2D * Constructor that creates a column vector from a std vector of float. */ vpColVector(const std::vector &v); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) + /*! * Move constructor that take rvalue. */ @@ -253,7 +249,6 @@ class VISP_EXPORT vpColVector : public vpArray2D { std::copy(list.begin(), list.end(), data); } -#endif /*! * Removes all elements from the vector (which are destroyed), @@ -632,7 +627,6 @@ class VISP_EXPORT vpColVector : public vpArray2D */ vpColVector &operator=(double x); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! * Overloaded move assignment operator taking rvalue. */ @@ -662,7 +656,6 @@ class VISP_EXPORT vpColVector : public vpArray2D * \sa operator<<() */ vpColVector &operator=(const std::initializer_list &list); -#endif /*! * Compare two column vectors. diff --git a/modules/core/include/visp3/core/vpColormap.h b/modules/core/include/visp3/core/vpColormap.h index e30ddd189c..2609e6f3a0 100644 --- a/modules/core/include/visp3/core/vpColormap.h +++ b/modules/core/include/visp3/core/vpColormap.h @@ -36,25 +36,24 @@ #define _vpColormap_h_ /*! - \file vpColormap.h - - \brief Colormap tool to have a mapping between 256 values and RGB values. -*/ + * \file vpColormap.h + * + * \brief Colormap tool to have a mapping between 256 values and RGB values. + */ #include #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! - \class vpColormap - - \ingroup group_core_image - - \brief Creates a colormap class to be able to recolor an image with different grayscale - values into some corresponding color values, for better visualization for example. -*/ + * \class vpColormap + * + * \ingroup group_core_image + * + * \brief Creates a colormap class to be able to recolor an image with different grayscale + * values into some corresponding color values, for better visualization for example. 
+ */ class VISP_EXPORT vpColormap { public: @@ -98,4 +97,3 @@ class VISP_EXPORT vpColormap }; #endif -#endif diff --git a/modules/core/include/visp3/core/vpHomogeneousMatrix.h b/modules/core/include/visp3/core/vpHomogeneousMatrix.h index 6e4291f992..4c7f498ad1 100644 --- a/modules/core/include/visp3/core/vpHomogeneousMatrix.h +++ b/modules/core/include/visp3/core/vpHomogeneousMatrix.h @@ -137,7 +137,6 @@ class vpPoint; int main() { - #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { vpHomogeneousMatrix M( vpTranslationVector(0.1, 0.2, 0.3), vpRotationMatrix( {0, 0, -1, 0, -1, 0, -1, 0, 0} ) ); std::cout << "M:\n" << M << std::endl; @@ -148,7 +147,6 @@ class vpPoint; -1, 0, 0, 0.3 }; std::cout << "M:\n" << M << std::endl; } - #endif } \endcode @@ -209,9 +207,7 @@ class VISP_EXPORT vpHomogeneousMatrix : public vpArray2D explicit vpHomogeneousMatrix(const std::vector &v); explicit vpHomogeneousMatrix(const std::vector &v); vpHomogeneousMatrix(double tx, double ty, double tz, double tux, double tuy, double tuz); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpHomogeneousMatrix(const std::initializer_list &list); -#endif void buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R); void buildFrom(const vpTranslationVector &t, const vpThetaUVector &tu); @@ -352,10 +348,10 @@ class VISP_EXPORT vpHomogeneousMatrix : public vpArray2D void print() const; /*! - This function is not applicable to an homogeneous matrix that is always a - 4-by-4 matrix. - \exception vpException::fatalError When this function is called. - */ + * This function is not applicable to an homogeneous matrix that is always a + * 4-by-4 matrix. + * \exception vpException::fatalError When this function is called. + */ void resize(unsigned int nrows, unsigned int ncols, bool flagNullify = true) { (void)nrows; @@ -383,16 +379,16 @@ class VISP_EXPORT vpHomogeneousMatrix : public vpArray2D #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) /*! - @name Deprecated functions - */ + * @name Deprecated functions + */ //@{ /*! - \deprecated Provided only for compat with previous releases. - This function does nothing. + * \deprecated Provided only for compat with previous releases. + * This function does nothing. */ vp_deprecated void init() { } /*! - \deprecated You should rather use eye(). + * \deprecated You should rather use eye(). */ vp_deprecated void setIdentity(); //@} diff --git a/modules/core/include/visp3/core/vpImage.h b/modules/core/include/visp3/core/vpImage.h index 0f56a92b36..76dc22b44c 100644 --- a/modules/core/include/visp3/core/vpImage.h +++ b/modules/core/include/visp3/core/vpImage.h @@ -143,10 +143,8 @@ template class vpImage vpImage(); //! copy constructor vpImage(const vpImage &); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) //! move constructor vpImage(vpImage &&); -#endif //! constructor set the size of the image vpImage(unsigned int height, unsigned int width); //! constructor set the size of the image and init all the pixel @@ -861,7 +859,6 @@ vpImage::vpImage(const vpImage &I) memcpy(static_cast(bitmap), static_cast(I.bitmap), I.npixels * sizeof(Type)); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! \relates vpImage */ @@ -878,7 +875,6 @@ vpImage::vpImage(vpImage &&I) I.row = NULL; I.hasOwnership = false; } -#endif /*! 
* \brief Return the maximum value within the bitmap diff --git a/modules/core/include/visp3/core/vpImageFilter.h b/modules/core/include/visp3/core/vpImageFilter.h index a977d73509..eb671845d6 100644 --- a/modules/core/include/visp3/core/vpImageFilter.h +++ b/modules/core/include/visp3/core/vpImageFilter.h @@ -35,10 +35,9 @@ #define _vpImageFilter_h_ /*! - \file vpImageFilter.h - \brief Various image filter, convolution, etc... - -*/ + * \file vpImageFilter.h + * \brief Various image filter, convolution, etc... + */ #include #include @@ -60,13 +59,12 @@ #endif /*! - \class vpImageFilter - - \ingroup group_core_image - - \brief Various image filter, convolution, etc... - -*/ + * \class vpImageFilter + * + * \ingroup group_core_image + * + * \brief Various image filter, convolution, etc... + */ class VISP_EXPORT vpImageFilter { public: @@ -113,13 +111,241 @@ class VISP_EXPORT vpImageFilter const float &upperThresholdRatio, const bool &normalizeGradients, const vpCannyBackendType &cannyBackend, const vpCannyFilteringAndGradientType &cannyFilteringSteps); - /*! - Apply a 1x3 derivative filter to an image pixel. +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) + static float computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, + float &lowerThresh, const unsigned int &gaussianKernelSize = 5, + const float &gaussianStdev = 2.f, const unsigned int &apertureGradient = 3, + const float &lowerThresholdRatio = 0.6, const float &upperThresholdRatio = 0.8, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); - \param I : Image to filter - \param r : coordinates (row) of the pixel - \param c : coordinates (column) of the pixel - */ + static void computePartialDerivatives(const cv::Mat &cv_I, + cv::Mat &cv_dIx, cv::Mat &cv_dIy, + const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, + const unsigned int &gaussianKernelSize = 5, const float &gaussianStdev = 2.f, + const unsigned int &apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); +#endif + + /** + * \brief Compute the partial derivatives (i.e. horizontal and vertical gradients) of the input image. + * + * \tparam ImageType Either unsigned char, float or double + * \tparam FilterType Either float or double. + * \param[in] I The input image we want the partial derivatives. + * \param[out] dIx The horizontal partial derivative, i.e. horizontal gradient. + * \param[out] dIy The vertical partial derivative, i.e. vertical gradient. + * \param[in] computeDx Indicate if we must compute the horizontal gradient. + * \param[in] computeDy Indicate if we must compute the vertical gradient. + * \param[in] normalize Indicate if we must normalize the gradient filters. + * \param[in] gaussianKernelSize The size of the kernel of the Gaussian filter used to blur the image. + * If it is non-positive, it is computed from kernel size (`gaussianKernelSize` parameter) as + * \f$\sigma = 0.3*((gaussianKernelSize-1)*0.5 - 1) + 0.8\f$. + * \param[in] gaussianStdev The standard deviation of the Gaussian filter used to blur the image. + * \param[in] apertureGradient The size of the kernel of the gradient filter. + * \param[in] filteringType The type of filters to apply to compute the gradients. + * \param[in] backend The type of backend to use to compute the gradients. 
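+ *
+ * A minimal usage sketch, assuming a synthetic grayscale input and float gradient images,
+ * and keeping the default parameters (ViSP backend, Gaussian blur + Sobel filtering):
+ * \code
+ * #include <visp3/core/vpImageFilter.h>
+ *
+ * int main()
+ * {
+ *   vpImage<unsigned char> I(240, 320);
+ *   for (unsigned int i = 0; i < I.getHeight(); ++i) {
+ *     for (unsigned int j = 0; j < I.getWidth(); ++j) {
+ *       I[i][j] = static_cast<unsigned char>(j % 256); // horizontal intensity ramp
+ *     }
+ *   }
+ *   vpImage<float> dIx, dIy; // resized by the call
+ *   vpImageFilter::computePartialDerivatives(I, dIx, dIy);
+ *   return 0;
+ * }
+ * \endcode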
+ */ + template + inline static void computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, + const unsigned int &gaussianKernelSize = 5, const FilterType &gaussianStdev = 2.f, + const unsigned int &apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, + const vpCannyBackendType &backend = CANNY_VISP_BACKEND) + { + if (backend == CANNY_OPENCV_BACKEND) { +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) + cv::Mat cv_I, cv_dIx, cv_dIy; + vpImageConvert::convert(I, cv_I); + computePartialDerivatives(cv_I, cv_dIx, cv_dIy, computeDx, computeDy, normalize, gaussianKernelSize, + gaussianStdev, apertureGradient, filteringType); + if (computeDx) { + vpImageConvert::convert(cv_dIx, dIx); + } + if (computeDy) { + vpImageConvert::convert(cv_dIy, dIy); + } +#else + throw(vpException(vpException::badValue, "You need to compile ViSP with OpenCV to use CANNY_OPENCV_BACKEND")); +#endif + } + else { + if (filteringType == CANNY_GBLUR_SCHARR_FILTERING || filteringType == CANNY_GBLUR_SOBEL_FILTERING) { + dIx.resize(I.getHeight(), I.getWidth()); + dIy.resize(I.getHeight(), I.getWidth()); + + // Computing the Gaussian blur + gradients of the image + vpImage Iblur; + vpImageFilter::gaussianBlur(I, Iblur, gaussianKernelSize, gaussianStdev); + + vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis + vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis + + // Helper to apply the scale to the raw values of the filters + auto scaleFilter = [](vpArray2D &filter, const float &scale) { + for (unsigned int r = 0; r < filter.getRows(); r++) { + for (unsigned int c = 0; c < filter.getCols(); c++) { + filter[r][c] = filter[r][c] * scale; + } + }}; + + // Scales to apply to the filters to get a normalized gradient filter that gives a gradient + // between 0 and 255 for an vpImage + float scaleX = 1.f; + float scaleY = 1.f; + + if (filteringType == CANNY_GBLUR_SOBEL_FILTERING) { + if (computeDx) { + scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); + } + if (computeDy) { + scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); + } + } + else if (filteringType == CANNY_GBLUR_SCHARR_FILTERING) { + if (computeDx) { + scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); + } + if (computeDy) { + scaleY = vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); + } + } + + // Scale the gradient filters to have a normalized gradient filter + if (normalize) { + if (computeDx) { + scaleFilter(gradientFilterX, scaleX); + } + if (computeDy) { + scaleFilter(gradientFilterY, scaleY); + } + } + + // Apply the gradient filters to get the gradients + if (computeDx) { + vpImageFilter::filter(Iblur, dIx, gradientFilterX); + } + + if (computeDy) { + vpImageFilter::filter(Iblur, dIy, gradientFilterY); + } + } + else { + std::string errMsg = "[vpImageFilter::computePartialDerivatives] Filtering + gradient method \""; + errMsg += vpCannyFilteringAndGradientTypeToString(filteringType); + errMsg += "\" is not implemented yet\n"; + throw(vpException(vpException::notImplementedError, errMsg)); + } + } + } + + template + inline static void computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx = true, const bool 
&computeDy = true, const bool &normalize = true, + const unsigned int &gaussianKernelSize = 5, const FilterType &gaussianStdev = 2.f, + const unsigned int &apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, + const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; + + template + inline static void computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, + const unsigned int &gaussianKernelSize = 5, const unsigned char &gaussianStdev = 2.f, + const unsigned int &apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, + const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; + + template + inline static void computePartialDerivatives(const vpImage &I, + vpImage &dIx, vpImage &dIy, + const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, + const unsigned int gaussianKernelSize = 5, const vpRGBa gaussianStdev = vpRGBa(), + const unsigned int apertureGradient = 3, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, + const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; + + /** + * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel or + Scharr operators to compute + * the gradient of the image. + * + * \tparam OutType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param[in] I : The gray-scale image, in ViSP format. + * \param[in] p_dIx : If different from nullptr, must contain the gradient of the image with regard to the horizontal axis. + * \param[in] p_dIy : If different from nullptr, must contain the gradient of the image with regard to the vertical axis. + * \param[in] lowerThresh : Canny lower threshold. + * \param[in] gaussianKernelSize : The size of the mask of the Gaussian filter to apply (an odd number). + * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. + * \param[in] apertureGradient : Size of the mask for the Sobel operator (odd number). + * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. + * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to define + * the upper threshold. + * \param[in] filteringType : The gradient filter to apply to compute the gradient, if \b p_dIx and \b p_dIy are + * nullptr. + * \return The upper Canny edge filter threshold. 
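+ *
+ * A minimal usage sketch, assuming the gradients were computed beforehand with
+ * vpImageFilter::computePartialDerivatives() and are reused through \b p_dIx and \b p_dIy;
+ * the synthetic image content and the default ratios are only illustrative:
+ * \code
+ * #include <visp3/core/vpImageFilter.h>
+ * #include <iostream>
+ *
+ * int main()
+ * {
+ *   vpImage<unsigned char> I(240, 320);
+ *   for (unsigned int i = 0; i < I.getHeight(); ++i) {
+ *     for (unsigned int j = 0; j < I.getWidth(); ++j) {
+ *       I[i][j] = static_cast<unsigned char>((i + j) % 256);
+ *     }
+ *   }
+ *   vpImage<float> dIx, dIy;
+ *   vpImageFilter::computePartialDerivatives(I, dIx, dIy);
+ *   float lowerThresh = -1.f; // filled by the call
+ *   float upperThresh = vpImageFilter::computeCannyThreshold(I, lowerThresh, &dIx, &dIy);
+ *   std::cout << "Canny thresholds: [" << lowerThresh << " ; " << upperThresh << "]" << std::endl;
+ *   return 0;
+ * }
+ * \endcode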
+ */ + template + inline static float computeCannyThreshold(const vpImage &I, float &lowerThresh, + const vpImage *p_dIx = nullptr, const vpImage *p_dIy = nullptr, + const unsigned int &gaussianKernelSize = 5, + const OutType &gaussianStdev = 2.f, const unsigned int &apertureGradient = 3, + const float &lowerThresholdRatio = 0.6, const float &upperThresholdRatio = 0.8, + const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING) + { + double w = I.getWidth(); + double h = I.getHeight(); + + vpImage dI(h, w); + vpImage dIx(h, w), dIy(h, w); + if (p_dIx != nullptr && p_dIy != nullptr) { + dIx = *p_dIx; + dIy = *p_dIy; + } + else { + computePartialDerivatives(I, dIx, dIy, true, true, true, gaussianKernelSize, gaussianStdev, + apertureGradient, filteringType); + } + + // Computing the absolute gradient of the image G = |dIx| + |dIy| + for (unsigned int r = 0; r < h; r++) { + for (unsigned int c = 0; c < w; c++) { + float dx = (float)dIx[r][c]; + float dy = (float)dIy[r][c]; + float gradient = std::abs(dx) + std::abs(dy); + float gradientClamped = std::min(gradient, (float)std::numeric_limits::max()); + dI[r][c] = gradientClamped; + } + } + + // Compute the histogram + vpHistogram hist; + const unsigned int nbBins = 256; + hist.calculate(dI, nbBins); + float accu = 0; + float t = (float)(upperThresholdRatio * w * h); + float bon = 0; + for (unsigned int i = 0; i < nbBins; i++) { + float tf = hist[i]; + accu = accu + tf; + if (accu > t) { + bon = (float)i; + break; + } + } + float upperThresh = std::max(bon, 1.f); + lowerThresh = lowerThresholdRatio * bon; + return upperThresh; + } + + /*! + * Apply a 1x3 derivative filter to an image pixel. + * + * \param I : Image to filter + * \param r : coordinates (row) of the pixel + * \param c : coordinates (column) of the pixel + */ template static double derivativeFilterX(const vpImage &I, unsigned int r, unsigned int c) { return (2047.0 * (I[r][c + 1] - I[r][c - 1]) + 913.0 * (I[r][c + 2] - I[r][c - 2]) + @@ -127,12 +353,12 @@ class VISP_EXPORT vpImageFilter } /*! - Apply a 3x1 derivative filter to an image pixel. - - \param I : Image to filter - \param r : coordinates (row) of the pixel - \param c : coordinates (column) of the pixel - */ + * Apply a 3x1 derivative filter to an image pixel. + * + * \param I : Image to filter + * \param r : coordinates (row) of the pixel + * \param c : coordinates (column) of the pixel + */ template static double derivativeFilterY(const vpImage &I, unsigned int r, unsigned int c) { return (2047.0 * (I[r + 1][c] - I[r - 1][c]) + 913.0 * (I[r + 2][c] - I[r - 2][c]) + @@ -140,18 +366,18 @@ class VISP_EXPORT vpImageFilter } /*! - Apply a 1 x size Derivative Filter in X to an image pixel. - - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : Image to filter - \param r : Coordinates(row) of the pixel - \param c : Coordinates(column) of the pixel - \param filter : Coefficients of the filter to be initialized using - vpImageFilter::getGaussianDerivativeKernel(). - \param size : Size of the filter. - - \sa vpImageFilter::getGaussianDerivativeKernel() - */ + * Apply a 1 x size Derivative Filter in X to an image pixel. + * + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. 
+ * \param I : Image to filter + * \param r : Coordinates(row) of the pixel + * \param c : Coordinates(column) of the pixel + * \param filter : Coefficients of the filter to be initialized using + * vpImageFilter::getGaussianDerivativeKernel(). + * \param size : Size of the filter. + * + * \sa vpImageFilter::getGaussianDerivativeKernel() + */ template static FilterType derivativeFilterX(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { @@ -167,18 +393,18 @@ class VISP_EXPORT vpImageFilter } /*! - Apply a size x 1 Derivative Filter in Y to an image pixel. - - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : Image to filter. - \param r : Coordinates (row) of the pixel. - \param c : Coordinates (column) of the pixel. - \param filter : Coefficients of the filter to be initialized using - vpImageFilter::getGaussianDerivativeKernel(). - \param size : Size of the filter. - - \sa vpImageFilter::getGaussianDerivativeKernel() - */ + * Apply a size x 1 Derivative Filter in Y to an image pixel. + * + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param I : Image to filter. + * \param r : Coordinates (row) of the pixel. + * \param c : Coordinates (column) of the pixel. + * \param filter : Coefficients of the filter to be initialized using + * vpImageFilter::getGaussianDerivativeKernel(). + * \param size : Size of the filter. + * + * \sa vpImageFilter::getGaussianDerivativeKernel() + */ template static FilterType derivativeFilterY(const vpImage &I, unsigned int r, unsigned int c, const FilterType *filter, unsigned int size) { @@ -194,33 +420,33 @@ class VISP_EXPORT vpImageFilter } /*! - Apply a filter to an image. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : Image to filter - \param If : Filtered image. - \param M : Filter kernel. - \param convolve : If true, perform a convolution otherwise a correlation. - - \note By default it performs a correlation: - \f[ - \textbf{I\_filtered} \left( u,v \right) = - \sum_{y=0}^{\textbf{kernel\_h}} - \sum_{x=0}^{\textbf{kernel\_w}} - \textbf{M} \left( x,y \right ) \times - \textbf{I} \left( - u-\frac{\textbf{kernel\_w}}{2}+x,v-\frac{\textbf{kernel\_h}}{2}+y \right) - \f] - The convolution is almost the same operation: - \f[ - \textbf{I\_filtered} \left( u,v \right) = - \sum_{y=0}^{\textbf{kernel\_h}} - \sum_{x=0}^{\textbf{kernel\_w}} - \textbf{M} \left( x,y \right ) \times - \textbf{I} \left( - u+\frac{\textbf{kernel\_w}}{2}-x,v+\frac{\textbf{kernel\_h}}{2}-y \right) - \f] - Only pixels in the input image fully covered by the kernel are considered. - */ + * Apply a filter to an image. + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param I : Image to filter + * \param If : Filtered image. + * \param M : Filter kernel. + * \param convolve : If true, perform a convolution otherwise a correlation. 
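+ *
+ * A minimal usage sketch, assuming a hand-built 3x3 averaging kernel (the kernel values and
+ * image size are only illustrative; for a symmetric kernel correlation and convolution give
+ * the same result):
+ * \code
+ * #include <visp3/core/vpImageFilter.h>
+ *
+ * int main()
+ * {
+ *   vpImage<unsigned char> I(240, 320, 128);
+ *   vpArray2D<float> M(3, 3, 1.f / 9.f); // 3x3 box filter
+ *   vpImage<float> If(I.getHeight(), I.getWidth());
+ *   vpImageFilter::filter(I, If, M); // correlation by default
+ *   return 0;
+ * }
+ * \endcode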
+ * + * \note By default it performs a correlation: + * \f[ + * \textbf{I\_filtered} \left( u,v \right) = + * \sum_{y=0}^{\textbf{kernel\_h}} + * \sum_{x=0}^{\textbf{kernel\_w}} + * \textbf{M} \left( x,y \right ) \times + * \textbf{I} \left( + * u-\frac{\textbf{kernel\_w}}{2}+x,v-\frac{\textbf{kernel\_h}}{2}+y \right) + * \f] + * The convolution is almost the same operation: + * \f[ + * \textbf{I\_filtered} \left( u,v \right) = + * \sum_{y=0}^{\textbf{kernel\_h}} + * \sum_{x=0}^{\textbf{kernel\_w}} + * \textbf{M} \left( x,y \right ) \times + * \textbf{I} \left( + * u+\frac{\textbf{kernel\_w}}{2}-x,v+\frac{\textbf{kernel\_h}}{2}-y \right) + * \f] + * Only pixels in the input image fully covered by the kernel are considered. + */ template static void filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve = false) { @@ -265,17 +491,17 @@ class VISP_EXPORT vpImageFilter static void filter(const vpImage &I, vpImage &If, const vpArray2D &M, bool convolve = false) = delete; /*! - Apply a filter to an image: - \f[ - \textbf{I}_u = \textbf{M} \ast \textbf{I} \textbf{ and } \textbf{I}_v = - \textbf{M}^t \ast \textbf{I} \f] - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : Image to filter - \param Iu : Filtered image along the horizontal axis (u = columns). - \param Iv : Filtered image along the vertical axis (v = rows). - \param M : Filter kernel. - \param convolve : If true, perform a convolution otherwise a correlation. - */ + * Apply a filter to an image: + * \f[ + * \textbf{I}_u = \textbf{M} \ast \textbf{I} \textbf{ and } \textbf{I}_v = + * \textbf{M}^t \ast \textbf{I} \f] + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param I : Image to filter + * \param Iu : Filtered image along the horizontal axis (u = columns). + * \param Iv : Filtered image along the vertical axis (v = rows). + * \param M : Filter kernel. + * \param convolve : If true, perform a convolution otherwise a correlation. + */ template static void filter(const vpImage &I, vpImage &Iu, vpImage &Iv, const vpArray2D &M, bool convolve = false) @@ -333,13 +559,13 @@ class VISP_EXPORT vpImageFilter static void sepFilter(const vpImage &I, vpImage &If, const vpColVector &kernelH, const vpColVector &kernelV); /*! - Apply a separable filter. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I: The original image. - \param GI: The filtered image. - \param filter: The separable filter. - \param size: The size of the filter. - */ + * Apply a separable filter. + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param I: The original image. + * \param GI: The filtered image. + * \param filter: The separable filter. + * \param size: The size of the filter. + */ template static void filter(const vpImage &I, vpImage &GI, const FilterType *filter, unsigned int size) { @@ -761,17 +987,17 @@ class VISP_EXPORT vpImageFilter } /*! - Apply a Gaussian blur to an image. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : Input image. - \param GI : Filtered image. - \param size : Filter size. This value should be odd. - \param sigma : Gaussian standard deviation. If it is equal to zero or - negative, it is computed from filter size as sigma = (size-1)/6. 
- \param normalize : Flag indicating whether to normalize the filter coefficients or not. - - \sa getGaussianKernel() to know which kernel is used. - */ + * Apply a Gaussian blur to an image. + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param I : Input image. + * \param GI : Filtered image. + * \param size : Filter size. This value should be odd. + * \param sigma : Gaussian standard deviation. If it is equal to zero or + * negative, it is computed from filter size as sigma = (size-1)/6. + * \param normalize : Flag indicating whether to normalize the filter coefficients or not. + * + * \sa getGaussianKernel() to know which kernel is used. + */ template static void gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size = 7, FilterType sigma = 0., bool normalize = true) { @@ -787,12 +1013,12 @@ class VISP_EXPORT vpImageFilter static void gaussianBlur(const vpImage &I, vpImage &GI, unsigned int size = 7, double sigma = 0., bool normalize = true); /*! - Apply a 5x5 Gaussian filter to an image pixel. - - \param fr : Image to filter - \param r : coordinates (row) of the pixel - \param c : coordinates (column) of the pixel - */ + * Apply a 5x5 Gaussian filter to an image pixel. + * + * \param fr : Image to filter + * \param r : coordinates (row) of the pixel + * \param c : coordinates (column) of the pixel + */ template static double gaussianFilter(const vpImage &fr, unsigned int r, unsigned int c) { return (15.0 * fr[r][c] + 12.0 * (fr[r - 1][c] + fr[r][c - 1] + fr[r + 1][c] + fr[r][c + 1]) + @@ -808,21 +1034,21 @@ class VISP_EXPORT vpImageFilter static void getGaussYPyramidal(const vpImage &I, vpImage &GI); /*! - Return the coefficients \f$G_i\f$ of a Gaussian filter. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param[out] filter : Pointer to the half size filter kernel that should refer to a - (size+1)/2 array. The first value refers to the central coefficient, the - next one to the right coefficients. Left coefficients could be deduced by - symmetry. - \param[in] size : Filter size. This value should be odd and positive. - \param[in] sigma : Gaussian standard deviation \f$ \sigma \f$. If it is equal to zero or negative, it is - computed from filter size as sigma = (size-1)/6. - \param[in] normalize : Flag indicating whether to normalize the filter coefficients or not. In that case \f$\Sigma G_i - = 1 \f$. - - The function computes the \e (size+1)/2 values of the Gaussian filter coefficients \f$ G_i \f$ as: - \f[ G_i = \frac{1}{\sigma \sqrt{2 \pi}} \exp{(-i^2 / (2. * \sigma^2))}\f] - */ + * Return the coefficients \f$G_i\f$ of a Gaussian filter. + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param[out] filter : Pointer to the half size filter kernel that should refer to a + * (size+1)/2 array. The first value refers to the central coefficient, the + * next one to the right coefficients. Left coefficients could be deduced by + * symmetry. + * \param[in] size : Filter size. This value should be odd and positive. + * \param[in] sigma : Gaussian standard deviation \f$ \sigma \f$. If it is equal to zero or negative, it is + * computed from filter size as sigma = (size-1)/6. + * \param[in] normalize : Flag indicating whether to normalize the filter coefficients or not. In that case + * \f$\Sigma G_i = 1 \f$. 
+ * + * The function computes the \e (size+1)/2 values of the Gaussian filter coefficients \f$ G_i \f$ as: + * \f[ G_i = \frac{1}{\sigma \sqrt{2 \pi}} \exp{(-i^2 / (2. * \sigma^2))}\f] + */ template static void getGaussianKernel(FilterType *filter, unsigned int size, FilterType sigma = 0., bool normalize = true) { @@ -854,19 +1080,19 @@ class VISP_EXPORT vpImageFilter } /*! - Return the coefficients of a Gaussian derivative filter that may be used to - compute spatial image derivatives after applying a Gaussian blur. - - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param filter : Pointer to the filter kernel that should refer to a - (size+1)/2 array. The first value refers to the central coefficient, the - next one to the right coefficients. Left coefficients could be deduced by - symmetry. - \param size : Filter size. This value should be odd. - \param sigma : Gaussian standard deviation. If it is equal to zero or negative, it is - computed from filter size as sigma = (size-1)/6. - \param normalize : Flag indicating whether to normalize the filter coefficients or not. - */ + * Return the coefficients of a Gaussian derivative filter that may be used to + * compute spatial image derivatives after applying a Gaussian blur. + * + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param filter : Pointer to the filter kernel that should refer to a + * (size+1)/2 array. The first value refers to the central coefficient, the + * next one to the right coefficients. Left coefficients could be deduced by + * symmetry. + * \param size : Filter size. This value should be odd. + * \param sigma : Gaussian standard deviation. If it is equal to zero or negative, it is + * computed from filter size as sigma = (size-1)/6. + * \param normalize : Flag indicating whether to normalize the filter coefficients or not. + */ template static void getGaussianDerivativeKernel(FilterType *filter, unsigned int size, FilterType sigma = 0., bool normalize = true) { @@ -937,15 +1163,15 @@ class VISP_EXPORT vpImageFilter } /*! - Compute the gradient along X after applying a gaussian filter along Y. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : Input image - \param dIx : Gradient along X. - \param gaussianKernel : Gaussian kernel which values should be computed using vpImageFilter::getGaussianKernel(). - \param gaussianDerivativeKernel : Gaussian derivative kernel which values should be computed using - vpImageFilter::getGaussianDerivativeKernel(). - \param size : Size of the Gaussian and Gaussian derivative kernels. - */ + * Compute the gradient along X after applying a gaussian filter along Y. + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param I : Input image + * \param dIx : Gradient along X. + * \param gaussianKernel : Gaussian kernel which values should be computed using vpImageFilter::getGaussianKernel(). + * \param gaussianDerivativeKernel : Gaussian derivative kernel which values should be computed using + * vpImageFilter::getGaussianDerivativeKernel(). + * \param size : Size of the Gaussian and Gaussian derivative kernels. 
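+ *
+ * A minimal usage sketch combining getGaussianKernel(), getGaussianDerivativeKernel() and this
+ * method; the kernel size and the synthetic input image are only illustrative:
+ * \code
+ * #include <visp3/core/vpImageFilter.h>
+ *
+ * #include <vector>
+ *
+ * int main()
+ * {
+ *   vpImage<unsigned char> I(240, 320);
+ *   for (unsigned int i = 0; i < I.getHeight(); ++i) {
+ *     for (unsigned int j = 0; j < I.getWidth(); ++j) {
+ *       I[i][j] = static_cast<unsigned char>(j % 256);
+ *     }
+ *   }
+ *   const unsigned int size = 7; // odd size, sigma deduced as (size-1)/6
+ *   std::vector<float> G((size + 1) / 2), dG((size + 1) / 2); // half kernels (symmetry)
+ *   vpImageFilter::getGaussianKernel(G.data(), size);
+ *   vpImageFilter::getGaussianDerivativeKernel(dG.data(), size);
+ *   vpImage<float> dIx(I.getHeight(), I.getWidth());
+ *   vpImageFilter::getGradXGauss2D(I, dIx, G.data(), dG.data(), size);
+ *   return 0;
+ * }
+ * \endcode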
+ */ template static void getGradXGauss2D(const vpImage &I, vpImage &dIx, const FilterType *gaussianKernel, const FilterType *gaussianDerivativeKernel, unsigned int size) @@ -999,15 +1225,15 @@ class VISP_EXPORT vpImageFilter } /*! - Compute the gradient along Y after applying a gaussian filter along X. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param I : Input image - \param dIy : Gradient along Y. - \param gaussianKernel : Gaussian kernel which values should be computed using vpImageFilter::getGaussianKernel(). - \param gaussianDerivativeKernel : Gaussian derivative kernel which values should be computed using - vpImageFilter::getGaussianDerivativeKernel(). - \param size : Size of the Gaussian and Gaussian derivative kernels. - */ + * Compute the gradient along Y after applying a gaussian filter along X. + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param I : Input image + * \param dIy : Gradient along Y. + * \param gaussianKernel : Gaussian kernel which values should be computed using vpImageFilter::getGaussianKernel(). + * \param gaussianDerivativeKernel : Gaussian derivative kernel which values should be computed using + * vpImageFilter::getGaussianDerivativeKernel(). + * \param size : Size of the Gaussian and Gaussian derivative kernels. + */ template static void getGradYGauss2D(const vpImage &I, vpImage &dIy, const FilterType *gaussianKernel, const FilterType *gaussianDerivativeKernel, unsigned int size) @@ -1068,12 +1294,12 @@ class VISP_EXPORT vpImageFilter } /*! - Get Sobel kernel for X-direction. - \tparam FilterType: Either float, to accelerate the computation time, or double, to have greater precision. - \param filter : Pointer to a double array already allocated. - \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). - \return Scaling factor to normalize the Sobel kernel. - */ + * Get Sobel kernel for X-direction. + * \tparam FilterType: Either float, to accelerate the computation time, or double, to have greater precision. + * \param filter : Pointer to a double array already allocated. + * \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). + * \return Scaling factor to normalize the Sobel kernel. + */ template inline static FilterType getSobelKernelX(FilterType *filter, unsigned int size) { @@ -1089,12 +1315,12 @@ class VISP_EXPORT vpImageFilter } /*! - Get Sobel kernel for Y-direction. - \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. - \param filter : Pointer to a double array already allocated. - \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). - \return Scaling factor to normalize the Sobel kernel. - */ + * Get Sobel kernel for Y-direction. + * \tparam FilterType : Either float, to accelerate the computation time, or double, to have greater precision. + * \param filter : Pointer to a double array already allocated. + * \param size : Kernel size computed as: kernel_size = size*2 + 1 (max size is 20). + * \return Scaling factor to normalize the Sobel kernel. 
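+ *
+ * A minimal sketch showing how the returned scale can be applied to obtain a normalized 3x3
+ * Sobel kernel (the in-place scaling loop is only illustrative):
+ * \code
+ * #include <visp3/core/vpImageFilter.h>
+ *
+ * int main()
+ * {
+ *   vpArray2D<float> sobelY(3, 3); // kernel_size = size * 2 + 1 with size = 1
+ *   float scale = vpImageFilter::getSobelKernelY(sobelY.data, 1);
+ *   for (unsigned int r = 0; r < sobelY.getRows(); ++r) {
+ *     for (unsigned int c = 0; c < sobelY.getCols(); ++c) {
+ *       sobelY[r][c] *= scale; // normalized gradient filter
+ *     }
+ *   }
+ *   return 0;
+ * }
+ * \endcode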
+ */ template inline static FilterType getSobelKernelY(FilterType *filter, unsigned int size) { @@ -1151,232 +1377,6 @@ class VISP_EXPORT vpImageFilter return scale; } -#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) - static float computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, - float &lowerThresh, const unsigned int &gaussianKernelSize = 5, - const float &gaussianStdev = 2.f, const unsigned int &apertureGradient = 3, - const float &lowerThresholdRatio = 0.6, const float &upperThresholdRatio = 0.8, - const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); - - static void computePartialDerivatives(const cv::Mat &cv_I, - cv::Mat &cv_dIx, cv::Mat &cv_dIy, - const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, - const unsigned int &gaussianKernelSize = 5, const float &gaussianStdev = 2.f, - const unsigned int &apertureGradient = 3, - const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING); -#endif - - /** - * \brief Compute the partial derivatives (i.e. horizontal and vertical gradients) of the input image. - * - * \tparam ImageType Either unsigned char, float or double - * \tparam FilterType Either float or double. - * \param[in] I The input image we want the partial derivatives. - * \param[out] dIx The horizontal partial derivative, i.e. horizontal gradient. - * \param[out] dIy The vertical partial derivative, i.e. vertical gradient. - * \param[in] computeDx Idicate if we must compute the horizontal gradient. - * \param[in] computeDy Idicate if we must compute the vertical gradient. - * \param[in] normalize Idicate if we must normalize the gradient filters. - * \param[in] gaussianKernelSize The size of the kernel of the Gaussian filter used to blur the image. - * \param[in] gaussianStdev The standard deviation of the Gaussian filter used to blur the image. - * \param[in] apertureGradient The size of the kernel of the gradient filter. - * \param[in] filteringType The type of filters to apply to compute the gradients. - * \param[in] backend The type of backend to use to compute the gradients. 
- */ - template - inline static void computePartialDerivatives(const vpImage &I, - vpImage &dIx, vpImage &dIy, - const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, - const unsigned int &gaussianKernelSize = 5, const FilterType &gaussianStdev = 2.f, - const unsigned int &apertureGradient = 3, - const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, - const vpCannyBackendType &backend = CANNY_VISP_BACKEND) - { - if (backend == CANNY_OPENCV_BACKEND) { -#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) - cv::Mat cv_I, cv_dIx, cv_dIy; - vpImageConvert::convert(I, cv_I); - computePartialDerivatives(cv_I, cv_dIx, cv_dIy, computeDx, computeDy, normalize, gaussianKernelSize, - gaussianStdev, apertureGradient, filteringType); - if (computeDx) { - vpImageConvert::convert(cv_dIx, dIx); - } - if (computeDy) { - vpImageConvert::convert(cv_dIy, dIy); - } -#else - throw(vpException(vpException::badValue, "You need to compile ViSP with OpenCV to use CANNY_OPENCV_BACKEND")); -#endif - } - else { - if (filteringType == CANNY_GBLUR_SCHARR_FILTERING || filteringType == CANNY_GBLUR_SOBEL_FILTERING) { - dIx.resize(I.getHeight(), I.getWidth()); - dIy.resize(I.getHeight(), I.getWidth()); - - // Computing the Gaussian blur + gradients of the image - vpImage Iblur; - vpImageFilter::gaussianBlur(I, Iblur, gaussianKernelSize, gaussianStdev); - - vpArray2D gradientFilterX(apertureGradient, apertureGradient); // Gradient filter along the X-axis - vpArray2D gradientFilterY(apertureGradient, apertureGradient); // Gradient filter along the Y-axis - - // Helper to apply the scale to the raw values of the filters - auto scaleFilter = [](vpArray2D &filter, const float &scale) { - for (unsigned int r = 0; r < filter.getRows(); r++) { - for (unsigned int c = 0; c < filter.getCols(); c++) { - filter[r][c] = filter[r][c] * scale; - } - }}; - - // Scales to apply to the filters to get a normalized gradient filter that gives a gradient - // between 0 and 255 for an vpImage - float scaleX = 1.f; - float scaleY = 1.f; - - if (filteringType == CANNY_GBLUR_SOBEL_FILTERING) { - if (computeDx) { - scaleX = vpImageFilter::getSobelKernelX(gradientFilterX.data, (apertureGradient - 1)/2); - } - if (computeDy) { - scaleY = vpImageFilter::getSobelKernelY(gradientFilterY.data, (apertureGradient - 1)/2); - } - } - else if (filteringType == CANNY_GBLUR_SCHARR_FILTERING) { - if (computeDx) { - scaleX = vpImageFilter::getScharrKernelX(gradientFilterX.data, (apertureGradient - 1)/2); - } - if (computeDy) { - scaleY = vpImageFilter::getScharrKernelY(gradientFilterY.data, (apertureGradient - 1)/2); - } - } - - // Scale the gradient filters to have a normalized gradient filter - if (normalize) { - if (computeDx) { - scaleFilter(gradientFilterX, scaleX); - } - if (computeDy) { - scaleFilter(gradientFilterY, scaleY); - } - } - - // Apply the gradient filters to get the gradients - if (computeDx) { - vpImageFilter::filter(Iblur, dIx, gradientFilterX); - } - - if (computeDy) { - vpImageFilter::filter(Iblur, dIy, gradientFilterY); - } - } - else { - std::string errMsg = "[vpImageFilter::computePartialDerivatives] Filtering + gradient method \""; - errMsg += vpCannyFilteringAndGradientTypeToString(filteringType); - errMsg += "\" is not implemented yet\n"; - throw(vpException(vpException::notImplementedError, errMsg)); - } - } - } - - template - inline static void computePartialDerivatives(const vpImage &I, - vpImage &dIx, vpImage &dIy, - const bool &computeDx = true, const bool 
&computeDy = true, const bool &normalize = true, - const unsigned int &gaussianKernelSize = 5, const FilterType &gaussianStdev = 2.f, - const unsigned int &apertureGradient = 3, - const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, - const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; - - template - inline static void computePartialDerivatives(const vpImage &I, - vpImage &dIx, vpImage &dIy, - const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, - const unsigned int &gaussianKernelSize = 5, const unsigned char &gaussianStdev = 2.f, - const unsigned int &apertureGradient = 3, - const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, - const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; - - template - inline static void computePartialDerivatives(const vpImage &I, - vpImage &dIx, vpImage &dIy, - const bool &computeDx = true, const bool &computeDy = true, const bool &normalize = true, - const unsigned int gaussianKernelSize = 5, const vpRGBa gaussianStdev = vpRGBa(), - const unsigned int apertureGradient = 3, - const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING, - const vpCannyBackendType &backend = CANNY_VISP_BACKEND) = delete; - - /** - * \brief Compute the upper Canny edge filter threshold, using Gaussian blur + Sobel or + Scharr operators to compute - * the gradient of the image. - * - * \tparam OutType : Either float, to accelerate the computation time, or double, to have greater precision. - * \param[in] I : The gray-scale image, in ViSP format. - * \param[in] p_dIx : If different from nullptr, must contain the gradient of the image with regard to the horizontal axis. - * \param[in] p_dIy : If different from nullptr, must contain the gradient of the image with regard to the vertical axis. - * \param[in] lowerThresh : Canny lower threshold. - * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). - * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. - * \param[in] apertureGradient : Size of the mask for the Sobel operator (odd number). - * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. - * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to define - * the upper threshold. - * \param[in] filteringType : The gradient filter to apply to compute the gradient, if \b p_dIx and \b p_dIy are - * nullptr. - * \return The upper Canny edge filter threshold. 
- */ - template - inline static float computeCannyThreshold(const vpImage &I, float &lowerThresh, - const vpImage *p_dIx = nullptr, const vpImage *p_dIy = nullptr, - const unsigned int &gaussianKernelSize = 5, - const OutType &gaussianStdev = 2.f, const unsigned int &apertureGradient = 3, - const float &lowerThresholdRatio = 0.6, const float &upperThresholdRatio = 0.8, - const vpCannyFilteringAndGradientType &filteringType = CANNY_GBLUR_SOBEL_FILTERING) - { - double w = I.getWidth(); - double h = I.getHeight(); - - vpImage dI(h, w); - vpImage dIx(h, w), dIy(h, w); - if (p_dIx != nullptr && p_dIy != nullptr) { - dIx = *p_dIx; - dIy = *p_dIy; - } - else { - computePartialDerivatives(I, dIx, dIy, true, true, true, gaussianKernelSize, gaussianStdev, - apertureGradient, filteringType); - } - - // Computing the absolute gradient of the image G = |dIx| + |dIy| - for (unsigned int r = 0; r < h; r++) { - for (unsigned int c = 0; c < w; c++) { - float dx = (float)dIx[r][c]; - float dy = (float)dIy[r][c]; - float gradient = std::abs(dx) + std::abs(dy); - float gradientClamped = std::min(gradient, (float)std::numeric_limits::max()); - dI[r][c] = gradientClamped; - } - } - - // Compute the histogram - vpHistogram hist; - const unsigned int nbBins = 256; - hist.calculate(dI, nbBins); - float accu = 0; - float t = (float)(upperThresholdRatio * w * h); - float bon = 0; - for (unsigned int i = 0; i < nbBins; i++) { - float tf = hist[i]; - accu = accu + tf; - if (accu > t) { - bon = (float)i; - break; - } - } - float upperThresh = std::max(bon, 1.f); - lowerThresh = lowerThresholdRatio * bon; - return upperThresh; - } - #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) static float median(const cv::Mat &cv_I); static float median(const vpImage &Isrc); diff --git a/modules/core/include/visp3/core/vpImagePoint.h b/modules/core/include/visp3/core/vpImagePoint.h index 9618b273e4..f1547df123 100644 --- a/modules/core/include/visp3/core/vpImagePoint.h +++ b/modules/core/include/visp3/core/vpImagePoint.h @@ -85,12 +85,12 @@ class VISP_EXPORT vpImagePoint Default constructor that initialize the coordinates of the image point to zero. */ - inline vpImagePoint() : i(0), j(0) {} + inline vpImagePoint() : i(0), j(0) { } /*! Default constructor that initialize the coordinates of the image thanks to the parameters \f$ ii \f$ and \f$ jj \f$. */ - inline vpImagePoint(double ii, double jj) : i(ii), j(jj) {} + inline vpImagePoint(double ii, double jj) : i(ii), j(jj) { } /*! Copy constructor. @@ -98,9 +98,9 @@ class VISP_EXPORT vpImagePoint \param ip : An image point. */ - inline vpImagePoint(const vpImagePoint &ip) : i(ip.i), j(ip.j) {} + inline vpImagePoint(const vpImagePoint &ip) : i(ip.i), j(ip.j) { } //! Destructor. - inline virtual ~vpImagePoint() {} + inline virtual ~vpImagePoint() { } /*! @@ -163,8 +163,8 @@ class VISP_EXPORT vpImagePoint { return ((end.get_j() >= start.get_j() && end.get_j() >= this->j && this->j >= start.get_j()) || (end.get_j() <= start.get_j() && end.get_j() <= this->j && this->j <= start.get_j())) && - ((end.get_i() >= start.get_i() && end.get_i() >= this->i && this->i >= start.get_i()) || - (end.get_i() <= start.get_i() && end.get_i() <= this->i && this->i <= start.get_i())); + ((end.get_i() >= start.get_i() && end.get_i() >= this->i && this->i >= start.get_i()) || + (end.get_i() <= start.get_i() && end.get_i() <= this->i && this->i <= start.get_i())); } /*! 
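The vpImagePoint hunks around this point touch inSegment() and nextInSegment(), which are meant to be used together to walk the pixels of a segment, exactly as vpColorDepthConversion::projectColorToDepth() does further down in this patch. A minimal sketch of that pattern (coordinates are arbitrary):

#include <visp3/core/vpImagePoint.h>

int main()
{
  vpImagePoint start(10, 10), end(20, 40); // (i, j) coordinates, arbitrary values
  for (vpImagePoint curr = start; curr.inSegment(start, end) && curr != end;
       curr = curr.nextInSegment(start, end)) {
    // visit the pixel at (curr.get_i(), curr.get_j())
  }
  return 0;
}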
@@ -217,18 +217,11 @@ class VISP_EXPORT vpImagePoint const double line_slope = (end.get_i() - start.get_i()) / (end.get_j() - start.get_j()); if (fabs(end.get_j() - this->j) > fabs(end.get_i() - this->i)) { double j_ = (end.get_j() > this->j ? this->j + 1 : this->j - 1); -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) - return {end.get_i() - line_slope * (end.get_j() - j_), j_}; -#else - return vpImagePoint(end.get_i() - line_slope * (end.get_j() - j_), j_); -#endif - } else { + return { end.get_i() - line_slope * (end.get_j() - j_), j_ }; + } + else { double i_ = (end.get_i() > this->i ? this->i + 1 : this->i - 1); -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) - return {i_, end.get_j() - ((end.get_i() - i_) / line_slope)}; -#else - return vpImagePoint(i_, end.get_j() - ((end.get_i() - i_) / line_slope)); -#endif + return { i_, end.get_j() - ((end.get_i() - i_) / line_slope) }; } } @@ -241,7 +234,7 @@ class VISP_EXPORT vpImagePoint this->j = ip.j; return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) + /*! Move operator. */ @@ -251,7 +244,6 @@ class VISP_EXPORT vpImagePoint this->j = ip.j; return *this; } -#endif vpImagePoint &operator+=(const vpImagePoint &ip); diff --git a/modules/core/include/visp3/core/vpLinProg.h b/modules/core/include/visp3/core/vpLinProg.h index 38586d341b..8483699958 100644 --- a/modules/core/include/visp3/core/vpLinProg.h +++ b/modules/core/include/visp3/core/vpLinProg.h @@ -61,59 +61,55 @@ class VISP_EXPORT vpLinProg { public: -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! - Used to pass a list of bounded variables to solveLP(), as a list of (index, bound). - - The type is compatible with C++11's braced initialization. - Construction can be done in the call to solveLP or before, as shown in this example: - - \f$\begin{array}{lll} - (x,y,z) = & \arg\min & -2x -3y -4z\\ - & \text{s.t.}& 3x + 2y + z \leq 10\\ - & \text{s.t.}& 2x + 5y + 3z \leq 15\\ - & \text{s.t.}& x, y, z \geq 0\\ - & \text{s.t.}& z \leq 6\end{array}\f$ - - Here the lower bound is built explicitly while the upper one is built during the call to solveLP(): - - \warning This function is only available if c++11 or higher is activated during compilation. Configure ViSP using - cmake -DUSE_CXX_STANDARD=11. - - \code - #include - - int main() - { - vpColVector c(3), x; - vpMatrix C(2, 3); - vpColVector d(2); - c[0] = -2; c[1] = -3; c[2] = -4; - C[0][0] = 3; C[0][1] = 2; C[0][2] = 1; d[0] = 10; - C[1][0] = 2; C[1][1] = 5; C[1][2] = 3; d[1] = 15; - - // build lower bounds explicitly as a std::vector of std::pair - std::vector lower_bound; - for(unsigned int i = 0; i < 3; ++i) - { - vpLinProg::BoundedIndex bound; - bound.first = i; // index - bound.second = 0; // lower bound for this index - lower_bound.push_back(bound); - } - - if(vpLinProg::solveLP(c, vpMatrix(0,0), vpColVector(0), C, d, x, - lower_bound, - {{2,6}})) // upper bound is passed with braced initialization - { - std::cout << "x: " << x.t() << std::endl; - std::cout << "cost: " << c.t()*x << std::endl; - } - } - \endcode - - \sa solveLP() - */ + * Used to pass a list of bounded variables to solveLP(), as a list of (index, bound). + * + * The type is compatible with C++11's braced initialization. 
+ * Construction can be done in the call to solveLP or before, as shown in this example: + * + * \f$\begin{array}{lll} + * (x,y,z) = & \arg\min & -2x -3y -4z\\ + * & \text{s.t.}& 3x + 2y + z \leq 10\\ + * & \text{s.t.}& 2x + 5y + 3z \leq 15\\ + * & \text{s.t.}& x, y, z \geq 0\\ + * & \text{s.t.}& z \leq 6\end{array}\f$ + * + * Here the lower bound is built explicitly while the upper one is built during the call to solveLP(): + * + * \code + * #include + * + * int main() + * { + * vpColVector c(3), x; + * vpMatrix C(2, 3); + * vpColVector d(2); + * c[0] = -2; c[1] = -3; c[2] = -4; + * C[0][0] = 3; C[0][1] = 2; C[0][2] = 1; d[0] = 10; + * C[1][0] = 2; C[1][1] = 5; C[1][2] = 3; d[1] = 15; + * + * // build lower bounds explicitly as a std::vector of std::pair + * std::vector lower_bound; + * for(unsigned int i = 0; i < 3; ++i) + * { + * vpLinProg::BoundedIndex bound; + * bound.first = i; // index + * bound.second = 0; // lower bound for this index + * lower_bound.push_back(bound); + * } + * + * if(vpLinProg::solveLP(c, vpMatrix(0,0), vpColVector(0), C, d, x, + * lower_bound, + * {{2,6}})) // upper bound is passed with braced initialization + * { + * std::cout << "x: " << x.t() << std::endl; + * std::cout << "cost: " << c.t()*x << std::endl; + * } + * } + * \endcode + * + * \sa solveLP() + */ typedef std::pair BoundedIndex; /** @name Solvers */ @@ -125,7 +121,6 @@ class VISP_EXPORT vpLinProg const double &tol = 1e-6); //@} -#endif /** @name Dimension reduction for equality constraints */ //@{ @@ -137,13 +132,13 @@ class VISP_EXPORT vpLinProg /** @name Vector and equality checking */ //@{ /*! - Check if all elements of \f$x\f$ are near zero. - - \param x : vector to be checked - \param tol : tolerance - - \return True if \f$\forall i, |\mathbf{x}_i| < \text{~tol} \f$ - */ + * Check if all elements of \f$x\f$ are near zero. + * + * \param x : vector to be checked + * \param tol : tolerance + * + * \return True if \f$\forall i, |\mathbf{x}_i| < \text{~tol} \f$ + */ static bool allZero(const vpColVector &x, const double &tol = 1e-6) { for (unsigned int i = 0; i < x.getRows(); ++i) { @@ -154,15 +149,15 @@ class VISP_EXPORT vpLinProg } /*! - Check if \f$\mathbf{A}\mathbf{x}\f$ is near \f$\mathbf{b}\f$. - - \param A : matrix (dimension m x n) - \param x : vector (dimension n) - \param b : vector (dimension m) - \param tol : tolerance - - \return True if \f$ \forall i, |\mathbf{A}_i\mathbf{x} - \mathbf{b}_i| < \text{~tol}\f$ - */ + * Check if \f$\mathbf{A}\mathbf{x}\f$ is near \f$\mathbf{b}\f$. + * + * \param A : matrix (dimension m x n) + * \param x : vector (dimension n) + * \param b : vector (dimension m) + * \param tol : tolerance + * + * \return True if \f$ \forall i, |\mathbf{A}_i\mathbf{x} - \mathbf{b}_i| < \text{~tol}\f$ + */ static bool allClose(const vpMatrix &A, const vpColVector &x, const vpColVector &b, const double &tol = 1e-6) { for (unsigned int i = 0; i < b.getRows(); ++i) { @@ -173,14 +168,14 @@ class VISP_EXPORT vpLinProg } /*! - Check if all elements of \f$\mathbf{C}\mathbf{x} - \mathbf{d}\f$ are lesser or equal to threshold. - \param C : matrix (dimension m x n) - \param x : vector (dimension n) - \param d : vector (dimension m) - \param thr : threshold - - \return True if \f$ \forall i, \mathbf{C}_i\mathbf{x} - \mathbf{d}_i \leq \text{~thr}\f$ - */ + * Check if all elements of \f$\mathbf{C}\mathbf{x} - \mathbf{d}\f$ are lesser or equal to threshold. 
+ * \param C : matrix (dimension m x n) + * \param x : vector (dimension n) + * \param d : vector (dimension m) + * \param thr : threshold + * + * \return True if \f$ \forall i, \mathbf{C}_i\mathbf{x} - \mathbf{d}_i \leq \text{~thr}\f$ + */ static bool allLesser(const vpMatrix &C, const vpColVector &x, const vpColVector &d, const double &thr = 1e-6) { for (unsigned int i = 0; i < d.getRows(); ++i) { @@ -191,13 +186,13 @@ class VISP_EXPORT vpLinProg } /*! - Check if all elements of \f$\mathbf{x}\f$ are lesser or equal to threshold. - - \param x : vector (dimension n) - \param thr : threshold - - \return True if \f$ \forall i, \mathbf{x}_i \leq \text{~thr}\f$ - */ + * Check if all elements of \f$\mathbf{x}\f$ are lesser or equal to threshold. + * + * \param x : vector (dimension n) + * \param thr : threshold + * + * \return True if \f$ \forall i, \mathbf{x}_i \leq \text{~thr}\f$ + */ static bool allLesser(const vpColVector &x, const double &thr = 1e-6) { for (unsigned int i = 0; i < x.getRows(); ++i) { @@ -208,13 +203,13 @@ class VISP_EXPORT vpLinProg } /*! - Check if all elements of \f$\mathbf{x}\f$ are greater or equal to threshold. - - \param x : vector (dimension n) - \param thr : threshold - - \return True if \f$ \forall i, \mathbf{x}_i \geq \text{~thr}\f$ - */ + * Check if all elements of \f$\mathbf{x}\f$ are greater or equal to threshold. + * + * \param x : vector (dimension n) + * \param thr : threshold + * + * \return True if \f$ \forall i, \mathbf{x}_i \geq \text{~thr}\f$ + */ static bool allGreater(const vpColVector &x, const double &thr = 1e-6) { for (unsigned int i = 0; i < x.getRows(); ++i) { @@ -225,4 +220,4 @@ class VISP_EXPORT vpLinProg } //@} }; -#endif // vpLinProgh +#endif diff --git a/modules/core/include/visp3/core/vpMatrix.h b/modules/core/include/visp3/core/vpMatrix.h index f1dac9443c..4048374153 100644 --- a/modules/core/include/visp3/core/vpMatrix.h +++ b/modules/core/include/visp3/core/vpMatrix.h @@ -116,10 +116,8 @@ int main() int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix M( {-1, -2, -3}, {4, 5.5, 6.0f} ); std::cout << "M:\n" << M << std::endl; -#endif } \endcode You can also create and initialize a matrix this way: @@ -128,9 +126,7 @@ int main() int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix M(2, 3, {-1, -2, -3, 4, 5.5, 6.0f} ); -#endif } \endcode @@ -138,10 +134,8 @@ int main() \code int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix M; M = { {-1, -2, -3}, {4, 5.5, 6.0f} }; -#endif } \endcode @@ -201,12 +195,10 @@ vpMatrix M(R); vpMatrix(const vpMatrix &A) : vpArray2D(A) { } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix(vpMatrix &&A); explicit vpMatrix(const std::initializer_list &list); explicit vpMatrix(unsigned int nrows, unsigned int ncols, const std::initializer_list &list); explicit vpMatrix(const std::initializer_list > &lists); -#endif /*! 
Removes all elements from the matrix (which are destroyed), @@ -279,13 +271,11 @@ vpMatrix M(R); vpMatrix &operator<<(double val); vpMatrix &operator,(double val); vpMatrix &operator=(const vpArray2D &A); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix &operator=(const vpMatrix &A); vpMatrix &operator=(vpMatrix &&A); vpMatrix &operator=(const std::initializer_list &list); vpMatrix &operator=(const std::initializer_list > &lists); -#endif vpMatrix &operator=(double x); //@} diff --git a/modules/core/include/visp3/core/vpPoseVector.h b/modules/core/include/visp3/core/vpPoseVector.h index fb59bf63f2..0491d38df4 100644 --- a/modules/core/include/visp3/core/vpPoseVector.h +++ b/modules/core/include/visp3/core/vpPoseVector.h @@ -137,11 +137,8 @@ class vpRowVector; { vpTranslationVector t; vpThetaUVector tu; - - #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) t = { 0.1, 0.2, 0.3 }; tu = { M_PI, M_PI_2, M_PI_4 }; - #endif vpPoseVector pose(t, tu); } \endcode diff --git a/modules/core/include/visp3/core/vpQuadProg.h b/modules/core/include/visp3/core/vpQuadProg.h index e63d638631..138f431316 100644 --- a/modules/core/include/visp3/core/vpQuadProg.h +++ b/modules/core/include/visp3/core/vpQuadProg.h @@ -31,8 +31,8 @@ * Quadratic Programming */ -#ifndef vpQuadProgh -#define vpQuadProgh +#ifndef _vpQuadProg_h_ +#define _vpQuadProg_h_ #include #include @@ -42,34 +42,33 @@ #include /*! - \file vpQuadProg.h - \brief Implementation of Quadratic Program with Active Sets. -*/ + * \file vpQuadProg.h + * \brief Implementation of Quadratic Program with Active Sets. + */ /*! - \class vpQuadProg - \ingroup group_core_optim - \brief This class provides a solver for Quadratic Programs. - - The cost function is written under the form \f$ \min ||\mathbf{Q}\mathbf{x} - \mathbf{r}||^2\f$. - - If a cost function is written under the canonical form \f$\min \frac{1}{2}\mathbf{x}^T\mathbf{H}\mathbf{x} + - \mathbf{c}^T\mathbf{x}\f$ then fromCanonicalCost() can be used to retrieve Q and r from H and c. - - Equality constraints are solved through projection into the kernel. - - Inequality constraints are solved with active sets. - - In order to be used sequentially, the decomposition of the equality constraint may be stored. - The last active set is always stored and used to warm start the next call. - - \warning The solvers are only available if c++11 or higher is activated during build. - Configure ViSP using cmake -DUSE_CXX_STANDARD=11. -*/ + * \class vpQuadProg + * \ingroup group_core_optim + * \brief This class provides a solver for Quadratic Programs. + * + * The cost function is written under the form \f$ \min ||\mathbf{Q}\mathbf{x} - \mathbf{r}||^2\f$. + * + * If a cost function is written under the canonical form \f$\min \frac{1}{2}\mathbf{x}^T\mathbf{H}\mathbf{x} + + * \mathbf{c}^T\mathbf{x}\f$ then fromCanonicalCost() can be used to retrieve Q and r from H and c. + * + * Equality constraints are solved through projection into the kernel. + * + * Inequality constraints are solved with active sets. + * + * In order to be used sequentially, the decomposition of the equality constraint may be stored. + * The last active set is always stored and used to warm start the next call. + * + * \warning The solvers are only available if c++11 or higher is activated during build. + * Configure ViSP using cmake -DUSE_CXX_STANDARD=11. 
+ */ class VISP_EXPORT vpQuadProg { public: -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /** @name Instantiated solvers */ //@{ bool solveQPe(const vpMatrix &Q, const vpColVector &r, vpColVector &x, const double &tol = 1e-6) const; @@ -84,9 +83,10 @@ class VISP_EXPORT vpQuadProg /** @name Managing sequential calls to solvers */ //@{ bool setEqualityConstraint(const vpMatrix &A, const vpColVector &b, const double &tol = 1e-6); + /*! - Resets the active set that was found by a previous call to solveQP() or solveQPi(), if any. - */ + * Resets the active set that was found by a previous call to solveQP() or solveQPi(), if any. + */ void resetActiveSet() { active.clear(); } //@} @@ -97,20 +97,23 @@ class VISP_EXPORT vpQuadProg protected: /*! - Active set from the last call to solveQP() or solveQPi(). Used for warm starting the next call. - */ + * Active set from the last call to solveQP() or solveQPi(). Used for warm starting the next call. + */ std::vector active; + /*! - Inactive set from the last call to solveQP() or solveQPi(). Used for warm starting the next call. - */ + * Inactive set from the last call to solveQP() or solveQPi(). Used for warm starting the next call. + */ std::vector inactive; + /*! - Stored particular solution from the last call to setEqualityConstraint(). - */ + * Stored particular solution from the last call to setEqualityConstraint(). + */ vpColVector x1; + /*! - Stored projection to the kernel from the last call to setEqualityConstraint(). - */ + * Stored projection to the kernel from the last call to setEqualityConstraint(). + */ vpMatrix Z; static vpColVector solveSVDorQR(const vpMatrix &A, const vpColVector &b); @@ -119,20 +122,20 @@ class VISP_EXPORT vpQuadProg const double &tol = 1e-6); /*! - Performs a dimension check of passed QP matrices and vectors. - - If any inconsistency is detected, displays a summary and throws an exception. - - \param Q : cost matrix (dimension c x n) - \param r : cost vector (dimension c) - \param A : pointer to the equality matrix (if any, dimension m x n) - \param b : pointer to the equality vector (if any, dimension m) - \param C : pointer to the inequality matrix (if any, dimension p x n) - \param d : pointer to the inequality vector (if any, dimension p) - \param fct : name of the solver that called this function - - \return the dimension of the search space. - */ + * Performs a dimension check of passed QP matrices and vectors. + * + * If any inconsistency is detected, displays a summary and throws an exception. + * + * \param Q : cost matrix (dimension c x n) + * \param r : cost vector (dimension c) + * \param A : pointer to the equality matrix (if any, dimension m x n) + * \param b : pointer to the equality vector (if any, dimension m) + * \param C : pointer to the inequality matrix (if any, dimension p x n) + * \param d : pointer to the inequality vector (if any, dimension p) + * \param fct : name of the solver that called this function + * + * \return the dimension of the search space. 
+ */ static unsigned int checkDimensions(const vpMatrix &Q, const vpColVector &r, const vpMatrix *A, const vpColVector *b, const vpMatrix *C, const vpColVector *d, const std::string fct) { @@ -144,7 +147,7 @@ class VISP_EXPORT vpQuadProg if ((Ab && n != A->getCols()) || (Cd && n != C->getCols()) || (Ab && A->getRows() != b->getRows()) || (Cd && C->getRows() != d->getRows()) || Q.getRows() != r.getRows()) { std::cout << "vpQuadProg::" << fct << ": wrong dimension\n" - << "Q: " << Q.getRows() << "x" << Q.getCols() << " - r: " << r.getRows() << std::endl; + << "Q: " << Q.getRows() << "x" << Q.getCols() << " - r: " << r.getRows() << std::endl; if (Ab) std::cout << "A: " << A->getRows() << "x" << A->getCols() << " - b: " << b->getRows() << std::endl; if (Cd) @@ -153,6 +156,5 @@ class VISP_EXPORT vpQuadProg } return n; } -#endif }; #endif diff --git a/modules/core/include/visp3/core/vpQuaternionVector.h b/modules/core/include/visp3/core/vpQuaternionVector.h index 93402d38b4..af3fd9e751 100644 --- a/modules/core/include/visp3/core/vpQuaternionVector.h +++ b/modules/core/include/visp3/core/vpQuaternionVector.h @@ -141,10 +141,8 @@ class VISP_EXPORT vpQuaternionVector : public vpRotationVector vpQuaternionVector operator*(const vpQuaternionVector &rq) const; vpQuaternionVector operator/(double l) const; vpQuaternionVector &operator=(const vpColVector &q); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpQuaternionVector &operator=(const vpQuaternionVector &q) = default; vpQuaternionVector &operator=(const std::initializer_list &list); -#endif vpQuaternionVector conjugate() const; vpQuaternionVector inverse() const; diff --git a/modules/core/include/visp3/core/vpRGBa.h b/modules/core/include/visp3/core/vpRGBa.h index f44bf0e735..b36d8af960 100644 --- a/modules/core/include/visp3/core/vpRGBa.h +++ b/modules/core/include/visp3/core/vpRGBa.h @@ -67,7 +67,7 @@ class VISP_EXPORT vpRGBa Build a black value. */ - inline vpRGBa() : R(0), G(0), B(0), A(vpRGBa::alpha_default) {} + inline vpRGBa() : R(0), G(0), B(0), A(vpRGBa::alpha_default) { } /*! Constructor. @@ -81,8 +81,7 @@ class VISP_EXPORT vpRGBa */ inline vpRGBa(unsigned char r, unsigned char g, unsigned char b, unsigned char a = vpRGBa::alpha_default) : R(r), G(g), B(b), A(a) - { - } + { } /*! Constructor. @@ -91,12 +90,12 @@ class VISP_EXPORT vpRGBa \param v : Value to set. */ - inline vpRGBa(unsigned char v) : R(v), G(v), B(v), A(v) {} + inline vpRGBa(unsigned char v) : R(v), G(v), B(v), A(v) { } /*! Copy constructor. */ - inline vpRGBa(const vpRGBa &v) : R(v.R), G(v.G), B(v.B), A(v.A) {} + inline vpRGBa(const vpRGBa &v) : R(v.R), G(v.G), B(v.B), A(v.A) { } /*! Create a RGBa value from a 4 dimension column vector. @@ -115,9 +114,7 @@ class VISP_EXPORT vpRGBa vpRGBa &operator=(const unsigned char &v); vpRGBa &operator=(const vpRGBa &v); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRGBa &operator=(const vpRGBa &&v); -#endif vpRGBa &operator=(const vpColVector &v); bool operator==(const vpRGBa &v) const; bool operator!=(const vpRGBa &v) const; diff --git a/modules/core/include/visp3/core/vpRGBf.h b/modules/core/include/visp3/core/vpRGBf.h index 1db9f4b63e..ba03ebbc21 100644 --- a/modules/core/include/visp3/core/vpRGBf.h +++ b/modules/core/include/visp3/core/vpRGBf.h @@ -61,7 +61,7 @@ class VISP_EXPORT vpRGBf Build a black value. */ - inline vpRGBf() : R(0), G(0), B(0) {} + inline vpRGBf() : R(0), G(0), B(0) { } /*! Constructor. 
@@ -74,8 +74,7 @@ class VISP_EXPORT vpRGBf */ inline vpRGBf(float r, float g, float b) : R(r), G(g), B(b) - { - } + { } /*! Constructor. @@ -84,12 +83,12 @@ class VISP_EXPORT vpRGBf \param v : Value to set. */ - inline vpRGBf(float v) : R(v), G(v), B(v) {} + inline vpRGBf(float v) : R(v), G(v), B(v) { } /*! Copy constructor. */ - inline vpRGBf(const vpRGBf &v) : R(v.R), G(v.G), B(v.B) {} + inline vpRGBf(const vpRGBf &v) : R(v.R), G(v.G), B(v.B) { } /*! Create a RGB value from a 3 dimensional column vector. @@ -102,9 +101,7 @@ class VISP_EXPORT vpRGBf vpRGBf &operator=(float v); vpRGBf &operator=(const vpRGBf &v); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRGBf &operator=(const vpRGBf &&v); -#endif vpRGBf &operator=(const vpColVector &v); bool operator==(const vpRGBf &v) const; bool operator!=(const vpRGBf &v) const; diff --git a/modules/core/include/visp3/core/vpRectOriented.h b/modules/core/include/visp3/core/vpRectOriented.h index bfdd116a07..7819f3a91e 100644 --- a/modules/core/include/visp3/core/vpRectOriented.h +++ b/modules/core/include/visp3/core/vpRectOriented.h @@ -47,21 +47,13 @@ class VISP_EXPORT vpRectOriented { public: vpRectOriented(); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRectOriented(const vpRectOriented &rect) = default; -#else - vpRectOriented(const vpRectOriented &rect); -#endif vpRectOriented(const vpImagePoint ¢er, double width, double height, double theta = 0); vpRectOriented(const vpRect &rect); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRectOriented &operator=(const vpRectOriented &rect) = default; -#else - vpRectOriented &operator=(const vpRectOriented &rect); -#endif vpRectOriented &operator=(const vpRect &rect); diff --git a/modules/core/include/visp3/core/vpRobust.h b/modules/core/include/visp3/core/vpRobust.h index 0746e9a23c..c01fa0cb25 100644 --- a/modules/core/include/visp3/core/vpRobust.h +++ b/modules/core/include/visp3/core/vpRobust.h @@ -83,7 +83,8 @@ class VISP_EXPORT vpRobust { public: //! Enumeration of influence functions - typedef enum { + typedef enum + { TUKEY, //!< Tukey influence function. CAUCHY, //!< Cauchy influence function. HUBER //!< Huber influence function. @@ -115,7 +116,7 @@ class VISP_EXPORT vpRobust vpRobust(const vpRobust &other); //! Destructor - virtual ~vpRobust(){}; + virtual ~vpRobust() { }; /*! * Return residual vector Median Absolute Deviation (MAD). @@ -130,7 +131,7 @@ class VISP_EXPORT vpRobust /*! * Return the min value used to threshold residual vector Median Absolute Deviation (MAD). - * This value corresponds to the mimimal value of \f$\sigma\f$ computed in MEstimator(). + * This value corresponds to the minimal value of \f$\sigma\f$ computed in MEstimator(). * * \sa setMinMedianAbsoluteDeviation() */ @@ -139,12 +140,10 @@ class VISP_EXPORT vpRobust void MEstimator(const vpRobustEstimatorType method, const vpColVector &residues, vpColVector &weights); vpRobust &operator=(const vpRobust &other); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRobust &operator=(const vpRobust &&other); -#endif /*! - * Set minimal median absolute deviation (MAD) value corresponding to the mimimal value of + * Set minimal median absolute deviation (MAD) value corresponding to the minimal value of * \f$\sigma\f$ computed in MEstimator() with * \f$ \sigma = 1.48{Med}(|r_i - {Med}(r_i)|) \f$. * \param mad_min : Minimal Median Absolute Deviation value. 
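For reference, the vpRobust interface documented in the hunk above is typically driven as follows; a minimal sketch with made-up residual values (the Tukey weight of a clear outlier should come out close to zero):

#include <visp3/core/vpColVector.h>
#include <visp3/core/vpRobust.h>

int main()
{
  vpColVector residues(4);
  residues[0] = 0.1; residues[1] = -0.2; residues[2] = 0.15; residues[3] = 5.0; // last value is an outlier
  vpColVector weights(residues.getRows(), 1.0);

  vpRobust robust;
  robust.MEstimator(vpRobust::TUKEY, residues, weights); // Tukey influence function from the enum above
  // weights[3] can now be used to down-weight the outlier in a robust least-squares scheme
  return 0;
}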
diff --git a/modules/core/include/visp3/core/vpRotationMatrix.h b/modules/core/include/visp3/core/vpRotationMatrix.h index 09c66521ce..c0464c3146 100644 --- a/modules/core/include/visp3/core/vpRotationMatrix.h +++ b/modules/core/include/visp3/core/vpRotationMatrix.h @@ -107,10 +107,8 @@ int main() int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRotationMatrix R{ 0, 0, -1, 0, -1, 0, -1, 0, 0 }; std::cout << "R:\n" << R << std::endl; -#endif } \endcode */ @@ -129,9 +127,7 @@ class VISP_EXPORT vpRotationMatrix : public vpArray2D explicit vpRotationMatrix(const vpMatrix &R); vpRotationMatrix(double tux, double tuy, double tuz); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) explicit vpRotationMatrix(const std::initializer_list &list); -#endif vpRotationMatrix buildFrom(const vpHomogeneousMatrix &M); vpRotationMatrix buildFrom(const vpThetaUVector &v); @@ -156,9 +152,7 @@ class VISP_EXPORT vpRotationMatrix : public vpArray2D vpRotationMatrix &operator=(const vpRotationMatrix &R); // copy operator from vpMatrix (handle with care) vpRotationMatrix &operator=(const vpMatrix &M); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRotationMatrix &operator=(const std::initializer_list &list); -#endif // operation c = A * b (A is unchanged) vpTranslationVector operator*(const vpTranslationVector &tv) const; // operation C = A * B (A is unchanged) diff --git a/modules/core/include/visp3/core/vpRowVector.h b/modules/core/include/visp3/core/vpRowVector.h index be20e8f889..aeffe809be 100644 --- a/modules/core/include/visp3/core/vpRowVector.h +++ b/modules/core/include/visp3/core/vpRowVector.h @@ -90,20 +90,16 @@ int main() int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRowVector v{-1, -2.1, -3}; std::cout << "v:\n" << v << std::endl; -#endif } \endcode The vector could also be initialized using operator=(const std::initializer_list< double > &) \code int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRowVector v; v = {-1, -2.1, -3}; -#endif } \endcode */ @@ -125,10 +121,8 @@ class VISP_EXPORT vpRowVector : public vpArray2D vpRowVector(const vpMatrix &M, unsigned int i); vpRowVector(const std::vector &v); vpRowVector(const std::vector &v); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRowVector(vpRowVector &&v); vpRowVector(const std::initializer_list &list) : vpArray2D(list) { } -#endif /*! Removes all elements from the vector (which are destroyed), @@ -213,10 +207,8 @@ class VISP_EXPORT vpRowVector : public vpArray2D vpRowVector &operator=(const std::vector &v); vpRowVector &operator=(const std::vector &v); vpRowVector &operator=(double x); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRowVector &operator=(vpRowVector &&v); vpRowVector &operator=(const std::initializer_list &list); -#endif //! Comparison operator. 
bool operator==(const vpRowVector &v) const; bool operator!=(const vpRowVector &v) const; diff --git a/modules/core/include/visp3/core/vpRxyzVector.h b/modules/core/include/visp3/core/vpRxyzVector.h index aa5d3c086e..02d288feef 100644 --- a/modules/core/include/visp3/core/vpRxyzVector.h +++ b/modules/core/include/visp3/core/vpRxyzVector.h @@ -121,9 +121,7 @@ class vpThetaUVector; \endcode Or you can also initialize the vector from a list of doubles if ViSP is build with c++11 enabled: \code -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) rxyz = {M_PI_4, M_PI_2, M_PI}; -#endif \endcode To get the values [rad] use: @@ -201,10 +199,8 @@ class VISP_EXPORT vpRxyzVector : public vpRotationVector vpRxyzVector &operator=(const vpColVector &rxyz); vpRxyzVector &operator=(double x); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRxyzVector &operator=(const vpRxyzVector &rxyz) = default; vpRxyzVector &operator=(const std::initializer_list &list); -#endif }; #endif diff --git a/modules/core/include/visp3/core/vpRzyxVector.h b/modules/core/include/visp3/core/vpRzyxVector.h index da55f25b90..e21bd61a25 100644 --- a/modules/core/include/visp3/core/vpRzyxVector.h +++ b/modules/core/include/visp3/core/vpRzyxVector.h @@ -124,9 +124,7 @@ class vpThetaUVector; \endcode Or you can also initialize the vector from a list of doubles if ViSP is build with c++11 enabled: \code -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) rzyx = {M_PI_4, M_PI_2, M_PI}; -#endif \endcode To get the values [rad] use: @@ -202,10 +200,8 @@ class VISP_EXPORT vpRzyxVector : public vpRotationVector vpRzyxVector &operator=(const vpColVector &rzyx); vpRzyxVector &operator=(double x); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRzyxVector &operator=(const vpRzyxVector &rzyx) = default; vpRzyxVector &operator=(const std::initializer_list &list); -#endif }; #endif diff --git a/modules/core/include/visp3/core/vpRzyzVector.h b/modules/core/include/visp3/core/vpRzyzVector.h index bfc5c3ae09..82fd27d331 100644 --- a/modules/core/include/visp3/core/vpRzyzVector.h +++ b/modules/core/include/visp3/core/vpRzyzVector.h @@ -123,9 +123,7 @@ class vpThetaUVector; \endcode Or you can also initialize the vector from a list of doubles if ViSP is build with c++11 enabled: \code -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) rzyz = {M_PI_4, M_PI_2, M_PI}; -#endif \endcode To get the values [rad] use: @@ -201,9 +199,7 @@ class VISP_EXPORT vpRzyzVector : public vpRotationVector vpRzyzVector &operator=(const vpColVector &rzyz); vpRzyzVector &operator=(double x); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRzyzVector &operator=(const vpRzyzVector &rzyz) = default; vpRzyzVector &operator=(const std::initializer_list &list); -#endif }; #endif diff --git a/modules/core/include/visp3/core/vpThetaUVector.h b/modules/core/include/visp3/core/vpThetaUVector.h index fb442147ba..58df309e97 100644 --- a/modules/core/include/visp3/core/vpThetaUVector.h +++ b/modules/core/include/visp3/core/vpThetaUVector.h @@ -113,9 +113,7 @@ class vpQuaternionVector; \endcode Or you can also initialize the vector from a list of doubles if ViSP is build with c++11 enabled: \code -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) tu = {M_PI_4, M_PI_2, M_PI}; -#endif \endcode To get the values [rad] use: @@ -218,9 +216,7 @@ class VISP_EXPORT vpThetaUVector : public vpRotationVector vpThetaUVector &operator=(double x); vpThetaUVector operator*(const vpThetaUVector &tu_b) const; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpThetaUVector &operator=(const 
std::initializer_list &list); -#endif }; #endif diff --git a/modules/core/include/visp3/core/vpTime.h b/modules/core/include/visp3/core/vpTime.h index 3bc5d5ae2b..0977febeec 100644 --- a/modules/core/include/visp3/core/vpTime.h +++ b/modules/core/include/visp3/core/vpTime.h @@ -41,9 +41,7 @@ #include #include #include -#if VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11 #include -#endif #include @@ -94,8 +92,7 @@ class VISP_EXPORT vpChrono private: double m_durationMs; -#if VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11 && \ - (defined(_MSC_VER) && _MSC_VER >= 1900 /* VS2015 */ || !defined(_MSC_VER)) +#if (defined(_MSC_VER) && _MSC_VER >= 1900 /* VS2015 */ || !defined(_MSC_VER)) std::chrono::steady_clock::time_point m_lastTimePoint; #else double m_lastTimePoint; diff --git a/modules/core/include/visp3/core/vpTranslationVector.h b/modules/core/include/visp3/core/vpTranslationVector.h index d8576c2ab0..0d9c9a4fd2 100644 --- a/modules/core/include/visp3/core/vpTranslationVector.h +++ b/modules/core/include/visp3/core/vpTranslationVector.h @@ -73,9 +73,7 @@ \endcode Or you can also initialize the vector from a list of doubles if ViSP is build with c++11 enabled: \code -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) t = {0, 0.1, 0.5}; -#endif \endcode To get the values [meters] use: @@ -150,9 +148,7 @@ class VISP_EXPORT vpTranslationVector : public vpArray2D vpTranslationVector &operator=(const vpColVector &tv); vpTranslationVector &operator=(const vpTranslationVector &tv); vpTranslationVector &operator=(double x); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpTranslationVector &operator=(const std::initializer_list &list); -#endif //! Operator that allows to set a value of an element \f$t_i\f$: t[i] = x inline double &operator[](unsigned int n) { return *(data + n); } diff --git a/modules/core/include/visp3/core/vpUniRand.h b/modules/core/include/visp3/core/vpUniRand.h index 8b17b16a24..34dfd32833 100644 --- a/modules/core/include/visp3/core/vpUniRand.h +++ b/modules/core/include/visp3/core/vpUniRand.h @@ -72,13 +72,9 @@ typedef unsigned __int32 uint32_t; #include #endif -#if (VISP_CXX_STANDARD <= VISP_CXX_STANDARD_11) -#include // std::random_shuffle -#else #include // std::shuffle #include // std::mt19937 #include // std::iota -#endif #include /*! @@ -156,11 +152,7 @@ class VISP_EXPORT vpUniRand inline static std::vector shuffleVector(const std::vector &inputVector) { std::vector shuffled = inputVector; -#if (VISP_CXX_STANDARD <= VISP_CXX_STANDARD_11) - std::random_shuffle(shuffled.begin(), shuffled.end()); -#else std::shuffle(shuffled.begin(), shuffled.end(), std::mt19937 { std::random_device{}() }); -#endif return shuffled; } diff --git a/modules/core/src/camera/vpColorDepthConversion.cpp b/modules/core/src/camera/vpColorDepthConversion.cpp index c49b8fb5cd..67774be7e3 100644 --- a/modules/core/src/camera/vpColorDepthConversion.cpp +++ b/modules/core/src/camera/vpColorDepthConversion.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Color to depth conversion. - * - * Authors: - * Julien Dufour - * -*****************************************************************************/ + */ /*! 
- \file vpColorDepthConversion.cpp - \brief color to depth conversion -*/ + * \file vpColorDepthConversion.cpp + * \brief color to depth conversion + */ #include @@ -64,11 +59,7 @@ namespace */ vpImagePoint adjust2DPointToBoundary(const vpImagePoint &ip, double width, double height) { -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) - return {vpMath::clamp(ip.get_i(), 0., height), vpMath::clamp(ip.get_j(), 0., width)}; -#else - return vpImagePoint(vpMath::clamp(ip.get_i(), 0., height), vpMath::clamp(ip.get_j(), 0., width)); -#endif + return { vpMath::clamp(ip.get_i(), 0., height), vpMath::clamp(ip.get_j(), 0., width) }; } /*! @@ -80,15 +71,9 @@ vpImagePoint adjust2DPointToBoundary(const vpImagePoint &ip, double width, doubl */ vpColVector transform(const vpHomogeneousMatrix &extrinsics_params, vpColVector from_point) { -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) - from_point = {from_point, 0, 3}; - from_point.stack(1.); - return {extrinsics_params * from_point, 0, 3}; -#else - from_point = vpColVector(from_point, 0, 3); + from_point = { from_point, 0, 3 }; from_point.stack(1.); - return vpColVector(extrinsics_params * from_point, 0, 3); -#endif + return { extrinsics_params * from_point, 0, 3 }; } /*! @@ -100,11 +85,7 @@ vpColVector transform(const vpHomogeneousMatrix &extrinsics_params, vpColVector */ vpImagePoint project(const vpCameraParameters &intrinsic_cam_params, const vpColVector &point) { -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) - vpImagePoint iP{}; -#else - vpImagePoint iP; -#endif + vpImagePoint iP {}; vpMeterPixelConversion::convertPoint(intrinsic_cam_params, point[0] / point[2], point[1] / point[2], iP); return iP; @@ -120,20 +101,9 @@ vpImagePoint project(const vpCameraParameters &intrinsic_cam_params, const vpCol */ vpColVector deproject(const vpCameraParameters &intrinsic_cam_params, const vpImagePoint &pixel, double depth) { -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) - double x{0.}, y{0.}; + double x { 0. }, y { 0. }; vpPixelMeterConversion::convertPoint(intrinsic_cam_params, pixel, x, y); - return {x * depth, y * depth, depth}; -#else - double x = 0., y = 0.; - vpPixelMeterConversion::convertPoint(intrinsic_cam_params, pixel, x, y); - - vpColVector p(3); - p[0] = x * depth; - p[1] = y * depth; - p[2] = depth; - return p; -#endif + return { x * depth, y * depth, depth }; } } // namespace @@ -143,12 +113,15 @@ vpColVector deproject(const vpCameraParameters &intrinsic_cam_params, const vpIm * * \param[in] I_depth : Depth raw image. * \param[in] depth_scale : Depth scale to convert depth raw values in [m]. If depth raw values in `I_depth` are in - * [mm], depth scale should be 0.001. \param[in] depth_min : Minimal depth value for correspondance [m]. \param[in] - * depth_max : Maximal depth value for correspondance [m]. \param[in] depth_intrinsics : Intrinsic depth camera - * parameters. \param[in] color_intrinsics : Intrinsic color camera parameters. \param[in] color_M_depth : Relationship - * between color and depth cameras (ie, extrinsic rgbd camera parameters). \param[in] depth_M_color : Relationship - * between depth and color cameras (ie, extrinsic rgbd camera parameters). \param[in] from_pixel : Image point expressed - * into the color camera frame. \return Image point expressed into the depth camera frame. + * [mm], depth scale should be 0.001. + * \param[in] depth_min : Minimal depth value for correspondence [m]. + * \param[in] depth_max : Maximal depth value for correspondence [m]. + * \param[in] depth_intrinsics : Intrinsic depth camera parameters. 
+ * \param[in] color_intrinsics : Intrinsic color camera parameters. + * \param[in] color_M_depth : Relationship between color and depth cameras (ie, extrinsic rgb-d camera parameters). + * \param[in] depth_M_color : Relationship between depth and color cameras (ie, extrinsic rgb-d camera parameters). + * \param[in] from_pixel : Image point expressed into the color camera frame. + * \return Image point expressed into the depth camera frame. */ vpImagePoint vpColorDepthConversion::projectColorToDepth( const vpImage &I_depth, double depth_scale, double depth_min, double depth_max, @@ -164,12 +137,15 @@ vpImagePoint vpColorDepthConversion::projectColorToDepth( * * \param[in] data : Depth raw values. * \param[in] depth_scale : Depth scale to convert depth raw values in [m]. If depth raw values in `data` are in [mm], - * depth scale should be 0.001. \param[in] depth_min : Minimal depth value for correspondance [m]. \param[in] depth_max - * : Maximal depth value for correspondance [m]. \param[in] depth_width : Depth image width [pixel]. \param[in] - * depth_height : Depth image height [pixel]. \param[in] depth_intrinsics : Intrinsic depth camera parameters. + * depth scale should be 0.001. + * \param[in] depth_min : Minimal depth value for correspondence [m]. + * \param[in] depth_max : Maximal depth value for correspondence [m]. + * \param[in] depth_width : Depth image width [pixel]. + * \param[in] depth_height : Depth image height [pixel]. + * \param[in] depth_intrinsics : Intrinsic depth camera parameters. * \param[in] color_intrinsics : Intrinsic color camera parameters. - * \param[in] color_M_depth : Relationship between color and depth cameras (ie, extrinsic rgbd camera parameters). - * \param[in] depth_M_color : Relationship between depth and color cameras (ie, extrinsic rgbd camera parameters). + * \param[in] color_M_depth : Relationship between color and depth cameras (ie, extrinsic rgb-d camera parameters). + * \param[in] depth_M_color : Relationship between depth and color cameras (ie, extrinsic rgb-d camera parameters). * \param[in] from_pixel : Image point expressed into the color camera frame. * \return Image point expressed into the depth camera frame. 
*/ @@ -178,8 +154,7 @@ vpImagePoint vpColorDepthConversion::projectColorToDepth( double depth_height, const vpCameraParameters &depth_intrinsics, const vpCameraParameters &color_intrinsics, const vpHomogeneousMatrix &color_M_depth, const vpHomogeneousMatrix &depth_M_color, const vpImagePoint &from_pixel) { -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) - vpImagePoint depth_pixel{}; + vpImagePoint depth_pixel {}; // Find line start pixel const auto min_point = deproject(color_intrinsics, from_pixel, depth_min); @@ -206,48 +181,12 @@ vpImagePoint vpColorDepthConversion::projectColorToDepth( const auto projected_pixel = project(color_intrinsics, transformed_point); const auto new_dist = vpMath::sqr(projected_pixel.get_v() - from_pixel.get_v()) + - vpMath::sqr(projected_pixel.get_u() - from_pixel.get_u()); - if (new_dist < min_dist || min_dist < 0) { - min_dist = new_dist; - depth_pixel = curr_pixel; - } - } - -#else - vpImagePoint depth_pixel; - - // Find line start pixel - const vpColVector min_point = deproject(color_intrinsics, from_pixel, depth_min); - const vpColVector min_transformed_point = transform(depth_M_color, min_point); - vpImagePoint start_pixel = project(depth_intrinsics, min_transformed_point); - start_pixel = adjust2DPointToBoundary(start_pixel, depth_width, depth_height); - - // Find line end depth pixel - const vpColVector max_point = deproject(color_intrinsics, from_pixel, depth_max); - const vpColVector max_transformed_point = transform(depth_M_color, max_point); - vpImagePoint end_pixel = project(depth_intrinsics, max_transformed_point); - end_pixel = adjust2DPointToBoundary(end_pixel, depth_width, depth_height); - - // search along line for the depth pixel that it's projected pixel is the closest to the input pixel - double min_dist = -1.; - for (vpImagePoint curr_pixel = start_pixel; curr_pixel.inSegment(start_pixel, end_pixel) && curr_pixel != end_pixel; - curr_pixel = curr_pixel.nextInSegment(start_pixel, end_pixel)) { - const double depth = depth_scale * data[static_cast(curr_pixel.get_v() * depth_width + curr_pixel.get_u())]; - if (std::fabs(depth) <= std::numeric_limits::epsilon()) - continue; - - const vpColVector point = deproject(depth_intrinsics, curr_pixel, depth); - const vpColVector transformed_point = transform(color_M_depth, point); - const vpImagePoint projected_pixel = project(color_intrinsics, transformed_point); - - const double new_dist = vpMath::sqr(projected_pixel.get_v() - from_pixel.get_v()) + - vpMath::sqr(projected_pixel.get_u() - from_pixel.get_u()); + vpMath::sqr(projected_pixel.get_u() - from_pixel.get_u()); if (new_dist < min_dist || min_dist < 0) { min_dist = new_dist; depth_pixel = curr_pixel; } } -#endif return depth_pixel; } diff --git a/modules/core/src/image/private/vpImageConvert_impl.h b/modules/core/src/image/private/vpImageConvert_impl.h index b057180907..975c8cdd23 100644 --- a/modules/core/src/image/private/vpImageConvert_impl.h +++ b/modules/core/src/image/private/vpImageConvert_impl.h @@ -41,7 +41,7 @@ #ifndef vpIMAGECONVERT_impl_H #define vpIMAGECONVERT_impl_H -#if defined(VISP_HAVE_OPENMP) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_OPENMP) #include #include #endif @@ -60,7 +60,7 @@ void vp_createDepthHistogram(const vpImage &src_depth, vpImage= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_OPENMP) int nThreads = omp_get_max_threads(); std::vector > histograms(nThreads); for (int i = 0; i < nThreads; i++) { @@ -100,9 +100,10 @@ void vp_createDepthHistogram(const vpImage &src_depth, 
vpImage(src_depth.bitmap[i]); if (d) { unsigned char f = - static_cast(histogram[d] * 255 / histogram[0xFFFF]); // 0-255 based on histogram location + static_cast(histogram[d] * 255 / histogram[0xFFFF]); // 0-255 based on histogram location dest_depth.bitmap[i] = f; - } else { + } + else { dest_depth.bitmap[i] = 0; } } @@ -119,7 +120,7 @@ void vp_createDepthHistogram(const vpImage &src_depth, vpImage= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_OPENMP) int nThreads = omp_get_max_threads(); std::vector > histograms(nThreads); for (int i = 0; i < nThreads; i++) { @@ -155,9 +156,10 @@ void vp_createDepthHistogram(const vpImage &src_depth, vpImage(histogram[d] * 255 / histogram[0xFFFF]); // 0-255 based on histogram location + static_cast(histogram[d] * 255 / histogram[0xFFFF]); // 0-255 based on histogram location dest_depth.bitmap[i] = f; - } else { + } + else { dest_depth.bitmap[i] = 0; } } @@ -198,7 +200,8 @@ void vp_createDepthHistogram(const vpImage &src_depth, vpImage &d dest_depth.bitmap[i].G = 0; dest_depth.bitmap[i].B = f; dest_depth.bitmap[i].A = vpRGBa::alpha_default; - } else { + } + else { dest_depth.bitmap[i].R = 20; dest_depth.bitmap[i].G = 5; dest_depth.bitmap[i].B = 0; @@ -240,7 +243,8 @@ void vp_createDepthHistogram(const vpImage &src_depth, vpImage dest_depth.bitmap[i].G = 0; dest_depth.bitmap[i].B = f; dest_depth.bitmap[i].A = vpRGBa::alpha_default; - } else { + } + else { dest_depth.bitmap[i].R = 20; dest_depth.bitmap[i].G = 5; dest_depth.bitmap[i].B = 0; diff --git a/modules/core/src/image/vpColormap.cpp b/modules/core/src/image/vpColormap.cpp index 982ac9eb6d..2c1ff37eba 100644 --- a/modules/core/src/image/vpColormap.cpp +++ b/modules/core/src/image/vpColormap.cpp @@ -34,8 +34,6 @@ #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - namespace { unsigned char m_autumnSrgbBytes[256][3] = { @@ -1219,5 +1217,3 @@ void vpColormap::convert(const vpImage &I, vpImage &Icolor) } convert(I_float, Icolor); } - -#endif diff --git a/modules/core/src/image/vpFont.cpp b/modules/core/src/image/vpFont.cpp index 24d71eda9f..ff72d5b283 100644 --- a/modules/core/src/image/vpFont.cpp +++ b/modules/core/src/image/vpFont.cpp @@ -61,11 +61,7 @@ #include #include #include -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) #include -#else -#include -#endif #include "private/Font.hpp" #define STB_TRUETYPE_IMPLEMENTATION @@ -203,7 +199,8 @@ class vpFont::Impl curr.x += _currentSize.x; size.x = std::max(size.x, curr.x); size.y = std::max(size.y, curr.y + _currentSize.y); - } else if (text[i] == '\n') { + } + else if (text[i] == '\n') { curr.x = 0; curr.y += _currentSize.y; } @@ -345,7 +342,7 @@ class vpFont::Impl int coeff = 255 - _fontBuffer[y][x]; unsigned char gray = - static_cast((color * _fontBuffer[y][x] + coeff * canvas[dstY][dstX]) / 255); + static_cast((color * _fontBuffer[y][x] + coeff * canvas[dstY][dstX]) / 255); canvas[dstY][dstX] = gray; } } @@ -420,7 +417,7 @@ class vpFont::Impl int G = (color.G * _alpha[i][j] + coeff * canvas[dstY][dstX].G) / 255; int B = (color.B * _alpha[i][j] + coeff * canvas[dstY][dstX].B) / 255; canvas[dstY][dstX] = - vpRGBa(static_cast(R), static_cast(G), static_cast(B)); + vpRGBa(static_cast(R), static_cast(G), static_cast(B)); } } } @@ -459,7 +456,7 @@ class vpFont::Impl int G = (color.G * _fontBuffer[y][x] + coeff * canvas[dstY][dstX].G) / 255; int B = (color.B * _fontBuffer[y][x] + coeff * canvas[dstY][dstX].B) / 255; canvas[dstY][dstX] = - vpRGBa(static_cast(R), static_cast(G), static_cast(B)); + vpRGBa(static_cast(R), static_cast(G), 
static_cast(B)); } } @@ -504,7 +501,8 @@ class vpFont::Impl } private: - struct Symbol { + struct Symbol + { char value; vpImage image; }; @@ -551,7 +549,8 @@ class vpFont::Impl symbols.push_back(value); } curr.x += _currentSize.x; - } else if (value == '\n') { + } + else if (value == '\n') { curr.x = 0; curr.y += _currentSize.y; } @@ -611,7 +610,8 @@ class vpFont::Impl } } } - } catch (...) { + } + catch (...) { _originalSize = Point(); _originalSymbols.clear(); return false; @@ -2527,7 +2527,6 @@ class vpFont::Impl void LoadTTF(const std::string &filename) { -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) std::ifstream fontFile(filename.c_str(), std::ios::binary | std::ios::ate); fontFile >> std::noskipws; @@ -2539,25 +2538,6 @@ class vpFont::Impl std::copy(std::istream_iterator(fontFile), std::istream_iterator(), std::back_inserter(fontBuffer)); -#else - // for compatibility with old platform (e.g. Ubuntu 12.04) - FILE *fontFile = fopen(filename.c_str(), "rb"); - if (!fontFile) { - fclose(fontFile); - throw vpIoException(vpIoException::ioError, "Cannot open TTF file: %s", filename.c_str()); - } - - fseek(fontFile, 0, SEEK_END); - size_t fileSize = ftell(fontFile); /* how long is the file? */ - fseek(fontFile, 0, SEEK_SET); /* reset */ - - fontBuffer.resize(fileSize); - if (fread(&fontBuffer[0], sizeof(unsigned char), fontBuffer.size(), fontFile) != fontBuffer.size()) { - fclose(fontFile); - throw vpIoException(vpIoException::ioError, "Error when reading the font file."); - } - fclose(fontFile); -#endif /* prepare font */ if (!stbtt_InitFont(&_info, fontBuffer.data(), 0)) { @@ -2574,21 +2554,25 @@ class vpFont::Impl unsigned int charcode; if (ch <= 127) { charcode = ch; - } else if (ch <= 223 && i + 1 < str.size() && (str[i + 1] & 0xc0) == 0x80) { + } + else if (ch <= 223 && i + 1 < str.size() && (str[i + 1] & 0xc0) == 0x80) { charcode = ((ch & 31) << 6) | (str[i + 1] & 63); i++; - } else if (ch <= 239 && i + 2 < str.size() && (str[i + 1] & 0xc0) == 0x80 && (str[i + 2] & 0xc0) == 0x80) { + } + else if (ch <= 239 && i + 2 < str.size() && (str[i + 1] & 0xc0) == 0x80 && (str[i + 2] & 0xc0) == 0x80) { charcode = ((ch & 15) << 12) | ((str[i + 1] & 63) << 6) | (str[i + 2] & 63); i += 2; - } else if (ch <= 247 && i + 3 < str.size() && (str[i + 1] & 0xc0) == 0x80 && (str[i + 2] & 0xc0) == 0x80 && - (str[i + 3] & 0xc0) == 0x80) { + } + else if (ch <= 247 && i + 3 < str.size() && (str[i + 1] & 0xc0) == 0x80 && (str[i + 2] & 0xc0) == 0x80 && + (str[i + 3] & 0xc0) == 0x80) { int val = (int)(((ch & 15) << 18) | ((str[i + 1] & 63) << 12) | ((str[i + 2] & 63) << 6) | (str[i + 3] & 63)); if (val > 1114111) { val = 65533; } charcode = val; i += 3; - } else { + } + else { charcode = 65533; while (i + 1 < str.size() && (str[i + 1] & 0xc0) == 0x80) { i++; @@ -2613,8 +2597,7 @@ class vpFont::Impl */ vpFont::vpFont(unsigned int height, const vpFontFamily &fontFamily, const std::string &ttfFilename) : m_impl(new Impl(height, fontFamily, ttfFilename)) -{ -} +{ } /*! Destructor. diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 94d1111019..20f55874a9 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -643,7 +643,7 @@ std::vector vpImageFilter::median(const vpImage &Isrc) * \param[in] p_cv_dIx : If different from nullptr, the gradient of cv_I with regard to the horizontal axis. * \param[in] p_cv_dIy : If different from nullptr, the gradient of cv_I with regard to the vertical axis. 
* \param[out] lowerThresh : The lower threshold for the Canny edge filter. - * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to apply (an odd number). + * \param[in] gaussianKernelSize : The size of the mask of the Gaussian filter to apply (an odd number). * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. * \param[in] apertureGradient : Size of the mask for the Sobel operator (odd number). * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. @@ -654,10 +654,10 @@ std::vector vpImageFilter::median(const vpImage &Isrc) * \return The upper Canny edge filter threshold. */ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p_cv_dIx, const cv::Mat *p_cv_dIy, - float &lowerThresh, const unsigned int &gaussianKernelSize, - const float &gaussianStdev, const unsigned int &apertureGradient, - const float &lowerThresholdRatio, const float &upperThresholdRatio, - const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) + float &lowerThresh, const unsigned int &gaussianKernelSize, + const float &gaussianStdev, const unsigned int &apertureGradient, + const float &lowerThresholdRatio, const float &upperThresholdRatio, + const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) { double w = cv_I.cols; double h = cv_I.rows; @@ -711,21 +711,22 @@ float vpImageFilter::computeCannyThreshold(const cv::Mat &cv_I, const cv::Mat *p * \param[in] cv_I The input image we want the partial derivatives. * \param[out] cv_dIx The horizontal partial derivative, i.e. horizontal gradient. * \param[out] cv_dIy The vertical partial derivative, i.e. vertical gradient. - * \param[in] computeDx Idicate if we must compute the horizontal gradient. - * \param[in] computeDy Idicate if we must compute the vertical gradient. - * \param[in] normalize Idicate if we must normalize the gradient filters. + * \param[in] computeDx Indicate if we must compute the horizontal gradient. + * \param[in] computeDy Indicate if we must compute the vertical gradient. + * \param[in] normalize Indicate if we must normalize the gradient filters. * \param[in] gaussianKernelSize The size of the kernel of the Gaussian filter used to blur the image. * \param[in] gaussianStdev The standard deviation of the Gaussian filter used to blur the image. + * If it is non-positive, it is computed from kernel size (`gaussianKernelSize` parameter) as + * \f$\sigma = 0.3*((gaussianKernelSize-1)*0.5 - 1) + 0.8\f$. * \param[in] apertureGradient The size of the kernel of the gradient filter. * \param[in] filteringType The type of filters to apply to compute the gradients. - * \param[in] backend The type of backend to use to compute the gradients. 
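The upper and lower thresholds returned by computeCannyThreshold() above follow a simple rule on the histogram of the absolute gradient image: the upper threshold is the first bin whose cumulative count exceeds upperThresholdRatio times the number of pixels, and the lower threshold is lowerThresholdRatio times that bin. A standalone sketch of just that rule (the helper name is made up, and the histogram is assumed to be already filled):

#include <algorithm>

float upperThresholdFromHistogram(const unsigned int hist[256], unsigned int nbPixels,
                                  float lowerThresholdRatio, float upperThresholdRatio,
                                  float &lowerThresh)
{
  float accu = 0.f;
  const float target = upperThresholdRatio * static_cast<float>(nbPixels);
  float bin = 0.f;
  for (unsigned int i = 0; i < 256; ++i) {
    accu += static_cast<float>(hist[i]);
    if (accu > target) { // enough pixels have an absolute gradient lower or equal to this bin
      bin = static_cast<float>(i);
      break;
    }
  }
  lowerThresh = lowerThresholdRatio * bin;
  return std::max(bin, 1.f); // upper threshold, never below 1
}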
*/ void vpImageFilter::computePartialDerivatives(const cv::Mat &cv_I, - cv::Mat &cv_dIx, cv::Mat &cv_dIy, - const bool &computeDx, const bool &computeDy, const bool &normalize, - const unsigned int &gaussianKernelSize, const float &gaussianStdev, - const unsigned int &apertureGradient, - const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) + cv::Mat &cv_dIx, cv::Mat &cv_dIy, + const bool &computeDx, const bool &computeDy, const bool &normalize, + const unsigned int &gaussianKernelSize, const float &gaussianStdev, + const unsigned int &apertureGradient, + const vpImageFilter::vpCannyFilteringAndGradientType &filteringType) { if (filteringType == vpImageFilter::CANNY_GBLUR_SCHARR_FILTERING || filteringType == vpImageFilter::CANNY_GBLUR_SOBEL_FILTERING) { @@ -950,80 +951,81 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage -#include - -int main() -{ - // Constants for the Canny operator. - const unsigned int gaussianFilterSize = 5; - const float gaussianStdev = 2.0f; - const float upperThresholdCanny = 15.f; - const float lowerThresholdCanny = 5.f; - const float upperThresholdRatio = 0.8f; - const float lowerThresholdRatio = 0.6f; - const unsigned int apertureSobel = 3; - const bool normalizeGradient = true; - const vpCannyBackendType cannyBackend = CANNY_OPENCV_BACKEND; // or CANNY_VISP_BACKEND; - const vpCannyFilteringAndGradientType filteringType = CANNY_GBLUR_SOBEL_FILTERING; // or CANNY_GBLUR_SCHARR_FILTERING - - // Image for the Canny edge operator - vpImage Isrc; - vpImage Icanny; - - // First grab the source image Isrc. - - // Apply the Canny edge operator and set the Icanny image. - vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, lowerThresholdCanny, upperThresholdCanny, apertureSobel, - gaussianStdev, lowerThresholdRatio, upperThresholdRatio, normalizeGradient, - cannyBackend, filteringType); - return (0); -} - \endcode - - \param[in] Isrc : Image to apply the Canny edge detector to. - \param[out] Ires : Filtered image (255 means an edge, 0 otherwise). - \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to - apply (an odd number). - \param[in] lowerThreshold : The lower threshold for the Canny operator. Values lower - than this value are rejected. If negative, it will be set to one third - of the thresholdCanny . - \param[in] upperThreshold : The upper threshold for the Canny operator. Only value - greater than this value are marked as an edge. If negative, it will be automatically - computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third - of the upper threshold . - \param[in] apertureGradient : Size of the mask for the gardient (Sobel or Scharr) operator (odd number). - \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. - \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. - It is used only if the user asks to compute the Canny thresholds. - \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient Gabs is lower or equal to define - the upper threshold. It is used only if the user asks to compute the Canny thresholds. - \param[in] normalizeGradients : Needs to be true if asking to compute the \b upperThreshold , otherwise it depends on - the user application and user-defined thresholds. - \param[in] cannyBackend : The backend to use to perform the Canny edge filtering. 
- \param[in] cannyFilteringSteps : The filtering + gradient operators to apply to compute the gradient in the early - stage of the Canny algoritgm. -*/ + * Apply the Canny edge operator on the image \e Isrc and return the resulting + * image \e Ires. + * + * The following example shows how to use the method: + * + * \code + * #include + * #include + * + * int main() + * { + * // Constants for the Canny operator. + * const unsigned int gaussianFilterSize = 5; + * const float gaussianStdev = 2.0f; + * const float upperThresholdCanny = 15.f; + * const float lowerThresholdCanny = 5.f; + * const float upperThresholdRatio = 0.8f; + * const float lowerThresholdRatio = 0.6f; + * const unsigned int apertureSobel = 3; + * const bool normalizeGradient = true; + * const vpCannyBackendType cannyBackend = CANNY_OPENCV_BACKEND; // or CANNY_VISP_BACKEND; + * const vpCannyFilteringAndGradientType filteringType = CANNY_GBLUR_SOBEL_FILTERING; // or CANNY_GBLUR_SCHARR_FILTERING + * + * // Image for the Canny edge operator + * vpImage Isrc; + * vpImage Icanny; + * + * // First grab the source image Isrc. + * + * // Apply the Canny edge operator and set the Icanny image. + * vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, lowerThresholdCanny, upperThresholdCanny, apertureSobel, + * gaussianStdev, lowerThresholdRatio, upperThresholdRatio, normalizeGradient, + * cannyBackend, filteringType); + * return (0); + * } + * \endcode + * + * \param[in] Isrc : Image to apply the Canny edge detector to. + * \param[out] Ires : Filtered image (255 means an edge, 0 otherwise). + * \param[in] gaussianFilterSize : The size of the mask of the Gaussian filter to + * apply (an odd number). + * \param[in] lowerThreshold : The lower threshold for the Canny operator. Values lower + * than this value are rejected. If negative, it will be set to one third + * of the thresholdCanny. + * \param[in] upperThreshold : The upper threshold for the Canny operator. Only value + * greater than this value are marked as an edge. If negative, it will be automatically + * computed, along with the lower threshold. Otherwise, the lower threshold will be set to one third + * of the upper threshold. + * \param[in] apertureGradient : Size of the mask for the gradient (Sobel or Scharr) operator (odd number). + * \param[in] gaussianStdev : The standard deviation of the Gaussian filter to apply. + * If it is non-positive, it is computed from kernel size (`gaussianKernelSize` parameter) as + * \f$\sigma = 0.3*((gaussianKernelSize-1)*0.5 - 1) + 0.8\f$. + * \param[in] lowerThresholdRatio : The ratio of the upper threshold the lower threshold must be equal to. + * It is used only if the user asks to compute the Canny thresholds. + * \param[in] upperThresholdRatio : The ratio of pixels whose absolute gradient is lower or equal to define + * the upper threshold. It is used only if the user asks to compute the Canny thresholds. + * \param[in] normalizeGradients : Needs to be true if asking to compute the \b upperThreshold, otherwise it depends on + * the user application and user-defined thresholds. + * \param[in] cannyBackend : The backend to use to perform the Canny edge filtering. + * \param[in] cannyFilteringSteps : The filtering + gradient operators to apply to compute the gradient in the early + * stage of the Canny algorithm. 
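// Illustration: a rough, self-contained sketch of the ratio semantics documented above --
// the upper threshold is the gradient value below which a fraction upperThresholdRatio of
// the pixels fall, and the lower threshold is lowerThresholdRatio times the upper one.
// This is only an interpretation of the documented parameters, not the library
// implementation; the function name and histogram layout are assumptions of the sketch.
#include <cstddef>
#include <vector>

void sketchCannyThresholds(const std::vector<unsigned int> &gradHistogram, // histogram of |gradient|, one bin per value
                           float lowerThresholdRatio, float upperThresholdRatio,
                           float &lowerThresh, float &upperThresh)
{
  unsigned int nbPixels = 0;
  for (std::size_t i = 0; i < gradHistogram.size(); ++i) {
    nbPixels += gradHistogram[i];
  }
  const unsigned int target = static_cast<unsigned int>(upperThresholdRatio * nbPixels);
  unsigned int accu = 0;
  std::size_t bin = 0;
  while (bin < gradHistogram.size() && accu < target) {
    accu += gradHistogram[bin];
    ++bin;
  }
  upperThresh = static_cast<float>(bin);           // e.g. with upperThresholdRatio = 0.8, 80% of pixels lie below it
  lowerThresh = lowerThresholdRatio * upperThresh; // e.g. lowerThresholdRatio = 0.6 of the upper threshold
}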
+ */ void vpImageFilter::canny(const vpImage &Isrc, vpImage &Ires, - const unsigned int &gaussianFilterSize, - const float &lowerThreshold, const float &upperThreshold, const unsigned int &apertureGradient, - const float &gaussianStdev, const float &lowerThresholdRatio, const float &upperThresholdRatio, - const bool &normalizeGradients, - const vpCannyBackendType &cannyBackend, const vpCannyFilteringAndGradientType &cannyFilteringSteps -) + const unsigned int &gaussianFilterSize, + const float &lowerThreshold, const float &upperThreshold, const unsigned int &apertureGradient, + const float &gaussianStdev, const float &lowerThresholdRatio, const float &upperThresholdRatio, + const bool &normalizeGradients, + const vpCannyBackendType &cannyBackend, const vpCannyFilteringAndGradientType &cannyFilteringSteps) { if (cannyBackend == CANNY_OPENCV_BACKEND) { #if defined(HAVE_OPENCV_IMGPROC) cv::Mat img_cvmat, cv_dx, cv_dy, edges_cvmat; vpImageConvert::convert(Isrc, img_cvmat); computePartialDerivatives(img_cvmat, cv_dx, cv_dy, true, true, normalizeGradients, gaussianFilterSize, - gaussianStdev, apertureGradient, cannyFilteringSteps); + gaussianStdev, apertureGradient, cannyFilteringSteps); float upperCannyThresh = upperThreshold; float lowerCannyThresh = lowerThreshold; if (upperCannyThresh < 0) { @@ -1034,7 +1036,13 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage= 0x030200) cv::Canny(cv_dx, cv_dy, edges_cvmat, lowerCannyThresh, upperCannyThresh, false); +#else + cv::GaussianBlur(img_cvmat, img_cvmat, cv::Size((int)gaussianFilterSize, (int)gaussianFilterSize), + gaussianStdev, gaussianStdev); + cv::Canny(img_cvmat, edges_cvmat, lowerCannyThresh, upperCannyThresh); +#endif vpImageConvert::convert(edges_cvmat, Ires); #else std::string errMsg("[vpImageFilter::canny]You asked for CANNY_OPENCV_BACKEND but ViSP has not been compiled with OpenCV"); diff --git a/modules/core/src/image/vpRGBa.cpp b/modules/core/src/image/vpRGBa.cpp index 69147272c4..61660abc70 100644 --- a/modules/core/src/image/vpRGBa.cpp +++ b/modules/core/src/image/vpRGBa.cpp @@ -70,7 +70,6 @@ vpRGBa &vpRGBa::operator=(const vpRGBa &v) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Move operator. */ @@ -82,7 +81,6 @@ vpRGBa &vpRGBa::operator=(const vpRGBa &&v) this->A = std::move(v.A); return *this; } -#endif /*! Cast a vpColVector in a vpRGBa diff --git a/modules/core/src/image/vpRGBf.cpp b/modules/core/src/image/vpRGBf.cpp index 7822445047..5510bece32 100644 --- a/modules/core/src/image/vpRGBf.cpp +++ b/modules/core/src/image/vpRGBf.cpp @@ -69,7 +69,6 @@ vpRGBf &vpRGBf::operator=(const vpRGBf &v) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Move operator. */ @@ -80,7 +79,6 @@ vpRGBf &vpRGBf::operator=(const vpRGBf &&v) this->B = std::move(v.B); return *this; } -#endif /*! 
Cast a vpColVector in a vpRGBf @@ -96,9 +94,9 @@ vpRGBf &vpRGBf::operator=(const vpColVector &v) vpERROR_TRACE("Bad vector dimension"); throw(vpException(vpException::dimensionError, "Bad vector dimension")); } - R = (float) v[0]; - G = (float) v[1]; - B = (float) v[2]; + R = (float)v[0]; + G = (float)v[1]; + B = (float)v[2]; return *this; } diff --git a/modules/core/src/math/matrix/vpColVector.cpp b/modules/core/src/math/matrix/vpColVector.cpp index 01fac52a25..7a06e90e30 100644 --- a/modules/core/src/math/matrix/vpColVector.cpp +++ b/modules/core/src/math/matrix/vpColVector.cpp @@ -214,7 +214,6 @@ vpColVector::vpColVector(const std::vector &v) : vpArray2D((unsig (*this)[i] = (double)(v[i]); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpColVector::vpColVector(vpColVector &&v) : vpArray2D() { rowNum = v.rowNum; @@ -229,7 +228,6 @@ vpColVector::vpColVector(vpColVector &&v) : vpArray2D() v.dsize = 0; v.data = NULL; } -#endif vpColVector vpColVector::operator-() const { @@ -404,8 +402,6 @@ std::vector vpColVector::toStdVector() const return v; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - vpColVector &vpColVector::operator=(vpColVector &&other) { if (this != &other) { @@ -434,7 +430,6 @@ vpColVector &vpColVector::operator=(const std::initializer_list &list) std::copy(list.begin(), list.end(), data); return *this; } -#endif bool vpColVector::operator==(const vpColVector &v) const { diff --git a/modules/core/src/math/matrix/vpMatrix.cpp b/modules/core/src/math/matrix/vpMatrix.cpp index cb7885d26f..d238d8e998 100644 --- a/modules/core/src/math/matrix/vpMatrix.cpp +++ b/modules/core/src/math/matrix/vpMatrix.cpp @@ -197,7 +197,6 @@ vpMatrix::vpMatrix(const vpMatrix &M, unsigned int r, unsigned int c, unsigned i init(M, r, c, nrows, ncols); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix::vpMatrix(vpMatrix &&A) : vpArray2D() { rowNum = A.rowNum; @@ -224,11 +223,9 @@ vpMatrix::vpMatrix(vpMatrix &&A) : vpArray2D() int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix M( {-1, -2, -3, 4, 5.5, 6.0f} ); M.reshape(2, 3); std::cout << "M:\n" << M << std::endl; -#endif } \endcode It produces the following output: @@ -251,10 +248,8 @@ vpMatrix::vpMatrix(const std::initializer_list &list) : vpArray2D= VISP_CXX_STANDARD_11) vpMatrix M(2, 3, {-1, -2, -3, 4, 5.5, 6}); std::cout << "M:\n" << M << std::endl; -#endif } \endcode It produces the following output: @@ -277,10 +272,8 @@ vpMatrix::vpMatrix(unsigned int nrows, unsigned int ncols, const std::initialize int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix M( { {-1, -2, -3}, {4, 5.5, 6} } ); std::cout << "M:\n" << M << std::endl; -#endif } \endcode It produces the following output: @@ -292,8 +285,6 @@ std::cout << "M:\n" << M << std::endl; */ vpMatrix::vpMatrix(const std::initializer_list > &lists) : vpArray2D(lists) { } -#endif - /*! Initialize the matrix from a part of an input matrix \e M. @@ -658,7 +649,6 @@ vpMatrix &vpMatrix::operator=(const vpArray2D &A) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMatrix &vpMatrix::operator=(const vpMatrix &A) { resize(A.getRows(), A.getCols(), false, false); @@ -765,7 +755,6 @@ vpMatrix &vpMatrix::operator=(const std::initializer_list int main() @@ -5029,7 +5018,7 @@ int main() int rank_out = A.pseudoInverse(A_p, sv, rank_in, imA, imAt, kerAt); if (rank_out != rank_in) { std::cout << "There is a possibility that the pseudo-inverse in wrong." 
<< std::endl; -std::cout << "Are you sure that the matrix rank is " << rank_in << std::endl; + std::cout << "Are you sure that the matrix rank is " << rank_in << std::endl; } A_p.print(std::cout, 10, "A^+ (pseudo-inverse): "); diff --git a/modules/core/src/math/matrix/vpRowVector.cpp b/modules/core/src/math/matrix/vpRowVector.cpp index 30561703c7..2fda61340a 100644 --- a/modules/core/src/math/matrix/vpRowVector.cpp +++ b/modules/core/src/math/matrix/vpRowVector.cpp @@ -122,7 +122,6 @@ vpRowVector &vpRowVector::operator=(double x) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRowVector &vpRowVector::operator=(vpRowVector &&other) { if (this != &other) { @@ -169,7 +168,6 @@ vpRowVector &vpRowVector::operator=(const std::initializer_list &list) std::copy(list.begin(), list.end(), data); return *this; } -#endif bool vpRowVector::operator==(const vpRowVector &v) const { @@ -578,7 +576,6 @@ vpRowVector::vpRowVector(const vpRowVector &v, unsigned int c, unsigned int ncol init(v, c, ncols); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRowVector::vpRowVector(vpRowVector &&v) : vpArray2D() { rowNum = v.rowNum; @@ -593,10 +590,9 @@ vpRowVector::vpRowVector(vpRowVector &&v) : vpArray2D() v.dsize = 0; v.data = NULL; } -#endif /*! - Normalise the vector given as input parameter and return the normalized + Normalize the vector given as input parameter and return the normalized vector: \f[ diff --git a/modules/core/src/math/misc/vpMath.cpp b/modules/core/src/math/misc/vpMath.cpp index ba6b5a12a0..7c3855cc15 100644 --- a/modules/core/src/math/misc/vpMath.cpp +++ b/modules/core/src/math/misc/vpMath.cpp @@ -351,11 +351,7 @@ double vpMath::getStdev(const std::vector &v, bool useBesselCorrection) double mean = getMean(v); std::vector diff(v.size()); -#if VISP_CXX_STANDARD > VISP_CXX_STANDARD_98 std::transform(v.begin(), v.end(), diff.begin(), std::bind(std::minus(), std::placeholders::_1, mean)); -#else - std::transform(v.begin(), v.end(), diff.begin(), std::bind2nd(std::minus(), mean)); -#endif double sq_sum = std::inner_product(diff.begin(), diff.end(), diff.begin(), 0.0); double divisor = (double)v.size(); @@ -580,11 +576,8 @@ std::vector > vpMath::computeRegularPointsOnSphere(uns double d_phi = a / d_theta; std::vector > lonlat_vec; -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) lonlat_vec.reserve(static_cast(std::sqrt(maxPoints))); -#else - lonlat_vec.reserve(static_cast(std::sqrt(static_cast(maxPoints)))); -#endif + for (int m = 0; m < m_theta; m++) { double theta = M_PI * (m + 0.5) / m_theta; int m_phi = static_cast(round(2.0 * M_PI * sin(theta) / d_phi)); diff --git a/modules/core/src/math/robust/vpRobust.cpp b/modules/core/src/math/robust/vpRobust.cpp index 652840ca5d..14fcf12cf6 100644 --- a/modules/core/src/math/robust/vpRobust.cpp +++ b/modules/core/src/math/robust/vpRobust.cpp @@ -59,11 +59,10 @@ vpRobust::vpRobust() : m_normres(), m_sorted_normres(), m_sorted_residues(), m_mad_min(0.0017), m_mad_prev(0), #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) - m_iter(0), + m_iter(0), #endif - m_size(0), m_mad(0) -{ -} + m_size(0), m_mad(0) +{ } /*! Copy constructor. @@ -88,7 +87,6 @@ vpRobust &vpRobust::operator=(const vpRobust &other) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Move operator. */ @@ -105,7 +103,6 @@ vpRobust &vpRobust::operator=(const vpRobust &&other) m_size = std::move(other.m_size); return *this; } -#endif /*! Resize containers. 
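// Note on the vpMath::getStdev() hunk above: with the C++98 std::bind2nd branch removed,
// the surviving std::bind(std::minus<double>(), std::placeholders::_1, mean) expression
// can equivalently be written with a lambda, the usual C++11 idiom. A minimal standalone
// sketch of the same computation follows; the function name is illustrative only and this
// is not the library code.
#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>

double sketchStdev(const std::vector<double> &v, bool useBesselCorrection)
{
  if (v.empty()) {
    return 0.0; // guard for the sketch; the real function assumes non-empty input
  }
  const double mean = std::accumulate(v.begin(), v.end(), 0.0) / static_cast<double>(v.size());
  std::vector<double> diff(v.size());
  std::transform(v.begin(), v.end(), diff.begin(),
                 [mean](double x) { return x - mean; }); // lambda instead of std::bind
  const double sq_sum = std::inner_product(diff.begin(), diff.end(), diff.begin(), 0.0);
  const double divisor = useBesselCorrection ? static_cast<double>(v.size()) - 1.0
                                             : static_cast<double>(v.size());
  return std::sqrt(sq_sum / divisor);
}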
@@ -205,7 +202,8 @@ void vpRobust::psiTukey(double sig, const vpColVector &x, vpColVector &weights) if (xi > 1.) { weights[i] = 0; - } else { + } + else { xi = 1 - xi; xi *= xi; weights[i] = xi; @@ -312,9 +310,9 @@ double vpRobust::select(vpColVector &a, int l, int r, int k) vpRobust::vpRobust(unsigned int n_data) : m_normres(), m_sorted_normres(), m_sorted_residues(), m_mad_min(0.0017), m_mad_prev(0), #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) - m_iter(0), + m_iter(0), #endif - m_size(n_data), m_mad(0) + m_size(n_data), m_mad(0) { vpCDEBUG(2) << "vpRobust constructor reached" << std::endl; @@ -441,8 +439,9 @@ vpColVector vpRobust::simultMEstimator(vpColVector &residues) double normmedian = select(norm_res, 0, n_data - 1, ind_med); // Normalized Median // 1.48 keeps scale estimate consistent for a normal probability dist. m_mad = 1.4826 * normmedian; // Median Absolute Deviation - } else { - // compute simultaneous scale estimate + } + else { + // compute simultaneous scale estimate m_mad = simultscale(residues); } @@ -546,9 +545,10 @@ double vpRobust::constrainedChiTukey(double x) // sct = // (vpMath::sqr(s*a-x)*vpMath::sqr(s*a+x)*vpMath::sqr(x))/(s*vpMath::sqr(vpMath::sqr(a*vpMath::sqr(s)))); sct = (vpMath::sqr(s * a) * x - s * vpMath::sqr(s * a) - x * vpMath::sqr(x)) * - (vpMath::sqr(s * a) * x + s * vpMath::sqr(s * a) - x * vpMath::sqr(x)) / s * - vpMath::sqr(vpMath::sqr(vpMath::sqr(s))) / vpMath::sqr(vpMath::sqr(a)); - } else + (vpMath::sqr(s * a) * x + s * vpMath::sqr(s * a) - x * vpMath::sqr(x)) / s * + vpMath::sqr(vpMath::sqr(vpMath::sqr(s))) / vpMath::sqr(vpMath::sqr(a)); + } + else sct = -1 / s; return sct; @@ -589,8 +589,9 @@ double vpRobust::simult_chi_huber(double x) if (fabs(u) <= c) { // sct = 0.5*vpMath::sqr(u); sct = vpMath::sqr(u); - } else { - // sct = 0.5*vpMath::sqr(c); + } + else { + // sct = 0.5*vpMath::sqr(c); sct = vpMath::sqr(c); } @@ -609,7 +610,8 @@ double vpRobust::gammp(double a, double x) if (x < (a + 1.0)) { gser(&gamser, a, x, &gln); return gamser; - } else { + } + else { gcf(&gammcf, a, x, &gln); return 1.0 - gammcf; } @@ -623,7 +625,8 @@ void vpRobust::gser(double *gamser, double a, double x, double *gln) std::cout << "x less than 0 in routine GSER"; *gamser = 0.0; return; - } else { + } + else { double ap = a; double sum = 1.0 / a; double del = sum; @@ -673,7 +676,7 @@ void vpRobust::gcf(double *gammcf, double a, double x, double *gln) double vpRobust::gammln(double xx) { double x, tmp, ser; - static double cof[6] = {76.18009173, -86.50532033, 24.01409822, -1.231739516, 0.120858003e-2, -0.536382e-5}; + static double cof[6] = { 76.18009173, -86.50532033, 24.01409822, -1.231739516, 0.120858003e-2, -0.536382e-5 }; x = xx - 1.0; tmp = x + 5.5; diff --git a/modules/core/src/math/transformation/vpHomogeneousMatrix.cpp b/modules/core/src/math/transformation/vpHomogeneousMatrix.cpp index 061a36c035..ddc87caa35 100644 --- a/modules/core/src/math/transformation/vpHomogeneousMatrix.cpp +++ b/modules/core/src/math/transformation/vpHomogeneousMatrix.cpp @@ -148,7 +148,6 @@ vpHomogeneousMatrix::vpHomogeneousMatrix(const std::vector &v) : vpArray2 (*this)[3][3] = 1.; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Construct an homogeneous matrix from a list of 12 or 16 double values. \param list : List of double. 
@@ -158,7 +157,6 @@ vpHomogeneousMatrix::vpHomogeneousMatrix(const std::vector &v) : vpArray2 int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpHomogeneousMatrix M { 0, 0, 1, 0.1, 0, 1, 0, 0.2, @@ -170,7 +168,6 @@ int main() 1, 0, 0, 0.3, 0, 0, 0, 1 }; std::cout << "N:\n" << N << std::endl; -#endif } \endcode It produces the following output: @@ -243,7 +240,6 @@ vpHomogeneousMatrix::vpHomogeneousMatrix(const std::initializer_list &li } } } -#endif /*! Construct an homogeneous matrix from a vector of double. diff --git a/modules/core/src/math/transformation/vpQuaternionVector.cpp b/modules/core/src/math/transformation/vpQuaternionVector.cpp index 7bf8bd44af..64268a6617 100644 --- a/modules/core/src/math/transformation/vpQuaternionVector.cpp +++ b/modules/core/src/math/transformation/vpQuaternionVector.cpp @@ -52,10 +52,10 @@ const double vpQuaternionVector::minimum = 0.0001; */ /*! Default constructor that initialize all the 4 angles to zero. */ -vpQuaternionVector::vpQuaternionVector() : vpRotationVector(4) {} +vpQuaternionVector::vpQuaternionVector() : vpRotationVector(4) { } /*! Copy constructor. */ -vpQuaternionVector::vpQuaternionVector(const vpQuaternionVector &q) : vpRotationVector(q) {} +vpQuaternionVector::vpQuaternionVector(const vpQuaternionVector &q) : vpRotationVector(q) { } //! Constructor from doubles. vpQuaternionVector::vpQuaternionVector(double x_, double y_, double z_, double w_) : vpRotationVector(4) @@ -281,7 +281,8 @@ vpQuaternionVector vpQuaternionVector::inverse() const double mag_square = w() * w() + x() * x() + y() * y() + z() * z(); if (!vpMath::nul(mag_square, std::numeric_limits::epsilon())) { q_inv = this->conjugate() / mag_square; - } else { + } + else { std::cerr << "The current quaternion is null ! The inverse cannot be computed !" << std::endl; } @@ -314,30 +315,29 @@ void vpQuaternionVector::normalize() \return The dot product between q0 and q1. */ -double vpQuaternionVector::dot(const vpQuaternionVector& q0, const vpQuaternionVector& q1) +double vpQuaternionVector::dot(const vpQuaternionVector &q0, const vpQuaternionVector &q1) { return q0.x() * q1.x() + q0.y() * q1.y() + q0.z() * q1.z() + q0.w() * q1.w(); } //! Returns the x-component of the quaternion. -const double& vpQuaternionVector::x() const { return data[0]; } +const double &vpQuaternionVector::x() const { return data[0]; } //! Returns the y-component of the quaternion. -const double& vpQuaternionVector::y() const { return data[1]; } +const double &vpQuaternionVector::y() const { return data[1]; } //! Returns the z-component of the quaternion. -const double& vpQuaternionVector::z() const { return data[2]; } +const double &vpQuaternionVector::z() const { return data[2]; } //! Returns the w-component of the quaternion. -const double& vpQuaternionVector::w() const { return data[3]; } +const double &vpQuaternionVector::w() const { return data[3]; } //! Returns a reference to the x-component of the quaternion. -double& vpQuaternionVector::x() { return data[0]; } +double &vpQuaternionVector::x() { return data[0]; } //! Returns a reference to the y-component of the quaternion. -double& vpQuaternionVector::y() { return data[1]; } +double &vpQuaternionVector::y() { return data[1]; } //! Returns a reference to the z-component of the quaternion. -double& vpQuaternionVector::z() { return data[2]; } +double &vpQuaternionVector::z() { return data[2]; } //! Returns a reference to the w-component of the quaternion. 
-double& vpQuaternionVector::w() { return data[3]; } +double &vpQuaternionVector::w() { return data[3]; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Set vector from a list of 4 double angle values. \code @@ -359,14 +359,13 @@ vpQuaternionVector &vpQuaternionVector::operator=(const std::initializer_list size()) { throw(vpException( - vpException::dimensionError, - "Cannot set quaternion vector out of bounds. It has only %d values while you try to initialize with %d values", - size(), list.size())); + vpException::dimensionError, + "Cannot set quaternion vector out of bounds. It has only %d values while you try to initialize with %d values", + size(), list.size())); } std::copy(list.begin(), list.end(), data); return *this; } -#endif /*! Compute Quaternion Linear intERPolation (LERP). @@ -440,7 +439,7 @@ vpQuaternionVector vpQuaternionVector::nlerp(const vpQuaternionVector &q0, const \return The interpolated quaternion using the SLERP method. */ -vpQuaternionVector vpQuaternionVector::slerp(const vpQuaternionVector& q0, const vpQuaternionVector& q1, double t) +vpQuaternionVector vpQuaternionVector::slerp(const vpQuaternionVector &q0, const vpQuaternionVector &q1, double t) { assert(t >= 0 && t <= 1); // Some additional references: diff --git a/modules/core/src/math/transformation/vpRotationMatrix.cpp b/modules/core/src/math/transformation/vpRotationMatrix.cpp index 074dd1bb8f..d2e996dba7 100644 --- a/modules/core/src/math/transformation/vpRotationMatrix.cpp +++ b/modules/core/src/math/transformation/vpRotationMatrix.cpp @@ -89,7 +89,6 @@ vpRotationMatrix &vpRotationMatrix::operator=(const vpRotationMatrix &R) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Set a rotation matrix from a list of 9 double values. \param list : List of double. @@ -99,11 +98,9 @@ vpRotationMatrix &vpRotationMatrix::operator=(const vpRotationMatrix &R) int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRotationMatrix R R = { 0, 0, -1, 0, -1, 0, -1, 0, 0 }; std::cout << "R:\n" << R << std::endl; -#endif } \endcode It produces the following output: @@ -137,7 +134,6 @@ vpRotationMatrix &vpRotationMatrix::operator=(const std::initializer_list(3, 3), m_index(0) { buildFrom(q); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Construct a rotation matrix from a list of 9 double values. \param list : List of double. @@ -549,10 +544,8 @@ vpRotationMatrix::vpRotationMatrix(const vpQuaternionVector &q) : vpArray2D= VISP_CXX_STANDARD_11) vpRotationMatrix R{ 0, 0, -1, 0, -1, 0, -1, 0, 0 }; std::cout << "R:\n" << R << std::endl; -#endif } \endcode It produces the following output: @@ -577,7 +570,6 @@ vpRotationMatrix::vpRotationMatrix(const std::initializer_list &list) } } } -#endif /*! Return the rotation matrix transpose which is also the inverse of the diff --git a/modules/core/src/math/transformation/vpRxyzVector.cpp b/modules/core/src/math/transformation/vpRxyzVector.cpp index 4f026d9a80..d4c48da25d 100644 --- a/modules/core/src/math/transformation/vpRxyzVector.cpp +++ b/modules/core/src/math/transformation/vpRxyzVector.cpp @@ -45,10 +45,10 @@ */ /*! Default constructor that initialize all the 3 angles to zero. */ -vpRxyzVector::vpRxyzVector() : vpRotationVector(3) {} +vpRxyzVector::vpRxyzVector() : vpRotationVector(3) { } /*! Copy constructor. */ -vpRxyzVector::vpRxyzVector(const vpRxyzVector &rxyz) : vpRotationVector(rxyz) {} +vpRxyzVector::vpRxyzVector(const vpRxyzVector &rxyz) : vpRotationVector(rxyz) { } /*! Constructor from 3 angles (in radian). 
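// Illustration: a small usage sketch of the quaternion interpolation helpers whose
// signatures appear in the vpQuaternionVector hunks above (dot(), nlerp(), slerp()),
// assuming they are static helpers as their two-quaternion signatures suggest. The
// include path, the example values and the function name are assumptions of the sketch;
// t must lie in [0, 1] as asserted in slerp().
#include <cmath>
#include <visp3/core/vpQuaternionVector.h>

void sketchQuaternionInterpolation()
{
  vpQuaternionVector q0(0.0, 0.0, 0.0, 1.0);                                   // identity rotation
  vpQuaternionVector q1(0.0, 0.0, std::sin(M_PI / 4.0), std::cos(M_PI / 4.0)); // 90 deg about z
  double t = 0.5;                                                              // halfway between q0 and q1

  double d = vpQuaternionVector::dot(q0, q1);                    // cosine of half the angle between them
  vpQuaternionVector q_n = vpQuaternionVector::nlerp(q0, q1, t); // normalized linear interpolation
  vpQuaternionVector q_s = vpQuaternionVector::slerp(q0, q1, t); // spherical linear interpolation
  (void)d; (void)q_n; (void)q_s;                                 // silence unused-variable warnings in the sketch
}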
@@ -226,7 +226,6 @@ vpRxyzVector &vpRxyzVector::operator=(const vpColVector &rxyz) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Set vector from a list of 3 double angle values in radians. \code @@ -248,11 +247,10 @@ vpRxyzVector &vpRxyzVector::operator=(const std::initializer_list &list) { if (list.size() > size()) { throw(vpException( - vpException::dimensionError, - "Cannot set Euler x-y-z vector out of bounds. It has only %d values while you try to initialize with %d values", - size(), list.size())); + vpException::dimensionError, + "Cannot set Euler x-y-z vector out of bounds. It has only %d values while you try to initialize with %d values", + size(), list.size())); } std::copy(list.begin(), list.end(), data); return *this; } -#endif diff --git a/modules/core/src/math/transformation/vpRzyxVector.cpp b/modules/core/src/math/transformation/vpRzyxVector.cpp index ed6243ab34..69dbae4959 100644 --- a/modules/core/src/math/transformation/vpRzyxVector.cpp +++ b/modules/core/src/math/transformation/vpRzyxVector.cpp @@ -44,10 +44,10 @@ */ /*! Default constructor that initialize all the 3 angles to zero. */ -vpRzyxVector::vpRzyxVector() : vpRotationVector(3) {} +vpRzyxVector::vpRzyxVector() : vpRotationVector(3) { } /*! Copy constructor. */ -vpRzyxVector::vpRzyxVector(const vpRzyxVector &rzyx) : vpRotationVector(rzyx) {} +vpRzyxVector::vpRzyxVector(const vpRzyxVector &rzyx) : vpRotationVector(rzyx) { } /*! Constructor from 3 angles (in radian). @@ -232,7 +232,6 @@ vpRzyxVector &vpRzyxVector::operator=(const vpColVector &rzyx) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Set vector from a list of 3 double angle values in radians. \code @@ -254,11 +253,10 @@ vpRzyxVector &vpRzyxVector::operator=(const std::initializer_list &list) { if (list.size() > size()) { throw(vpException( - vpException::dimensionError, - "Cannot set Euler x-y-z vector out of bounds. It has only %d values while you try to initialize with %d values", - size(), list.size())); + vpException::dimensionError, + "Cannot set Euler x-y-z vector out of bounds. It has only %d values while you try to initialize with %d values", + size(), list.size())); } std::copy(list.begin(), list.end(), data); return *this; } -#endif diff --git a/modules/core/src/math/transformation/vpRzyzVector.cpp b/modules/core/src/math/transformation/vpRzyzVector.cpp index 0d68df40e2..50fd92dab4 100644 --- a/modules/core/src/math/transformation/vpRzyzVector.cpp +++ b/modules/core/src/math/transformation/vpRzyzVector.cpp @@ -44,9 +44,9 @@ */ /*! Default constructor that initialize all the 3 angles to zero. */ -vpRzyzVector::vpRzyzVector() : vpRotationVector(3) {} +vpRzyzVector::vpRzyzVector() : vpRotationVector(3) { } /*! Copy constructor. */ -vpRzyzVector::vpRzyzVector(const vpRzyzVector &rzyz) : vpRotationVector(rzyz) {} +vpRzyzVector::vpRzyzVector(const vpRzyzVector &rzyz) : vpRotationVector(rzyz) { } /*! Constructor from 3 angles (in radian). @@ -223,7 +223,6 @@ vpRzyzVector &vpRzyzVector::operator=(const vpColVector &rzyz) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Set vector from a list of 3 double angle values in radians. \code @@ -245,11 +244,10 @@ vpRzyzVector &vpRzyzVector::operator=(const std::initializer_list &list) { if (list.size() > size()) { throw(vpException( - vpException::dimensionError, - "Cannot set Euler x-y-z vector out of bounds. 
It has only %d values while you try to initialize with %d values", - size(), list.size())); + vpException::dimensionError, + "Cannot set Euler x-y-z vector out of bounds. It has only %d values while you try to initialize with %d values", + size(), list.size())); } std::copy(list.begin(), list.end(), data); return *this; } -#endif diff --git a/modules/core/src/math/transformation/vpThetaUVector.cpp b/modules/core/src/math/transformation/vpThetaUVector.cpp index 88e1a05cde..04731f48e7 100644 --- a/modules/core/src/math/transformation/vpThetaUVector.cpp +++ b/modules/core/src/math/transformation/vpThetaUVector.cpp @@ -47,9 +47,9 @@ rotation const double vpThetaUVector::minimum = 0.0001; /*! Default constructor that initialize all the 3 angles to zero. */ -vpThetaUVector::vpThetaUVector() : vpRotationVector(3) {} +vpThetaUVector::vpThetaUVector() : vpRotationVector(3) { } /*! Copy constructor. */ -vpThetaUVector::vpThetaUVector(const vpThetaUVector &tu) : vpRotationVector(tu) {} +vpThetaUVector::vpThetaUVector(const vpThetaUVector &tu) : vpRotationVector(tu) { } /*! Copy constructor from a 3-dimension vector. */ vpThetaUVector::vpThetaUVector(const vpColVector &tu) : vpRotationVector(3) { buildFrom(tu); } /*! @@ -137,7 +137,7 @@ vpThetaUVector vpThetaUVector::buildFrom(const vpRotationMatrix &R) double s, c, theta; s = (R[1][0] - R[0][1]) * (R[1][0] - R[0][1]) + (R[2][0] - R[0][2]) * (R[2][0] - R[0][2]) + - (R[2][1] - R[1][2]) * (R[2][1] - R[1][2]); + (R[2][1] - R[1][2]) * (R[2][1] - R[1][2]); s = sqrt(s) / 2.0; c = (R[0][0] + R[1][1] + R[2][2] - 1.0) / 2.0; theta = atan2(s, c); /* theta in [0, PI] since s > 0 */ @@ -150,7 +150,8 @@ vpThetaUVector vpThetaUVector::buildFrom(const vpRotationMatrix &R) data[0] = (R[2][1] - R[1][2]) / (2 * sinc); data[1] = (R[0][2] - R[2][0]) / (2 * sinc); data[2] = (R[1][0] - R[0][1]) / (2 * sinc); - } else /* theta near PI */ + } + else /* theta near PI */ { double x = 0; if ((R[0][0] - c) > std::numeric_limits::epsilon()) @@ -171,14 +172,16 @@ vpThetaUVector vpThetaUVector::buildFrom(const vpRotationMatrix &R) y = -y; if (vpMath::sign(x) * vpMath::sign(z) != vpMath::sign(R[0][2] + R[2][0])) z = -z; - } else if (y > z) { + } + else if (y > z) { if ((R[0][2] - R[2][0]) < 0) y = -y; if (vpMath::sign(y) * vpMath::sign(x) != vpMath::sign(R[1][0] + R[0][1])) x = -x; if (vpMath::sign(y) * vpMath::sign(z) != vpMath::sign(R[1][2] + R[2][1])) z = -z; - } else { + } + else { if ((R[1][0] - R[0][1]) < 0) z = -z; if (vpMath::sign(z) * vpMath::sign(x) != vpMath::sign(R[2][0] + R[0][2])) @@ -458,13 +461,12 @@ vpThetaUVector vpThetaUVector::operator*(const vpThetaUVector &tu_b) const vpColVector b_hat_sin_2 = b_hat * std::sin(b_2); double c = 2 * std::acos(std::cos(a_2) * std::cos(b_2) - vpColVector::dotProd(a_hat_sin_2, b_hat_sin_2)); vpColVector d = std::sin(a_2) * std::cos(b_2) * a_hat + std::cos(a_2) * std::sin(b_2) * b_hat + - std::sin(a_2) * std::sin(b_2) * vpColVector::crossProd(a_hat, b_hat); + std::sin(a_2) * std::sin(b_2) * vpColVector::crossProd(a_hat, b_hat); d = c * d / std::sin(c / 2); return vpThetaUVector(d); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Set vector from a list of 3 double angle values in radians. \code @@ -486,11 +488,10 @@ vpThetaUVector &vpThetaUVector::operator=(const std::initializer_list &l { if (list.size() > size()) { throw(vpException( - vpException::dimensionError, - "Cannot set theta u vector out of bounds. 
It has only %d values while you try to initialize with %d values", - size(), list.size())); + vpException::dimensionError, + "Cannot set theta u vector out of bounds. It has only %d values while you try to initialize with %d values", + size(), list.size())); } std::copy(list.begin(), list.end(), data); return *this; } -#endif diff --git a/modules/core/src/math/transformation/vpTranslationVector.cpp b/modules/core/src/math/transformation/vpTranslationVector.cpp index 19767242a8..2f1babf15e 100644 --- a/modules/core/src/math/transformation/vpTranslationVector.cpp +++ b/modules/core/src/math/transformation/vpTranslationVector.cpp @@ -506,7 +506,6 @@ vpTranslationVector &vpTranslationVector::operator=(double x) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Set vector from a list of 3 double values in meters. \code @@ -535,7 +534,6 @@ vpTranslationVector &vpTranslationVector::operator=(const std::initializer_list< std::copy(list.begin(), list.end(), data); return *this; } -#endif /*! Set vector first element value. diff --git a/modules/core/src/tools/file/vpIoTools.cpp b/modules/core/src/tools/file/vpIoTools.cpp index 11910f1817..a5d90de5f5 100644 --- a/modules/core/src/tools/file/vpIoTools.cpp +++ b/modules/core/src/tools/file/vpIoTools.cpp @@ -130,25 +130,18 @@ void replaceAll(std::string &str, const std::string &search, const std::string & std::string <rim(std::string &s) { -#if VISP_CXX_STANDARD > VISP_CXX_STANDARD_98 s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { return !std::isspace(c); })); -#else - s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun(std::isspace)))); -#endif + return s; } std::string &rtrim(std::string &s) { -#if VISP_CXX_STANDARD > VISP_CXX_STANDARD_98 s.erase(std::find_if(s.rbegin(), s.rend(), [](int c) { return !std::isspace(c); }).base(), s.end()); -#else - s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun(std::isspace))).base(), s.end()); -#endif return s; } } // namespace @@ -1694,15 +1687,9 @@ std::string vpIoTools::getParent(const std::string &pathname) std::string vpIoTools::toLowerCase(const std::string &input) { std::string out; -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - for (size_t i = 0; i < input.size(); i++) { - out += std::tolower(input[i]); - } -#else for (std::string::const_iterator it = input.cbegin(); it != input.cend(); it++) { out += std::tolower(*it); } -#endif return out; } @@ -1717,15 +1704,9 @@ std::string vpIoTools::toLowerCase(const std::string &input) std::string vpIoTools::toUpperCase(const std::string &input) { std::string out; -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - for (size_t i = 0; i < input.size(); i++) { - out += std::toupper(input[i]); - } -#else for (std::string::const_iterator it = input.cbegin(); it != input.cend(); it++) { out += std::toupper(*it); } -#endif return out; } diff --git a/modules/core/src/tools/geometry/vpPolygon.cpp b/modules/core/src/tools/geometry/vpPolygon.cpp index 99d14fa690..ed23cd6642 100644 --- a/modules/core/src/tools/geometry/vpPolygon.cpp +++ b/modules/core/src/tools/geometry/vpPolygon.cpp @@ -67,14 +67,10 @@ template std::vector convexHull(const IpCon std::transform(cbegin(ips), cend(ips), std::back_inserter(cv_pts), [](const vpImagePoint &ip) { return cv::Point(static_cast(ip.get_u()), static_cast(ip.get_v())); }); -#elif (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#else std::transform(begin(ips), end(ips), std::back_inserter(cv_pts), [](const vpImagePoint &ip) { return cv::Point(static_cast(ip.get_u()), 
static_cast(ip.get_v())); }); -#else - for (typename IpContainer::const_iterator it = ips.begin(); it != ips.end(); ++it) { - cv_pts.push_back(cv::Point(static_cast(it->get_u()), static_cast(it->get_v()))); - } #endif // Get convex hull from OpenCV @@ -86,18 +82,13 @@ template std::vector convexHull(const IpCon #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_14) std::transform(cbegin(cv_conv_hull_corners), cend(cv_conv_hull_corners), std::back_inserter(conv_hull_corners), [](const cv::Point &pt) { - return vpImagePoint{static_cast(pt.y), static_cast(pt.x)}; + return vpImagePoint { static_cast(pt.y), static_cast(pt.x) }; }); -#elif (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#else std::transform(begin(cv_conv_hull_corners), end(cv_conv_hull_corners), std::back_inserter(conv_hull_corners), [](const cv::Point &pt) { - return vpImagePoint{static_cast(pt.y), static_cast(pt.x)}; + return vpImagePoint { static_cast(pt.y), static_cast(pt.x) }; }); -#else - for (std::vector::const_iterator it = cv_conv_hull_corners.begin(); it != cv_conv_hull_corners.end(); - ++it) { - conv_hull_corners.push_back(vpImagePoint(static_cast(it->y), static_cast(it->x))); - } #endif return conv_hull_corners; @@ -110,8 +101,7 @@ template std::vector convexHull(const IpCon */ vpPolygon::vpPolygon() : _corners(), _center(), _area(0.), _goodPoly(true), _bbox(), m_PnPolyConstants(), m_PnPolyMultiples() -{ -} +{ } /*! Constructor which initialises the polygon thanks to the given corners. @@ -154,14 +144,13 @@ vpPolygon::vpPolygon(const std::list &corners) */ vpPolygon::vpPolygon(const vpPolygon &poly) : _corners(poly._corners), _center(poly._center), _area(poly._area), _goodPoly(poly._goodPoly), _bbox(poly._bbox), - m_PnPolyConstants(poly.m_PnPolyConstants), m_PnPolyMultiples(poly.m_PnPolyMultiples) -{ -} + m_PnPolyConstants(poly.m_PnPolyConstants), m_PnPolyMultiples(poly.m_PnPolyMultiples) +{ } /*! Basic destructor */ -vpPolygon::~vpPolygon() {} +vpPolygon::~vpPolygon() { } /*! Equal operator. @@ -197,7 +186,8 @@ void vpPolygon::buildFrom(const std::vector &corners, const bool c #else vpException(vpException::notImplementedError, "Cannot build a convex hull without OpenCV imgproc module"); #endif - } else { + } + else { init(corners); } } @@ -219,7 +209,8 @@ void vpPolygon::buildFrom(const std::list &corners, const bool cre #else vpException(vpException::notImplementedError, "Cannot build a convex hull without OpenCV imgproc module"); #endif - } else { + } + else { init(corners); } } @@ -423,7 +414,8 @@ bool vpPolygon::isInside(const vpImagePoint &ip, const PointInPolygonMethod &met try { intersection = testIntersectionSegments(ip1, ip2, ip, infPoint); - } catch (...) { + } + catch (...) 
{ return isInside(ip); } @@ -468,10 +460,11 @@ void vpPolygon::precalcValuesPnPoly() if (vpMath::equal(_corners[j].get_v(), _corners[i].get_v(), std::numeric_limits::epsilon())) { m_PnPolyConstants[i] = _corners[i].get_u(); m_PnPolyMultiples[i] = 0.0; - } else { + } + else { m_PnPolyConstants[i] = _corners[i].get_u() - - (_corners[i].get_v() * _corners[j].get_u()) / (_corners[j].get_v() - _corners[i].get_v()) + - (_corners[i].get_v() * _corners[i].get_u()) / (_corners[j].get_v() - _corners[i].get_v()); + (_corners[i].get_v() * _corners[j].get_u()) / (_corners[j].get_v() - _corners[i].get_v()) + + (_corners[i].get_v() * _corners[i].get_u()) / (_corners[j].get_v() - _corners[i].get_v()); m_PnPolyMultiples[i] = (_corners[j].get_u() - _corners[i].get_u()) / (_corners[j].get_v() - _corners[i].get_v()); } @@ -529,28 +522,29 @@ void vpPolygon::updateCenter() double i_tmp = 0; double j_tmp = 0; #if 0 - for(unsigned int i=0; i<(_corners.size()-1); ++i){ + for (unsigned int i = 0; i<(_corners.size()-1); ++i) { i_tmp += (_corners[i].get_i() + _corners[i+1].get_i()) * - (_corners[i+1].get_i() * _corners[i].get_j() - _corners[i+1].get_j() * _corners[i].get_i()); + (_corners[i+1].get_i() * _corners[i].get_j() - _corners[i+1].get_j() * _corners[i].get_i()); j_tmp += (_corners[i].get_j() + _corners[i+1].get_j()) * - (_corners[i+1].get_i() * _corners[i].get_j() - _corners[i+1].get_j() * _corners[i].get_i()); + (_corners[i+1].get_i() * _corners[i].get_j() - _corners[i+1].get_j() * _corners[i].get_i()); } #else for (unsigned int i = 0; i < _corners.size(); ++i) { unsigned int i_p_1 = (i + 1) % _corners.size(); i_tmp += (_corners[i].get_i() + _corners[i_p_1].get_i()) * - (_corners[i_p_1].get_i() * _corners[i].get_j() - _corners[i_p_1].get_j() * _corners[i].get_i()); + (_corners[i_p_1].get_i() * _corners[i].get_j() - _corners[i_p_1].get_j() * _corners[i].get_i()); j_tmp += (_corners[i].get_j() + _corners[i_p_1].get_j()) * - (_corners[i_p_1].get_i() * _corners[i].get_j() - _corners[i_p_1].get_j() * _corners[i].get_i()); + (_corners[i_p_1].get_i() * _corners[i].get_j() - _corners[i_p_1].get_j() * _corners[i].get_i()); } #endif if (_area > 0) { _center.set_i(fabs(i_tmp / (6 * _area))); _center.set_j(fabs(j_tmp / (6 * _area))); - } else { + } + else { _center = _corners[0]; _goodPoly = false; } diff --git a/modules/core/src/tools/geometry/vpRectOriented.cpp b/modules/core/src/tools/geometry/vpRectOriented.cpp index f7dc9862e4..480e5fe6fb 100644 --- a/modules/core/src/tools/geometry/vpRectOriented.cpp +++ b/modules/core/src/tools/geometry/vpRectOriented.cpp @@ -82,30 +82,7 @@ vpRectOriented::vpRectOriented(const vpRect &rect) m_topRight.set_j(m_center.get_j() + m_width / 2.0); } -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) -/** Copy constructor. - * @param rectOriented Oriented rectangle to copy. - */ -vpRectOriented::vpRectOriented(const vpRectOriented &rectOriented) { *this = rectOriented; } - -/** Assignement operator. - * @param rectOriented Oriented rectangle to copy. - */ -vpRectOriented &vpRectOriented::operator=(const vpRectOriented &rectOriented) -{ - m_center = rectOriented.getCenter(); - m_width = rectOriented.getWidth(); - m_height = rectOriented.getHeight(); - m_theta = rectOriented.getOrientation(); - m_topLeft = rectOriented.getTopLeft(); - m_bottomLeft = rectOriented.getBottomLeft(); - m_bottomRight = rectOriented.getBottomRight(); - m_topRight = rectOriented.getTopRight(); - return *this; -} -#endif - -/** Assignement operator from vpRect. +/** Assignment operator from vpRect. 
* @param rect Rectangle to copy. */ vpRectOriented &vpRectOriented::operator=(const vpRect &rect) diff --git a/modules/core/src/tools/optimization/vpLinProg.cpp b/modules/core/src/tools/optimization/vpLinProg.cpp index 7929acd959..0510854830 100644 --- a/modules/core/src/tools/optimization/vpLinProg.cpp +++ b/modules/core/src/tools/optimization/vpLinProg.cpp @@ -277,7 +277,6 @@ bool vpLinProg::rowReduction(vpMatrix &A, vpColVector &b, const double &tol) return false; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! Solves a Linear Program under various constraints @@ -303,9 +302,6 @@ bool vpLinProg::rowReduction(vpMatrix &A, vpColVector &b, const double &tol) Lower and upper bounds may be passed as a list of (index, bound) with C++11's braced initialization. - \warning This function is only available if c++11 or higher is activated during compilation. Configure ViSP using -cmake -DUSE_CXX_STANDARD=11. - Here is an example: \f$\begin{array}{lll} @@ -727,4 +723,3 @@ bool vpLinProg::simplex(const vpColVector &c, vpMatrix A, vpColVector b, vpColVe std::swap(B[k], N[j]); } } -#endif diff --git a/modules/core/src/tools/optimization/vpQuadProg.cpp b/modules/core/src/tools/optimization/vpQuadProg.cpp index e463bf596d..a7feb40b91 100644 --- a/modules/core/src/tools/optimization/vpQuadProg.cpp +++ b/modules/core/src/tools/optimization/vpQuadProg.cpp @@ -40,8 +40,6 @@ #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - /*! Changes a canonical quadratic cost \f$\min \frac{1}{2}\mathbf{x}^T\mathbf{H}\mathbf{x} + \mathbf{c}^T\mathbf{x}\f$ to the formulation used by this class \f$ \min ||\mathbf{Q}\mathbf{x} - \mathbf{r}||^2\f$. @@ -127,7 +125,7 @@ void vpQuadProg::fromCanonicalCost(const vpMatrix &H, const vpColVector &c, vpMa r = -Q.t().pseudoInverse() * c; #else throw(vpException(vpException::functionNotImplementedError, "Symmetric matrix decomposition is not implemented. 
You " - "should install Lapack, Eigen3 or OpenCV 3rd party")); + "should install Lapack, Eigen3 or OpenCV 3rd party")); #endif } @@ -184,7 +182,8 @@ bool vpQuadProg::solveByProjection(const vpMatrix &Q, const vpColVector &r, vpMa x = b + A * (Q * A).solveBySVD(r - Q * b); else x = b; - } else + } + else x = Q.solveBySVD(r); return true; } @@ -246,7 +245,7 @@ bool vpQuadProg::solveQPe(const vpMatrix &Q, const vpColVector &r, vpColVector & unsigned int n = Q.getCols(); if (Q.getRows() != r.getRows() || Z.getRows() != n || x1.getRows() != n) { std::cout << "vpQuadProg::solveQPe: wrong dimension\n" - << "Q: " << Q.getRows() << "x" << Q.getCols() << " - r: " << r.getRows() << std::endl; + << "Q: " << Q.getRows() << "x" << Q.getCols() << " - r: " << r.getRows() << std::endl; std::cout << "Z: " << Z.getRows() << "x" << Z.getCols() << " - x1: " << x1.getRows() << std::endl; throw vpMatrixException::dimensionError; } @@ -255,7 +254,8 @@ bool vpQuadProg::solveQPe(const vpMatrix &Q, const vpColVector &r, vpColVector & x = x1 + Z * (Q * Z).solveBySVD(r - Q * x1); else x = x1; - } else + } + else x = Q.solveBySVD(r); return true; } @@ -386,7 +386,8 @@ bool vpQuadProg::solveQP(const vpMatrix &Q, const vpColVector &r, vpMatrix A, vp if (A.getCols() && solveQPi(Q * A, r - Q * b, C * A, d - C * b, x, false, tol)) { x = b + A * x; return true; - } else if (vpLinProg::allLesser(C, b, d, tol)) // Ax = b has only 1 solution + } + else if (vpLinProg::allLesser(C, b, d, tol)) // Ax = b has only 1 solution { x = b; return true; @@ -451,13 +452,15 @@ bool vpQuadProg::solveQPi(const vpMatrix &Q, const vpColVector &r, const vpMatri // back to initial solution x = x1 + Z * x; return true; - } else if (vpLinProg::allLesser(C, x1, d, tol)) { + } + else if (vpLinProg::allLesser(C, x1, d, tol)) { x = x1; return true; } std::cout << "vpQuadProg::solveQPi: inequality constraint infeasible" << std::endl; return false; - } else + } + else std::cout << "vpQuadProg::solveQPi: use_equality before setEqualityConstraint" << std::endl; } @@ -532,7 +535,8 @@ bool vpQuadProg::solveQPi(const vpMatrix &Q, const vpColVector &r, const vpMatri A_lp[i][2 * n + p + l] = -1; xc[2 * n + p + l] = -e[i]; l++; - } else + } + else xc[2 * n + i] = e[i]; } vpLinProg::simplex(c, A_lp, e, xc); @@ -555,7 +559,8 @@ bool vpQuadProg::solveQPi(const vpMatrix &Q, const vpColVector &r, const vpMatri else active.push_back(i); } - } else // warm start feasible + } + else // warm start feasible { // using previous active set, check that inactive is sync if (active.size() + inactive.size() != p) { @@ -616,7 +621,8 @@ bool vpQuadProg::solveQPi(const vpMatrix &Q, const vpColVector &r, const vpMatri else active.erase(active.begin() + ineqInd); update_Ap = true; - } else // u != 0, can improve xc + } + else // u != 0, can improve xc { unsigned int ineqInd = 0; // step length to next constraint @@ -638,11 +644,13 @@ bool vpQuadProg::solveQPi(const vpMatrix &Q, const vpColVector &r, const vpMatri while (it != active.end() && *it < inactive[ineqInd]) it++; active.insert(it, inactive[ineqInd]); - } else + } + else active.push_back(inactive[ineqInd]); inactive.erase(inactive.begin() + ineqInd); update_Ap = true; - } else + } + else update_Ap = false; // update x for next iteration x += alpha * u; @@ -668,6 +676,3 @@ vpColVector vpQuadProg::solveSVDorQR(const vpMatrix &A, const vpColVector &b) return A.solveBySVD(b); return A.solveByQR(b); } -#else -void dummy_vpQuadProg(){}; -#endif diff --git a/modules/core/src/tools/time/vpTime.cpp 
b/modules/core/src/tools/time/vpTime.cpp index 408f0c87ab..f1c58f4d91 100644 --- a/modules/core/src/tools/time/vpTime.cpp +++ b/modules/core/src/tools/time/vpTime.cpp @@ -39,8 +39,7 @@ #include // https://devblogs.microsoft.com/cppblog/c14-stl-features-fixes-and-breaking-changes-in-visual-studio-14-ctp1/ -#if VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11 && \ - (defined(_MSC_VER) && _MSC_VER >= 1900 /* VS2015 */ || !defined(_MSC_VER)) +#if (defined(_MSC_VER) && _MSC_VER >= 1900 /* VS2015 */ || !defined(_MSC_VER)) #define USE_CXX11_CHRONO 1 #else #define USE_CXX11_CHRONO 0 diff --git a/modules/core/test/image/testImageOwnership.cpp b/modules/core/test/image/testImageOwnership.cpp index 3f384cd41d..be80db7ead 100644 --- a/modules/core/test/image/testImageOwnership.cpp +++ b/modules/core/test/image/testImageOwnership.cpp @@ -138,7 +138,6 @@ int main(int /* argc */, const char ** /* argv */) delete[] bitmap; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { unsigned char *bitmap = new unsigned char[12]; vpImage I = std::move(vpImage(bitmap, 3, 4, false)); @@ -157,8 +156,8 @@ int main(int /* argc */, const char ** /* argv */) } delete[] bitmap; } -#endif - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << "Exception: " << e.what() << std::endl; return EXIT_FAILURE; } diff --git a/modules/core/test/math/testJsonArrayConversion.cpp b/modules/core/test/math/testJsonArrayConversion.cpp index c1d51a1361..4a0c9195c7 100644 --- a/modules/core/test/math/testJsonArrayConversion.cpp +++ b/modules/core/test/math/testJsonArrayConversion.cpp @@ -41,7 +41,7 @@ #include -#if defined(VISP_HAVE_NLOHMANN_JSON) && defined(VISP_HAVE_CATCH2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_NLOHMANN_JSON) && defined(VISP_HAVE_CATCH2) #include #include diff --git a/modules/core/test/math/testMath.cpp b/modules/core/test/math/testMath.cpp index 9b9d6266ad..8fedb34bd7 100644 --- a/modules/core/test/math/testMath.cpp +++ b/modules/core/test/math/testMath.cpp @@ -602,21 +602,10 @@ int main() // Test clamp { -#if (VISP_CXX_STANDARD > VISP_CXX_STANDARD_98) const double lower { -10. }, upper { +10. }; - std::vector testing_values{5., -15., 15.}; - std::vector expected_values{5., -10., 10.}; -#else - const double lower = -10., upper = +10.; - std::vector testing_values; - testing_values.push_back(5.); - testing_values.push_back(-15.); - testing_values.push_back(15.); - std::vector expected_values; - expected_values.push_back(5.); - expected_values.push_back(-10.); - expected_values.push_back(10.); -#endif + std::vector testing_values { 5., -15., 15. }; + std::vector expected_values { 5., -10., 10. }; + for (size_t i = 0u; i < testing_values.size(); i++) { const double clamp_val = vpMath::clamp(testing_values.at(i), lower, upper); diff --git a/modules/core/test/math/testMatrix.cpp b/modules/core/test/math/testMatrix.cpp index 44d610c409..f32ded2b3a 100644 --- a/modules/core/test/math/testMatrix.cpp +++ b/modules/core/test/math/testMatrix.cpp @@ -414,7 +414,7 @@ int main(int argc, char *argv[]) unsigned int nb = ctest ? 10 : 100; // 10000; const unsigned int size = ctest ? 10 : 100; - vpMatrix m_big(nb * size, 6); + vpMatrix m_big(nb *size, 6); std::vector submatrices(nb); for (size_t cpt = 0; cpt < submatrices.size(); cpt++) { vpMatrix m(size, 6); @@ -562,7 +562,7 @@ int main(int argc, char *argv[]) vpMatrix m_big_stack_static = generateRandomMatrix(ctest ? 100 : 1000, ctest ? 
10 : 100, -1000.0, 1000.0); std::cout << "m_big_stack_static: " << m_big_stack_static.getRows() << "x" << m_big_stack_static.getCols() - << std::endl; + << std::endl; vpMatrix m_big_stack_static_row, m_big_stack_static_row_tmp; t = vpTime::measureTimeMs(); @@ -575,8 +575,8 @@ int main(int argc, char *argv[]) if (!equalMatrix(m_big_stack_static, m_big_stack_static_row)) { std::cerr << "Problem with vpMatrix::stack(vpMatrix, vpRowVector, " - "vpMatrix)!" - << std::endl; + "vpMatrix)!" + << std::endl; return EXIT_FAILURE; } @@ -595,8 +595,8 @@ int main(int argc, char *argv[]) if (!equalMatrix(m_big_stack_static, m_big_stack_static_col)) { std::cerr << "Problem with vpMatrix::stack(vpMatrix, vpColVector, " - "vpMatrix)!" - << std::endl; + "vpMatrix)!" + << std::endl; return EXIT_FAILURE; } } @@ -669,7 +669,6 @@ int main(int argc, char *argv[]) std::cout << "juxtaposeM:\n" << juxtaposeM << std::endl; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { std::vector vec_mat; vec_mat.emplace_back(5, 5); @@ -684,7 +683,6 @@ int main(int argc, char *argv[]) res2 = A + B; std::cout << "\n2) A+B:\n" << res2 << std::endl; } -#endif { std::cout << "\n------------------------" << std::endl; @@ -840,7 +838,8 @@ int main(int argc, char *argv[]) std::cout << "\nAll tests succeeded" << std::endl; return EXIT_SUCCESS; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; return EXIT_FAILURE; } diff --git a/modules/core/test/math/testMatrixInitialization.cpp b/modules/core/test/math/testMatrixInitialization.cpp index 0e8e83a126..d7d9e4e0fe 100644 --- a/modules/core/test/math/testMatrixInitialization.cpp +++ b/modules/core/test/math/testMatrixInitialization.cpp @@ -106,11 +106,10 @@ bool equal(const vpRowVector &a1, const vpRowVector &a2, double epsilon) int main() { double epsilon = 1e-10; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpArray2D a{1.f, 2.f, 3.f}; + vpArray2D a { 1.f, 2.f, 3.f }; std::cout << "a:\n" << a << std::endl; - a = {-1, -2, -3, 4, 5.5, 6.0f}; + a = { -1, -2, -3, 4, 5.5, 6.0f }; std::cout << "a:\n" << a << std::endl; a.reshape(2, 3); std::cout << "a.reshape(2,3):\n" << a << std::endl; @@ -119,47 +118,46 @@ int main() vpArray2D a2; a2.resize(2, 2); - a2 = {1, 2, 3, 4}; + a2 = { 1, 2, 3, 4 }; std::cout << "a2:\n" << a2 << std::endl; - vpArray2D a3(2, 3, {1, 2, 3, 4, 5, 6}); + vpArray2D a3(2, 3, { 1, 2, 3, 4, 5, 6 }); std::cout << "a3:\n" << a3 << std::endl; - vpArray2D a4{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + vpArray2D a4 { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} }; std::cout << "a4:\n" << a4 << std::endl; vpArray2D a5; - a5 = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + a5 = { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} }; std::cout << "a5:\n" << a5 << std::endl; - vpArray2D a6{a5}; + vpArray2D a6 { a5 }; std::cout << "a6:\n" << a6 << std::endl; - vpMatrix m{1, 2, 3}; + vpMatrix m { 1, 2, 3 }; std::cout << "m:\n" << m << std::endl; - m = {-1, -2, -3, -4}; + m = { -1, -2, -3, -4 }; std::cout << "m:\n" << m << std::endl; m.reshape(2, 2); std::cout << "m:\n" << m << std::endl; - vpMatrix m2(3, 2, {1, 2, 3, 4, 5, 6}); + vpMatrix m2(3, 2, { 1, 2, 3, 4, 5, 6 }); std::cout << "m2:\n" << m2 << std::endl; - vpMatrix m3{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + vpMatrix m3 { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} }; std::cout << "m3:\n" << m3 << std::endl; vpMatrix m4; - m4 = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + m4 = { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} }; std::cout << "m4:\n" << m4 << std::endl; - vpMatrix m5{m4}; + vpMatrix m5 { m4 }; std::cout << "m5:\n" << m5 
<< std::endl; // vpMatrix m6; // m6 = {m2}; // Fails on travis // std::cout << "m6:\n" << m6 << std::endl; } -#endif { vpMatrix m1; @@ -189,9 +187,8 @@ int main() c_ref[i] = i; } std::cout << "c_ref: " << c_ref.t() << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpColVector c{0, 1, 2, 3, 4, 5}; + vpColVector c { 0, 1, 2, 3, 4, 5 }; std::cout << "c: " << c.t() << std::endl; if (!equal(c_ref, c, epsilon)) { return EXIT_FAILURE; @@ -199,7 +196,7 @@ int main() c_ref.resize(3, false); c_ref *= -1; std::cout << "c_ref: " << c_ref.t() << std::endl; - c = {0, -1, -2}; + c = { 0, -1, -2 }; std::cout << "c: " << c.t() << std::endl; if (!equal(c_ref, c, epsilon)) { return EXIT_FAILURE; @@ -221,7 +218,6 @@ int main() return EXIT_FAILURE; } } -#endif { vpColVector c; c << 1, 2, 3, 4; @@ -232,7 +228,8 @@ int main() std::cout << "after c.reshape(2, 2): " << c.t() << std::endl; c = c.reshape(2, 2); std::cout << "c:" << c << std::endl; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "Exception expected: c = c.reshape(2, 2);\n" << e.what() << std::endl; } @@ -253,9 +250,8 @@ int main() r_ref[i] = i; } std::cout << "r_ref: " << r_ref << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpRowVector r{0, 1, 2, 3, 4, 5}; + vpRowVector r { 0, 1, 2, 3, 4, 5 }; std::cout << "r: " << r << std::endl; if (!equal(r_ref, r, epsilon)) { return EXIT_FAILURE; @@ -263,7 +259,7 @@ int main() r_ref.resize(3, false); r_ref *= -1; std::cout << "r_ref: " << r_ref << std::endl; - r = {0, -1, -2}; + r = { 0, -1, -2 }; std::cout << "r: " << r << std::endl; if (!equal(r_ref, r, epsilon)) { return EXIT_FAILURE; @@ -285,7 +281,6 @@ int main() return EXIT_FAILURE; } } -#endif { vpRowVector r; r << 1, 2, 3; @@ -297,7 +292,8 @@ int main() try { r.reshape(3, 1); std::cout << "after r.reshape(3, 1): " << r << std::endl; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "Exception: r.reshape(3, 1);\n" << e.what() << std::endl; } } @@ -307,15 +303,13 @@ int main() std::cout << "** Test vpThetaUVector" << std::endl; vpThetaUVector tu_ref(0, M_PI_2, M_PI); std::cout << "tu_ref: " << tu_ref.t() << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpThetaUVector tu = {0, M_PI_2, M_PI}; + vpThetaUVector tu = { 0, M_PI_2, M_PI }; std::cout << "tu: " << tu.t() << std::endl; if (!equal(tu_ref, tu, epsilon)) { return EXIT_FAILURE; } } -#endif { vpThetaUVector tu; tu << 0, M_PI_2, M_PI; @@ -336,15 +330,13 @@ int main() std::cout << "** Test vpRxyzVector" << std::endl; vpRxyzVector rxyz_ref(0, M_PI_2, M_PI); std::cout << "rxyz_ref: " << rxyz_ref.t() << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpRxyzVector rxyz = {0, M_PI_2, M_PI}; + vpRxyzVector rxyz = { 0, M_PI_2, M_PI }; std::cout << "rxyz: " << rxyz.t() << std::endl; if (!equal(rxyz_ref, rxyz, epsilon)) { return EXIT_FAILURE; } } -#endif { vpRxyzVector rxyz; rxyz << 0, M_PI_2, M_PI; @@ -365,15 +357,13 @@ int main() std::cout << "** Test vpRzyxVector" << std::endl; vpRzyxVector rzyx_ref(0, M_PI_2, M_PI); std::cout << "rzyx_ref: " << rzyx_ref.t() << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpRzyxVector rzyx = {0, M_PI_2, M_PI}; + vpRzyxVector rzyx = { 0, M_PI_2, M_PI }; std::cout << "rzyx: " << rzyx.t() << std::endl; if (!equal(rzyx_ref, rzyx, epsilon)) { return EXIT_FAILURE; } } -#endif { vpRzyxVector rzyx; rzyx << 0, M_PI_2, M_PI; @@ -394,15 +384,13 @@ int main() std::cout << "** Test vpRzyzVector" << std::endl; vpRzyzVector rzyz_ref(0, 
M_PI_2, M_PI); std::cout << "rzyz_ref: " << rzyz_ref.t() << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpRzyzVector rzyz = {0, M_PI_2, M_PI}; + vpRzyzVector rzyz = { 0, M_PI_2, M_PI }; std::cout << "rzyz: " << rzyz.t() << std::endl; if (!equal(rzyz_ref, rzyz, epsilon)) { return EXIT_FAILURE; } } -#endif { vpRzyzVector rzyz; rzyz << 0, M_PI_2, M_PI; @@ -424,15 +412,13 @@ int main() vpThetaUVector tu_ref(0, M_PI_2, M_PI); vpQuaternionVector q_ref(tu_ref); std::cout << "q_ref: " << q_ref.t() << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpQuaternionVector q = {q_ref[0], q_ref[1], q_ref[2], q_ref[3]}; + vpQuaternionVector q = { q_ref[0], q_ref[1], q_ref[2], q_ref[3] }; std::cout << "q: " << q.t() << std::endl; if (!equal(q_ref, q, epsilon)) { return EXIT_FAILURE; } } -#endif { vpQuaternionVector q; q << q_ref[0], q_ref[1], q_ref[2], q_ref[3]; @@ -453,15 +439,13 @@ int main() std::cout << "** Test vpTranslationVector" << std::endl; vpTranslationVector t_ref(0, 0.1, 0.5); std::cout << "t_ref: " << t_ref.t() << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpTranslationVector t = {t_ref[0], t_ref[1], t_ref[2]}; + vpTranslationVector t = { t_ref[0], t_ref[1], t_ref[2] }; std::cout << "t: " << t.t() << std::endl; if (!equal(t_ref, t, epsilon)) { return EXIT_FAILURE; } } -#endif { vpTranslationVector t; t << 0, 0.1, 0.5; @@ -482,9 +466,8 @@ int main() std::cout << "** Test vpRotationMatrix" << std::endl; vpRotationMatrix R_ref(vpRxyzVector(0, -M_PI_2, M_PI)); std::cout << "R_ref:\n" << R_ref << std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) { - vpRotationMatrix R({0, 0, -1, 0, -1, 0, -1, 0, 0}); + vpRotationMatrix R({ 0, 0, -1, 0, -1, 0, -1, 0, 0 }); std::cout << "R:\n" << R << std::endl; if (!equal(R_ref, R, epsilon)) { return EXIT_FAILURE; @@ -492,13 +475,12 @@ int main() } { vpRotationMatrix R; - R = {0, 0, -1, 0, -1, 0, -1, 0, 0}; + R = { 0, 0, -1, 0, -1, 0, -1, 0, 0 }; std::cout << "R:\n" << R << std::endl; if (!equal(R_ref, R, epsilon)) { return EXIT_FAILURE; } } -#endif { vpRotationMatrix R; R << 0, 0, -1, 0, -1, 0, -1, 0, 0; diff --git a/modules/core/test/tools/geometry/testPolygon2.cpp b/modules/core/test/tools/geometry/testPolygon2.cpp index 6cbdbf513d..7653e78a15 100644 --- a/modules/core/test/tools/geometry/testPolygon2.cpp +++ b/modules/core/test/tools/geometry/testPolygon2.cpp @@ -55,26 +55,21 @@ TEST_CASE("Check OpenCV-bsed convex hull") { SECTION("From vpRect") { - const vpRect rect{0, 0, 200, 400}; - const std::vector rect_corners{rect.getTopLeft(), rect.getTopRight(), rect.getBottomRight(), - rect.getBottomLeft()}; + const vpRect rect { 0, 0, 200, 400 }; + const std::vector rect_corners { rect.getTopLeft(), rect.getTopRight(), rect.getBottomRight(), + rect.getBottomLeft() }; - vpPolygon poly{}; + vpPolygon poly {}; poly.buildFrom(rect_corners, true); #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_14) for (const auto &poly_corner : poly.getCorners()) { REQUIRE(std::find(cbegin(rect_corners), cend(rect_corners), poly_corner) != cend(rect_corners)); } -#elif (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#else for (const auto &poly_corner : poly.getCorners()) { REQUIRE(std::find(begin(rect_corners), end(rect_corners), poly_corner) != end(rect_corners)); } -#else - for (std::vector::const_iterator it = poly.getCorners().begin(); it != poly.getCorners().end(); - ++it) { - REQUIRE(std::find(rect_corners.begin(), rect_corners.end(), *it) != rect_corners.end()); - } #endif } } @@ -109,18 +104,12 @@ bool testConvexHull() 
return false; } } -#elif (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#else for (const auto &poly_corner : poly.getCorners()) { if (std::find(begin(rect_corners), end(rect_corners), poly_corner) == end(rect_corners)) { return false; } } -#else - for (std::vector::const_iterator it = poly.getCorners().begin(); it != poly.getCorners().end(); ++it) { - if (std::find(rect_corners.begin(), rect_corners.end(), *it) == rect_corners.end()) { - return false; - } - } #endif #endif @@ -130,7 +119,7 @@ bool testConvexHull() int main() { - if (! testConvexHull()) { + if (!testConvexHull()) { return EXIT_FAILURE; } diff --git a/modules/gui/include/visp3/gui/vpColorBlindFriendlyPalette.h b/modules/gui/include/visp3/gui/vpColorBlindFriendlyPalette.h index b0974ecb5e..b95e7f5dcc 100755 --- a/modules/gui/include/visp3/gui/vpColorBlindFriendlyPalette.h +++ b/modules/gui/include/visp3/gui/vpColorBlindFriendlyPalette.h @@ -40,7 +40,6 @@ #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /** * \brief Class that furnishes a set of colors that color blind people * should be able to distinguish one from another. @@ -199,5 +198,4 @@ std::ostream &operator<<(std::ostream &os, const vpColorBlindFriendlyPalette &co */ std::istream &operator>>(std::istream &is, vpColorBlindFriendlyPalette &color); -#endif #endif // _vpColorBlindFliendlyPalette_h_ diff --git a/modules/gui/include/visp3/gui/vpDisplayOpenCV.h b/modules/gui/include/visp3/gui/vpDisplayOpenCV.h index 1fdff217ee..b977619fbf 100644 --- a/modules/gui/include/visp3/gui/vpDisplayOpenCV.h +++ b/modules/gui/include/visp3/gui/vpDisplayOpenCV.h @@ -38,9 +38,7 @@ #if defined(HAVE_OPENCV_HIGHGUI) -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #include -#endif #include #include @@ -267,9 +265,7 @@ class VISP_EXPORT vpDisplayOpenCV : public vpDisplay static void on_mouse(int event, int x, int y, int flags, void *param); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) void overlay(std::function overlay_function, double opacity); -#endif }; #endif diff --git a/modules/gui/include/visp3/gui/vpPclViewer.h b/modules/gui/include/visp3/gui/vpPclViewer.h index 5f2ea4e227..ace4fb1363 100755 --- a/modules/gui/include/visp3/gui/vpPclViewer.h +++ b/modules/gui/include/visp3/gui/vpPclViewer.h @@ -37,7 +37,7 @@ #include -#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_PCL) // System #include #include diff --git a/modules/gui/src/display/vpDisplayOpenCV.cpp b/modules/gui/src/display/vpDisplayOpenCV.cpp index 69f4b2f354..56e28f2bac 100644 --- a/modules/gui/src/display/vpDisplayOpenCV.cpp +++ b/modules/gui/src/display/vpDisplayOpenCV.cpp @@ -102,14 +102,14 @@ unsigned int vpDisplayOpenCV::m_nbWindows = 0; vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, vpScaleType scaleType) : vpDisplay(), #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(NULL), col(NULL), cvcolor(), font(NULL), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif - fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), - x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), - x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), - y_rbuttonup(0), rbuttonup(false) + fontHeight(10), x_move(0), 
y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), + x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), + x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), + y_rbuttonup(0), rbuttonup(false) { setScale(scaleType, I.getWidth(), I.getHeight()); init(I); @@ -142,14 +142,14 @@ vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, int x, int y, const vpScaleType scaleType) : vpDisplay(), #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(NULL), col(NULL), cvcolor(), font(NULL), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif - fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), - x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), - x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), - y_rbuttonup(0), rbuttonup(false) + fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), + x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), + x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), + y_rbuttonup(0), rbuttonup(false) { setScale(scaleType, I.getWidth(), I.getHeight()); init(I, x, y, title); @@ -176,14 +176,14 @@ vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, int x, int y, const vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, vpScaleType scaleType) : #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(NULL), col(NULL), cvcolor(), font(NULL), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif - fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), - x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), - x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), - y_rbuttonup(0), rbuttonup(false) + fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), + x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), + x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), + y_rbuttonup(0), rbuttonup(false) { setScale(scaleType, I.getWidth(), I.getHeight()); init(I); @@ -212,14 +212,14 @@ vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, vpScaleType scaleType) vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, int x, int y, const std::string &title, vpScaleType scaleType) : #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(NULL), col(NULL), cvcolor(), font(NULL), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif - 
fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), - x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), - x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), - y_rbuttonup(0), rbuttonup(false) + fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), + x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), + x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), + y_rbuttonup(0), rbuttonup(false) { setScale(scaleType, I.getWidth(), I.getHeight()); init(I, x, y, title); @@ -250,21 +250,22 @@ int main() vpDisplayOpenCV::vpDisplayOpenCV(int x, int y, const std::string &title) : #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(NULL), col(NULL), cvcolor(), font(NULL), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif - fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), - x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), - x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), - y_rbuttonup(0), rbuttonup(false) + fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), + x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), + x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), + y_rbuttonup(0), rbuttonup(false) { m_windowXPosition = x; m_windowYPosition = y; if (!title.empty()) { m_title = title; - } else { + } + else { std::ostringstream s; s << m_nbWindows++; m_title = std::string("Window ") + s.str(); @@ -309,16 +310,15 @@ int main() vpDisplayOpenCV::vpDisplayOpenCV() : #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(NULL), col(NULL), cvcolor(), font(NULL), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif - fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), - x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), - x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), - y_rbuttonup(0), rbuttonup(false) -{ -} + fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), + x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), + x_lbuttonup(0), y_lbuttonup(0), lbuttonup(false), x_mbuttonup(0), y_mbuttonup(0), mbuttonup(false), x_rbuttonup(0), + y_rbuttonup(0), rbuttonup(false) +{ } /*! Destructor. 
@@ -401,7 +401,8 @@ void vpDisplayOpenCV::init(unsigned int w, unsigned int h, int x, int y, const s if (m_title.empty()) { if (!title.empty()) { m_title = std::string(title); - } else { + } + else { std::ostringstream s; s << m_nbWindows++; @@ -561,7 +562,8 @@ void vpDisplayOpenCV::setWindowPosition(int winx, int winy) #else cv::moveWindow(this->m_title.c_str(), winx, winy); #endif - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -590,7 +592,8 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) cvReleaseImage(&m_background); m_background = cvCreateImage(size, depth, channels); } - } else { + } + else { m_background = cvCreateImage(size, depth, channels); } @@ -604,7 +607,8 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) *(dst_24++) = val; } } - } else { + } + else { for (unsigned int i = 0; i < m_height; i++) { unsigned char *dst_24 = (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep); for (unsigned int j = 0; j < m_width; j++) { @@ -635,7 +639,8 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) *(dst_24++) = val; } } - } else { + } + else { for (unsigned int i = 0; i < m_height; i++) { unsigned char *dst_24 = (unsigned char *)m_background.data + (int)(i * 3 * m_width); for (unsigned int j = 0; j < m_width; j++) { @@ -648,7 +653,8 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) } #endif - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -683,7 +689,8 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpI cvReleaseImage(&m_background); m_background = cvCreateImage(size, depth, channels); } - } else { + } + else { m_background = cvCreateImage(size, depth, channels); } @@ -694,7 +701,7 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpI unsigned int j_max = (std::min)(j_min + w, m_width); for (unsigned int i = i_min; i < i_max; i++) { unsigned char *dst_24 = - (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep + j_min * 3); + (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep + j_min * 3); for (unsigned int j = j_min; j < j_max; j++) { unsigned char val = I[i][j]; *(dst_24++) = val; @@ -702,14 +709,15 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpI *(dst_24++) = val; } } - } else { + } + else { int i_min = (std::max)((int)ceil(iP.get_i() / m_scale), 0); int j_min = (std::max)((int)ceil(iP.get_j() / m_scale), 0); int i_max = (std::min)((int)ceil((iP.get_i() + h) / m_scale), (int)m_height); int j_max = (std::min)((int)ceil((iP.get_j() + w) / m_scale), (int)m_width); for (int i = i_min; i < i_max; i++) { unsigned char *dst_24 = - (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep + j_min * 3); + (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep + j_min * 3); for (int j = j_min; j < j_max; j++) { unsigned char val = I[i * m_scale][j * m_scale]; *(dst_24++) = val; @@ -742,7 +750,8 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpI *(dst_24++) = val; } } - } else { + } + else { int i_min = (std::max)((int)ceil(iP.get_i() / m_scale), 0); int j_min = (std::max)((int)ceil(iP.get_j() / m_scale), 0); int i_max = (std::min)((int)ceil((iP.get_i() + h) / m_scale), (int)m_height); @@ -758,7 +767,8 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpI } } #endif - } else { + } + else { 
throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -789,7 +799,8 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) cvReleaseImage(&m_background); m_background = cvCreateImage(size, depth, channels); } - } else { + } + else { m_background = cvCreateImage(size, depth, channels); } @@ -803,7 +814,8 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) *(dst_24++) = val.R; } } - } else { + } + else { for (unsigned int i = 0; i < m_height; i++) { unsigned char *dst_24 = (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep); for (unsigned int j = 0; j < m_width; j++) { @@ -833,7 +845,8 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) *(dst_24++) = val.R; } } - } else { + } + else { for (unsigned int i = 0; i < m_height; i++) { unsigned char *dst_24 = (unsigned char *)m_background.data + (int)(i * 3 * m_width); for (unsigned int j = 0; j < m_width; j++) { @@ -845,7 +858,8 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) } } #endif - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -879,7 +893,8 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpImagePoi cvReleaseImage(&m_background); m_background = cvCreateImage(size, depth, channels); } - } else { + } + else { m_background = cvCreateImage(size, depth, channels); } @@ -890,7 +905,7 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpImagePoi unsigned int j_max = (std::min)(j_min + w, m_width); for (unsigned int i = i_min; i < i_max; i++) { unsigned char *dst_24 = - (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep + j_min * 3); + (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep + j_min * 3); for (unsigned int j = j_min; j < j_max; j++) { vpRGBa val = I[i][j]; *(dst_24++) = val.B; @@ -898,14 +913,15 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpImagePoi *(dst_24++) = val.R; } } - } else { + } + else { int i_min = (std::max)((int)ceil(iP.get_i() / m_scale), 0); int j_min = (std::max)((int)ceil(iP.get_j() / m_scale), 0); int i_max = (std::min)((int)ceil((iP.get_i() + h) / m_scale), (int)m_height); int j_max = (std::min)((int)ceil((iP.get_j() + w) / m_scale), (int)m_width); for (int i = i_min; i < i_max; i++) { unsigned char *dst_24 = - (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep + j_min * 3); + (unsigned char *)m_background->imageData + (int)(i * m_background->widthStep + j_min * 3); for (int j = j_min; j < j_max; j++) { vpRGBa val = I[i * m_scale][j * m_scale]; *(dst_24++) = val.B; @@ -937,7 +953,8 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpImagePoi *(dst_24++) = val.R; } } - } else { + } + else { int i_min = (std::max)((int)ceil(iP.get_i() / m_scale), 0); int j_min = (std::max)((int)ceil(iP.get_j() / m_scale), 0); int i_max = (std::min)((int)ceil((iP.get_i() + h) / m_scale), (int)m_height); @@ -953,7 +970,8 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpImagePoi } } #endif - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1019,7 +1037,8 @@ void vpDisplayOpenCV::flushDisplay() cv::imshow(this->m_title, m_background); cv::waitKey(5); #endif - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1040,7 +1059,8 @@ void vpDisplayOpenCV::flushDisplayROI(const 
vpImagePoint & /*iP*/, const unsigne cv::imshow(this->m_title.c_str(), m_background); cv::waitKey(5); #endif - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1076,7 +1096,8 @@ void vpDisplayOpenCV::displayArrow(const vpImagePoint &ip1, const vpImagePoint & if ((std::fabs(a) <= std::numeric_limits::epsilon()) && (std::fabs(b) <= std::numeric_limits::epsilon())) { // DisplayCrossLarge(i1,j1,3,col) ; - } else { + } + else { a /= lg; b /= lg; @@ -1099,7 +1120,8 @@ void vpDisplayOpenCV::displayArrow(const vpImagePoint &ip1, const vpImagePoint & displayLine(ip1, ip2, color, thickness); } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1128,7 +1150,8 @@ void vpDisplayOpenCV::displayCharString(const vpImagePoint &ip, const char *text cv::Point(vpMath::round(ip.get_u() / m_scale), vpMath::round(ip.get_v() / m_scale + fontHeight)), font, fontScale, col[color.id]); #endif - } else { + } + else { cvcolor = CV_RGB(color.R, color.G, color.B); #if VISP_HAVE_OPENCV_VERSION < 0x020408 cvPutText(m_background, text, @@ -1140,7 +1163,8 @@ void vpDisplayOpenCV::displayCharString(const vpImagePoint &ip, const char *text font, fontScale, cvcolor); #endif } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1166,7 +1190,8 @@ void vpDisplayOpenCV::displayCircle(const vpImagePoint ¢er, unsigned int rad cv::Scalar cv_color; if (color.id < vpColor::id_unknown) { cv_color = col[color.id]; - } else { + } + else { cv_color = CV_RGB(color.R, color.G, color.B); } @@ -1177,13 +1202,13 @@ void vpDisplayOpenCV::displayCircle(const vpImagePoint ¢er, unsigned int rad #else cv::circle(m_background, cv::Point(x, y), r, cv_color, cv_thickness); #endif - } else { + } + else { #if VISP_HAVE_OPENCV_VERSION >= 0x030000 int filled = cv::FILLED; #else int filled = CV_FILLED; #endif -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) double opacity = static_cast(color.A) / 255.0; #if VISP_HAVE_OPENCV_VERSION < 0x020408 overlay([x, y, r, cv_color, filled](cv::Mat image) { cvCircle(image, cvPoint(x, y), r, cv_color, filled); }, @@ -1191,16 +1216,10 @@ void vpDisplayOpenCV::displayCircle(const vpImagePoint ¢er, unsigned int rad #else overlay([x, y, r, cv_color, filled](cv::Mat image) { cv::circle(image, cv::Point(x, y), r, cv_color, filled); }, opacity); -#endif -#else -#if VISP_HAVE_OPENCV_VERSION < 0x020408 - cvCircle(m_background, cvPoint(x, y), r, cv_color, filled); -#else - cv::circle(m_background, cv::Point(x, y), r, cv_color, filled); -#endif #endif } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1227,7 +1246,8 @@ void vpDisplayOpenCV::displayCross(const vpImagePoint &ip, unsigned int size, co right.set_j(ip.get_j() + size / 2); displayLine(top, bottom, color, thickness); displayLine(left, right, color, thickness); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1253,7 +1273,8 @@ void vpDisplayOpenCV::displayDotLine(const vpImagePoint &ip1, const vpImagePoint if (ip2_.get_i() < ip1_.get_i()) { std::swap(ip1_, ip2_); } - } else if (ip2_.get_j() < ip1_.get_j()) { + } + else if (ip2_.get_j() < ip1_.get_j()) { std::swap(ip1_, ip2_); } @@ -1268,13 +1289,15 @@ void vpDisplayOpenCV::displayDotLine(const vpImagePoint &ip1, const vpImagePoint double j = 
ip1_.get_j(); displayLine(vpImagePoint(i, j), vpImagePoint(i + deltai, j), color, thickness); } - } else { + } + else { for (unsigned int j = (unsigned int)ip1_.get_j(); j < ip2_.get_j(); j += (unsigned int)(2 * deltaj)) { double i = slope * j + orig; displayLine(vpImagePoint(i, j), vpImagePoint(i + deltai, j + deltaj), color, thickness); } } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1299,7 +1322,8 @@ void vpDisplayOpenCV::displayLine(const vpImagePoint &ip1, const vpImagePoint &i cv::Point(vpMath::round(ip2.get_u() / m_scale), vpMath::round(ip2.get_v() / m_scale)), col[color.id], (int)thickness); #endif - } else { + } + else { cvcolor = CV_RGB(color.R, color.G, color.B); #if VISP_HAVE_OPENCV_VERSION < 0x020408 cvLine(m_background, cvPoint(vpMath::round(ip1.get_u() / m_scale), vpMath::round(ip1.get_v() / m_scale)), @@ -1311,7 +1335,8 @@ void vpDisplayOpenCV::displayLine(const vpImagePoint &ip1, const vpImagePoint &i (int)thickness); #endif } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1336,7 +1361,8 @@ void vpDisplayOpenCV::displayPoint(const vpImagePoint &ip, const vpColor &color, cv::Point(vpMath::round(ip.get_u() / m_scale + thickness - 1), vpMath::round(ip.get_v() / m_scale)), col[color.id], (int)thickness); #endif - } else { + } + else { cvcolor = CV_RGB(color.R, color.G, color.B); #if VISP_HAVE_OPENCV_VERSION < 0x020408 cvLine(m_background, cvPoint(vpMath::round(ip.get_u() / m_scale), vpMath::round(ip.get_v() / m_scale)), @@ -1349,7 +1375,8 @@ void vpDisplayOpenCV::displayPoint(const vpImagePoint &ip, const vpColor &color, #endif } } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1380,7 +1407,8 @@ void vpDisplayOpenCV::displayRectangle(const vpImagePoint &topLeft, unsigned int cv::Scalar cv_color; if (color.id < vpColor::id_unknown) { cv_color = col[color.id]; - } else { + } + else { cv_color = CV_RGB(color.R, color.G, color.B); } @@ -1391,17 +1419,17 @@ void vpDisplayOpenCV::displayRectangle(const vpImagePoint &topLeft, unsigned int #else cv::rectangle(m_background, cv::Point(left, top), cv::Point(right, bottom), cv_color, cv_thickness); #endif - } else { + } + else { #if VISP_HAVE_OPENCV_VERSION >= 0x030000 int filled = cv::FILLED; #else int filled = CV_FILLED; #endif -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) double opacity = static_cast(color.A) / 255.0; #if VISP_HAVE_OPENCV_VERSION < 0x020408 overlay([left, top, right, bottom, cv_color, filled]( - cv::Mat image) { cvRectangle(image, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); }, + cv::Mat image) { cvRectangle(image, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); }, opacity); #else overlay( @@ -1409,16 +1437,10 @@ void vpDisplayOpenCV::displayRectangle(const vpImagePoint &topLeft, unsigned int cv::rectangle(image, cv::Point(left, top), cv::Point(right, bottom), cv_color, filled); }, opacity); -#endif -#else -#if VISP_HAVE_OPENCV_VERSION < 0x020408 - cvRectangle(m_background, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); -#else - cv::rectangle(m_background, cv::Point(left, top), cv::Point(right, bottom), cv_color, filled); -#endif #endif } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1447,7 +1469,8 @@ void vpDisplayOpenCV::displayRectangle(const 
vpImagePoint &topLeft, const vpImag cv::Scalar cv_color; if (color.id < vpColor::id_unknown) { cv_color = col[color.id]; - } else { + } + else { cv_color = CV_RGB(color.R, color.G, color.B); } @@ -1458,17 +1481,17 @@ void vpDisplayOpenCV::displayRectangle(const vpImagePoint &topLeft, const vpImag #else cv::rectangle(m_background, cv::Point(left, top), cv::Point(right, bottom), cv_color, cv_thickness); #endif - } else { + } + else { #if VISP_HAVE_OPENCV_VERSION >= 0x030000 int filled = cv::FILLED; #else int filled = CV_FILLED; #endif -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) double opacity = static_cast(color.A) / 255.0; #if VISP_HAVE_OPENCV_VERSION < 0x020408 overlay([left, top, right, bottom, cv_color, filled]( - cv::Mat image) { cvRectangle(image, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); }, + cv::Mat image) { cvRectangle(image, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); }, opacity); #else overlay( @@ -1476,16 +1499,10 @@ void vpDisplayOpenCV::displayRectangle(const vpImagePoint &topLeft, const vpImag cv::rectangle(image, cv::Point(left, top), cv::Point(right, bottom), cv_color, filled); }, opacity); -#endif -#else -#if VISP_HAVE_OPENCV_VERSION < 0x020408 - cvRectangle(m_background, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); -#else - cv::rectangle(m_background, cv::Point(left, top), cv::Point(right, bottom), cv_color, filled); -#endif #endif } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1513,7 +1530,8 @@ void vpDisplayOpenCV::displayRectangle(const vpRect &rectangle, const vpColor &c cv::Scalar cv_color; if (color.id < vpColor::id_unknown) { cv_color = col[color.id]; - } else { + } + else { cv_color = CV_RGB(color.R, color.G, color.B); } @@ -1524,17 +1542,17 @@ void vpDisplayOpenCV::displayRectangle(const vpRect &rectangle, const vpColor &c #else cv::rectangle(m_background, cv::Point(left, top), cv::Point(right, bottom), cv_color, cv_thickness); #endif - } else { + } + else { #if VISP_HAVE_OPENCV_VERSION >= 0x030000 int filled = cv::FILLED; #else int filled = CV_FILLED; #endif -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) double opacity = static_cast(color.A) / 255.0; #if VISP_HAVE_OPENCV_VERSION < 0x020408 overlay([left, top, right, bottom, cv_color, filled]( - cv::Mat image) { cvRectangle(image, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); }, + cv::Mat image) { cvRectangle(image, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); }, opacity); #else overlay( @@ -1542,16 +1560,10 @@ void vpDisplayOpenCV::displayRectangle(const vpRect &rectangle, const vpColor &c cv::rectangle(image, cv::Point(left, top), cv::Point(right, bottom), cv_color, filled); }, opacity); -#endif -#else -#if VISP_HAVE_OPENCV_VERSION < 0x020408 - cvRectangle(m_background, cvPoint(left, top), cvPoint(right, bottom), cv_color, filled); -#else - cv::rectangle(m_background, cv::Point(left, top), cv::Point(right, bottom), cv_color, filled); -#endif #endif } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -1601,7 +1613,8 @@ bool vpDisplayOpenCV::getClick(bool blocking) cv::waitKey(10); #endif } while (ret == false && blocking == true); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } return ret; @@ -1668,7 +1681,8 @@ bool vpDisplayOpenCV::getClick(vpImagePoint &ip, bool blocking) cv::waitKey(10); 
#endif } while (ret == false && blocking == true); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } return ret; @@ -1739,7 +1753,8 @@ bool vpDisplayOpenCV::getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonTyp cv::waitKey(10); #endif } while (ret == false && blocking == true); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } return ret; @@ -1812,7 +1827,8 @@ bool vpDisplayOpenCV::getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonT cv::waitKey(10); #endif } while (ret == false && blocking == true); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } return ret; @@ -1949,7 +1965,8 @@ bool vpDisplayOpenCV::getKeyboardEvent(bool blocking) if (key_pressed == -1) return false; return true; - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } // return false; // Never reached after throw() @@ -1996,7 +2013,8 @@ bool vpDisplayOpenCV::getKeyboardEvent(std::string &key, bool blocking) key = ss.str(); } return true; - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } // return false; // Never reached after throw() @@ -2056,7 +2074,8 @@ bool vpDisplayOpenCV::getPointerPosition(vpImagePoint &ip) ip.set_v(v); } return false; - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "OpenCV not initialized")); } } @@ -2103,7 +2122,7 @@ void vpDisplayOpenCV::getScreenSize(unsigned int &w, unsigned int &h) h = GetSystemMetrics(SM_CYSCREEN); #else throw(vpException(vpException::functionNotImplementedError, "The function vpDisplayOpenCV::getScreenSize() is not " - "implemented on winrt")); + "implemented on winrt")); #endif #endif } @@ -2128,7 +2147,6 @@ unsigned int vpDisplayOpenCV::getScreenHeight() return height; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! * Initialize display overlay layer for transparency. 
* \param overlay_function : Overlay function @@ -2141,8 +2159,9 @@ void vpDisplayOpenCV::overlay(std::function overlay_function, d if (opacity < 1.0) { // Deep copy overlay = m_background.clone(); - } else { - // Shallow copy + } + else { + // Shallow copy overlay = m_background; } @@ -2153,10 +2172,9 @@ void vpDisplayOpenCV::overlay(std::function overlay_function, d cv::addWeighted(overlay, opacity, m_background, 1.0 - opacity, 0.0, m_background); } } -#endif #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpDisplayOpenCV.cpp.o) has no // symbols -void dummy_vpDisplayOpenCV(){}; +void dummy_vpDisplayOpenCV() { }; #endif diff --git a/modules/gui/src/pointcloud/vpColorBlindFriendlyPalette.cpp b/modules/gui/src/pointcloud/vpColorBlindFriendlyPalette.cpp index 248582888a..c0874dfdf4 100755 --- a/modules/gui/src/pointcloud/vpColorBlindFriendlyPalette.cpp +++ b/modules/gui/src/pointcloud/vpColorBlindFriendlyPalette.cpp @@ -35,7 +35,6 @@ #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) std::vector vpColorBlindFriendlyPalette::s_paletteNames = { @@ -206,5 +205,3 @@ std::istream &operator>>(std::istream &is, vpColorBlindFriendlyPalette &color) color.set_fromString(nameColor); return is; } - -#endif diff --git a/modules/gui/src/pointcloud/vpPclViewer.cpp b/modules/gui/src/pointcloud/vpPclViewer.cpp index a8ed45b2cb..93e14b888d 100755 --- a/modules/gui/src/pointcloud/vpPclViewer.cpp +++ b/modules/gui/src/pointcloud/vpPclViewer.cpp @@ -35,7 +35,7 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS #include -#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_PCL) // PCL #include diff --git a/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h b/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h index 953fbc920b..c711968932 100644 --- a/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h +++ b/modules/imgproc/include/visp3/imgproc/vpCircleHoughTransform.h @@ -46,8 +46,6 @@ #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - // 3rd parties inclue #ifdef VISP_HAVE_NLOHMANN_JSON #include @@ -439,7 +437,6 @@ class VISP_EXPORT vpCircleHoughTransform */ std::vector detect(const vpImage &I); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /** * \brief Perform Circle Hough Transform to detect the circles in in a gray-scale image. * Get only the \b nbCircles circles having the greatest number of votes. @@ -451,7 +448,6 @@ class VISP_EXPORT vpCircleHoughTransform * of votes detected in the image. 
*/ std::vector detect(const vpImage &I, const int &nbCircles); -#endif //@} /** @name Configuration from files */ @@ -956,4 +952,3 @@ class VISP_EXPORT vpCircleHoughTransform std::vector m_finalCircleVotes; /*!< Number of votes for the final circles.*/ }; #endif -#endif diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index 20e4f40667..de36e4f398 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -34,7 +34,6 @@ #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpCircleHoughTransform::vpCircleHoughTransform() : m_algoParams() { @@ -98,7 +97,6 @@ vpCircleHoughTransform::saveConfigurationInJSON(const std::string &jsonPath) con { m_algoParams.saveConfigurationInJSON(jsonPath); } -#endif void vpCircleHoughTransform::initGaussianFilters() diff --git a/modules/imgproc/src/vpRetinex.cpp b/modules/imgproc/src/vpRetinex.cpp index 4ef77185dc..dea7f9a4da 100644 --- a/modules/imgproc/src/vpRetinex.cpp +++ b/modules/imgproc/src/vpRetinex.cpp @@ -215,11 +215,8 @@ void MSRCR(vpImage &I, int _scale, int scaleDiv, int level, double dynam std::vector diff(dest.size()); -#if VISP_CXX_STANDARD > VISP_CXX_STANDARD_98 std::transform(dest.begin(), dest.end(), diff.begin(), std::bind(std::minus(), std::placeholders::_1, mean)); -#else - std::transform(dest.begin(), dest.end(), diff.begin(), std::bind2nd(std::minus(), mean)); -#endif + double sq_sum = std::inner_product(diff.begin(), diff.end(), diff.begin(), 0.0); double stdev = std::sqrt(sq_sum / dest.size()); diff --git a/modules/io/include/visp3/io/vpImageQueue.h b/modules/io/include/visp3/io/vpImageQueue.h index 97ca1c4c91..f7ea820a87 100644 --- a/modules/io/include/visp3/io/vpImageQueue.h +++ b/modules/io/include/visp3/io/vpImageQueue.h @@ -36,8 +36,6 @@ #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - #include #include #include @@ -284,4 +282,3 @@ template class vpImageQueue }; #endif -#endif diff --git a/modules/io/include/visp3/io/vpImageStorageWorker.h b/modules/io/include/visp3/io/vpImageStorageWorker.h index 9fc16fab51..a69fe16750 100644 --- a/modules/io/include/visp3/io/vpImageStorageWorker.h +++ b/modules/io/include/visp3/io/vpImageStorageWorker.h @@ -36,8 +36,6 @@ #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - #include #include @@ -134,4 +132,3 @@ template class vpImageStorageWorker }; #endif -#endif diff --git a/modules/io/src/image/private/vpImageIoBackend.h b/modules/io/src/image/private/vpImageIoBackend.h index 10fde6b763..dc823322dc 100644 --- a/modules/io/src/image/private/vpImageIoBackend.h +++ b/modules/io/src/image/private/vpImageIoBackend.h @@ -90,10 +90,8 @@ void readSimdlib(vpImage &I, const std::string &filename); void readSimdlib(vpImage &I, const std::string &filename); // TinyEXR lib -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) void readEXRTiny(vpImage &I, const std::string &filename); void readEXRTiny(vpImage &I, const std::string &filename); -#endif void writeJPEGSimdlib(const vpImage &I, const std::string &filename, int quality); void writeJPEGSimdlib(const vpImage &I, const std::string &filename, int quality); @@ -112,9 +110,7 @@ void writePNGStb(const vpImage &I, const std::string &filename); void writePNGStb(const vpImage &I, const std::string &filename); // TinyEXR lib -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) void writeEXRTiny(const vpImage &I, const std::string &filename); void writeEXRTiny(const vpImage &I, const std::string &filename); -#endif #endif diff 
--git a/modules/io/src/image/private/vpImageIoTinyEXR.cpp b/modules/io/src/image/private/vpImageIoTinyEXR.cpp index 02766d6822..780c903ca5 100644 --- a/modules/io/src/image/private/vpImageIoTinyEXR.cpp +++ b/modules/io/src/image/private/vpImageIoTinyEXR.cpp @@ -38,8 +38,6 @@ #include "vpImageIoBackend.h" -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - #define TINYEXR_USE_MINIZ 0 #define TINYEXR_USE_STB_ZLIB 1 #include @@ -65,7 +63,7 @@ void readEXRTiny(vpImage &I, const std::string &filename) EXRHeader exr_header; InitEXRHeader(&exr_header); - const char* err = NULL; // or `nullptr` in C++11 or later. + const char *err = NULL; // or `nullptr` in C++11 or later. ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename.c_str(), &err); if (ret != 0) { std::string err_msg(err); @@ -97,7 +95,8 @@ void readEXRTiny(vpImage &I, const std::string &filename) if (exr_image.images) { I.resize(exr_image.height, exr_image.width); memcpy(I.bitmap, exr_image.images[0], exr_image.height*exr_image.width*sizeof(float)); - } else if (exr_image.tiles) { + } + else if (exr_image.tiles) { I.resize(exr_image.height, exr_image.width); size_t data_width = static_cast(exr_header.data_window.max_x - exr_header.data_window.min_x + 1); @@ -137,7 +136,7 @@ void readEXRTiny(vpImage &I, const std::string &filename) EXRHeader exr_header; InitEXRHeader(&exr_header); - const char* err = NULL; // or `nullptr` in C++11 or later. + const char *err = NULL; // or `nullptr` in C++11 or later. ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename.c_str(), &err); if (ret != 0) { std::string err_msg(err); @@ -175,7 +174,8 @@ void readEXRTiny(vpImage &I, const std::string &filename) I[i][j].B = reinterpret_cast(exr_image.images)[0][i * exr_image.width + j]; } } - } else if (exr_image.tiles) { + } + else if (exr_image.tiles) { I.resize(exr_image.height, exr_image.width); size_t data_width = static_cast(exr_header.data_window.max_x - exr_header.data_window.min_x + 1); @@ -219,7 +219,7 @@ void writeEXRTiny(const vpImage &I, const std::string &filename) image.num_channels = 1; - image.images = (unsigned char**)&I.bitmap; + image.images = (unsigned char **)&I.bitmap; image.width = I.getWidth(); image.height = I.getHeight(); @@ -236,7 +236,7 @@ void writeEXRTiny(const vpImage &I, const std::string &filename) header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of output image to be stored in .EXR } - const char* err = NULL; // or nullptr in C++11 or later. + const char *err = NULL; // or nullptr in C++11 or later. int ret = SaveEXRImageToFile(&image, &header, filename.c_str(), &err); if (ret != TINYEXR_SUCCESS) { std::string err_msg(err); @@ -274,12 +274,12 @@ void writeEXRTiny(const vpImage &I, const std::string &filename) images[2][i] = I.bitmap[i].B; } - float* image_ptr[3]; + float *image_ptr[3]; image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R - image.images = (unsigned char**)image_ptr; + image.images = (unsigned char **)image_ptr; image.width = I.getWidth(); image.height = I.getHeight(); @@ -298,7 +298,7 @@ void writeEXRTiny(const vpImage &I, const std::string &filename) header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of output image to be stored in .EXR } - const char* err = NULL; // or nullptr in C++11 or later. + const char *err = NULL; // or nullptr in C++11 or later. 
int ret = SaveEXRImageToFile(&image, &header, filename.c_str(), &err); if (ret != TINYEXR_SUCCESS) { std::string err_msg(err); @@ -313,5 +313,3 @@ void writeEXRTiny(const vpImage &I, const std::string &filename) free(header.requested_pixel_types); free(header.pixel_types); } - -#endif diff --git a/modules/io/src/image/vpImageIo.cpp b/modules/io/src/image/vpImageIo.cpp index a74a61b529..9980c2f3d2 100644 --- a/modules/io/src/image/vpImageIo.cpp +++ b/modules/io/src/image/vpImageIo.cpp @@ -397,16 +397,18 @@ void vpImageIo::readJPEG(vpImage &I, const std::string &filename, if (backend == IO_SYSTEM_LIB_BACKEND) { #if !defined(VISP_HAVE_JPEG) std::string message = - "Libjpeg backend is not available to read file \"" + filename + "\": switch to stb_image backend"; + "Libjpeg backend is not available to read file \"" + filename + "\": switch to stb_image backend"; backend = IO_STB_IMAGE_BACKEND; #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = - "OpenCV backend is not available to read file \"" + filename + "\": switch to stb_image backend"; + "OpenCV backend is not available to read file \"" + filename + "\": switch to stb_image backend"; backend = IO_STB_IMAGE_BACKEND; #endif - } else if (backend == IO_DEFAULT_BACKEND) { + } + else if (backend == IO_DEFAULT_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) backend = IO_OPENCV_BACKEND; #elif defined(VISP_HAVE_JPEG) @@ -420,13 +422,16 @@ void vpImageIo::readJPEG(vpImage &I, const std::string &filename, #if defined(VISP_HAVE_JPEG) readJPEGLibjpeg(I, filename); #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) readOpenCV(I, filename); #endif - } else if (backend == IO_STB_IMAGE_BACKEND) { + } + else if (backend == IO_STB_IMAGE_BACKEND) { readStb(I, filename); - } else if (backend == IO_SIMDLIB_BACKEND) { + } + else if (backend == IO_SIMDLIB_BACKEND) { readSimdlib(I, filename); } } @@ -446,16 +451,18 @@ void vpImageIo::readJPEG(vpImage &I, const std::string &filename, int ba if (backend == IO_SYSTEM_LIB_BACKEND) { #if !defined(VISP_HAVE_JPEG) std::string message = - "Libjpeg backend is not available to read file \"" + filename + "\": switch to stb_image backend"; + "Libjpeg backend is not available to read file \"" + filename + "\": switch to stb_image backend"; backend = IO_STB_IMAGE_BACKEND; #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = - "OpenCV backend is not available to read file \"" + filename + "\": switch to stb_image backend"; + "OpenCV backend is not available to read file \"" + filename + "\": switch to stb_image backend"; backend = IO_STB_IMAGE_BACKEND; #endif - } else if (backend == IO_DEFAULT_BACKEND) { + } + else if (backend == IO_DEFAULT_BACKEND) { #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) backend = IO_OPENCV_BACKEND; #elif defined(VISP_HAVE_JPEG) @@ -469,13 +476,16 @@ void vpImageIo::readJPEG(vpImage &I, const std::string &filename, int ba #if 
defined(VISP_HAVE_JPEG) readJPEGLibjpeg(I, filename); #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) readOpenCV(I, filename); #endif - } else if (backend == IO_STB_IMAGE_BACKEND) { + } + else if (backend == IO_STB_IMAGE_BACKEND) { readStb(I, filename); - } else if (backend == IO_SIMDLIB_BACKEND) { + } + else if (backend == IO_SIMDLIB_BACKEND) { readSimdlib(I, filename); } } @@ -495,16 +505,18 @@ void vpImageIo::readPNG(vpImage &I, const std::string &filename, if (backend == IO_SYSTEM_LIB_BACKEND) { #if !defined(VISP_HAVE_PNG) std::string message = - "Libpng backend is not available to read file \"" + filename + "\": switch to stb_image backend"; + "Libpng backend is not available to read file \"" + filename + "\": switch to stb_image backend"; backend = IO_STB_IMAGE_BACKEND; #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = - "OpenCV backend is not available to read file \"" + filename + "\": switch to stb_image backend"; + "OpenCV backend is not available to read file \"" + filename + "\": switch to stb_image backend"; backend = IO_STB_IMAGE_BACKEND; #endif - } else if (backend == IO_DEFAULT_BACKEND) { + } + else if (backend == IO_DEFAULT_BACKEND) { #if defined(VISP_HAVE_PNG) backend = IO_SYSTEM_LIB_BACKEND; #elif defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) @@ -518,13 +530,16 @@ void vpImageIo::readPNG(vpImage &I, const std::string &filename, #if defined(VISP_HAVE_PNG) readPNGLibpng(I, filename); #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) readOpenCV(I, filename); #endif - } else if (backend == IO_STB_IMAGE_BACKEND) { + } + else if (backend == IO_STB_IMAGE_BACKEND) { readStb(I, filename); - } else if (backend == IO_SIMDLIB_BACKEND) { + } + else if (backend == IO_SIMDLIB_BACKEND) { readSimdlib(I, filename); } } @@ -544,16 +559,18 @@ void vpImageIo::readPNG(vpImage &I, const std::string &filename, int bac if (backend == IO_SYSTEM_LIB_BACKEND) { #if !defined(VISP_HAVE_PNG) std::string message = - "Libpng backend is not available to read file \"" + filename + "\": switch to stb_image backend"; + "Libpng backend is not available to read file \"" + filename + "\": switch to stb_image backend"; backend = IO_STB_IMAGE_BACKEND; #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = - "OpenCV backend is not available to read file \"" + filename + "\": switch to stb_image backend"; + "OpenCV backend is not available to read file \"" + filename + "\": switch to stb_image backend"; backend = IO_STB_IMAGE_BACKEND; #endif - } else if (backend == IO_DEFAULT_BACKEND) { + } + else if (backend == IO_DEFAULT_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) backend = IO_OPENCV_BACKEND; #else @@ -565,13 +582,16 @@ 
void vpImageIo::readPNG(vpImage &I, const std::string &filename, int bac #if defined(VISP_HAVE_PNG) readPNGLibpng(I, filename); #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) readOpenCV(I, filename); #endif - } else if (backend == IO_STB_IMAGE_BACKEND) { + } + else if (backend == IO_STB_IMAGE_BACKEND) { readStb(I, filename); - } else if (backend == IO_SIMDLIB_BACKEND) { + } + else if (backend == IO_SIMDLIB_BACKEND) { readSimdlib(I, filename); } } @@ -588,12 +608,13 @@ void vpImageIo::readEXR(vpImage &I, const std::string &filename, int back { if (backend == IO_SYSTEM_LIB_BACKEND || backend == IO_SIMDLIB_BACKEND || backend == IO_STB_IMAGE_BACKEND) { std::string message = - "This backend cannot read file \"" + filename + "\": switch to the default TinyEXR backend"; + "This backend cannot read file \"" + filename + "\": switch to the default TinyEXR backend"; backend = IO_DEFAULT_BACKEND; - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = - "OpenCV backend is not available to read file \"" + filename + "\": switch to the default TinyEXR backend"; + "OpenCV backend is not available to read file \"" + filename + "\": switch to the default TinyEXR backend"; backend = IO_DEFAULT_BACKEND; #endif } @@ -602,15 +623,9 @@ void vpImageIo::readEXR(vpImage &I, const std::string &filename, int back #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) readOpenCV(I, filename); #endif - } else if (backend == IO_DEFAULT_BACKEND) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) + } + else if (backend == IO_DEFAULT_BACKEND) { readEXRTiny(I, filename); -#else - (void)I; - std::string message = - "TinyEXR backend is not available to read file \"" + filename + "\": cxx standard should be greater or equal to cxx11"; - throw(vpImageException(vpImageException::ioError, message)); -#endif } } @@ -626,12 +641,13 @@ void vpImageIo::readEXR(vpImage &I, const std::string &filename, int bac { if (backend == IO_SYSTEM_LIB_BACKEND || backend == IO_SIMDLIB_BACKEND || backend == IO_STB_IMAGE_BACKEND) { std::string message = - "This backend cannot read file \"" + filename + "\": switch to the default TinyEXR backend"; + "This backend cannot read file \"" + filename + "\": switch to the default TinyEXR backend"; backend = IO_DEFAULT_BACKEND; - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = - "OpenCV backend is not available to read file \"" + filename + "\": switch to the default TinyEXR backend"; + "OpenCV backend is not available to read file \"" + filename + "\": switch to the default TinyEXR backend"; backend = IO_DEFAULT_BACKEND; #endif } @@ -640,15 +656,9 @@ void vpImageIo::readEXR(vpImage &I, const std::string &filename, int bac #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) readOpenCV(I, filename); #endif - } else if (backend == IO_DEFAULT_BACKEND) { -#if 
(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) + } + else if (backend == IO_DEFAULT_BACKEND) { readEXRTiny(I, filename); -#else - (void)I; - std::string message = - "TinyEXR backend is not available to read file \"" + filename + "\": cxx standard should be greater or equal to cxx11"; - throw(vpImageException(vpImageException::ioError, message)); -#endif } } @@ -670,12 +680,14 @@ void vpImageIo::writeJPEG(const vpImage &I, const std::string &fi std::string message = "Libjpeg backend is not available to save file \"" + filename + "\": switch to simd backend"; backend = IO_SIMDLIB_BACKEND; #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = "OpenCV backend is not available to save file \"" + filename + "\": switch to simd backend"; backend = IO_SIMDLIB_BACKEND; #endif - } else if (backend == IO_DEFAULT_BACKEND) { + } + else if (backend == IO_DEFAULT_BACKEND) { #if defined(VISP_HAVE_JPEG) backend = IO_SYSTEM_LIB_BACKEND; #elif defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) @@ -689,13 +701,16 @@ void vpImageIo::writeJPEG(const vpImage &I, const std::string &fi #if defined(VISP_HAVE_JPEG) writeJPEGLibjpeg(I, filename, quality); #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) writeOpenCV(I, filename, quality); #endif - } else if (backend == IO_SIMDLIB_BACKEND) { + } + else if (backend == IO_SIMDLIB_BACKEND) { writeJPEGSimdlib(I, filename, quality); - } else if (backend == IO_STB_IMAGE_BACKEND) { + } + else if (backend == IO_STB_IMAGE_BACKEND) { writeJPEGStb(I, filename, quality); } } @@ -718,12 +733,14 @@ void vpImageIo::writeJPEG(const vpImage &I, const std::string &filename, std::string message = "Libjpeg backend is not available to save file \"" + filename + "\": switch to simd backend"; backend = IO_SIMDLIB_BACKEND; #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = "OpenCV backend is not available to save file \"" + filename + "\": switch to simd backend"; backend = IO_SIMDLIB_BACKEND; #endif - } else if (backend == IO_DEFAULT_BACKEND) { + } + else if (backend == IO_DEFAULT_BACKEND) { #if defined(VISP_HAVE_JPEG) backend = IO_SYSTEM_LIB_BACKEND; #elif defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS) @@ -737,13 +754,16 @@ void vpImageIo::writeJPEG(const vpImage &I, const std::string &filename, #if defined(VISP_HAVE_JPEG) writeJPEGLibjpeg(I, filename, quality); #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) writeOpenCV(I, filename, quality); #endif - } else if (backend == IO_SIMDLIB_BACKEND) { + } + else if (backend == IO_SIMDLIB_BACKEND) { writeJPEGSimdlib(I, filename, quality); - } else if (backend == IO_STB_IMAGE_BACKEND) { + } + else if (backend == IO_STB_IMAGE_BACKEND) { writeJPEGStb(I, filename, quality); } } @@ -765,12 +785,14 @@ void vpImageIo::writePNG(const vpImage &I, const std::string &fil std::string message = "Libpng backend 
is not available to save file \"" + filename + "\": switch to simd backend"; backend = IO_SIMDLIB_BACKEND; #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = "OpenCV backend is not available to save file \"" + filename + "\": switch to simd backend"; backend = IO_SIMDLIB_BACKEND; #endif - } else if (backend == IO_DEFAULT_BACKEND) { + } + else if (backend == IO_DEFAULT_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) backend = IO_OPENCV_BACKEND; #else @@ -782,11 +804,14 @@ void vpImageIo::writePNG(const vpImage &I, const std::string &fil #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) writeOpenCV(I, filename, 90); #endif - } else if (backend == IO_SIMDLIB_BACKEND) { + } + else if (backend == IO_SIMDLIB_BACKEND) { writePNGSimdlib(I, filename); - } else if (backend == IO_STB_IMAGE_BACKEND) { + } + else if (backend == IO_STB_IMAGE_BACKEND) { writePNGStb(I, filename); - } else if (backend == IO_SYSTEM_LIB_BACKEND) { + } + else if (backend == IO_SYSTEM_LIB_BACKEND) { #if defined(VISP_HAVE_PNG) writePNGLibpng(I, filename); #endif @@ -810,12 +835,14 @@ void vpImageIo::writePNG(const vpImage &I, const std::string &filename, std::string message = "Libpng backend is not available to save file \"" + filename + "\": switch to simd backend"; backend = IO_SIMDLIB_BACKEND; #endif - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) std::string message = "OpenCV backend is not available to save file \"" + filename + "\": switch to simd backend"; backend = IO_SIMDLIB_BACKEND; #endif - } else if (backend == IO_DEFAULT_BACKEND) { + } + else if (backend == IO_DEFAULT_BACKEND) { #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) backend = IO_OPENCV_BACKEND; #else @@ -827,11 +854,14 @@ void vpImageIo::writePNG(const vpImage &I, const std::string &filename, #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) writeOpenCV(I, filename, 90); #endif - } else if (backend == IO_SIMDLIB_BACKEND) { + } + else if (backend == IO_SIMDLIB_BACKEND) { writePNGSimdlib(I, filename); - } else if (backend == IO_STB_IMAGE_BACKEND) { + } + else if (backend == IO_STB_IMAGE_BACKEND) { writePNGStb(I, filename); - } else if (backend == IO_SYSTEM_LIB_BACKEND) { + } + else if (backend == IO_SYSTEM_LIB_BACKEND) { #if defined(VISP_HAVE_PNG) writePNGLibpng(I, filename); #endif @@ -850,13 +880,14 @@ void vpImageIo::writeEXR(const vpImage &I, const std::string &filename, i { if (backend == IO_SYSTEM_LIB_BACKEND || backend == IO_SIMDLIB_BACKEND || backend == IO_STB_IMAGE_BACKEND) { std::string message = - "This backend cannot save file \"" + filename + "\": switch to the default TinyEXR backend"; + "This backend cannot save file \"" + filename + "\": switch to the default TinyEXR backend"; backend = IO_DEFAULT_BACKEND; - } else if (backend == IO_OPENCV_BACKEND) { 
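For illustration, a minimal usage sketch of the backend selection exercised by these vpImageIo hunks. The <visp3/io/vpImageIo.h> header path and the exact readPNG()/writePNG() overloads are assumed from the signatures visible in the hunks; the IO_* enum values are the ones named there.

#include <visp3/core/vpImage.h>
#include <visp3/io/vpImageIo.h>   // assumed header for vpImageIo

int main()
{
  vpImage<unsigned char> I(240, 320, 0);  // small test image
  // The stb_image backend needs no external third-party library.
  vpImageIo::writePNG(I, "test.png", vpImageIo::IO_STB_IMAGE_BACKEND);
  // Request the OpenCV backend explicitly; as in the fallback logic above,
  // another backend is selected when OpenCV imgcodecs is not built in.
  vpImageIo::readPNG(I, "test.png", vpImageIo::IO_OPENCV_BACKEND);
  return 0;
}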
+ } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) (void)I; std::string message = - "OpenCV backend is not available to save file \"" + filename + "\": switch to the default TinyEXR backend"; + "OpenCV backend is not available to save file \"" + filename + "\": switch to the default TinyEXR backend"; backend = IO_DEFAULT_BACKEND; #endif } @@ -865,14 +896,9 @@ void vpImageIo::writeEXR(const vpImage &I, const std::string &filename, i #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) writeOpenCV(I, filename); #endif - } else if (backend == IO_DEFAULT_BACKEND) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) + } + else if (backend == IO_DEFAULT_BACKEND) { writeEXRTiny(I, filename); -#else - std::string message = - "TinyEXR backend is not available to save file \"" + filename + "\": cxx standard should be greater or equal to cxx11"; - throw(vpImageException(vpImageException::ioError, message)); -#endif } } @@ -888,13 +914,14 @@ void vpImageIo::writeEXR(const vpImage &I, const std::string &filename, { if (backend == IO_SYSTEM_LIB_BACKEND || backend == IO_SIMDLIB_BACKEND || backend == IO_STB_IMAGE_BACKEND) { std::string message = - "This backend cannot save file \"" + filename + "\": switch to the default TinyEXR backend"; + "This backend cannot save file \"" + filename + "\": switch to the default TinyEXR backend"; backend = IO_DEFAULT_BACKEND; - } else if (backend == IO_OPENCV_BACKEND) { + } + else if (backend == IO_OPENCV_BACKEND) { #if !(defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGCODECS)) (void)I; std::string message = - "OpenCV backend is not available to save file \"" + filename + "\": switch to the default TinyEXR backend"; + "OpenCV backend is not available to save file \"" + filename + "\": switch to the default TinyEXR backend"; backend = IO_DEFAULT_BACKEND; #endif } @@ -903,14 +930,9 @@ void vpImageIo::writeEXR(const vpImage &I, const std::string &filename, #if ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_IMGCODECS)) || ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC)) writeOpenCV(I, filename); #endif - } else if (backend == IO_DEFAULT_BACKEND) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) + } + else if (backend == IO_DEFAULT_BACKEND) { writeEXRTiny(I, filename); -#else - std::string message = - "TinyEXR backend is not available to save file \"" + filename + "\": cxx standard should be greater or equal to cxx11"; - throw(vpImageException(vpImageException::ioError, message)); -#endif } } diff --git a/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h b/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h index 30651d1c23..cab6ec6248 100644 --- a/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h +++ b/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h @@ -38,7 +38,7 @@ #include -#if defined(VISP_HAVE_UR_RTDE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UR_RTDE) #include diff --git a/modules/robot/src/real-robot/universal-robots/vpRobotUniversalRobots.cpp b/modules/robot/src/real-robot/universal-robots/vpRobotUniversalRobots.cpp index 73329e7ac6..950c6e5b0c 100644 --- a/modules/robot/src/real-robot/universal-robots/vpRobotUniversalRobots.cpp +++ b/modules/robot/src/real-robot/universal-robots/vpRobotUniversalRobots.cpp @@ -35,7 +35,7 @@ #include -#if 
defined(VISP_HAVE_UR_RTDE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UR_RTDE) #include #include @@ -473,8 +473,8 @@ void vpRobotUniversalRobots::setPosition(const vpRobot::vpControlFrameType frame if (vpRobot::STATE_POSITION_CONTROL != getRobotState()) { std::cout << "Robot is not in position-based control. " - "Modification of the robot state" - << std::endl; + "Modification of the robot state" + << std::endl; setRobotState(vpRobot::STATE_POSITION_CONTROL); } @@ -482,12 +482,14 @@ void vpRobotUniversalRobots::setPosition(const vpRobot::vpControlFrameType frame double speed_factor = m_positioningVelocity / 100.; std::vector new_q = position.toStdVector(); m_rtde_control->moveJ(new_q, m_max_joint_speed * speed_factor); - } else if (frame == vpRobot::END_EFFECTOR_FRAME) { + } + else if (frame == vpRobot::END_EFFECTOR_FRAME) { double speed_factor = m_positioningVelocity / 100.; std::vector new_pose = position.toStdVector(); // Move synchronously to ensure a the blocking behaviour m_rtde_control->moveL(new_pose, m_max_linear_speed * speed_factor); - } else if (frame == vpRobot::CAMERA_FRAME) { + } + else if (frame == vpRobot::CAMERA_FRAME) { double speed_factor = m_positioningVelocity / 100.; vpTranslationVector f_t_c(position.extract(0, 3)); @@ -498,7 +500,8 @@ void vpRobotUniversalRobots::setPosition(const vpRobot::vpControlFrameType frame std::vector new_pose = fPe.toStdVector(); // Move synchronously to ensure a the blocking behaviour m_rtde_control->moveL(new_pose, m_max_linear_speed * speed_factor); - } else { + } + else { throw(vpException(vpRobotException::functionNotImplementedError, "Cannot move the robot to a cartesian position. Only joint positioning is implemented")); } @@ -563,7 +566,7 @@ void vpRobotUniversalRobots::setPosition(const vpRobot::vpControlFrameType frame int main() { -#if defined(VISP_HAVE_UR_RTDE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UR_RTDE) vpRobotUniversalRobots robot; vpColVector qd(6); @@ -632,21 +635,25 @@ void vpRobotUniversalRobots::setVelocity(const vpRobot::vpControlFrameType frame double dt = 1.0 / 1000; // 2ms double acceleration = 0.5; m_rtde_control->speedJ(vel_sat.toStdVector(), acceleration, dt); - } else if (frame == vpRobot::REFERENCE_FRAME) { + } + else if (frame == vpRobot::REFERENCE_FRAME) { double dt = 1.0 / 1000; // 2ms double acceleration = 0.25; m_rtde_control->speedL(vel_sat.toStdVector(), acceleration, dt); - } else if (frame == vpRobot::END_EFFECTOR_FRAME) { + } + else if (frame == vpRobot::END_EFFECTOR_FRAME) { double dt = 1.0 / 1000; // 2ms double acceleration = 0.25; vpVelocityTwistMatrix fVe(get_fMe(), false); m_rtde_control->speedL((fVe * vel_sat).toStdVector(), acceleration, dt); - } else if (frame == vpRobot::CAMERA_FRAME) { + } + else if (frame == vpRobot::CAMERA_FRAME) { double dt = 1.0 / 1000; // 2ms double acceleration = 0.25; vpColVector w_v_e = vpVelocityTwistMatrix(get_fMe(), false) * vpVelocityTwistMatrix(m_eMc) * vel_sat; m_rtde_control->speedL(w_v_e.toStdVector(), acceleration, dt); - } else { + } + else { throw(vpException(vpRobotException::functionNotImplementedError, "Cannot move the robot in velocity in the specified frame: not implemented")); } @@ -847,8 +854,9 @@ vpRobot::vpRobotStateType vpRobotUniversalRobots::setRobotState(vpRobot::vpRobot throw(vpException(vpException::fatalError, "Cannot stop UR robot: robot is not connected")); } m_rtde_control->speedStop(); - } else { - // std::cout << "Change the control mode from stop to position control" << 
std::endl; + } + else { + // std::cout << "Change the control mode from stop to position control" << std::endl; } break; } @@ -868,5 +876,5 @@ vpRobot::vpRobotStateType vpRobotUniversalRobots::setRobotState(vpRobot::vpRobot #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_robot.a(vpRobotUniversalRobots.cpp.o) has no // symbols -void dummy_vpRobotUniversalRobots(){}; +void dummy_vpRobotUniversalRobots() { }; #endif diff --git a/modules/robot/test/servo-afma6/testRobotAfma6Pose.cpp b/modules/robot/test/servo-afma6/testRobotAfma6Pose.cpp index dd67da4f26..5334db4234 100644 --- a/modules/robot/test/servo-afma6/testRobotAfma6Pose.cpp +++ b/modules/robot/test/servo-afma6/testRobotAfma6Pose.cpp @@ -149,7 +149,7 @@ int main() } // From now, in target[i], we have the 3D coordinates of a point in the - // object frame, and their correspondances in the camera frame. We can now + // object frame, and their correspondences in the camera frame. We can now // compute the pose cMo between the camera and the object. vpPose pose; // Add the 4 points to compute the pose diff --git a/modules/robot/test/servo-universal-robots/testUniversalRobotsCartPosition.cpp b/modules/robot/test/servo-universal-robots/testUniversalRobotsCartPosition.cpp index e97d42ecdb..032adcbcd3 100644 --- a/modules/robot/test/servo-universal-robots/testUniversalRobotsCartPosition.cpp +++ b/modules/robot/test/servo-universal-robots/testUniversalRobotsCartPosition.cpp @@ -43,7 +43,7 @@ #include -#if defined(VISP_HAVE_UR_RTDE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UR_RTDE) #include @@ -54,9 +54,10 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--ip" && i + 1 < argc) { robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << argv[0] << " [--ip " << robot_ip << "] [--help] [-h]" - << "\n"; + << "\n"; return EXIT_SUCCESS; } } @@ -67,8 +68,8 @@ int main(int argc, char **argv) std::cout << "Connected robot model: " << robot.getRobotModel() << std::endl; std::cout << "WARNING: This example will move the robot! " - << "Please make sure to have the user stop button at hand!" << std::endl - << "Press Enter to continue..." << std::endl; + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." << std::endl; std::cin.ignore(); /* @@ -106,10 +107,12 @@ int main(int argc, char **argv) // Move to cartesian position robot.setPosition(vpRobot::CAMERA_FRAME, vpPoseVector(fMc)); - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -121,12 +124,7 @@ int main(int argc, char **argv) #else int main() { -#if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." - << std::endl; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." 
<< std::endl; -#endif + << std::endl; } #endif diff --git a/modules/robot/test/servo-universal-robots/testUniversalRobotsCartVelocity.cpp b/modules/robot/test/servo-universal-robots/testUniversalRobotsCartVelocity.cpp index 7d47529d54..f1ca50e1b9 100644 --- a/modules/robot/test/servo-universal-robots/testUniversalRobotsCartVelocity.cpp +++ b/modules/robot/test/servo-universal-robots/testUniversalRobotsCartVelocity.cpp @@ -43,7 +43,7 @@ #include -#if defined(VISP_HAVE_UR_RTDE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UR_RTDE) #include @@ -54,9 +54,10 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--ip" && i + 1 < argc) { robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << argv[0] << " [--ip " << robot_ip << "] [--help] [-h]" - << "\n"; + << "\n"; return EXIT_SUCCESS; } } @@ -66,8 +67,8 @@ int main(int argc, char **argv) robot.connect(robot_ip); std::cout << "WARNING: This example will move the robot! " - << "Please make sure to have the user stop button at hand!" << std::endl - << "Press Enter to continue..." << std::endl; + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." << std::endl; std::cin.ignore(); /* @@ -120,10 +121,12 @@ int main(int argc, char **argv) std::cout << "Ask to stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -135,13 +138,8 @@ int main(int argc, char **argv) #else int main() { -#if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." - << std::endl; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." 
<< std::endl; -#endif + << std::endl; } #endif diff --git a/modules/robot/test/servo-universal-robots/testUniversalRobotsGetData.cpp b/modules/robot/test/servo-universal-robots/testUniversalRobotsGetData.cpp index deeb52ec00..b4efff7e50 100644 --- a/modules/robot/test/servo-universal-robots/testUniversalRobotsGetData.cpp +++ b/modules/robot/test/servo-universal-robots/testUniversalRobotsGetData.cpp @@ -43,7 +43,7 @@ #include -#if defined(VISP_HAVE_UR_RTDE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UR_RTDE) #include @@ -54,9 +54,10 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--ip" && i + 1 < argc) { robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << argv[0] << " [--ip " << robot_ip << "] [--help] [-h]" - << "\n"; + << "\n"; return EXIT_SUCCESS; } } @@ -74,10 +75,12 @@ int main(int argc, char **argv) std::cout << "Robot model : " << db_client->getRobotModel() << std::endl; std::cout << "PolyScope version: " << db_client->polyscopeVersion() << std::endl; robot.disconnect(); - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -147,13 +150,16 @@ int main(int argc, char **argv) return EXIT_FAILURE; } } - } else { + } + else { std::cout << "To proceed with this test you need to power on the robot" << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -177,10 +183,12 @@ int main(int argc, char **argv) std::cout << "Camera or tool frame force/torque: " << cFc.t() << std::endl; vpTime::wait(10); } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -192,12 +200,8 @@ int main(int argc, char **argv) #else int main() { -#if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." - << std::endl; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." 
<< std::endl; -#endif + << std::endl; + } #endif diff --git a/modules/robot/test/servo-universal-robots/testUniversalRobotsJointPosition.cpp b/modules/robot/test/servo-universal-robots/testUniversalRobotsJointPosition.cpp index 9b601f5cac..62512b5990 100644 --- a/modules/robot/test/servo-universal-robots/testUniversalRobotsJointPosition.cpp +++ b/modules/robot/test/servo-universal-robots/testUniversalRobotsJointPosition.cpp @@ -43,7 +43,7 @@ #include -#if defined(VISP_HAVE_UR_RTDE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UR_RTDE) #include @@ -54,9 +54,10 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--ip" && i + 1 < argc) { robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << argv[0] << " [--ip " << robot_ip << "] [--help] [-h]" - << "\n"; + << "\n"; return EXIT_SUCCESS; } } @@ -66,8 +67,8 @@ int main(int argc, char **argv) robot.connect(robot_ip); std::cout << "WARNING: This example will move the robot! " - << "Please make sure to have the user stop button at hand!" << std::endl - << "Press Enter to continue..." << std::endl; + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." << std::endl; std::cin.ignore(); // Get current position @@ -98,10 +99,12 @@ int main(int argc, char **argv) // Move back to initial position std::cout << "Move to joint position [rad]: " << q_init.t() << std::endl; robot.setPosition(vpRobot::JOINT_STATE, q_init); - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -113,12 +116,7 @@ int main(int argc, char **argv) #else int main() { -#if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." - << std::endl; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif + << std::endl; } #endif diff --git a/modules/robot/test/servo-universal-robots/testUniversalRobotsJointVelocity.cpp b/modules/robot/test/servo-universal-robots/testUniversalRobotsJointVelocity.cpp index 8a13c81419..059ecf1a1a 100644 --- a/modules/robot/test/servo-universal-robots/testUniversalRobotsJointVelocity.cpp +++ b/modules/robot/test/servo-universal-robots/testUniversalRobotsJointVelocity.cpp @@ -43,7 +43,7 @@ #include -#if defined(VISP_HAVE_UR_RTDE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UR_RTDE) #include @@ -54,9 +54,10 @@ int main(int argc, char **argv) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--ip" && i + 1 < argc) { robot_ip = std::string(argv[i + 1]); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << argv[0] << " [--ip " << robot_ip << "] [--help] [-h]" - << "\n"; + << "\n"; return EXIT_SUCCESS; } } @@ -66,8 +67,8 @@ int main(int argc, char **argv) robot.connect(robot_ip); std::cout << "WARNING: This example will move the robot! 
" - << "Please make sure to have the user stop button at hand!" << std::endl - << "Press Enter to continue..." << std::endl; + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." << std::endl; std::cin.ignore(); /* @@ -114,10 +115,12 @@ int main(int argc, char **argv) std::cout << "Ask to stop the robot " << std::endl; robot.setRobotState(vpRobot::STATE_STOP); - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "ViSP exception: " << e.what() << std::endl; return EXIT_FAILURE; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cout << "ur_rtde exception: " << e.what() << std::endl; return EXIT_FAILURE; } @@ -129,12 +132,7 @@ int main(int argc, char **argv) #else int main() { -#if !defined(VISP_HAVE_UR_RTDE) std::cout << "ViSP is not build with libur_rtde 3rd party used to control a robot from Universal Robots..." - << std::endl; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11)." << std::endl; -#endif + << std::endl; } #endif diff --git a/modules/robot/test/servo-viper/testRobotViper850Pose.cpp b/modules/robot/test/servo-viper/testRobotViper850Pose.cpp index ad6847b7f5..c53da9058c 100644 --- a/modules/robot/test/servo-viper/testRobotViper850Pose.cpp +++ b/modules/robot/test/servo-viper/testRobotViper850Pose.cpp @@ -151,7 +151,7 @@ int main() } // From now, in target[i], we have the 3D coordinates of a point in the - // object frame, and their correspondances in the camera frame. We can now + // object frame, and their correspondences in the camera frame. We can now // compute the pose cMo between the camera and the object. vpPose pose; // Add the 4 points to compute the pose diff --git a/modules/sensor/include/visp3/sensor/vpLaserScan.h b/modules/sensor/include/visp3/sensor/vpLaserScan.h index e454eda119..2c659552f2 100644 --- a/modules/sensor/include/visp3/sensor/vpLaserScan.h +++ b/modules/sensor/include/visp3/sensor/vpLaserScan.h @@ -65,13 +65,12 @@ class VISP_EXPORT vpLaserScan */ vpLaserScan() : listScanPoints(), startTimestamp(0), endTimestamp(0), measurementId(0), numSteps(0), startAngle(0), stopAngle(0), - numPoints(0) - { - } + numPoints(0) + { } /*! Copy constructor. */ vpLaserScan(const vpLaserScan &scan) : listScanPoints(scan.listScanPoints), startTimestamp(0), endTimestamp(0), measurementId(0), numSteps(0), - startAngle(0), stopAngle(0), numPoints(0) + startAngle(0), stopAngle(0), numPoints(0) { startTimestamp = scan.startTimestamp; endTimestamp = scan.endTimestamp; @@ -82,7 +81,7 @@ class VISP_EXPORT vpLaserScan numPoints = scan.numPoints; } /*! Default destructor that does nothing. */ - virtual ~vpLaserScan(){}; + virtual ~vpLaserScan() { }; /*! Add the scan point at the end of the list. */ inline void addPoint(const vpScanPoint &p) { listScanPoints.push_back(p); } /*! Drop the list of points. */ @@ -90,9 +89,7 @@ class VISP_EXPORT vpLaserScan /*! Get the list of points. */ inline std::vector getScanPoints() { return listScanPoints; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpLaserScan &operator=(const vpLaserScan &scan) = default; -#endif /*! Specifies the id of former measurements and increases with every measurement. 
*/ diff --git a/modules/sensor/include/visp3/sensor/vpOccipitalStructure.h b/modules/sensor/include/visp3/sensor/vpOccipitalStructure.h index f0d102746c..4f06577a7a 100644 --- a/modules/sensor/include/visp3/sensor/vpOccipitalStructure.h +++ b/modules/sensor/include/visp3/sensor/vpOccipitalStructure.h @@ -41,7 +41,7 @@ #include -#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) #include #include diff --git a/modules/sensor/include/visp3/sensor/vpRealSense.h b/modules/sensor/include/visp3/sensor/vpRealSense.h index 50887f98ab..d64c5ed770 100644 --- a/modules/sensor/include/visp3/sensor/vpRealSense.h +++ b/modules/sensor/include/visp3/sensor/vpRealSense.h @@ -46,7 +46,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE) #include @@ -413,7 +413,8 @@ class VISP_EXPORT vpRealSense friend VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpRealSense &rs); - struct vpRsStreamParams { + struct vpRsStreamParams + { int m_streamWidth; int m_streamHeight; rs::format m_streamFormat; @@ -421,15 +422,13 @@ class VISP_EXPORT vpRealSense vpRsStreamParams() : m_streamWidth(640), m_streamHeight(480), m_streamFormat(rs::format::rgba8), m_streamFramerate(30) - { - } + { } vpRsStreamParams(const int streamWidth, const int streamHeight, const rs::format &streamFormat, const int streamFramerate) : m_streamWidth(streamWidth), m_streamHeight(streamHeight), m_streamFormat(streamFormat), - m_streamFramerate(streamFramerate) - { - } + m_streamFramerate(streamFramerate) + { } }; void setEnableStream(const rs::stream &stream, bool status); diff --git a/modules/sensor/include/visp3/sensor/vpRealSense2.h b/modules/sensor/include/visp3/sensor/vpRealSense2.h index 3d7aa491d6..40f5c9c0f5 100644 --- a/modules/sensor/include/visp3/sensor/vpRealSense2.h +++ b/modules/sensor/include/visp3/sensor/vpRealSense2.h @@ -38,7 +38,7 @@ #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE2) #include #include diff --git a/modules/sensor/include/visp3/sensor/vpScanPoint.h b/modules/sensor/include/visp3/sensor/vpScanPoint.h index 9222320201..f2633e72ef 100644 --- a/modules/sensor/include/visp3/sensor/vpScanPoint.h +++ b/modules/sensor/include/visp3/sensor/vpScanPoint.h @@ -73,7 +73,7 @@ class /* VISP_EXPORT */ vpScanPoint // Note that here VISP_EXPORT should not { public: /*! Default constructor. */ - inline vpScanPoint() : rDist(0), hAngle(0), vAngle(0) {} + inline vpScanPoint() : rDist(0), hAngle(0), vAngle(0) { } /*! Copy constructor. */ inline vpScanPoint(const vpScanPoint &scanpoint) : rDist(0), hAngle(0), vAngle(0) { @@ -94,7 +94,7 @@ class /* VISP_EXPORT */ vpScanPoint // Note that here VISP_EXPORT should not this->vAngle = v_angle; } /*! Destructor that does nothing. */ - inline virtual ~vpScanPoint(){}; + inline virtual ~vpScanPoint() { }; /*! Set the polar point coordinates. \param r_dist : Radial distance in meter. 
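As a side note on the polar representation handled here, a minimal sketch converting a vpScanPoint to cartesian coordinates. The three-argument constructor order and the getX()/getY() accessors are assumptions mirroring the getZ() accessor kept in this header; the include path comes from the file touched by this hunk.

#include <iostream>
#include <visp3/sensor/vpScanPoint.h>

int main()
{
  // Assumed constructor order: radial distance [m], horizontal angle [rad], vertical angle [rad].
  vpScanPoint p(2.0, 0.1, 1.57);
  // getZ() returns rDist * cos(hAngle) * sin(vAngle), as defined in this header.
  std::cout << "x=" << p.getX() << " y=" << p.getY() << " z=" << p.getZ() << std::endl;
  return 0;
}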
@@ -143,9 +143,7 @@ class /* VISP_EXPORT */ vpScanPoint // Note that here VISP_EXPORT should not */ inline double getZ() const { return (rDist * cos(this->hAngle) * sin(this->vAngle)); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpScanPoint &operator=(const vpScanPoint &) = default; -#endif friend inline std::ostream &operator<<(std::ostream &s, const vpScanPoint &p); diff --git a/modules/sensor/src/framegrabber/1394/vp1394CMUGrabber.cpp b/modules/sensor/src/framegrabber/1394/vp1394CMUGrabber.cpp index 61f96720ea..f6aec9b132 100644 --- a/modules/sensor/src/framegrabber/1394/vp1394CMUGrabber.cpp +++ b/modules/sensor/src/framegrabber/1394/vp1394CMUGrabber.cpp @@ -517,7 +517,7 @@ void vp1394CMUGrabber::displayCameraModel() \param format : Camera video format. \param mode : Camera video mode. - See the following table for the correspondances between the input + See the following table for the correspondences between the input format and mode and the resulting video color coding. @@ -617,7 +617,7 @@ void vp1394CMUGrabber::setVideoMode(unsigned long format, unsigned long mode) Set camera framerate rate. This method has to be called before open(). \param fps : Value between 0 to 7 used to select a specific camera - framerate. See the following table for the correspondances between the input + framerate. See the following table for the correspondences between the input value and the framerate.
@@ -682,7 +682,7 @@ void vp1394CMUGrabber::setFramerate(unsigned long fps) Get the video framerate. \return Value between 0 to 7 corresponding to a specific camera framerate. - See the following table for the correspondances between the returned + See the following table for the correspondences between the returned value and the framerate.
diff --git a/modules/sensor/src/rgb-depth/occipital_structure/vpOccipitalStructure.cpp b/modules/sensor/src/rgb-depth/occipital_structure/vpOccipitalStructure.cpp index 340ef5bcf5..b5e8ac3fc8 100644 --- a/modules/sensor/src/rgb-depth/occipital_structure/vpOccipitalStructure.cpp +++ b/modules/sensor/src/rgb-depth/occipital_structure/vpOccipitalStructure.cpp @@ -35,7 +35,7 @@ #include -#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) #include #include #include @@ -55,7 +55,7 @@ /*! * Default constructor. */ -vpOccipitalStructure::vpOccipitalStructure() : m_invalidDepthValue(0.0f), m_maxZ(15000.0f) {} +vpOccipitalStructure::vpOccipitalStructure() : m_invalidDepthValue(0.0f), m_maxZ(15000.0f) { } /*! * Default destructor that stops the streaming. @@ -571,7 +571,7 @@ void vpOccipitalStructure::getIMUData(vpColVector *imu_vel, vpColVector *imu_acc (*imu_vel)[1] = imu_rotationRate.y; (*imu_vel)[2] = imu_rotationRate.z; imu_vel_timestamp = - m_delegate.m_gyroscopeEvent.arrivalTimestamp(); // Relative to an unspecified epoch. (see documentation). + m_delegate.m_gyroscopeEvent.arrivalTimestamp(); // Relative to an unspecified epoch. (see documentation). } if (imu_acc != NULL) { @@ -610,9 +610,9 @@ bool vpOccipitalStructure::open(const ST::CaptureSessionSettings &settings) // recieves the first frame or a timeout. std::unique_lock u(m_delegate.m_sampleLock); std::cv_status var = - m_delegate.cv_sampleLock.wait_for(u, std::chrono::seconds(20)); // Make sure a device is connected. + m_delegate.cv_sampleLock.wait_for(u, std::chrono::seconds(20)); // Make sure a device is connected. - // In case the device is not connected, open() should return false. +// In case the device is not connected, open() should return false. 
if (var == std::cv_status::timeout) { m_init = false; return m_init; @@ -926,7 +926,7 @@ vpCameraParameters vpOccipitalStructure::getCameraParameters(const vpOccipitalSt if (proj_type == vpCameraParameters::perspectiveProjWithoutDistortion) m_visible_camera_parameters = - vpCameraParameters(cam_intrinsics.fx, cam_intrinsics.fy, cam_intrinsics.cx, cam_intrinsics.cy); + vpCameraParameters(cam_intrinsics.fx, cam_intrinsics.fy, cam_intrinsics.cx, cam_intrinsics.cy); return m_visible_camera_parameters; break; @@ -934,7 +934,7 @@ vpCameraParameters vpOccipitalStructure::getCameraParameters(const vpOccipitalSt case vpOccipitalStructureStream::depth: cam_intrinsics = m_delegate.m_depthFrame.intrinsics(); m_depth_camera_parameters = - vpCameraParameters(cam_intrinsics.fx, cam_intrinsics.fy, cam_intrinsics.cx, cam_intrinsics.cy); + vpCameraParameters(cam_intrinsics.fx, cam_intrinsics.fy, cam_intrinsics.cx, cam_intrinsics.cy); return m_depth_camera_parameters; break; @@ -1135,7 +1135,8 @@ void vpOccipitalStructure::getColoredPointcloud(pcl::PointCloudpoints[(size_t)depth_pixel_index].g = (uint8_t)0; pointcloud->points[(size_t)depth_pixel_index].b = (uint8_t)0; #endif - } else { + } + else { unsigned int i_ = (unsigned int)color_pixel.x; unsigned int j_ = (unsigned int)color_pixel.y; @@ -1143,44 +1144,48 @@ void vpOccipitalStructure::getColoredPointcloud(pcl::PointCloud(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) | - static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 | - static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]) - << 16); - } else { + (static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) | + static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 | + static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]) + << 16); + } + else { rgb = - (static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) << 16 | - static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 | - static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2])); + (static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) << 16 | + static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 | + static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2])); } pointcloud->points[(size_t)(i * depth_width + j)].rgb = *reinterpret_cast(&rgb); #else if (swap_rb) { pointcloud->points[(size_t)depth_pixel_index].b = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]; pointcloud->points[(size_t)depth_pixel_index].g = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]; pointcloud->points[(size_t)depth_pixel_index].r = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]; - } else { + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]; + } + else { pointcloud->points[(size_t)depth_pixel_index].r = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]; pointcloud->points[(size_t)depth_pixel_index].g = - p_color_frame[(i_ * (unsigned int)color_width + 
j_) * nb_color_pixel + 1]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]; pointcloud->points[(size_t)depth_pixel_index].b = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]; } #endif } - } else { + } + else { #if PCL_VERSION_COMPARE(<, 1, 1, 0) uint32_t rgb = 0; if (swap_rb) { rgb = (static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]) | static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]) << 8 | static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]) << 16); - } else { + } + else { rgb = (static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]) << 16 | static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]) << 8 | static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2])); @@ -1190,18 +1195,19 @@ void vpOccipitalStructure::getColoredPointcloud(pcl::PointCloudpoints[(size_t)depth_pixel_index].b = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]; pointcloud->points[(size_t)depth_pixel_index].g = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]; pointcloud->points[(size_t)depth_pixel_index].r = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]; - } else { + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]; + } + else { pointcloud->points[(size_t)depth_pixel_index].r = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]; pointcloud->points[(size_t)depth_pixel_index].g = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]; pointcloud->points[(size_t)depth_pixel_index].b = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]; } #endif } diff --git a/modules/sensor/src/rgb-depth/realsense/vpRealSense.cpp b/modules/sensor/src/rgb-depth/realsense/vpRealSense.cpp index f3dee5d119..b1b084f5af 100644 --- a/modules/sensor/src/rgb-depth/realsense/vpRealSense.cpp +++ b/modules/sensor/src/rgb-depth/realsense/vpRealSense.cpp @@ -39,7 +39,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE) #include "vpRealSense_impl.h" @@ -48,7 +48,7 @@ */ vpRealSense::vpRealSense() : m_context(), m_device(NULL), m_num_devices(0), m_serial_no(), m_intrinsics(), m_max_Z(8), m_enableStreams(), - m_useStreamPresets(), m_streamPresets(), m_streamParams(), m_invalidDepthValue(0.0f) + m_useStreamPresets(), m_streamPresets(), m_streamParams(), m_invalidDepthValue(0.0f) { initStream(); } @@ -137,8 +137,8 @@ void vpRealSense::open() // Enable only infrared2 stream if supported m_enableStreams[rs::stream::infrared2] = m_enableStreams[rs::stream::infrared2] - ? m_device->supports(rs::capabilities::infrared2) - : m_enableStreams[rs::stream::infrared2]; + ? 
m_device->supports(rs::capabilities::infrared2) + : m_enableStreams[rs::stream::infrared2]; if (m_device->is_streaming()) { m_device->stop(); @@ -155,7 +155,8 @@ void vpRealSense::open() if (m_enableStreams[rs::stream::color]) { if (m_useStreamPresets[rs::stream::color]) { m_device->enable_stream(rs::stream::color, m_streamPresets[rs::stream::color]); - } else { + } + else { m_device->enable_stream(rs::stream::color, m_streamParams[rs::stream::color].m_streamWidth, m_streamParams[rs::stream::color].m_streamHeight, m_streamParams[rs::stream::color].m_streamFormat, @@ -166,7 +167,8 @@ void vpRealSense::open() if (m_enableStreams[rs::stream::depth]) { if (m_useStreamPresets[rs::stream::depth]) { m_device->enable_stream(rs::stream::depth, m_streamPresets[rs::stream::depth]); - } else { + } + else { m_device->enable_stream(rs::stream::depth, m_streamParams[rs::stream::depth].m_streamWidth, m_streamParams[rs::stream::depth].m_streamHeight, m_streamParams[rs::stream::depth].m_streamFormat, @@ -177,7 +179,8 @@ void vpRealSense::open() if (m_enableStreams[rs::stream::infrared]) { if (m_useStreamPresets[rs::stream::infrared]) { m_device->enable_stream(rs::stream::infrared, m_streamPresets[rs::stream::infrared]); - } else { + } + else { m_device->enable_stream(rs::stream::infrared, m_streamParams[rs::stream::infrared].m_streamWidth, m_streamParams[rs::stream::infrared].m_streamHeight, m_streamParams[rs::stream::infrared].m_streamFormat, @@ -188,7 +191,8 @@ void vpRealSense::open() if (m_enableStreams[rs::stream::infrared2]) { if (m_useStreamPresets[rs::stream::infrared2]) { m_device->enable_stream(rs::stream::infrared2, m_streamPresets[rs::stream::infrared2]); - } else { + } + else { m_device->enable_stream(rs::stream::infrared2, m_streamParams[rs::stream::infrared2].m_streamWidth, m_streamParams[rs::stream::infrared2].m_streamHeight, m_streamParams[rs::stream::infrared2].m_streamFormat, @@ -213,19 +217,19 @@ void vpRealSense::open() if (m_enableStreams[rs::stream::depth]) { m_intrinsics[rs::stream::color_aligned_to_depth] = - m_device->get_stream_intrinsics(rs::stream::color_aligned_to_depth); + m_device->get_stream_intrinsics(rs::stream::color_aligned_to_depth); m_intrinsics[rs::stream::depth_aligned_to_color] = - m_device->get_stream_intrinsics(rs::stream::depth_aligned_to_color); + m_device->get_stream_intrinsics(rs::stream::depth_aligned_to_color); m_intrinsics[rs::stream::depth_aligned_to_rectified_color] = - m_device->get_stream_intrinsics(rs::stream::depth_aligned_to_rectified_color); + m_device->get_stream_intrinsics(rs::stream::depth_aligned_to_rectified_color); } } if (m_enableStreams[rs::stream::depth] && m_enableStreams[rs::stream::infrared2]) { m_intrinsics[rs::stream::depth_aligned_to_infrared2] = - m_device->get_stream_intrinsics(rs::stream::depth_aligned_to_infrared2); + m_device->get_stream_intrinsics(rs::stream::depth_aligned_to_infrared2); m_intrinsics[rs::stream::infrared2_aligned_to_depth] = - m_device->get_stream_intrinsics(rs::stream::infrared2_aligned_to_depth); + m_device->get_stream_intrinsics(rs::stream::infrared2_aligned_to_depth); } // Start device @@ -294,7 +298,8 @@ vpCameraParameters vpRealSense::getCameraParameters(const rs::stream &stream, if (type == vpCameraParameters::perspectiveProjWithDistortion) { double kdu = intrinsics.coeffs[0]; cam.initPersProjWithDistortion(px, py, u0, v0, -kdu, kdu); - } else { + } + else { cam.initPersProjWithoutDistortion(px, py, u0, v0); } return cam; @@ -1032,7 +1037,7 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense 
&rs) auto intrin = rs.getHandler()->get_stream_intrinsics(stream); std::cout << "Capturing " << stream << " at " << intrin.width << " x " << intrin.height; std::cout << std::setprecision(1) << std::fixed << ", fov = " << intrin.hfov() << " x " << intrin.vfov() - << ", distortion = " << intrin.model() << std::endl; + << ", distortion = " << intrin.model() << std::endl; } std::cout.precision(ss); @@ -1042,5 +1047,5 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense &rs) #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_sensor.a(vpRealSense.cpp.o) has no // symbols -void dummy_vpRealSense(){}; +void dummy_vpRealSense() { }; #endif diff --git a/modules/sensor/src/rgb-depth/realsense/vpRealSense2.cpp b/modules/sensor/src/rgb-depth/realsense/vpRealSense2.cpp index 053906f150..4ed0468df4 100644 --- a/modules/sensor/src/rgb-depth/realsense/vpRealSense2.cpp +++ b/modules/sensor/src/rgb-depth/realsense/vpRealSense2.cpp @@ -35,7 +35,7 @@ #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE2) #include #include #include @@ -70,9 +70,8 @@ bool operator==(const rs2_extrinsics &lhs, const rs2_extrinsics &rhs) */ vpRealSense2::vpRealSense2() : m_depthScale(0.0f), m_invalidDepthValue(0.0f), m_max_Z(8.0f), m_pipe(), m_pipelineProfile(), m_pointcloud(), - m_points(), m_pos(), m_quat(), m_rot(), m_product_line(), m_init(false) -{ -} + m_points(), m_pos(), m_quat(), m_rot(), m_product_line(), m_init(false) +{ } /*! * Default destructor that stops the streaming. @@ -734,14 +733,17 @@ void vpRealSense2::getColorFrame(const rs2::frame &frame, vpImage &color if (frame.get_profile().format() == RS2_FORMAT_RGB8) { vpImageConvert::RGBToRGBa(const_cast(static_cast(frame.get_data())), reinterpret_cast(color.bitmap), width, height); - } else if (frame.get_profile().format() == RS2_FORMAT_RGBA8) { + } + else if (frame.get_profile().format() == RS2_FORMAT_RGBA8) { memcpy(reinterpret_cast(color.bitmap), const_cast(static_cast(frame.get_data())), width * height * sizeof(vpRGBa)); - } else if (frame.get_profile().format() == RS2_FORMAT_BGR8) { + } + else if (frame.get_profile().format() == RS2_FORMAT_BGR8) { vpImageConvert::BGRToRGBa(const_cast(static_cast(frame.get_data())), reinterpret_cast(color.bitmap), width, height); - } else { + } + else { throw vpException(vpException::fatalError, "RealSense Camera - color stream not supported!"); } } @@ -785,13 +787,16 @@ void vpRealSense2::getGreyFrame(const rs2::frame &frame, vpImage if (frame.get_profile().format() == RS2_FORMAT_RGB8) { vpImageConvert::RGBToGrey(const_cast(static_cast(frame.get_data())), grey.bitmap, width, height); - } else if (frame.get_profile().format() == RS2_FORMAT_RGBA8) { + } + else if (frame.get_profile().format() == RS2_FORMAT_RGBA8) { vpImageConvert::RGBaToGrey(const_cast(static_cast(frame.get_data())), grey.bitmap, width * height); - } else if (frame.get_profile().format() == RS2_FORMAT_BGR8) { + } + else if (frame.get_profile().format() == RS2_FORMAT_BGR8) { vpImageConvert::BGRToGrey(const_cast(static_cast(frame.get_data())), grey.bitmap, width, height); - } else { + } + else { throw vpException(vpException::fatalError, "RealSense Camera - color stream not supported!"); } } @@ -862,7 +867,7 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, std::vecto auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index]; float points[3]; - const float pixel[] = {(float)j, (float)i}; + const float pixel[] = { 
(float)j, (float)i }; rs2_deproject_pixel_to_point(points, &depth_intrinsics, pixel, pixels_distance); if (pixels_distance > m_max_Z) @@ -915,7 +920,7 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, pcl::Point auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index]; float points[3]; - const float pixel[] = {(float)j, (float)i}; + const float pixel[] = { (float)j, (float)i }; rs2_deproject_pixel_to_point(points, &depth_intrinsics, pixel, pixels_distance); if (pixels_distance > m_max_Z) @@ -935,7 +940,8 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, pcl::Point pointcloud->points[i].x = m_invalidDepthValue; pointcloud->points[i].y = m_invalidDepthValue; pointcloud->points[i].z = m_invalidDepthValue; - } else { + } + else { pointcloud->points[i].x = vertices[i].x; pointcloud->points[i].y = vertices[i].y; pointcloud->points[i].z = vertices[i].z; @@ -964,8 +970,8 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, const rs2: const uint16_t *p_depth_frame = reinterpret_cast(depth_frame.get_data()); const rs2_extrinsics depth2ColorExtrinsics = - depth_frame.get_profile().as().get_extrinsics_to( - color_frame.get_profile().as()); + depth_frame.get_profile().as().get_extrinsics_to( + color_frame.get_profile().as()); const rs2_intrinsics depth_intrinsics = depth_frame.get_profile().as().get_intrinsics(); const rs2_intrinsics color_intrinsics = color_frame.get_profile().as().get_intrinsics(); @@ -980,10 +986,10 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, const rs2: identity.rotation[i * 3 + i] = 1; } const bool registered_streams = - (depth2ColorExtrinsics == identity) && (color_width == depth_width) && (color_height == depth_height); + (depth2ColorExtrinsics == identity) && (color_width == depth_width) && (color_height == depth_height); - // Multi-threading if OpenMP - // Concurrent writes at different locations are safe +// Multi-threading if OpenMP +// Concurrent writes at different locations are safe #pragma omp parallel for schedule(dynamic) for (int i = 0; i < depth_height; i++) { auto depth_pixel_index = i * depth_width; @@ -1014,7 +1020,7 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, const rs2: auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index]; float depth_point[3]; - const float pixel[] = {(float)j, (float)i}; + const float pixel[] = { (float)j, (float)i }; rs2_deproject_pixel_to_point(depth_point, &depth_intrinsics, pixel, pixels_distance); if (pixels_distance > m_max_Z) { @@ -1046,7 +1052,8 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, const rs2: pointcloud->points[(size_t)depth_pixel_index].g = (uint8_t)157; pointcloud->points[(size_t)depth_pixel_index].b = (uint8_t)198; #endif - } else { + } + else { unsigned int i_ = (unsigned int)color_pixel[1]; unsigned int j_ = (unsigned int)color_pixel[0]; @@ -1054,44 +1061,48 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, const rs2: uint32_t rgb = 0; if (swap_rb) { rgb = - (static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) | - static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 | - static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]) - << 16); - } else { + (static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) | + static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 
8 | + static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]) + << 16); + } + else { rgb = - (static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) << 16 | - static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 | - static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2])); + (static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) << 16 | + static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 | + static_cast(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2])); } pointcloud->points[(size_t)(i * depth_width + j)].rgb = *reinterpret_cast(&rgb); #else if (swap_rb) { pointcloud->points[(size_t)depth_pixel_index].b = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]; pointcloud->points[(size_t)depth_pixel_index].g = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]; pointcloud->points[(size_t)depth_pixel_index].r = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]; - } else { + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]; + } + else { pointcloud->points[(size_t)depth_pixel_index].r = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]; pointcloud->points[(size_t)depth_pixel_index].g = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]; pointcloud->points[(size_t)depth_pixel_index].b = - p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]; + p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]; } #endif } - } else { + } + else { #if PCL_VERSION_COMPARE(<, 1, 1, 0) uint32_t rgb = 0; if (swap_rb) { rgb = (static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]) | static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]) << 8 | static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]) << 16); - } else { + } + else { rgb = (static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]) << 16 | static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]) << 8 | static_cast(p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2])); @@ -1101,18 +1112,19 @@ void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, const rs2: #else if (swap_rb) { pointcloud->points[(size_t)depth_pixel_index].b = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]; pointcloud->points[(size_t)depth_pixel_index].g = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]; pointcloud->points[(size_t)depth_pixel_index].r = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]; - } else { + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]; + } + else { pointcloud->points[(size_t)depth_pixel_index].r = - p_color_frame[(i * (unsigned int)color_width + j) * 
nb_color_pixel]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel]; pointcloud->points[(size_t)depth_pixel_index].g = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 1]; pointcloud->points[(size_t)depth_pixel_index].b = - p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]; + p_color_frame[(i * (unsigned int)color_width + j) * nb_color_pixel + 2]; } #endif } @@ -1456,20 +1468,20 @@ void print(const rs2_intrinsics &intrinsics, std::ostream &os) { std::stringstream ss; ss << std::left << std::setw(14) << "Width: " - << "\t" << intrinsics.width << "\n" - << std::left << std::setw(14) << "Height: " - << "\t" << intrinsics.height << "\n" - << std::left << std::setw(14) << "PPX: " - << "\t" << std::setprecision(15) << intrinsics.ppx << "\n" - << std::left << std::setw(14) << "PPY: " - << "\t" << std::setprecision(15) << intrinsics.ppy << "\n" - << std::left << std::setw(14) << "Fx: " - << "\t" << std::setprecision(15) << intrinsics.fx << "\n" - << std::left << std::setw(14) << "Fy: " - << "\t" << std::setprecision(15) << intrinsics.fy << "\n" - << std::left << std::setw(14) << "Distortion: " - << "\t" << rs2_distortion_to_string(intrinsics.model) << "\n" - << std::left << std::setw(14) << "Coeffs: "; + << "\t" << intrinsics.width << "\n" + << std::left << std::setw(14) << "Height: " + << "\t" << intrinsics.height << "\n" + << std::left << std::setw(14) << "PPX: " + << "\t" << std::setprecision(15) << intrinsics.ppx << "\n" + << std::left << std::setw(14) << "PPY: " + << "\t" << std::setprecision(15) << intrinsics.ppy << "\n" + << std::left << std::setw(14) << "Fx: " + << "\t" << std::setprecision(15) << intrinsics.fx << "\n" + << std::left << std::setw(14) << "Fy: " + << "\t" << std::setprecision(15) << intrinsics.fy << "\n" + << std::left << std::setw(14) << "Distortion: " + << "\t" << rs2_distortion_to_string(intrinsics.model) << "\n" + << std::left << std::setw(14) << "Coeffs: "; for (size_t i = 0; i < sizeof(intrinsics.coeffs) / sizeof(intrinsics.coeffs[0]); ++i) ss << "\t" << std::setprecision(15) << intrinsics.coeffs[i] << " "; @@ -1481,15 +1493,16 @@ void safe_get_intrinsics(const rs2::video_stream_profile &profile, rs2_intrinsic { try { intrinsics = profile.get_intrinsics(); - } catch (...) { + } + catch (...) 
{ } } bool operator==(const rs2_intrinsics &lhs, const rs2_intrinsics &rhs) { return lhs.width == rhs.width && lhs.height == rhs.height && lhs.ppx == rhs.ppx && lhs.ppy == rhs.ppy && - lhs.fx == rhs.fx && lhs.fy == rhs.fy && lhs.model == rhs.model && - !std::memcmp(lhs.coeffs, rhs.coeffs, sizeof(rhs.coeffs)); + lhs.fx == rhs.fx && lhs.fy == rhs.fy && lhs.model == rhs.model && + !std::memcmp(lhs.coeffs, rhs.coeffs, sizeof(rhs.coeffs)); } std::string get_str_formats(const std::set &formats) @@ -1501,7 +1514,8 @@ std::string get_str_formats(const std::set &formats) return ss.str(); } -struct stream_and_resolution { +struct stream_and_resolution +{ rs2_stream stream; int stream_index; int width; @@ -1515,7 +1529,8 @@ struct stream_and_resolution { } }; -struct stream_and_index { +struct stream_and_index +{ rs2_stream stream; int stream_index; @@ -1550,16 +1565,16 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs) { rs2::device dev = rs.m_pipelineProfile.get_device(); os << std::left << std::setw(30) << dev.get_info(RS2_CAMERA_INFO_NAME) << std::setw(20) - << dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER) << std::setw(20) << dev.get_info(RS2_CAMERA_INFO_FIRMWARE_VERSION) - << std::endl; + << dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER) << std::setw(20) << dev.get_info(RS2_CAMERA_INFO_FIRMWARE_VERSION) + << std::endl; - // Show which options are supported by this device + // Show which options are supported by this device os << " Device info: \n"; for (auto j = 0; j < RS2_CAMERA_INFO_COUNT; ++j) { auto param = static_cast(j); if (dev.supports(param)) os << " " << std::left << std::setw(30) << rs2_camera_info_to_string(rs2_camera_info(param)) << ": \t" - << dev.get_info(param) << "\n"; + << dev.get_info(param) << "\n"; } os << "\n"; @@ -1568,13 +1583,13 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs) os << "Options for " << sensor.get_info(RS2_CAMERA_INFO_NAME) << std::endl; os << std::setw(55) << " Supported options:" << std::setw(10) << "min" << std::setw(10) << " max" << std::setw(6) - << " step" << std::setw(10) << " default" << std::endl; + << " step" << std::setw(10) << " default" << std::endl; for (auto j = 0; j < RS2_OPTION_COUNT; ++j) { auto opt = static_cast(j); if (sensor.supports(opt)) { auto range = sensor.get_option_range(opt); os << " " << std::left << std::setw(50) << opt << " : " << std::setw(5) << range.min << "... 
" - << std::setw(12) << range.max << std::setw(6) << range.step << std::setw(10) << range.def << "\n"; + << std::setw(12) << range.max << std::setw(6) << range.step << std::setw(10) << range.def << "\n"; } } @@ -1585,14 +1600,15 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs) os << "Stream Profiles supported by " << sensor.get_info(RS2_CAMERA_INFO_NAME) << "\n"; os << std::setw(55) << " Supported modes:" << std::setw(10) << "stream" << std::setw(10) << " resolution" - << std::setw(6) << " fps" << std::setw(10) << " format" - << "\n"; - // Show which streams are supported by this device + << std::setw(6) << " fps" << std::setw(10) << " format" + << "\n"; + // Show which streams are supported by this device for (auto &&profile : sensor.get_stream_profiles()) { if (auto video = profile.as()) { os << " " << profile.stream_name() << "\t " << video.width() << "x" << video.height() << "\t@ " - << profile.fps() << "Hz\t" << profile.format() << "\n"; - } else { + << profile.fps() << "Hz\t" << profile.format() << "\n"; + } + else { os << " " << profile.stream_name() << "\t@ " << profile.fps() << "Hz\t" << profile.format() << "\n"; } } @@ -1606,20 +1622,21 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs) // Intrinsics for (auto &&profile : sensor.get_stream_profiles()) { if (auto video = profile.as()) { - if (streams.find(stream_and_index{profile.stream_type(), profile.stream_index()}) == streams.end()) { - streams[stream_and_index{profile.stream_type(), profile.stream_index()}] = profile; + if (streams.find(stream_and_index { profile.stream_type(), profile.stream_index() }) == streams.end()) { + streams[stream_and_index { profile.stream_type(), profile.stream_index() }] = profile; } - rs2_intrinsics intrinsics{}; - stream_and_resolution stream_res{profile.stream_type(), profile.stream_index(), video.width(), video.height(), - profile.stream_name()}; + rs2_intrinsics intrinsics {}; + stream_and_resolution stream_res { profile.stream_type(), profile.stream_index(), video.width(), video.height(), + profile.stream_name() }; safe_get_intrinsics(video, intrinsics); auto it = std::find_if( (intrinsics_map[stream_res]).begin(), (intrinsics_map[stream_res]).end(), [&](const std::pair, rs2_intrinsics> &kvp) { return intrinsics == kvp.second; }); if (it == (intrinsics_map[stream_res]).end()) { - (intrinsics_map[stream_res]).push_back({{profile.format()}, intrinsics}); - } else { + (intrinsics_map[stream_res]).push_back({ {profile.format()}, intrinsics }); + } + else { it->first.insert(profile.format()); // If the intrinsics are equals, // add the profile format to // format set @@ -1634,10 +1651,11 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs) for (auto &intrinsics : kvp.second) { auto formats = get_str_formats(intrinsics.first); os << "Intrinsic of \"" << stream_res.stream_name << "\"\t " << stream_res.width << "x" << stream_res.height - << "\t " << formats << "\n"; - if (intrinsics.second == rs2_intrinsics{}) { + << "\t " << formats << "\n"; + if (intrinsics.second == rs2_intrinsics {}) { os << "Intrinsic NOT available!\n\n"; - } else { + } + else { print(intrinsics.second, os); } } @@ -1648,8 +1666,8 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs) for (auto kvp1 = streams.begin(); kvp1 != streams.end(); ++kvp1) { for (auto kvp2 = streams.begin(); kvp2 != streams.end(); ++kvp2) { os << "Extrinsic from \"" << kvp1->second.stream_name() << "\"\t " - << "To" - << "\t \"" << kvp2->second.stream_name() << "\"\n"; + << "To" + 
<< "\t \"" << kvp2->second.stream_name() << "\"\n"; auto extrinsics = kvp1->second.get_extrinsics_to(kvp2->second); print(extrinsics, os); } @@ -1661,5 +1679,5 @@ std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs) #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_sensor.a(vpRealSense2.cpp.o) has no // symbols -void dummy_vpRealSense2(){}; +void dummy_vpRealSense2() { }; #endif diff --git a/modules/sensor/test/mocap/testMocapQualisys.cpp b/modules/sensor/test/mocap/testMocapQualisys.cpp index a674985472..ec1a79255b 100644 --- a/modules/sensor/test/mocap/testMocapQualisys.cpp +++ b/modules/sensor/test/mocap/testMocapQualisys.cpp @@ -41,7 +41,7 @@ #include -#if defined(VISP_HAVE_QUALISYS) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_QUALISYS) #include #include @@ -66,39 +66,39 @@ void quitHandler(int sig) void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--server-address
] [-sa]" - << " [--only-body] [-ob]" - << " [--all-bodies]" - << " [--verbose] [-v]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--server-address
] [-sa]" + << " [--only-body] [-ob]" + << " [--all-bodies]" + << " [--verbose] [-v]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --server-address
" << std::endl - << " Server address." << std::endl - << " Default: 192.168.30.42." << std::endl - << std::endl - << " --only-body " << std::endl - << " Name of the specific body you want to be displayed." << std::endl - << " Default: ''" << std::endl - << std::endl - << " --all-bodies" << std::endl - << " When used, get all bodies pose including non visible bodies." << std::endl - << std::endl - << " --verbose, -v" << std::endl - << " Enable verbose mode." << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --server-address
" << std::endl + << " Server address." << std::endl + << " Default: 192.168.30.42." << std::endl + << std::endl + << " --only-body " << std::endl + << " Name of the specific body you want to be displayed." << std::endl + << " Default: ''" << std::endl + << std::endl + << " --all-bodies" << std::endl + << " When used, get all bodies pose including non visible bodies." << std::endl + << std::endl + << " --verbose, -v" << std::endl + << " Enable verbose mode." << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to test Qualisys connection:" << std::endl - << " " << argv[0] << " --server-address 127.0.0.1 --verbose" << std::endl - << std::endl; + << " Example to test Qualisys connection:" << std::endl + << " " << argv[0] << " --server-address 127.0.0.1 --verbose" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } @@ -119,7 +119,8 @@ void mocap_loop(std::mutex &lock, bool opt_verbose, bool opt_all_bodies, std::st if (!qualisys.getBodiesPose(bodies_pose, opt_all_bodies)) { std::cout << "Qualisys error. Check the Qualisys Task Manager" << std::endl; } - } else { + } + else { vpHomogeneousMatrix pose; if (!qualisys.getSpecificBodyPose(opt_onlyBody, pose)) { std::cout << "Qualisys error. Check the Qualisys Task Manager" << std::endl; @@ -149,7 +150,7 @@ void display_loop(std::mutex &lock, const std::mapfirst << std::endl; if (verbose) { std::cout << " Translation [m]: " << it->second.getTranslationVector().t() << std::endl - << " Quaternion: " << vpQuaternionVector(it->second.getRotationMatrix()).t() << std::endl; + << " Quaternion: " << vpQuaternionVector(it->second.getRotationMatrix()).t() << std::endl; std::cout << " Roll/pitch/yaw [deg]: "; for (unsigned int i = 0; i < 3; i++) { std::cout << vpMath::deg(rxyz[i]) << " "; @@ -178,18 +179,23 @@ int main(int argc, const char *argv[]) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--verbose" || std::string(argv[i]) == "-v") { opt_verbose = true; - } else if (std::string(argv[i]) == "--server-address" || std::string(argv[i]) == "-sa") { + } + else if (std::string(argv[i]) == "--server-address" || std::string(argv[i]) == "-sa") { opt_serverAddress = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--only-body" || std::string(argv[i]) == "-ob") { + } + else if (std::string(argv[i]) == "--only-body" || std::string(argv[i]) == "-ob") { opt_onlyBody = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--all-bodies") { + } + else if (std::string(argv[i]) == "--all-bodies") { opt_all_bodies = true; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -211,12 +217,8 @@ int main(int argc, const char *argv[]) #else int main() { -#ifndef VISP_HAVE_QUALISYS std::cout << "Install qualisys_cpp_sdk to be able to test Qualisys Mocap System using ViSP" << std::endl; -#endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "This test required c++11 or more recent c++ standard." 
<< std::endl; -#endif + return EXIT_SUCCESS; } #endif diff --git a/modules/sensor/test/mocap/testMocapVicon.cpp b/modules/sensor/test/mocap/testMocapVicon.cpp index 23fd27844a..10c1f4c274 100644 --- a/modules/sensor/test/mocap/testMocapVicon.cpp +++ b/modules/sensor/test/mocap/testMocapVicon.cpp @@ -41,7 +41,7 @@ #include -#if defined(VISP_HAVE_VICON) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_VICON) #include #include @@ -68,39 +68,39 @@ void quitHandler(int sig) void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--server-address
]" - << " [--only-body]" - << " [--all-bodies]" - << " [--verbose] [-v]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--server-address
]" + << " [--only-body]" + << " [--all-bodies]" + << " [--verbose] [-v]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --server-address
" << std::endl - << " Server address." << std::endl - << " Default: 192.168.30.1." << std::endl - << std::endl - << " --only-body " << std::endl - << " Name of the specific body you want to be displayed." << std::endl - << " Default: ''" << std::endl - << std::endl - << " --all-bodies" << std::endl - << " When used, get all bodies pose including non visible bodies." << std::endl - << std::endl - << " --verbose, -v" << std::endl - << " Enable verbose mode." << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --server-address
" << std::endl + << " Server address." << std::endl + << " Default: 192.168.30.1." << std::endl + << std::endl + << " --only-body " << std::endl + << " Name of the specific body you want to be displayed." << std::endl + << " Default: ''" << std::endl + << std::endl + << " --all-bodies" << std::endl + << " When used, get all bodies pose including non visible bodies." << std::endl + << std::endl + << " --verbose, -v" << std::endl + << " Enable verbose mode." << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to test Vicon connection:" << std::endl - << " " << argv[0] << " --server-address 127.0.0.1 --verbose" << std::endl - << std::endl; + << " Example to test Vicon connection:" << std::endl + << " " << argv[0] << " --server-address 127.0.0.1 --verbose" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } @@ -116,7 +116,8 @@ void mocap_loop(std::mutex &lock, bool opt_verbose, bool opt_all_bodies, std::st if (opt_onlyBody == "") { vicon.getBodiesPose(bodies_pose, opt_all_bodies); - } else { + } + else { vpHomogeneousMatrix pose; vicon.getSpecificBodyPose(opt_onlyBody, pose); bodies_pose[opt_onlyBody] = pose; @@ -144,7 +145,7 @@ void display_loop(std::mutex &lock, const std::mapfirst << std::endl; if (verbose) { std::cout << " Translation [m]: " << it->second.getTranslationVector().t() << std::endl - << " Quaternion: " << vpQuaternionVector(it->second.getRotationMatrix()).t() << std::endl; + << " Quaternion: " << vpQuaternionVector(it->second.getRotationMatrix()).t() << std::endl; std::cout << " Roll/pitch/yaw [deg]: "; for (unsigned int i = 0; i < 3; i++) { std::cout << vpMath::deg(rxyz[i]) << " "; @@ -172,18 +173,23 @@ int main(int argc, const char *argv[]) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--verbose" || std::string(argv[i]) == "-v") { opt_verbose = true; - } else if (std::string(argv[i]) == "--server-address") { + } + else if (std::string(argv[i]) == "--server-address") { opt_serverAddress = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--only-body") { + } + else if (std::string(argv[i]) == "--only-body") { opt_onlyBody = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--all-bodies") { + } + else if (std::string(argv[i]) == "--all-bodies") { opt_all_bodies = true; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -205,12 +211,8 @@ int main(int argc, const char *argv[]) #else int main() { -#ifndef VISP_HAVE_VICON std::cout << "Install Vicon Datastream SDK to be able to test Vicon Mocap System using ViSP" << std::endl; -#endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "This test required c++11 or more recent c++ standard." 
<< std::endl; -#endif + return EXIT_SUCCESS; } #endif diff --git a/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_images.cpp b/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_images.cpp index e4e8445646..3fbbaee648 100644 --- a/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_images.cpp +++ b/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_images.cpp @@ -43,8 +43,7 @@ #include -#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) +#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #include #include @@ -79,12 +78,12 @@ int main() #if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) I_visible = - vpImage(sc.getHeight(vpOccipitalStructure::visible), sc.getWidth(vpOccipitalStructure::visible), 0); + vpImage(sc.getHeight(vpOccipitalStructure::visible), sc.getWidth(vpOccipitalStructure::visible), 0); display_visible.setDownScalingFactor(display_scale); display_visible.init(I_visible, 10, 10, "Visible image"); I_depth_raw = - vpImage(sc.getHeight(vpOccipitalStructure::depth), sc.getWidth(vpOccipitalStructure::depth), 0); + vpImage(sc.getHeight(vpOccipitalStructure::depth), sc.getWidth(vpOccipitalStructure::depth), 0); I_depth = vpImage(sc.getHeight(vpOccipitalStructure::depth), sc.getWidth(vpOccipitalStructure::depth)); display_depth.setDownScalingFactor(display_scale); display_depth.init(I_depth, static_cast(I_visible.getWidth() / display_scale) + 20, 10, "Depth image"); @@ -109,9 +108,11 @@ int main() std::cout << "Loop time: " << vpTime::measureTimeMs() - t << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "Structure SDK error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -125,10 +126,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install libStructure, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) std::cout << "You don't have X11 or GDI display capabilities" << std::endl; #endif diff --git a/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_imu.cpp b/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_imu.cpp index 12d5fb39d0..05be0da7a8 100644 --- a/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_imu.cpp +++ b/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_imu.cpp @@ -43,7 +43,7 @@ #include -#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) #include #include @@ -104,9 +104,11 @@ int main() quit = true; } } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "Structure SDK error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -115,16 +117,9 @@ int main() #else int main() { -#if !defined(VISP_HAVE_OCCIPITAL_STRUCTURE) std::cout << "You do not have Occipital Structure SDK functionality enabled..." 
<< std::endl; std::cout << "Tip:" << std::endl; std::cout << "- Install libStructure, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; -#endif - return EXIT_SUCCESS; } #endif diff --git a/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_pcl.cpp b/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_pcl.cpp index 428c72375a..d585e7d3bc 100644 --- a/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_pcl.cpp +++ b/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_pcl.cpp @@ -43,7 +43,7 @@ #include -#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && (defined(VISP_HAVE_PCL)) +#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && (defined(VISP_HAVE_PCL)) #ifdef VISP_HAVE_PCL #include @@ -111,7 +111,8 @@ int main() viewer->addPointCloud(pointcloud, rgb, "sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud"); update = true; - } else { + } + else { viewer->updatePointCloud(pointcloud, rgb, "sample cloud"); } @@ -119,9 +120,11 @@ int main() std::cout << "Loop time: " << vpTime::measureTimeMs() - t << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "Structure SDK error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -135,10 +138,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install libStructure, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !defined(VISP_HAVE_PCL) std::cout << "You do not have PCL 3rd party installed." 
<< std::endl; #endif diff --git a/modules/sensor/test/rgb-depth/testRealSense2_D435.cpp b/modules/sensor/test/rgb-depth/testRealSense2_D435.cpp index 69fbbf0271..85b2c97c06 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_D435.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_D435.cpp @@ -41,8 +41,7 @@ #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #include #include @@ -135,7 +134,7 @@ int main(int argc, char *argv[]) d4.close(infrared2); std::cout << "Acquisition1 - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; width = 640; height = 480; @@ -185,7 +184,7 @@ int main(int argc, char *argv[]) } std::cout << "Acquisition2 - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; return EXIT_SUCCESS; } @@ -196,11 +195,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense2 to make this test work." << std::endl; #endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11) " - "to make this test work" - << std::endl; -#endif #if !defined(VISP_HAVE_X11) && !defined(VISP_HAVE_GDI) std::cout << "X11 or GDI are needed." << std::endl; #endif diff --git a/modules/sensor/test/rgb-depth/testRealSense2_D435_align.cpp b/modules/sensor/test/rgb-depth/testRealSense2_D435_align.cpp index 6191d5967b..be5ea0b7ab 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_D435_align.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_D435_align.cpp @@ -40,8 +40,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_PCL) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #include #include @@ -99,11 +98,14 @@ int main(int argc, char *argv[]) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--align_to_depth") { align_to_depth = true; - } else if (std::string(argv[i]) == "--color") { + } + else if (std::string(argv[i]) == "--color") { color_pointcloud = true; - } else if (std::string(argv[i]) == "--col_vector") { + } + else if (std::string(argv[i]) == "--col_vector") { col_vector = true; - } else if (std::string(argv[i]) == "--no_align") { + } + else if (std::string(argv[i]) == "--no_align") { no_align = true; } } @@ -142,14 +144,15 @@ int main(int argc, char *argv[]) rs2::align align_to(align_to_depth ? RS2_STREAM_DEPTH : RS2_STREAM_COLOR); vpCameraParameters cam_projection = - align_to_depth ? rs.getCameraParameters(RS2_STREAM_DEPTH) : rs.getCameraParameters(RS2_STREAM_COLOR); + align_to_depth ? rs.getCameraParameters(RS2_STREAM_DEPTH) : rs.getCameraParameters(RS2_STREAM_COLOR); while (true) { if (color_pointcloud) { rs.acquire(reinterpret_cast(I_color.bitmap), reinterpret_cast(I_depth_raw.bitmap), &vp_pointcloud, pointcloud_color, NULL, no_align ? 
NULL : &align_to); - } else { + } + else { rs.acquire(reinterpret_cast(I_color.bitmap), reinterpret_cast(I_depth_raw.bitmap), &vp_pointcloud, pointcloud, NULL, no_align ? NULL : &align_to); @@ -170,14 +173,15 @@ int main(int argc, char *argv[]) vpImagePoint imPt; vpMeterPixelConversion::convertPoint(cam_projection, x, y, imPt); unsigned int u = - std::min(static_cast(width - 1), static_cast(std::max(0.0, imPt.get_u()))); + std::min(static_cast(width - 1), static_cast(std::max(0.0, imPt.get_u()))); unsigned int v = - std::min(static_cast(height - 1), static_cast(std::max(0.0, imPt.get_v()))); + std::min(static_cast(height - 1), static_cast(std::max(0.0, imPt.get_v()))); I_pcl[v][u] = vpRGBa(pcl_pt.r, pcl_pt.g, pcl_pt.b); } } } - } else { + } + else { createDepthHist(histogram, pointcloud, depth_scale); for (uint32_t i = 0; i < pointcloud->height; i++) { @@ -191,9 +195,9 @@ int main(int argc, char *argv[]) vpImagePoint imPt; vpMeterPixelConversion::convertPoint(cam_projection, x, y, imPt); unsigned int u = - std::min(static_cast(width - 1), static_cast(std::max(0.0, imPt.get_u()))); + std::min(static_cast(width - 1), static_cast(std::max(0.0, imPt.get_u()))); unsigned int v = - std::min(static_cast(height - 1), static_cast(std::max(0.0, imPt.get_v()))); + std::min(static_cast(height - 1), static_cast(std::max(0.0, imPt.get_v()))); unsigned char depth_viz = getDepthColor(histogram, pcl_pt.z, depth_scale); I_pcl[v][u] = vpRGBa(depth_viz, depth_viz, depth_viz); } @@ -213,9 +217,9 @@ int main(int argc, char *argv[]) vpImagePoint imPt; vpMeterPixelConversion::convertPoint(cam_projection, x, y, imPt); unsigned int u = - std::min(static_cast(width - 1), static_cast(std::max(0.0, imPt.get_u()))); + std::min(static_cast(width - 1), static_cast(std::max(0.0, imPt.get_u()))); unsigned int v = - std::min(static_cast(height - 1), static_cast(std::max(0.0, imPt.get_v()))); + std::min(static_cast(height - 1), static_cast(std::max(0.0, imPt.get_v()))); unsigned char depth_viz = getDepthColor(histogram2, Z, depth_scale); I_pcl2[v][u] = vpRGBa(depth_viz, depth_viz, depth_viz); } diff --git a/modules/sensor/test/rgb-depth/testRealSense2_D435_opencv.cpp b/modules/sensor/test/rgb-depth/testRealSense2_D435_opencv.cpp index b01c7bd1e1..f2cc0fe227 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_D435_opencv.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_D435_opencv.cpp @@ -41,7 +41,7 @@ #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (VISP_HAVE_OPENCV_VERSION >= 0x030000) #include @@ -54,10 +54,11 @@ namespace { -struct float3 { +struct float3 +{ float x, y, z; - float3() : x(0), y(0), z(0) {} - float3(float x_, float y_, float z_) : x(x_), y(y_), z(z_) {} + float3() : x(0), y(0), z(0) { } + float3(float x_, float y_, float z_) : x(x_), y(y_), z(z_) { } }; void getPointcloud(const rs2::depth_frame &depth_frame, std::vector &pointcloud) @@ -141,10 +142,12 @@ void frame_to_mat(const rs2::frame &f, cv::Mat &img) if (f.get_profile().format() == RS2_FORMAT_BGR8) { memcpy(static_cast(img.ptr()), f.get_data(), size * 3); - } else if (f.get_profile().format() == RS2_FORMAT_RGB8) { + } + else if (f.get_profile().format() == RS2_FORMAT_RGB8) { cv::Mat tmp(h, w, CV_8UC3, const_cast(f.get_data()), cv::Mat::AUTO_STEP); cv::cvtColor(tmp, img, cv::COLOR_RGB2BGR); - } else if (f.get_profile().format() == RS2_FORMAT_Y8) { + } + else if (f.get_profile().format() == 
RS2_FORMAT_Y8) { memcpy(img.ptr(), f.get_data(), size); } } @@ -238,7 +241,7 @@ int main() } std::cout << "Acquisition - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; return EXIT_SUCCESS; } @@ -248,11 +251,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense2 to make this test work." << std::endl; #endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11) " - "to make this test work" - << std::endl; -#endif #if !(VISP_HAVE_OPENCV_VERSION >= 0x030000) std::cout << "Install OpenCV version >= 3 to make this test work." << std::endl; #endif diff --git a/modules/sensor/test/rgb-depth/testRealSense2_D435_pcl.cpp b/modules/sensor/test/rgb-depth/testRealSense2_D435_pcl.cpp index da220b1881..50b6005887 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_D435_pcl.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_D435_pcl.cpp @@ -41,8 +41,7 @@ #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_PCL) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_PCL) #include #include @@ -66,7 +65,7 @@ bool cancelled = false, update_pointcloud = false; class ViewerWorker { public: - explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) {} + explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) { } void run() { @@ -96,7 +95,8 @@ class ViewerWorker if (local_update) { if (m_colorMode) { local_pointcloud_color = pointcloud_color->makeShared(); - } else { + } + else { local_pointcloud = pointcloud->makeShared(); } } @@ -111,15 +111,18 @@ class ViewerWorker viewer->addPointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "RGB sample cloud"); - } else { + } + else { viewer->addPointCloud(local_pointcloud, "sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud"); } init = false; - } else { + } + else { if (m_colorMode) { viewer->updatePointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); - } else { + } + else { viewer->updatePointCloud(local_pointcloud, "sample cloud"); } } @@ -144,7 +147,8 @@ int main(int argc, char *argv[]) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--pcl_color") { pcl_color = true; - } else if (std::string(argv[i]) == "--show_infrared2") { + } + else if (std::string(argv[i]) == "--show_infrared2") { show_infrared2 = true; } } @@ -191,7 +195,8 @@ int main(int argc, char *argv[]) rs.acquire(reinterpret_cast(color.bitmap), reinterpret_cast(depth_raw.bitmap), NULL, pointcloud_color, reinterpret_cast(infrared1.bitmap), show_infrared2 ? reinterpret_cast(infrared2.bitmap) : NULL, NULL); - } else { + } + else { rs.acquire(reinterpret_cast(color.bitmap), reinterpret_cast(depth_raw.bitmap), NULL, pointcloud, reinterpret_cast(infrared1.bitmap), show_infrared2 ? 
reinterpret_cast(infrared2.bitmap) : NULL, NULL); @@ -232,7 +237,7 @@ int main(int argc, char *argv[]) viewer_thread.join(); std::cout << "Acquisition - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; return EXIT_SUCCESS; } @@ -243,11 +248,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense2 to make this test work." << std::endl; #endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11) " - "to make this test work" - << std::endl; -#endif #if !defined(VISP_HAVE_X11) && !defined(VISP_HAVE_GDI) std::cout << "X11 or GDI are needed." << std::endl; #endif diff --git a/modules/sensor/test/rgb-depth/testRealSense2_SR300.cpp b/modules/sensor/test/rgb-depth/testRealSense2_SR300.cpp index 1b70b9dcd3..12cb8b1725 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_SR300.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_SR300.cpp @@ -42,8 +42,7 @@ #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) +#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #ifdef VISP_HAVE_PCL #include @@ -74,7 +73,7 @@ bool cancelled = false, update_pointcloud = false; class ViewerWorker { public: - explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) {} + explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) { } void run() { @@ -104,7 +103,8 @@ class ViewerWorker if (local_update) { if (m_colorMode) { local_pointcloud_color = pointcloud_color->makeShared(); - } else { + } + else { local_pointcloud = pointcloud->makeShared(); } } @@ -119,15 +119,18 @@ class ViewerWorker viewer->addPointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "RGB sample cloud"); - } else { + } + else { viewer->addPointCloud(local_pointcloud, "sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud"); } init = false; - } else { + } + else { if (m_colorMode) { viewer->updatePointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); - } else { + } + else { viewer->updatePointCloud(local_pointcloud, "sample cloud"); } } @@ -161,7 +164,8 @@ void getPointcloud(const rs2::depth_frame &depth_frame, std::vector v[1] = vertices[i].y; v[2] = vertices[i].z; v[3] = 1.0; - } else { + } + else { v[0] = 0.0; v[1] = 0.0; v[2] = 0.0; @@ -213,10 +217,12 @@ void frame_to_mat(const rs2::frame &f, cv::Mat &img) if (f.get_profile().format() == RS2_FORMAT_BGR8) { memcpy(static_cast(img.ptr()), f.get_data(), size * 3); - } else if (f.get_profile().format() == RS2_FORMAT_RGB8) { + } + else if (f.get_profile().format() == RS2_FORMAT_RGB8) { cv::Mat tmp(h, w, CV_8UC3, (void *)f.get_data(), cv::Mat::AUTO_STEP); cv::cvtColor(tmp, img, cv::COLOR_RGB2BGR); - } else if (f.get_profile().format() == RS2_FORMAT_Y8) { + } + else if (f.get_profile().format() == RS2_FORMAT_Y8) { memcpy(img.ptr(), f.get_data(), size); } } @@ -242,10 +248,10 @@ int main(int argc, char *argv[]) else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << argv[0] << " [--show_info]" #ifdef 
VISP_HAVE_PCL - << " [--pcl_color]" + << " [--pcl_color]" #endif - << " [--help] [-h]" - << "\n"; + << " [--help] [-h]" + << "\n"; return EXIT_SUCCESS; } } @@ -295,23 +301,23 @@ int main(int argc, char *argv[]) rs2::pipeline &pipe = rs.getPipeline(); std::cout << "Color intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion) - << std::endl; + << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion) + << std::endl; std::cout << "Color intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion) << std::endl; + << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion) << std::endl; std::cout << "Depth intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion) - << std::endl; + << rs.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion) + << std::endl; std::cout << "Depth intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithDistortion) << std::endl; + << rs.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithDistortion) << std::endl; std::cout << "Infrared intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_INFRARED, vpCameraParameters::perspectiveProjWithoutDistortion) - << std::endl; + << rs.getCameraParameters(RS2_STREAM_INFRARED, vpCameraParameters::perspectiveProjWithoutDistortion) + << std::endl; std::cout << "Infrared intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_INFRARED, vpCameraParameters::perspectiveProjWithDistortion) - << std::endl; + << rs.getCameraParameters(RS2_STREAM_INFRARED, vpCameraParameters::perspectiveProjWithDistortion) + << std::endl; std::cout << "depth_M_color:\n" << rs.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH) << std::endl; std::cout << "color_M_infrared:\n" << rs.getTransformation(RS2_STREAM_INFRARED, RS2_STREAM_COLOR) << std::endl; @@ -382,7 +388,7 @@ int main(int argc, char *argv[]) viewer_colvector_thread.join(); #endif std::cout << "Acquisition1 - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; config.disable_all_streams(); config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 60); @@ -412,23 +418,23 @@ int main(int argc, char *argv[]) std::cout << "\n" << std::endl; std::cout << "Color intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion) - << std::endl; + << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion) + << std::endl; std::cout << "Color intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion) << std::endl; + << rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion) << std::endl; std::cout << "Depth intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion) - << std::endl; + << rs.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion) + << std::endl; std::cout << "Depth intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithDistortion) << std::endl; + << 
rs.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithDistortion) << std::endl; std::cout << "Infrared intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_INFRARED, vpCameraParameters::perspectiveProjWithoutDistortion) - << std::endl; + << rs.getCameraParameters(RS2_STREAM_INFRARED, vpCameraParameters::perspectiveProjWithoutDistortion) + << std::endl; std::cout << "Infrared intrinsics:\n" - << rs.getCameraParameters(RS2_STREAM_INFRARED, vpCameraParameters::perspectiveProjWithDistortion) - << std::endl; + << rs.getCameraParameters(RS2_STREAM_INFRARED, vpCameraParameters::perspectiveProjWithDistortion) + << std::endl; std::cout << "depth_M_color:\n" << rs.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH) << std::endl; std::cout << "color_M_infrared:\n" << rs.getTransformation(RS2_STREAM_INFRARED, RS2_STREAM_COLOR) << std::endl; @@ -445,7 +451,8 @@ int main(int argc, char *argv[]) if (pcl_color) { rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, NULL, pointcloud_color, (unsigned char *)infrared.bitmap); - } else { + } + else { rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, NULL, pointcloud, (unsigned char *)infrared.bitmap); } @@ -487,7 +494,7 @@ int main(int argc, char *argv[]) d2.close(depth_color); d3.close(infrared); std::cout << "Acquisition2 - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; #if VISP_HAVE_OPENCV_VERSION >= 0x030000 rs.close(); @@ -531,7 +538,7 @@ int main(int argc, char *argv[]) } std::cout << "Acquisition3 - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; #endif #ifdef VISP_HAVE_PCL @@ -580,7 +587,7 @@ int main(int argc, char *argv[]) viewer_colvector_thread2.join(); std::cout << "Acquisition4 - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; #endif return EXIT_SUCCESS; @@ -592,11 +599,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE2) std::cout << "Install librealsense2 to make this test work." << std::endl; #endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11) " - "to make this test work" - << std::endl; -#endif #if !defined(VISP_HAVE_X11) && !defined(VISP_HAVE_GDI) std::cout << "X11 or GDI are needed." 
<< std::endl; #endif diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_images.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_images.cpp index dff61c40f4..895035d488 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_images.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_images.cpp @@ -46,7 +46,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) int main() @@ -111,9 +111,11 @@ int main() std::cout << "Loop time: " << vpTime::measureTimeMs() - t << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -127,10 +129,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) std::cout << "You don't have X11 or GDI display capabilities" << std::endl; #elif !(RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry.cpp index 2bbdd914af..666d03f38c 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry.cpp @@ -47,7 +47,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) #include @@ -61,7 +61,7 @@ int main() double ts; vpImagePoint frame_origin; std::list > - frame_origins; // Frame origin's history for trajectory visualization + frame_origins; // Frame origin's history for trajectory visualization unsigned int display_scale = 2; try { @@ -156,9 +156,11 @@ int main() std::cout << "Loop time: " << vpTime::measureTimeMs() - t << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -172,10 +174,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) std::cout << "You don't have X11 or GDI display capabilities" << std::endl; #elif !(RS2_API_VERSION > ((2 * 10000) + (31 * 
100) + 0)) diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry_async.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry_async.cpp index 7a6f8cfb0a..dea908efda 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry_async.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry_async.cpp @@ -47,7 +47,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) #include @@ -61,7 +61,7 @@ int main() unsigned int confidence; vpImagePoint frame_origin; std::list > - frame_origins; // Frame origin's history for trajectory visualization. + frame_origins; // Frame origin's history for trajectory visualization. unsigned int display_scale = 2; try { @@ -118,8 +118,9 @@ int main() odo_acc[5] = static_cast(pose_data.angular_acceleration.z); confidence = pose_data.tracker_confidence; - } else { - // Stream that bypass synchronization (such as IMU, Pose, ...) will produce single frames. + } + else { + // Stream that bypass synchronization (such as IMU, Pose, ...) will produce single frames. rs2_pose pose_data = frame.as().get_pose_data(); vpTranslationVector ctw(static_cast(pose_data.translation.x), static_cast(pose_data.translation.y), @@ -152,9 +153,9 @@ int main() vpHomogeneousMatrix cextMc = cextMw * cMw.inverse(); vpMeterPixelConversion::convertPoint(cam, cextMc[0][3] / cextMc[2][3], cextMc[1][3] / cextMc[2][3], frame_origin); frame_origins.push_back(std::make_pair(confidence, frame_origin)); - }; + }; - // Open vpRealSense2 object according to configuration and with the callback to be called. + // Open vpRealSense2 object according to configuration and with the callback to be called. 
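The T265 odometry hunks above build a vpHomogeneousMatrix from the rs2_pose translation and quaternion before projecting the frame origin into the display. Below is a minimal, self-contained sketch of that quaternion-to-homogeneous-matrix step, using only plain C++ and the standard unit-quaternion formula; the struct names, the helper poseToHomogeneousMatrix and the Mat4 alias are illustrative and are not part of ViSP or librealsense.

// Sketch (illustrative names): convert an rs2_pose-style translation +
// unit quaternion (x, y, z, w) into a 4x4 homogeneous matrix, i.e. the
// same conversion the tests delegate to vpHomogeneousMatrix(ctw, cqw).
#include <array>

struct Vec3 { double x, y, z; };     // stands in for rs2_vector
struct Quat { double x, y, z, w; };  // stands in for rs2_quaternion

using Mat4 = std::array<std::array<double, 4>, 4>;

Mat4 poseToHomogeneousMatrix(const Vec3 &t, const Quat &q)
{
  // Start from identity, then fill the rotation block from the unit quaternion.
  Mat4 M = { { { 1, 0, 0, 0 }, { 0, 1, 0, 0 }, { 0, 0, 1, 0 }, { 0, 0, 0, 1 } } };
  M[0][0] = 1 - 2 * (q.y * q.y + q.z * q.z);
  M[0][1] = 2 * (q.x * q.y - q.w * q.z);
  M[0][2] = 2 * (q.x * q.z + q.w * q.y);
  M[1][0] = 2 * (q.x * q.y + q.w * q.z);
  M[1][1] = 1 - 2 * (q.x * q.x + q.z * q.z);
  M[1][2] = 2 * (q.y * q.z - q.w * q.x);
  M[2][0] = 2 * (q.x * q.z - q.w * q.y);
  M[2][1] = 2 * (q.y * q.z + q.w * q.x);
  M[2][2] = 1 - 2 * (q.x * q.x + q.y * q.y);
  // Translation goes in the last column.
  M[0][3] = t.x;
  M[1][3] = t.y;
  M[2][3] = t.z;
  return M;
}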
g.open(config, callback); I_left.resize(g.getIntrinsics(RS2_STREAM_FISHEYE, 1).height, g.getIntrinsics(RS2_STREAM_FISHEYE, 1).width); @@ -225,9 +226,11 @@ int main() vpDisplay::flush(I_right); vpDisplay::flush(I_pose); } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -241,10 +244,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) std::cout << "You don't have X11 or GDI display capabilities" << std::endl; #elif !(RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_imu.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_imu.cpp index a0345aa320..c55622c7da 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_imu.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_imu.cpp @@ -46,7 +46,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) int main() @@ -72,14 +72,16 @@ int main() rs.getIMUData(&imu_acc, &imu_vel, NULL); std::cout << "Gyro vel: x = " << std::setw(12) << imu_vel[0] << " y = " << std::setw(12) << imu_vel[1] - << " z = " << std::setw(12) << imu_vel[2]; + << " z = " << std::setw(12) << imu_vel[2]; std::cout << " Accel: x = " << std::setw(12) << imu_acc[0] << " y = " << std::setw(12) << imu_acc[1] - << " z = " << std::setw(12) << imu_acc[2]; + << " z = " << std::setw(12) << imu_acc[2]; std::cout << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -93,10 +95,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) std::cout << "You don't have X11 or GDI display capabilities" << std::endl; #elif !(RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_odometry.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_odometry.cpp index c29afcce28..cf38353a98 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_odometry.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_odometry.cpp @@ -46,7 +46,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= 
VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) int main() @@ -58,7 +58,7 @@ int main() vpImagePoint origin; vpImagePoint frame_origin; std::list > - frame_origins; // Frame origin's history for trajectory visualization + frame_origins; // Frame origin's history for trajectory visualization try { vpRealSense2 rs; @@ -132,9 +132,11 @@ int main() std::cout << "Loop time: " << vpTime::measureTimeMs() - t << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -148,10 +150,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) std::cout << "You don't have X11 or GDI display capabilities" << std::endl; #elif !(RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_undistort.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_undistort.cpp index 728f1f68e8..516a700a2f 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_undistort.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_undistort.cpp @@ -48,7 +48,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ +#if defined(VISP_HAVE_REALSENSE2) && \ (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) int main() @@ -72,7 +72,7 @@ int main() config.enable_stream(RS2_STREAM_FISHEYE, 2, RS2_FORMAT_Y8); rs.open(config); cam_left = - rs.getCameraParameters(RS2_STREAM_FISHEYE, vpCameraParameters::ProjWithKannalaBrandtDistortion, cam_index); + rs.getCameraParameters(RS2_STREAM_FISHEYE, vpCameraParameters::ProjWithKannalaBrandtDistortion, cam_index); vpImage I((unsigned int)rs.getIntrinsics(RS2_STREAM_FISHEYE).height, (unsigned int)rs.getIntrinsics(RS2_STREAM_FISHEYE).width); @@ -124,9 +124,11 @@ int main() std::cout << "Loop time: " << vpTime::measureTimeMs() - t << std::endl; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const std::exception &e) { + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -140,10 +142,6 @@ int main() std::cout << "Tip:" << std::endl; std::cout << "- Install librealsense2, configure again ViSP using cmake and build again this example" << std::endl; return EXIT_SUCCESS; -#elif (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "You do not build ViSP with c++11 or higher compiler flag" << std::endl; - std::cout << "Tip:" << std::endl; - std::cout << "- Configure ViSP again using cmake -DUSE_CXX_STANDARD=11, and build again this example" << std::endl; #elif !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) std::cout << "You don't have X11 or GDI display capabilities" << std::endl; #elif !(RS2_API_VERSION > ((2 * 
10000) + (31 * 100) + 0)) diff --git a/modules/sensor/test/rgb-depth/testRealSense_R200.cpp b/modules/sensor/test/rgb-depth/testRealSense_R200.cpp index dba605f4f3..571de313f4 100644 --- a/modules/sensor/test/rgb-depth/testRealSense_R200.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense_R200.cpp @@ -46,8 +46,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) +#if defined(VISP_HAVE_REALSENSE) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #include #include @@ -68,7 +67,7 @@ bool cancelled = false, update_pointcloud = false; class ViewerWorker { public: - explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) {} + explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) { } void run() { @@ -98,7 +97,8 @@ class ViewerWorker if (local_update) { if (m_colorMode) { local_pointcloud_color = pointcloud_color->makeShared(); - } else { + } + else { local_pointcloud = pointcloud->makeShared(); } } @@ -113,15 +113,18 @@ class ViewerWorker viewer->addPointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "RGB sample cloud"); - } else { + } + else { viewer->addPointCloud(local_pointcloud, "sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud"); } init = false; - } else { + } + else { if (m_colorMode) { viewer->updatePointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); - } else { + } + else { viewer->updatePointCloud(local_pointcloud, "sample cloud"); } } @@ -234,7 +237,8 @@ void test_R200(vpRealSense &rs, const std::map &enables, case rs::stream::depth: if (depth_color_visualization) { dd.init(I_depth_color, (int)I_color.getWidth() + 80, 0, "Depth frame"); - } else { + } + else { dd.init(I_depth, (int)I_color.getWidth() + 80, 0, "Depth frame"); } break; @@ -289,17 +293,20 @@ void test_R200(vpRealSense &rs, const std::map &enables, rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, (unsigned char *)I_infrared.bitmap, (unsigned char *)I_infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); - } else { + } + else { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, (unsigned char *)I_infrared.bitmap, (unsigned char *)I_infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); } - } else { + } + else { if (pcl_color) { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, (unsigned char *)infrared.bitmap, (unsigned char *)infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); - } else { + } + else { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, (unsigned char *)infrared.bitmap, (unsigned char *)infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); @@ -311,12 +318,14 @@ void test_R200(vpRealSense &rs, const std::map &enables, update_pointcloud = true; #endif - } else { + } + else { if (direct_infrared_conversion) { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, (unsigned char *)I_infrared.bitmap, (unsigned char *)I_infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, 
infrared2_stream); - } else { + } + else { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, (unsigned char *)infrared.bitmap, (unsigned char *)infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); @@ -327,14 +336,16 @@ void test_R200(vpRealSense &rs, const std::map &enables, if (depth_color_visualization) { vpImageConvert::createDepthHistogram(depth, I_depth_color); - } else { + } + else { vpImageConvert::createDepthHistogram(depth, I_depth); } vpDisplay::display(I_color); if (depth_color_visualization) { vpDisplay::display(I_depth_color); - } else { + } + else { vpDisplay::display(I_depth); } vpDisplay::display(I_infrared); @@ -343,7 +354,8 @@ void test_R200(vpRealSense &rs, const std::map &enables, vpDisplay::flush(I_color); if (depth_color_visualization) { vpDisplay::flush(I_depth_color); - } else { + } + else { vpDisplay::flush(I_depth); } vpDisplay::flush(I_infrared); @@ -373,7 +385,7 @@ void test_R200(vpRealSense &rs, const std::map &enables, } std::cout << title << " - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; rs.close(); } @@ -394,7 +406,7 @@ int main(int argc, char *argv[]) std::cout << "API version: " << rs_get_api_version(nullptr) << std::endl; std::cout << "Firmware: " << rs_get_device_firmware_version((const rs_device *)rs.getHandler(), nullptr) - << std::endl; + << std::endl; std::cout << "RealSense sensor characteristics: \n" << rs << std::endl; rs.close(); @@ -549,12 +561,15 @@ int main(int argc, char *argv[]) false, rs::stream::color, rs::stream::depth, rs::stream::infrared2, true, (argc > 1 ? (bool)(atoi(argv[1]) > 0) : false)); #endif - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const rs::error &e) { + } + catch (const rs::error &e) { std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() - << "): " << e.what() << std::endl; - } catch (const std::exception &e) { + << "): " << e.what() << std::endl; + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -566,10 +581,6 @@ int main() { #if !defined(VISP_HAVE_REALSENSE) std::cout << "This deprecated example is only working with librealsense 1.x " << std::endl; -#elif !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11) " - "to make this test working" - << std::endl; #elif !defined(VISP_HAVE_X11) && !defined(VISP_HAVE_GDI) std::cout << "X11 or GDI are needed!" 
<< std::endl; #endif diff --git a/modules/sensor/test/rgb-depth/testRealSense_SR300.cpp b/modules/sensor/test/rgb-depth/testRealSense_SR300.cpp index 40e095bcd2..3a91cc5ed9 100644 --- a/modules/sensor/test/rgb-depth/testRealSense_SR300.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense_SR300.cpp @@ -46,8 +46,7 @@ #include #include -#if defined(VISP_HAVE_REALSENSE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && \ - (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) +#if defined(VISP_HAVE_REALSENSE) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) #include #include @@ -68,7 +67,7 @@ bool cancelled = false, update_pointcloud = false; class ViewerWorker { public: - explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) {} + explicit ViewerWorker(bool color_mode, std::mutex &mutex) : m_colorMode(color_mode), m_mutex(mutex) { } void run() { @@ -98,7 +97,8 @@ class ViewerWorker if (local_update) { if (m_colorMode) { local_pointcloud_color = pointcloud_color->makeShared(); - } else { + } + else { local_pointcloud = pointcloud->makeShared(); } } @@ -113,15 +113,18 @@ class ViewerWorker viewer->addPointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "RGB sample cloud"); - } else { + } + else { viewer->addPointCloud(local_pointcloud, "sample cloud"); viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud"); } init = false; - } else { + } + else { if (m_colorMode) { viewer->updatePointCloud(local_pointcloud_color, rgb, "RGB sample cloud"); - } else { + } + else { viewer->updatePointCloud(local_pointcloud, "sample cloud"); } } @@ -220,7 +223,8 @@ void test_SR300(vpRealSense &rs, const std::map &enables, case rs::stream::depth: if (depth_color_visualization) { dd.init(I_depth_color, (int)I_color.getWidth() + 80, 0, "Depth frame"); - } else { + } + else { dd.init(I_depth, (int)I_color.getWidth() + 80, 0, "Depth frame"); } break; @@ -265,15 +269,18 @@ void test_SR300(vpRealSense &rs, const std::map &enables, if (pcl_color) { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, (unsigned char *)I_infrared.bitmap, NULL, color_stream, depth_stream); - } else { + } + else { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, (unsigned char *)I_infrared.bitmap, NULL, color_stream, depth_stream); } - } else { + } + else { if (pcl_color) { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, (unsigned char *)infrared.bitmap, NULL, color_stream, depth_stream); - } else { + } + else { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, (unsigned char *)infrared.bitmap, NULL, color_stream, depth_stream); } @@ -283,11 +290,13 @@ void test_SR300(vpRealSense &rs, const std::map &enables, update_pointcloud = true; #endif - } else { + } + else { if (direct_infrared_conversion) { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, (unsigned char *)I_infrared.bitmap, NULL, color_stream, depth_stream); - } else { + } + else { rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, (unsigned char *)infrared.bitmap, NULL, color_stream, depth_stream); vpImageConvert::convert(infrared, I_infrared); @@ -296,14 +305,16 @@ void test_SR300(vpRealSense &rs, const std::map &enables, if 
(depth_color_visualization) { vpImageConvert::createDepthHistogram(depth, I_depth_color); - } else { + } + else { vpImageConvert::createDepthHistogram(depth, I_depth); } vpDisplay::display(I_color); if (depth_color_visualization) { vpDisplay::display(I_depth_color); - } else { + } + else { vpDisplay::display(I_depth); } vpDisplay::display(I_infrared); @@ -311,7 +322,8 @@ void test_SR300(vpRealSense &rs, const std::map &enables, vpDisplay::flush(I_color); if (depth_color_visualization) { vpDisplay::flush(I_depth_color); - } else { + } + else { vpDisplay::flush(I_depth); } vpDisplay::flush(I_infrared); @@ -340,7 +352,7 @@ void test_SR300(vpRealSense &rs, const std::map &enables, } std::cout << title << " - Mean time: " << vpMath::getMean(time_vector) - << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; + << " ms ; Median time: " << vpMath::getMedian(time_vector) << " ms" << std::endl; rs.close(); } @@ -361,7 +373,7 @@ int main(int argc, char *argv[]) std::cout << "API version: " << rs_get_api_version(nullptr) << std::endl; std::cout << "Firmware: " << rs_get_device_firmware_version((const rs_device *)rs.getHandler(), nullptr) - << std::endl; + << std::endl; std::cout << "RealSense sensor characteristics: \n" << rs << std::endl; rs.close(); @@ -481,12 +493,15 @@ int main(int argc, char *argv[]) break; } } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "RealSense error " << e.what() << std::endl; - } catch (const rs::error &e) { + } + catch (const rs::error &e) { std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() - << "): " << e.what() << std::endl; - } catch (const std::exception &e) { + << "): " << e.what() << std::endl; + } + catch (const std::exception &e) { std::cerr << e.what() << std::endl; } @@ -499,11 +514,6 @@ int main() #if !defined(VISP_HAVE_REALSENSE) std::cout << "Install librealsense to make this test work." << std::endl; #endif -#if !(VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::cout << "Build ViSP with c++11 or higher compiler flag (cmake -DUSE_CXX_STANDARD=11) " - "to make this test work." - << std::endl; -#endif #if !defined(VISP_HAVE_X11) && !defined(VISP_HAVE_GDI) std::cout << "X11 or GDI are needed." << std::endl; #endif diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtTukeyEstimator.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtTukeyEstimator.h index 564268e4aa..7b6b694538 100755 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtTukeyEstimator.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtTukeyEstimator.h @@ -79,7 +79,7 @@ template class vpMbtTukeyEstimator #include #define USE_TRANSFORM 1 -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) && USE_TRANSFORM +#if USE_TRANSFORM #define HAVE_TRANSFORM 1 #include #endif @@ -116,7 +116,8 @@ namespace #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_14) auto AbsDiff = [](const auto &a, const auto &b) { return std::fabs(a - b); }; #else -template struct AbsDiff : public std::binary_function { +template struct AbsDiff : public std::binary_function +{ T operator()(const T a, const T b) const { return std::fabs(a - b); } }; #endif @@ -351,7 +352,8 @@ template void vpMbtTukeyEstimator::psiTukey(const T sig, std::ve if (xi > 1.) { weights[i] = 0; - } else { + } + else { xi = 1 - xi; xi *= xi; weights[i] = xi; @@ -450,7 +452,8 @@ template void vpMbtTukeyEstimator::psiTukey(const T sig, std::vecto if (xi > 1.) 
{ weights[i] = 0; - } else { + } + else { xi = 1 - xi; xi *= xi; weights[i] = xi; diff --git a/modules/tracker/mbt/src/depth/vpMbtFaceDepthDense.cpp b/modules/tracker/mbt/src/depth/vpMbtFaceDepthDense.cpp index 9e1a4fb43c..07f25e06cd 100644 --- a/modules/tracker/mbt/src/depth/vpMbtFaceDepthDense.cpp +++ b/modules/tracker/mbt/src/depth/vpMbtFaceDepthDense.cpp @@ -47,7 +47,7 @@ // https://stackoverflow.com/a/40765925 #if !defined(__FMA__) && defined(__AVX2__) - #define __FMA__ 1 +#define __FMA__ 1 #endif #if defined _WIN32 && defined(_M_ARM64) @@ -81,18 +81,16 @@ #endif #if !USE_OPENCV_HAL && (USE_SSE || USE_NEON) -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #include -#endif namespace { #if USE_SSE -inline void v_load_deinterleave(const uint64_t *ptr, __m128i& a, __m128i& b, __m128i& c) +inline void v_load_deinterleave(const uint64_t *ptr, __m128i &a, __m128i &b, __m128i &c) { - __m128i t0 = _mm_loadu_si128((const __m128i*)ptr); // a0, b0 - __m128i t1 = _mm_loadu_si128((const __m128i*)(ptr + 2)); // c0, a1 - __m128i t2 = _mm_loadu_si128((const __m128i*)(ptr + 4)); // b1, c1 + __m128i t0 = _mm_loadu_si128((const __m128i *)ptr); // a0, b0 + __m128i t1 = _mm_loadu_si128((const __m128i *)(ptr + 2)); // c0, a1 + __m128i t2 = _mm_loadu_si128((const __m128i *)(ptr + 4)); // b1, c1 t1 = _mm_shuffle_epi32(t1, 0x4e); // a1, c0 @@ -101,37 +99,37 @@ inline void v_load_deinterleave(const uint64_t *ptr, __m128i& a, __m128i& b, __m c = _mm_unpackhi_epi64(t1, t2); } -inline void v_load_deinterleave(const double* ptr, __m128d& a0, __m128d& b0, __m128d& c0) +inline void v_load_deinterleave(const double *ptr, __m128d &a0, __m128d &b0, __m128d &c0) { __m128i a1, b1, c1; - v_load_deinterleave((const uint64_t*)ptr, a1, b1, c1); + v_load_deinterleave((const uint64_t *)ptr, a1, b1, c1); a0 = _mm_castsi128_pd(a1); b0 = _mm_castsi128_pd(b1); c0 = _mm_castsi128_pd(c1); } -inline __m128d v_combine_low(const __m128d& a, const __m128d& b) +inline __m128d v_combine_low(const __m128d &a, const __m128d &b) { __m128i a1 = _mm_castpd_si128(a), b1 = _mm_castpd_si128(b); return _mm_castsi128_pd(_mm_unpacklo_epi64(a1, b1)); } -inline __m128d v_combine_high(const __m128d& a, const __m128d& b) +inline __m128d v_combine_high(const __m128d &a, const __m128d &b) { __m128i a1 = _mm_castpd_si128(a), b1 = _mm_castpd_si128(b); return _mm_castsi128_pd(_mm_unpackhi_epi64(a1, b1)); } -inline __m128d v_fma(const __m128d& a, const __m128d& b, const __m128d& c) +inline __m128d v_fma(const __m128d &a, const __m128d &b, const __m128d &c) { #if __FMA__ - return _mm_fmadd_pd(a, b, c); + return _mm_fmadd_pd(a, b, c); #else - return _mm_add_pd(_mm_mul_pd(a, b), c); + return _mm_add_pd(_mm_mul_pd(a, b), c); #endif } #else -inline void v_load_deinterleave(const double* ptr, float64x2_t& a0, float64x2_t& b0, float64x2_t& c0) +inline void v_load_deinterleave(const double *ptr, float64x2_t &a0, float64x2_t &b0, float64x2_t &c0) { float64x2x3_t v = vld3q_f64(ptr); a0 = v.val[0]; @@ -139,17 +137,17 @@ inline void v_load_deinterleave(const double* ptr, float64x2_t& a0, float64x2_t& c0 = v.val[2]; } -inline float64x2_t v_combine_low(const float64x2_t& a, const float64x2_t& b) +inline float64x2_t v_combine_low(const float64x2_t &a, const float64x2_t &b) { return vcombine_f64(vget_low_f64(a), vget_low_f64(b)); } -inline float64x2_t v_combine_high(const float64x2_t& a, const float64x2_t& b) +inline float64x2_t v_combine_high(const float64x2_t &a, const float64x2_t &b) { return vcombine_f64(vget_high_f64(a), vget_high_f64(b)); } -inline float64x2_t 
v_fma(const float64x2_t& a, const float64x2_t& b, const float64x2_t& c) +inline float64x2_t v_fma(const float64x2_t &a, const float64x2_t &b, const float64x2_t &c) { return vfmaq_f64(c, a, b); } @@ -159,12 +157,11 @@ inline float64x2_t v_fma(const float64x2_t& a, const float64x2_t& b, const float vpMbtFaceDepthDense::vpMbtFaceDepthDense() : m_cam(), m_clippingFlag(vpPolygon3D::NO_CLIPPING), m_distFarClip(100), m_distNearClip(0.001), m_hiddenFace(NULL), - m_planeObject(), m_polygon(NULL), m_useScanLine(false), - m_depthDenseFilteringMethod(DEPTH_OCCUPANCY_RATIO_FILTERING), m_depthDenseFilteringMaxDist(3.0), - m_depthDenseFilteringMinDist(0.8), m_depthDenseFilteringOccupancyRatio(0.3), m_isTrackedDepthDenseFace(true), - m_isVisible(false), m_listOfFaceLines(), m_planeCamera(), m_pointCloudFace(), m_polygonLines() -{ -} + m_planeObject(), m_polygon(NULL), m_useScanLine(false), + m_depthDenseFilteringMethod(DEPTH_OCCUPANCY_RATIO_FILTERING), m_depthDenseFilteringMaxDist(3.0), + m_depthDenseFilteringMinDist(0.8), m_depthDenseFilteringOccupancyRatio(0.3), m_isTrackedDepthDenseFace(true), + m_isVisible(false), m_listOfFaceLines(), m_planeCamera(), m_pointCloudFace(), m_polygonLines() +{ } vpMbtFaceDepthDense::~vpMbtFaceDepthDense() { @@ -312,7 +309,7 @@ bool vpMbtFaceDepthDense::computeDesiredFeatures(const vpHomogeneousMatrix &cMo, if ((m_useScanLine ? (i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() && j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() && m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs()[i][j] == m_polygon->getIndex()) - : polygon_2d.isInside(vpImagePoint(i, j)))) { + : polygon_2d.isInside(vpImagePoint(i, j)))) { totalTheoreticalPoints++; if (vpMeTracker::inMask(mask, i, j) && pcl::isFinite((*point_cloud)(j, i)) && (*point_cloud)(j, i).z > 0) { @@ -398,7 +395,7 @@ bool vpMbtFaceDepthDense::computeDesiredFeatures(const vpHomogeneousMatrix &cMo, if ((m_useScanLine ? 
(i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() && j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() && m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs()[i][j] == m_polygon->getIndex()) - : polygon_2d.isInside(vpImagePoint(i, j)))) { + : polygon_2d.isInside(vpImagePoint(i, j)))) { totalTheoreticalPoints++; if (vpMeTracker::inMask(mask, i, j) && point_cloud[i * width + j][2] > 0) { @@ -440,7 +437,8 @@ void vpMbtFaceDepthDense::computeVisibilityDisplay() int index = *itindex; if (index == -1) { isvisible = true; - } else { + } + else { if (line->hiddenface->isVisible((unsigned int)index)) { isvisible = true; } @@ -453,7 +451,8 @@ void vpMbtFaceDepthDense::computeVisibilityDisplay() if (isvisible) { line->setVisible(true); - } else { + } + else { line->setVisible(false); } } @@ -642,7 +641,8 @@ void vpMbtFaceDepthDense::computeInteractionMatrixAndResidu(const vpHomogeneousM error[(unsigned int)(cpt / 3)] = D + (normal.t() * pt); } #endif - } else { + } + else { vpColVector normal(3); normal[0] = nx; normal[1] = ny; @@ -739,19 +739,21 @@ void vpMbtFaceDepthDense::computeROI(const vpHomogeneousMatrix &cMo, unsigned in if (linesLst.empty()) { distanceToFace = std::numeric_limits::max(); - } else { + } + else { faceCentroid.set_X(faceCentroid.get_X() / (2 * linesLst.size())); faceCentroid.set_Y(faceCentroid.get_Y() / (2 * linesLst.size())); faceCentroid.set_Z(faceCentroid.get_Z() / (2 * linesLst.size())); distanceToFace = - sqrt(faceCentroid.get_X() * faceCentroid.get_X() + faceCentroid.get_Y() * faceCentroid.get_Y() + - faceCentroid.get_Z() * faceCentroid.get_Z()); + sqrt(faceCentroid.get_X() * faceCentroid.get_X() + faceCentroid.get_Y() * faceCentroid.get_Y() + + faceCentroid.get_Z() * faceCentroid.get_Z()); } } } - } else { - // Get polygon clipped + } + else { + // Get polygon clipped m_polygon->getRoiClipped(m_cam, roiPts, cMo); // Get 3D polygon clipped @@ -760,7 +762,8 @@ void vpMbtFaceDepthDense::computeROI(const vpHomogeneousMatrix &cMo, unsigned in if (polygonsClipped.empty()) { distanceToFace = std::numeric_limits::max(); - } else { + } + else { vpPoint faceCentroid; for (size_t i = 0; i < polygonsClipped.size(); i++) { @@ -788,7 +791,7 @@ void vpMbtFaceDepthDense::display(const vpImage &I, const vpHomog bool displayFullModel) { std::vector > models = - getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel); + getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel); for (size_t i = 0; i < models.size(); i++) { vpImagePoint ip1(models[i][1], models[i][2]); @@ -802,7 +805,7 @@ void vpMbtFaceDepthDense::display(const vpImage &I, const vpHomogeneousM bool displayFullModel) { std::vector > models = - getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel); + getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel); for (size_t i = 0; i < models.size(); i++) { vpImagePoint ip1(models[i][1], models[i][2]); @@ -814,14 +817,12 @@ void vpMbtFaceDepthDense::display(const vpImage &I, const vpHomogeneousM void vpMbtFaceDepthDense::displayFeature(const vpImage & /*I*/, const vpHomogeneousMatrix & /*cMo*/, const vpCameraParameters & /*cam*/, const double /*scale*/, const unsigned int /*thickness*/) -{ -} +{ } void vpMbtFaceDepthDense::displayFeature(const vpImage & /*I*/, const vpHomogeneousMatrix & /*cMo*/, const vpCameraParameters & /*cam*/, const double /*scale*/, const unsigned int /*thickness*/) -{ -} +{ } /*! 
Return a list of line parameters to display the primitive at a given pose and camera parameters. @@ -848,7 +849,7 @@ std::vector > vpMbtFaceDepthDense::getModelForDisplay(unsign ++it) { vpMbtDistanceLine *line = *it; std::vector > lineModels = - line->getModelForDisplay(width, height, cMo, cam, displayFullModel); + line->getModelForDisplay(width, height, cMo, cam, displayFullModel); models.insert(models.end(), lineModels.begin(), lineModels.end()); } } diff --git a/modules/tracker/mbt/src/depth/vpMbtFaceDepthNormal.cpp b/modules/tracker/mbt/src/depth/vpMbtFaceDepthNormal.cpp index 7dda21c822..d2c7390ac7 100644 --- a/modules/tracker/mbt/src/depth/vpMbtFaceDepthNormal.cpp +++ b/modules/tracker/mbt/src/depth/vpMbtFaceDepthNormal.cpp @@ -57,14 +57,13 @@ vpMbtFaceDepthNormal::vpMbtFaceDepthNormal() : m_cam(), m_clippingFlag(vpPolygon3D::NO_CLIPPING), m_distFarClip(100), m_distNearClip(0.001), m_hiddenFace(NULL), - m_planeObject(), m_polygon(NULL), m_useScanLine(false), m_faceActivated(false), - m_faceCentroidMethod(GEOMETRIC_CENTROID), m_faceDesiredCentroid(), m_faceDesiredNormal(), - m_featureEstimationMethod(ROBUST_FEATURE_ESTIMATION), m_isTrackedDepthNormalFace(true), m_isVisible(false), - m_listOfFaceLines(), m_planeCamera(), - m_pclPlaneEstimationMethod(2), // SAC_MSAC, see pcl/sample_consensus/method_types.h - m_pclPlaneEstimationRansacMaxIter(200), m_pclPlaneEstimationRansacThreshold(0.001), m_polygonLines() -{ -} + m_planeObject(), m_polygon(NULL), m_useScanLine(false), m_faceActivated(false), + m_faceCentroidMethod(GEOMETRIC_CENTROID), m_faceDesiredCentroid(), m_faceDesiredNormal(), + m_featureEstimationMethod(ROBUST_FEATURE_ESTIMATION), m_isTrackedDepthNormalFace(true), m_isVisible(false), + m_listOfFaceLines(), m_planeCamera(), + m_pclPlaneEstimationMethod(2), // SAC_MSAC, see pcl/sample_consensus/method_types.h + m_pclPlaneEstimationRansacMaxIter(200), m_pclPlaneEstimationRansacThreshold(0.001), m_polygonLines() +{ } vpMbtFaceDepthNormal::~vpMbtFaceDepthNormal() { @@ -202,9 +201,11 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo if (m_featureEstimationMethod == ROBUST_FEATURE_ESTIMATION) { point_cloud_face_custom.reserve((size_t)(3 * bb.getWidth() * bb.getHeight())); point_cloud_face_vec.reserve((size_t)(3 * bb.getWidth() * bb.getHeight())); - } else if (m_featureEstimationMethod == ROBUST_SVD_PLANE_ESTIMATION) { + } + else if (m_featureEstimationMethod == ROBUST_SVD_PLANE_ESTIMATION) { point_cloud_face_vec.reserve((size_t)(3 * bb.getWidth() * bb.getHeight())); - } else if (m_featureEstimationMethod == PCL_PLANE_ESTIMATION) { + } + else if (m_featureEstimationMethod == PCL_PLANE_ESTIMATION) { point_cloud_face->reserve((size_t)(bb.getWidth() * bb.getHeight())); } @@ -223,12 +224,13 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo (m_useScanLine ? 
(i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() && j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() && m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs()[i][j] == m_polygon->getIndex()) - : polygon_2d.isInside(vpImagePoint(i, j)))) { + : polygon_2d.isInside(vpImagePoint(i, j)))) { if (m_featureEstimationMethod == PCL_PLANE_ESTIMATION) { point_cloud_face->push_back((*point_cloud)(j, i)); - } else if (m_featureEstimationMethod == ROBUST_SVD_PLANE_ESTIMATION || - m_featureEstimationMethod == ROBUST_FEATURE_ESTIMATION) { + } + else if (m_featureEstimationMethod == ROBUST_SVD_PLANE_ESTIMATION || + m_featureEstimationMethod == ROBUST_FEATURE_ESTIMATION) { point_cloud_face_vec.push_back((*point_cloud)(j, i).x); point_cloud_face_vec.push_back((*point_cloud)(j, i).y); point_cloud_face_vec.push_back((*point_cloud)(j, i).z); @@ -244,7 +246,8 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo prev_x = x; prev_y = y; prev_z = (*point_cloud)(j, i).z; - } else { + } + else { push = false; point_cloud_face_custom.push_back(prev_x); point_cloud_face_custom.push_back(x); @@ -256,7 +259,8 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo point_cloud_face_custom.push_back((*point_cloud)(j, i).z); } #endif - } else { + } + else { point_cloud_face_custom.push_back(x); point_cloud_face_custom.push_back(y); point_cloud_face_custom.push_back((*point_cloud)(j, i).z); @@ -290,12 +294,15 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo if (!computeDesiredFeaturesPCL(point_cloud_face, desired_features, desired_normal, centroid_point)) { return false; } - } else if (m_featureEstimationMethod == ROBUST_SVD_PLANE_ESTIMATION) { + } + else if (m_featureEstimationMethod == ROBUST_SVD_PLANE_ESTIMATION) { computeDesiredFeaturesSVD(point_cloud_face_vec, cMo, desired_features, desired_normal, centroid_point); - } else if (m_featureEstimationMethod == ROBUST_FEATURE_ESTIMATION) { + } + else if (m_featureEstimationMethod == ROBUST_FEATURE_ESTIMATION) { computeDesiredFeaturesRobustFeatures(point_cloud_face_custom, point_cloud_face_vec, cMo, desired_features, desired_normal, centroid_point); - } else { + } + else { throw vpException(vpException::badValue, "Unknown feature estimation method!"); } @@ -376,8 +383,8 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo (m_useScanLine ? 
(i < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getHeight() && j < m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs().getWidth() && m_hiddenFace->getMbScanLineRenderer().getPrimitiveIDs()[i][j] == m_polygon->getIndex()) - : polygon_2d.isInside(vpImagePoint(i, j)))) { - // Add point + : polygon_2d.isInside(vpImagePoint(i, j)))) { +// Add point point_cloud_face.push_back(point_cloud[i * width + j][0]); point_cloud_face.push_back(point_cloud[i * width + j][1]); point_cloud_face.push_back(point_cloud[i * width + j][2]); @@ -393,7 +400,8 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo prev_x = x; prev_y = y; prev_z = point_cloud[i * width + j][2]; - } else { + } + else { push = false; point_cloud_face_custom.push_back(prev_x); point_cloud_face_custom.push_back(x); @@ -405,7 +413,8 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo point_cloud_face_custom.push_back(point_cloud[i * width + j][2]); } #endif - } else { + } + else { point_cloud_face_custom.push_back(x); point_cloud_face_custom.push_back(y); point_cloud_face_custom.push_back(point_cloud[i * width + j][2]); @@ -445,16 +454,19 @@ bool vpMbtFaceDepthNormal::computeDesiredFeatures(const vpHomogeneousMatrix &cMo } computeDesiredFeaturesPCL(point_cloud_face_pcl, desired_features, desired_normal, centroid_point); - } else -#endif - if (m_featureEstimationMethod == ROBUST_SVD_PLANE_ESTIMATION) { - computeDesiredFeaturesSVD(point_cloud_face, cMo, desired_features, desired_normal, centroid_point); - } else if (m_featureEstimationMethod == ROBUST_FEATURE_ESTIMATION) { - computeDesiredFeaturesRobustFeatures(point_cloud_face_custom, point_cloud_face, cMo, desired_features, - desired_normal, centroid_point); - } else { - throw vpException(vpException::badValue, "Unknown feature estimation method!"); } + else +#endif + if (m_featureEstimationMethod == ROBUST_SVD_PLANE_ESTIMATION) { + computeDesiredFeaturesSVD(point_cloud_face, cMo, desired_features, desired_normal, centroid_point); + } + else if (m_featureEstimationMethod == ROBUST_FEATURE_ESTIMATION) { + computeDesiredFeaturesRobustFeatures(point_cloud_face_custom, point_cloud_face, cMo, desired_features, + desired_normal, centroid_point); + } + else { + throw vpException(vpException::badValue, "Unknown feature estimation method!"); + } computeDesiredNormalAndCentroid(cMo, desired_normal, centroid_point); @@ -514,7 +526,8 @@ bool vpMbtFaceDepthNormal::computeDesiredFeaturesPCL(const pcl::PointCloud &po for (size_t i = 0; i < points.size() - 1; i++) { // projection onto xy plane c_x1 += (points[i].get_X() + points[i + 1].get_X()) * - (points[i].get_X() * points[i + 1].get_Y() - points[i + 1].get_X() * points[i].get_Y()); + (points[i].get_X() * points[i + 1].get_Y() - points[i + 1].get_X() * points[i].get_Y()); c_y += (points[i].get_Y() + points[i + 1].get_Y()) * - (points[i].get_X() * points[i + 1].get_Y() - points[i + 1].get_X() * points[i].get_Y()); + (points[i].get_X() * points[i + 1].get_Y() - points[i + 1].get_X() * points[i].get_Y()); A1 += points[i].get_X() * points[i + 1].get_Y() - points[i + 1].get_X() * points[i].get_Y(); // projection onto xz plane c_x2 += (points[i].get_X() + points[i + 1].get_X()) * - (points[i].get_X() * points[i + 1].get_Z() - points[i + 1].get_X() * points[i].get_Z()); + (points[i].get_X() * points[i + 1].get_Z() - points[i + 1].get_X() * points[i].get_Z()); c_z += (points[i].get_Z() + points[i + 1].get_Z()) * - (points[i].get_X() * points[i + 1].get_Z() - points[i + 1].get_X() * 
points[i].get_Z()); + (points[i].get_X() * points[i + 1].get_Z() - points[i + 1].get_X() * points[i].get_Z()); A2 += points[i].get_X() * points[i + 1].get_Z() - points[i + 1].get_X() * points[i].get_Z(); } @@ -639,7 +653,8 @@ bool vpMbtFaceDepthNormal::computePolygonCentroid(const std::vector &po if (A1 > A2) { centroid.set_X(c_x1); - } else { + } + else { centroid.set_X(c_x2); } @@ -704,8 +719,9 @@ void vpMbtFaceDepthNormal::computeROI(const vpHomogeneousMatrix &cMo, unsigned i } } } - } else { - // Get polygon clipped + } + else { + // Get polygon clipped m_polygon->getRoiClipped(m_cam, roiPts, cMo); #if DEBUG_DISPLAY_DEPTH_NORMAL @@ -730,7 +746,8 @@ void vpMbtFaceDepthNormal::computeVisibilityDisplay() int index = *itindex; if (index == -1) { isvisible = true; - } else { + } + else { if (line->hiddenface->isVisible((unsigned int)index)) { isvisible = true; } @@ -743,7 +760,8 @@ void vpMbtFaceDepthNormal::computeVisibilityDisplay() if (isvisible) { line->setVisible(true); - } else { + } + else { line->setVisible(false); } } @@ -775,7 +793,8 @@ void vpMbtFaceDepthNormal::computeNormalVisibility(double nx, double ny, double e4[1] = -centroid.get_Y(); e4[2] = -centroid.get_Z(); e4.normalize(); - } else { + } + else { double centroid_x = 0.0; double centroid_y = 0.0; double centroid_z = 0.0; @@ -804,7 +823,8 @@ void vpMbtFaceDepthNormal::computeNormalVisibility(double nx, double ny, double double angle = acos(vpColVector::dotProd(e4, faceNormal)); if (angle < M_PI_2) { correct_normal = faceNormal; - } else { + } + else { correct_normal[0] = -faceNormal[0]; correct_normal[1] = -faceNormal[1]; correct_normal[2] = -faceNormal[2]; @@ -830,7 +850,8 @@ void vpMbtFaceDepthNormal::computeNormalVisibility(float nx, float ny, float nz, double angle = acos(vpColVector::dotProd(e4, faceNormal)); if (angle < M_PI_2) { face_normal = pcl::PointXYZ(faceNormal[0], faceNormal[1], faceNormal[2]); - } else { + } + else { face_normal = pcl::PointXYZ(-faceNormal[0], -faceNormal[1], -faceNormal[2]); } } @@ -906,7 +927,7 @@ void vpMbtFaceDepthNormal::display(const vpImage &I, const vpHomo bool displayFullModel) { std::vector > models = - getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel); + getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel); for (size_t i = 0; i < models.size(); i++) { vpImagePoint ip1(models[i][1], models[i][2]); @@ -920,7 +941,7 @@ void vpMbtFaceDepthNormal::display(const vpImage &I, const vpHomogeneous bool displayFullModel) { std::vector > models = - getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel); + getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel); for (size_t i = 0; i < models.size(); i++) { vpImagePoint ip1(models[i][1], models[i][2]); @@ -1093,7 +1114,7 @@ void vpMbtFaceDepthNormal::estimateFeatures(const std::vector &point_clo const __m128d vinvZi = _mm_div_pd(vones, vZi); const __m128d tmp = - _mm_add_pd(_mm_add_pd(_mm_mul_pd(vA, vxi), _mm_mul_pd(vB, vyi)), _mm_sub_pd(vC, vinvZi)); + _mm_add_pd(_mm_add_pd(_mm_mul_pd(vA, vxi), _mm_mul_pd(vB, vyi)), _mm_sub_pd(vC, vinvZi)); _mm_storeu_pd(ptr_residues, tmp); } } @@ -1260,7 +1281,8 @@ void vpMbtFaceDepthNormal::estimateFeatures(const std::vector &point_clo iter++; } // while ( std::fabs(error - prev_error) > 1e-6 && (iter < max_iter) ) #endif - } else { + } + else { while (std::fabs(error - prev_error) > 1e-6 && (iter < max_iter)) { if (iter == 0) { // Transform the plane equation for the current pose @@ -1372,8 +1394,9 @@ void 
vpMbtFaceDepthNormal::estimatePlaneEquationSVD(const std::vector &p for (unsigned int iter = 0; iter < max_iter && std::fabs(error - prev_error) > 1e-6; iter++) { if (iter != 0) { tukey.MEstimator(residues, weights, 1e-4); - } else { - // Transform the plane equation for the current pose + } + else { + // Transform the plane equation for the current pose m_planeCamera = m_planeObject; m_planeCamera.changeFrame(cMo); @@ -1386,7 +1409,7 @@ void vpMbtFaceDepthNormal::estimatePlaneEquationSVD(const std::vector &p for (size_t i = 0; i < point_cloud_face.size() / 3; i++) { residues[i] = std::fabs(A * point_cloud_face[3 * i] + B * point_cloud_face[3 * i + 1] + C * point_cloud_face[3 * i + 2] + D) / - sqrt(A * A + B * B + C * C); + sqrt(A * A + B * B + C * C); } tukey.MEstimator(residues, weights, 1e-4); @@ -1448,7 +1471,7 @@ void vpMbtFaceDepthNormal::estimatePlaneEquationSVD(const std::vector &p for (size_t i = 0; i < point_cloud_face.size() / 3; i++) { residues[i] = std::fabs(A * point_cloud_face[3 * i] + B * point_cloud_face[3 * i + 1] + C * point_cloud_face[3 * i + 2] + D) / - sqrt(A * A + B * B + C * C); + sqrt(A * A + B * B + C * C); error += weights[i] * residues[i]; } error /= total_w; @@ -1516,18 +1539,10 @@ vpMbtFaceDepthNormal::getFeaturesForDisplay(const vpHomogeneousMatrix &cMo, cons vpMeterPixelConversion::convertPoint(cam, pt_extremity.get_x(), pt_extremity.get_y(), im_extremity); { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {2, // desired normal + std::vector params = { 2, // desired normal im_centroid.get_i(), im_centroid.get_j(), im_extremity.get_i(), - im_extremity.get_j()}; -#else - std::vector params; - params.push_back(2); // desired normal - params.push_back(im_centroid.get_i()); - params.push_back(im_centroid.get_j()); - params.push_back(im_extremity.get_i()); - params.push_back(im_extremity.get_j()); -#endif + im_extremity.get_j() }; + features.push_back(params); } @@ -1554,18 +1569,10 @@ vpMbtFaceDepthNormal::getFeaturesForDisplay(const vpHomogeneousMatrix &cMo, cons vpMeterPixelConversion::convertPoint(cam, pt_extremity.get_x(), pt_extremity.get_y(), im_extremity); { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {3, // normal at current pose + std::vector params = { 3, // normal at current pose im_centroid.get_i(), im_centroid.get_j(), im_extremity.get_i(), - im_extremity.get_j()}; -#else - std::vector params; - params.push_back(3); // normal at current pose - params.push_back(im_centroid.get_i()); - params.push_back(im_centroid.get_j()); - params.push_back(im_extremity.get_i()); - params.push_back(im_extremity.get_j()); -#endif + im_extremity.get_j() }; + features.push_back(params); } } @@ -1598,7 +1605,7 @@ std::vector > vpMbtFaceDepthNormal::getModelForDisplay(unsig ++it) { vpMbtDistanceLine *line = *it; std::vector > lineModels = - line->getModelForDisplay(width, height, cMo, cam, displayFullModel); + line->getModelForDisplay(width, height, cMo, cam, displayFullModel); models.insert(models.end(), lineModels.begin(), lineModels.end()); } } diff --git a/modules/tracker/mbt/src/edge/vpMbtDistanceCircle.cpp b/modules/tracker/mbt/src/edge/vpMbtDistanceCircle.cpp index bd24b7b2be..e85a72d168 100644 --- a/modules/tracker/mbt/src/edge/vpMbtDistanceCircle.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtDistanceCircle.cpp @@ -57,10 +57,9 @@ */ vpMbtDistanceCircle::vpMbtDistanceCircle() : name(), index(0), cam(), me(NULL), wmean(1), featureEllipse(), isTrackedCircle(true), meEllipse(NULL), circle(NULL), - radius(0.), p1(NULL), 
p2(NULL), p3(NULL), L(), error(), nbFeature(0), Reinit(false), hiddenface(NULL), - index_polygon(-1), isvisible(false) -{ -} + radius(0.), p1(NULL), p2(NULL), p3(NULL), L(), error(), nbFeature(0), Reinit(false), hiddenface(NULL), + index_polygon(-1), isvisible(false) +{ } /*! Basic destructor useful to deallocate the memory. @@ -151,7 +150,8 @@ bool vpMbtDistanceCircle::initMovingEdge(const vpImage &I, const try { circle->projection(); - } catch (...) { + } + catch (...) { std::cout << "Problem when projecting circle\n"; return false; } @@ -169,8 +169,9 @@ bool vpMbtDistanceCircle::initMovingEdge(const vpImage &I, const double n20_p, n11_p, n02_p; vpMeterPixelConversion::convertEllipse(cam, *circle, ic, n20_p, n11_p, n02_p); meEllipse->initTracking(I, ic, n20_p, n11_p, n02_p, doNotTrack); - } catch (...) { - // vpTRACE("the circle can't be initialized"); + } + catch (...) { + // vpTRACE("the circle can't be initialized"); return false; } } @@ -188,8 +189,9 @@ void vpMbtDistanceCircle::trackMovingEdge(const vpImage &I, const if (isvisible) { try { meEllipse->track(I); - } catch (...) { - // std::cout << "Track meEllipse failed" << std::endl; + } + catch (...) { + // std::cout << "Track meEllipse failed" << std::endl; meEllipse->reset(); Reinit = true; } @@ -215,7 +217,8 @@ void vpMbtDistanceCircle::updateMovingEdge(const vpImage &I, cons try { circle->projection(); - } catch (...) { + } + catch (...) { std::cout << "Problem when projecting circle\n"; } @@ -225,7 +228,8 @@ void vpMbtDistanceCircle::updateMovingEdge(const vpImage &I, cons double n20_p, n11_p, n02_p; vpMeterPixelConversion::convertEllipse(cam, *circle, ic, n20_p, n11_p, n02_p); meEllipse->updateParameters(I, ic, n20_p, n11_p, n02_p); - } catch (...) { + } + catch (...) { Reinit = true; } nbFeature = (unsigned int)meEllipse->getMeList().size(); @@ -317,16 +321,9 @@ std::vector > vpMbtDistanceCircle::getFeaturesForDisplay() for (std::list::const_iterator it = meEllipse->getMeList().begin(); it != meEllipse->getMeList().end(); ++it) { vpMeSite p_me = *it; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {0, // ME - p_me.get_ifloat(), p_me.get_jfloat(), static_cast(p_me.getState())}; -#else - std::vector params; - params.push_back(0); // ME - params.push_back(p_me.get_ifloat()); - params.push_back(p_me.get_jfloat()); - params.push_back(static_cast(p_me.getState())); -#endif + std::vector params = { 0, // ME + p_me.get_ifloat(), p_me.get_jfloat(), static_cast(p_me.getState()) }; + features.push_back(params); } } @@ -356,7 +353,8 @@ std::vector vpMbtDistanceCircle::getModelForDisplay(const vpHomogeneousM try { circle->projection(); - } catch (...) { + } + catch (...) { std::cout << "Cannot project the circle"; } @@ -412,7 +410,8 @@ void vpMbtDistanceCircle::initInteractionMatrixError() nbFeature = (unsigned int)meEllipse->getMeList().size(); L.resize(nbFeature, 6); error.resize(nbFeature); - } else + } + else nbFeature = 0; } @@ -427,7 +426,8 @@ void vpMbtDistanceCircle::computeInteractionMatrixError(const vpHomogeneousMatri circle->changeFrame(cMo); try { circle->projection(); - } catch (...) { + } + catch (...) 
{ std::cout << "Problem projection circle\n"; } @@ -461,8 +461,8 @@ void vpMbtDistanceCircle::computeInteractionMatrixError(const vpHomogeneousMatri L[j][k] = H[0] * H1[0][k] + H[1] * H1[1][k] + H[2] * H1[2][k] + H[3] * H1[3][k] + H[4] * H1[4][k]; error[j] = n02 * vpMath::sqr(x) + n20 * vpMath::sqr(y) - 2 * n11 * x * y + 2 * (n11 * yg - n02 * xg) * x + - 2 * (n11 * xg - n20 * yg) * y + n02 * vpMath::sqr(xg) + n20 * vpMath::sqr(yg) - 2 * n11 * xg * yg + - 4.0 * vpMath::sqr(n11) - 4.0 * n20 * n02; + 2 * (n11 * xg - n20 * yg) * y + n02 * vpMath::sqr(xg) + n20 * vpMath::sqr(yg) - 2 * n11 * xg * yg + + 4.0 * vpMath::sqr(n11) - 4.0 * n20 * n02; j++; } } diff --git a/modules/tracker/mbt/src/edge/vpMbtDistanceCylinder.cpp b/modules/tracker/mbt/src/edge/vpMbtDistanceCylinder.cpp index 1a0d03810b..9ba91189f4 100644 --- a/modules/tracker/mbt/src/edge/vpMbtDistanceCylinder.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtDistanceCylinder.cpp @@ -61,11 +61,10 @@ */ vpMbtDistanceCylinder::vpMbtDistanceCylinder() : name(), index(0), cam(), me(NULL), wmean1(1), wmean2(1), featureline1(), featureline2(), isTrackedCylinder(true), - meline1(NULL), meline2(NULL), cercle1(NULL), cercle2(NULL), radius(0), p1(NULL), p2(NULL), L(), error(), - nbFeature(0), nbFeaturel1(0), nbFeaturel2(0), Reinit(false), c(NULL), hiddenface(NULL), index_polygon(-1), - isvisible(false) -{ -} + meline1(NULL), meline2(NULL), cercle1(NULL), cercle2(NULL), radius(0), p1(NULL), p2(NULL), L(), error(), + nbFeature(0), nbFeaturel1(0), nbFeaturel2(0), Reinit(false), c(NULL), hiddenface(NULL), index_polygon(-1), + isvisible(false) +{ } /*! Basic destructor useful to deallocate the memory. @@ -192,14 +191,16 @@ bool vpMbtDistanceCylinder::initMovingEdge(const vpImage &I, cons p2->projection(); try { cercle1->projection(); - } catch (...) { - // std::cout<<"Problem when projecting circle 1\n"; + } + catch (...) { + // std::cout<<"Problem when projecting circle 1\n"; return false; } try { cercle2->projection(); - } catch (...) { - // std::cout<<"Problem when projecting circle 2\n"; + } + catch (...) { + // std::cout<<"Problem when projecting circle 2\n"; return false; } c->projection(); @@ -242,14 +243,16 @@ bool vpMbtDistanceCylinder::initMovingEdge(const vpImage &I, cons if (ip11.get_j() < ip12.get_j()) { meline1->jmin = (int)ip11.get_j() - marge; meline1->jmax = (int)ip12.get_j() + marge; - } else { + } + else { meline1->jmin = (int)ip12.get_j() - marge; meline1->jmax = (int)ip11.get_j() + marge; } if (ip11.get_i() < ip12.get_i()) { meline1->imin = (int)ip11.get_i() - marge; meline1->imax = (int)ip12.get_i() + marge; - } else { + } + else { meline1->imin = (int)ip12.get_i() - marge; meline1->imax = (int)ip11.get_i() + marge; } @@ -257,14 +260,16 @@ bool vpMbtDistanceCylinder::initMovingEdge(const vpImage &I, cons if (ip21.get_j() < ip22.get_j()) { meline2->jmin = (int)ip21.get_j() - marge; meline2->jmax = (int)ip22.get_j() + marge; - } else { + } + else { meline2->jmin = (int)ip22.get_j() - marge; meline2->jmax = (int)ip21.get_j() + marge; } if (ip21.get_i() < ip22.get_i()) { meline2->imin = (int)ip21.get_i() - marge; meline2->imax = (int)ip22.get_i() + marge; - } else { + } + else { meline2->imin = (int)ip22.get_i() - marge; meline2->imax = (int)ip21.get_i() + marge; } @@ -296,14 +301,16 @@ bool vpMbtDistanceCylinder::initMovingEdge(const vpImage &I, cons try { meline1->initTracking(I, ip11, ip12, rho1, theta1, doNotTrack); - } catch (...) { - // vpTRACE("the line can't be initialized"); + } + catch (...) 
{ + // vpTRACE("the line can't be initialized"); return false; } try { meline2->initTracking(I, ip21, ip22, rho2, theta2, doNotTrack); - } catch (...) { - // vpTRACE("the line can't be initialized"); + } + catch (...) { + // vpTRACE("the line can't be initialized"); return false; } } @@ -321,15 +328,17 @@ void vpMbtDistanceCylinder::trackMovingEdge(const vpImage &I, con if (isvisible) { try { meline1->track(I); - } catch (...) { - // std::cout << "Track meline1 failed" << std::endl; + } + catch (...) { + // std::cout << "Track meline1 failed" << std::endl; meline1->reset(); Reinit = true; } try { meline2->track(I); - } catch (...) { - // std::cout << "Track meline2 failed" << std::endl; + } + catch (...) { + // std::cout << "Track meline2 failed" << std::endl; meline2->reset(); Reinit = true; } @@ -361,12 +370,14 @@ void vpMbtDistanceCylinder::updateMovingEdge(const vpImage &I, co p2->projection(); try { cercle1->projection(); - } catch (...) { + } + catch (...) { std::cout << "Probleme projection cercle 1\n"; } try { cercle2->projection(); - } catch (...) { + } + catch (...) { std::cout << "Probleme projection cercle 2\n"; } c->projection(); @@ -400,14 +411,16 @@ void vpMbtDistanceCylinder::updateMovingEdge(const vpImage &I, co if (ip11.get_j() < ip12.get_j()) { meline1->jmin = (int)ip11.get_j() - marge; meline1->jmax = (int)ip12.get_j() + marge; - } else { + } + else { meline1->jmin = (int)ip12.get_j() - marge; meline1->jmax = (int)ip11.get_j() + marge; } if (ip11.get_i() < ip12.get_i()) { meline1->imin = (int)ip11.get_i() - marge; meline1->imax = (int)ip12.get_i() + marge; - } else { + } + else { meline1->imin = (int)ip12.get_i() - marge; meline1->imax = (int)ip11.get_i() + marge; } @@ -415,14 +428,16 @@ void vpMbtDistanceCylinder::updateMovingEdge(const vpImage &I, co if (ip21.get_j() < ip22.get_j()) { meline2->jmin = (int)ip21.get_j() - marge; meline2->jmax = (int)ip22.get_j() + marge; - } else { + } + else { meline2->jmin = (int)ip22.get_j() - marge; meline2->jmax = (int)ip21.get_j() + marge; } if (ip21.get_i() < ip22.get_i()) { meline2->imin = (int)ip21.get_i() - marge; meline2->imax = (int)ip22.get_i() + marge; - } else { + } + else { meline2->imin = (int)ip22.get_i() - marge; meline2->imax = (int)ip21.get_i() + marge; } @@ -455,13 +470,15 @@ void vpMbtDistanceCylinder::updateMovingEdge(const vpImage &I, co try { // meline1->updateParameters(I,rho1,theta1); meline1->updateParameters(I, ip11, ip12, rho1, theta1); - } catch (...) { + } + catch (...) { Reinit = true; } try { // meline2->updateParameters(I,rho2,theta2); meline2->updateParameters(I, ip21, ip22, rho2, theta2); - } catch (...) { + } + catch (...) 
{ Reinit = true; } @@ -515,7 +532,7 @@ void vpMbtDistanceCylinder::display(const vpImage &I, const vpHom bool displayFullModel) { std::vector > models = - getModelForDisplay(I.getWidth(), I.getHeight(), cMo, camera, displayFullModel); + getModelForDisplay(I.getWidth(), I.getHeight(), cMo, camera, displayFullModel); for (size_t i = 0; i < models.size(); i++) { vpImagePoint ip1(models[i][1], models[i][2]); @@ -540,7 +557,7 @@ void vpMbtDistanceCylinder::display(const vpImage &I, const vpHomogeneou bool displayFullModel) { std::vector > models = - getModelForDisplay(I.getWidth(), I.getHeight(), cMo, camera, displayFullModel); + getModelForDisplay(I.getWidth(), I.getHeight(), cMo, camera, displayFullModel); for (size_t i = 0; i < models.size(); i++) { vpImagePoint ip1(models[i][1], models[i][2]); @@ -562,16 +579,9 @@ std::vector > vpMbtDistanceCylinder::getFeaturesForDisplay() for (std::list::const_iterator it = meline1->getMeList().begin(); it != meline1->getMeList().end(); ++it) { vpMeSite p_me = *it; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {0, // ME - p_me.get_ifloat(), p_me.get_jfloat(), static_cast(p_me.getState())}; -#else - std::vector params; - params.push_back(0); // ME - params.push_back(p_me.get_ifloat()); - params.push_back(p_me.get_jfloat()); - params.push_back(static_cast(p_me.getState())); -#endif + std::vector params = { 0, // ME + p_me.get_ifloat(), p_me.get_jfloat(), static_cast(p_me.getState()) }; + features.push_back(params); } } @@ -580,16 +590,9 @@ std::vector > vpMbtDistanceCylinder::getFeaturesForDisplay() for (std::list::const_iterator it = meline2->getMeList().begin(); it != meline2->getMeList().end(); ++it) { vpMeSite p_me = *it; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {0, // ME - p_me.get_ifloat(), p_me.get_jfloat(), static_cast(p_me.getState())}; -#else - std::vector params; - params.push_back(0); // ME - params.push_back(p_me.get_ifloat()); - params.push_back(p_me.get_jfloat()); - params.push_back(static_cast(p_me.getState())); -#endif + std::vector params = { 0, // ME + p_me.get_ifloat(), p_me.get_jfloat(), static_cast(p_me.getState()) }; + features.push_back(params); } } @@ -626,12 +629,14 @@ std::vector > vpMbtDistanceCylinder::getModelForDisplay(unsi p2->projection(); try { cercle1->projection(); - } catch (...) { + } + catch (...) { std::cout << "Problem projection circle 1"; } try { cercle2->projection(); - } catch (...) { + } + catch (...) 
{ std::cout << "Problem projection circle 2"; } c->projection(); @@ -659,24 +664,9 @@ std::vector > vpMbtDistanceCylinder::getModelForDisplay(unsi ip21.set_ij(i21, j21); ip22.set_ij(i22, j22); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params1 = {0, ip11.get_i(), ip11.get_j(), ip12.get_i(), ip12.get_j()}; - - std::vector params2 = {0, ip21.get_i(), ip21.get_j(), ip22.get_i(), ip22.get_j()}; -#else - std::vector params1, params2; - params1.push_back(0); - params1.push_back(ip11.get_i()); - params1.push_back(ip11.get_j()); - params1.push_back(ip12.get_i()); - params1.push_back(ip12.get_j()); - - params2.push_back(0); - params2.push_back(ip11.get_i()); - params2.push_back(ip11.get_j()); - params2.push_back(ip12.get_i()); - params2.push_back(ip12.get_j()); -#endif + std::vector params1 = { 0, ip11.get_i(), ip11.get_j(), ip12.get_i(), ip12.get_j() }; + + std::vector params2 = { 0, ip21.get_i(), ip21.get_j(), ip22.get_i(), ip22.get_j() }; models.push_back(params1); models.push_back(params2); @@ -730,7 +720,8 @@ void vpMbtDistanceCylinder::initInteractionMatrixError() nbFeature = nbFeaturel1 + nbFeaturel2; L.resize(nbFeature, 6); error.resize(nbFeature); - } else { + } + else { nbFeature = 0; nbFeaturel1 = 0; nbFeaturel2 = 0; @@ -752,12 +743,14 @@ void vpMbtDistanceCylinder::computeInteractionMatrixError(const vpHomogeneousMat cercle1->changeFrame(cMo); try { cercle1->projection(); - } catch (...) { + } + catch (...) { std::cout << "Problem projection circle 1\n"; } try { cercle2->projection(); - } catch (...) { + } + catch (...) { std::cout << "Problem projection circle 2\n"; } diff --git a/modules/tracker/mbt/src/edge/vpMbtDistanceLine.cpp b/modules/tracker/mbt/src/edge/vpMbtDistanceLine.cpp index 80083734bc..92651b06e4 100644 --- a/modules/tracker/mbt/src/edge/vpMbtDistanceLine.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtDistanceLine.cpp @@ -56,10 +56,9 @@ void buildLine(vpPoint &P1, vpPoint &P2, vpPoint &P3, vpPoint &P4, vpLine &L); */ vpMbtDistanceLine::vpMbtDistanceLine() : name(), index(0), cam(), me(NULL), isTrackedLine(true), isTrackedLineWithVisibility(true), wmean(1), featureline(), - poly(), useScanLine(false), meline(), line(NULL), p1(NULL), p2(NULL), L(), error(), nbFeature(), nbFeatureTotal(0), - Reinit(false), hiddenface(NULL), Lindex_polygon(), Lindex_polygon_tracked(), isvisible(false) -{ -} + poly(), useScanLine(false), meline(), line(NULL), p1(NULL), p2(NULL), L(), error(), nbFeature(), nbFeatureTotal(0), + Reinit(false), hiddenface(NULL), Lindex_polygon(), Lindex_polygon_tracked(), isvisible(false) +{ } /*! Basic destructor useful to deallocate the memory. @@ -199,7 +198,8 @@ void vpMbtDistanceLine::buildFrom(vpPoint &_p1, vpPoint &_p2, vpUniRand &rand_ge vpPoint P3(V3[0], V3[1], V3[2]); vpPoint P4(V4[0], V4[1], V4[2]); buildLine(*p1, *p2, P3, P4, *line); - } else { + } + else { vpPoint P3(V1[0], V1[1], V1[2]); vpPoint P4(V2[0], V2[1], V2[2]); buildLine(*p1, *p2, P3, P4, *line); @@ -328,7 +328,8 @@ bool vpMbtDistanceLine::initMovingEdge(const vpImage &I, const vp if (useScanLine) { hiddenface->computeScanLineQuery(poly.polyClipped[0].first, poly.polyClipped[1].first, linesLst); - } else { + } + else { linesLst.push_back(std::make_pair(poly.polyClipped[0].first, poly.polyClipped[1].first)); } @@ -339,7 +340,8 @@ bool vpMbtDistanceLine::initMovingEdge(const vpImage &I, const vp line->changeFrame(cMo); try { line->projection(); - } catch (...) { + } + catch (...) 
{ isvisible = false; return false; } @@ -378,14 +380,16 @@ bool vpMbtDistanceLine::initMovingEdge(const vpImage &I, const vp if (ip1.get_j() < ip2.get_j()) { melinePt->jmin = (int)ip1.get_j() - marge; melinePt->jmax = (int)ip2.get_j() + marge; - } else { + } + else { melinePt->jmin = (int)ip2.get_j() - marge; melinePt->jmax = (int)ip1.get_j() + marge; } if (ip1.get_i() < ip2.get_i()) { melinePt->imin = (int)ip1.get_i() - marge; melinePt->imax = (int)ip2.get_i() + marge; - } else { + } + else { melinePt->imin = (int)ip2.get_i() - marge; melinePt->imax = (int)ip1.get_i() + marge; } @@ -395,13 +399,15 @@ bool vpMbtDistanceLine::initMovingEdge(const vpImage &I, const vp meline.push_back(melinePt); nbFeature.push_back((unsigned int)melinePt->getMeList().size()); nbFeatureTotal += nbFeature.back(); - } catch (...) { + } + catch (...) { delete melinePt; isvisible = false; return false; } } - } else { + } + else { isvisible = false; } } @@ -425,7 +431,8 @@ void vpMbtDistanceLine::trackMovingEdge(const vpImage &I) nbFeature.push_back((unsigned int)meline[i]->getMeList().size()); nbFeatureTotal += (unsigned int)meline[i]->getMeList().size(); } - } catch (...) { + } + catch (...) { for (size_t i = 0; i < meline.size(); i++) { if (meline[i] != NULL) delete meline[i]; @@ -463,7 +470,8 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const if (useScanLine) { hiddenface->computeScanLineQuery(poly.polyClipped[0].first, poly.polyClipped[1].first, linesLst); - } else { + } + else { linesLst.push_back(std::make_pair(poly.polyClipped[0].first, poly.polyClipped[1].first)); } @@ -478,11 +486,13 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const nbFeatureTotal = 0; isvisible = false; Reinit = true; - } else { + } + else { line->changeFrame(cMo); try { line->projection(); - } catch (...) { + } + catch (...) { for (size_t j = 0; j < meline.size(); j++) { if (meline[j] != NULL) delete meline[j]; @@ -525,14 +535,16 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const if (ip1.get_j() < ip2.get_j()) { meline[i]->jmin = (int)ip1.get_j() - marge; meline[i]->jmax = (int)ip2.get_j() + marge; - } else { + } + else { meline[i]->jmin = (int)ip2.get_j() - marge; meline[i]->jmax = (int)ip1.get_j() + marge; } if (ip1.get_i() < ip2.get_i()) { meline[i]->imin = (int)ip1.get_i() - marge; meline[i]->imax = (int)ip2.get_i() + marge; - } else { + } + else { meline[i]->imin = (int)ip2.get_i() - marge; meline[i]->imax = (int)ip1.get_i() + marge; } @@ -541,7 +553,8 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const nbFeature[i] = (unsigned int)meline[i]->getMeList().size(); nbFeatureTotal += nbFeature[i]; } - } catch (...) { + } + catch (...) 
{ for (size_t j = 0; j < meline.size(); j++) { if (meline[j] != NULL) delete meline[j]; @@ -554,7 +567,8 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const Reinit = true; } } - } else { + } + else { for (size_t i = 0; i < meline.size(); i++) { if (meline[i] != NULL) delete meline[i]; @@ -612,7 +626,7 @@ void vpMbtDistanceLine::display(const vpImage &I, const vpHomogen bool displayFullModel) { std::vector > models = - getModelForDisplay(I.getWidth(), I.getHeight(), cMo, camera, displayFullModel); + getModelForDisplay(I.getWidth(), I.getHeight(), cMo, camera, displayFullModel); for (size_t i = 0; i < models.size(); i++) { vpImagePoint ip1(models[i][1], models[i][2]); @@ -637,7 +651,7 @@ void vpMbtDistanceLine::display(const vpImage &I, const vpHomogeneousMat bool displayFullModel) { std::vector > models = - getModelForDisplay(I.getWidth(), I.getHeight(), cMo, camera, displayFullModel); + getModelForDisplay(I.getWidth(), I.getHeight(), cMo, camera, displayFullModel); for (size_t i = 0; i < models.size(); i++) { vpImagePoint ip1(models[i][1], models[i][2]); @@ -691,16 +705,9 @@ std::vector > vpMbtDistanceLine::getFeaturesForDisplay() if (me_l != NULL) { for (std::list::const_iterator it = me_l->getMeList().begin(); it != me_l->getMeList().end(); ++it) { vpMeSite p_me_l = *it; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {0, // ME - p_me_l.get_ifloat(), p_me_l.get_jfloat(), static_cast(p_me_l.getState())}; -#else - std::vector params; - params.push_back(0); // ME - params.push_back(p_me_l.get_ifloat()); - params.push_back(p_me_l.get_jfloat()); - params.push_back(static_cast(p_me_l.getState())); -#endif + std::vector params = { 0, // ME + p_me_l.get_ifloat(), p_me_l.get_jfloat(), static_cast(p_me_l.getState()) }; + features.push_back(params); } } @@ -749,7 +756,8 @@ std::vector > vpMbtDistanceLine::getModelForDisplay(unsigned std::vector > linesLst; if (useScanLine && !displayFullModel) { hiddenface->computeScanLineQuery(poly.polyClipped[0].first, poly.polyClipped[1].first, linesLst, true); - } else { + } + else { linesLst.push_back(std::make_pair(poly.polyClipped[0].first, poly.polyClipped[1].first)); } @@ -760,17 +768,9 @@ std::vector > vpMbtDistanceLine::getModelForDisplay(unsigned vpMeterPixelConversion::convertPoint(camera, linesLst[i].first.get_x(), linesLst[i].first.get_y(), ip1); vpMeterPixelConversion::convertPoint(camera, linesLst[i].second.get_x(), linesLst[i].second.get_y(), ip2); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {0, // 0 for line parameters - ip1.get_i(), ip1.get_j(), ip2.get_i(), ip2.get_j()}; -#else - std::vector params; - params.push_back(0); // 0 for line parameters - params.push_back(ip1.get_i()); - params.push_back(ip1.get_j()); - params.push_back(ip2.get_i()); - params.push_back(ip2.get_j()); -#endif + std::vector params = { 0, // 0 for line parameters + ip1.get_i(), ip1.get_j(), ip2.get_i(), ip2.get_j() }; + models.push_back(params); } } @@ -787,7 +787,8 @@ void vpMbtDistanceLine::initInteractionMatrixError() if (isvisible) { L.resize(nbFeatureTotal, 6); error.resize(nbFeatureTotal); - } else { + } + else { for (size_t i = 0; i < meline.size(); i++) { nbFeature[i] = 0; // To be consistent with nbFeature[i] = 0 @@ -850,9 +851,10 @@ void vpMbtDistanceLine::computeInteractionMatrixError(const vpHomogeneousMatrix j++; } } - } catch (...) { - // Handle potential exception: due to a degenerate case: the image of the straight line is a point! 
- // Set the corresponding interaction matrix part to zero + } + catch (...) { + // Handle potential exception: due to a degenerate case: the image of the straight line is a point! + // Set the corresponding interaction matrix part to zero unsigned int j = 0; for (size_t i = 0; i < meline.size(); i++) { for (std::list::const_iterator it = meline[i]->getMeList().begin(); @@ -894,8 +896,8 @@ bool vpMbtDistanceLine::closeToImageBorder(const vpImage &I, cons return true; } - if (((unsigned int)i_ > (I.getHeight() - threshold)) || (unsigned int)i_ < threshold || - ((unsigned int)j_ > (I.getWidth() - threshold)) || (unsigned int)j_ < threshold) { + if (((unsigned int)i_ >(I.getHeight() - threshold)) || (unsigned int)i_ < threshold || + ((unsigned int)j_ >(I.getWidth() - threshold)) || (unsigned int)j_ < threshold) { return true; } } diff --git a/modules/tracker/mbt/src/klt/vpMbtDistanceKltCylinder.cpp b/modules/tracker/mbt/src/klt/vpMbtDistanceKltCylinder.cpp index adc7cd4038..68ae531fc1 100644 --- a/modules/tracker/mbt/src/klt/vpMbtDistanceKltCylinder.cpp +++ b/modules/tracker/mbt/src/klt/vpMbtDistanceKltCylinder.cpp @@ -53,16 +53,15 @@ */ vpMbtDistanceKltCylinder::vpMbtDistanceKltCylinder() : c0Mo(), p1Ext(), p2Ext(), cylinder(), circle1(), circle2(), initPoints(), initPoints3D(), curPoints(), - curPointsInd(), nbPointsCur(0), nbPointsInit(0), minNbPoint(4), enoughPoints(false), cam(), - isTrackedKltCylinder(true), listIndicesCylinderBBox(), hiddenface(NULL), useScanLine(false) -{ -} + curPointsInd(), nbPointsCur(0), nbPointsInit(0), minNbPoint(4), enoughPoints(false), cam(), + isTrackedKltCylinder(true), listIndicesCylinderBBox(), hiddenface(NULL), useScanLine(false) +{ } /*! Basic destructor. */ -vpMbtDistanceKltCylinder::~vpMbtDistanceKltCylinder() {} +vpMbtDistanceKltCylinder::~vpMbtDistanceKltCylinder() { } void vpMbtDistanceKltCylinder::buildFrom(const vpPoint &p1, const vpPoint &p2, const double &r) { @@ -130,7 +129,8 @@ void vpMbtDistanceKltCylinder::init(const vpKltOpencv &_tracker, const vpHomogen break; } } - } else { + } + else { std::vector roi; for (unsigned int kc = 0; kc < listIndicesCylinderBBox.size(); kc++) { hiddenface->getPolygon()[(size_t)listIndicesCylinderBBox[kc]]->getRoiClipped(cam, roi); @@ -243,7 +243,8 @@ void vpMbtDistanceKltCylinder::removeOutliers(const vpColVector &_w, const doubl tmp[iter->first] = vpImagePoint(iter->second.get_i(), iter->second.get_j()); tmp2[iter->first] = curPointsInd[iter->first]; nbPointsCur++; - } else { + } + else { nbSupp++; initPoints.erase(iter->first); } @@ -321,7 +322,8 @@ void vpMbtDistanceKltCylinder::computeInteractionMatrixAndResidu(const vpHomogen _R[2 * index_] = (x0_transform - x_cur); _R[2 * index_ + 1] = (y0_transform - y_cur); index_++; - } else { + } + else { double invZ = 1.0 / Z; _J[2 * index_][0] = -invZ; @@ -424,7 +426,8 @@ void vpMbtDistanceKltCylinder::updateMask( for (size_t i = 0; i < solution[index_max].size(); i++) { roi_offset.push_back(vpImagePoint((double)(solution[index_max][i].Y), (double)(solution[index_max][i].X))); } - } else { + } + else { roi_offset = roi; } @@ -472,7 +475,8 @@ void vpMbtDistanceKltCylinder::updateMask( vpPolygon::isInside(roi, i_d - shiftBorder_d, j_d - shiftBorder_d)) { mask.at(i, j) = nb; } - } else { + } + else { if (vpPolygon::isInside(roi, i, j)) { mask.at(i, j) = nb; } @@ -579,18 +583,9 @@ std::vector > vpMbtDistanceKltCylinder::getFeaturesForDispla iP2.set_i(vpMath::round(iP.get_i() + 7)); iP2.set_j(vpMath::round(iP.get_j() + 7)); -#if (VISP_CXX_STANDARD >= 
VISP_CXX_STANDARD_11) - std::vector params = {1, // KLT - iP.get_i(), iP.get_j(), iP2.get_i(), iP2.get_j(), static_cast(id)}; -#else - std::vector params; - params.push_back(1); // KLT - params.push_back(iP.get_i()); - params.push_back(iP.get_j()); - params.push_back(iP2.get_i()); - params.push_back(iP2.get_j()); - params.push_back(static_cast(id)); -#endif + std::vector params = { 1, // KLT + iP.get_i(), iP.get_j(), iP2.get_i(), iP2.get_j(), static_cast(id) }; + features.push_back(params); } @@ -619,12 +614,14 @@ std::vector > vpMbtDistanceKltCylinder::getModelForDisplay(c try { circle1.projection(); - } catch (...) { + } + catch (...) { std::cout << "Problem projection circle 1"; } try { circle2.projection(); - } catch (...) { + } + catch (...) { std::cout << "Problem projection circle 2"; } @@ -653,27 +650,13 @@ std::vector > vpMbtDistanceKltCylinder::getModelForDisplay(c ip21.set_ij(i21, j21); ip22.set_ij(i22, j22); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params1 = {0, // line parameters - ip11.get_i(), ip11.get_j(), ip12.get_i(), ip12.get_j()}; + std::vector params1 = { 0, // line parameters + ip11.get_i(), ip11.get_j(), ip12.get_i(), ip12.get_j() }; models.push_back(params1); - std::vector params2 = {0, // line parameters - ip21.get_i(), ip21.get_j(), ip22.get_i(), ip22.get_j()}; -#else - std::vector params1, params2; - params1.push_back(0); // line parameters - params1.push_back(ip11.get_i()); - params1.push_back(ip11.get_j()); - params1.push_back(ip12.get_i()); - params1.push_back(ip12.get_j()); - - params2.push_back(0); // line parameters - params2.push_back(ip21.get_i()); - params2.push_back(ip21.get_j()); - params2.push_back(ip22.get_i()); - params2.push_back(ip22.get_j()); -#endif + std::vector params2 = { 0, // line parameters + ip21.get_i(), ip21.get_j(), ip22.get_i(), ip22.get_j() }; + models.push_back(params1); models.push_back(params2); } @@ -701,5 +684,5 @@ double vpMbtDistanceKltCylinder::computeZ(const double &x, const double &y) #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: // libvisp_mbt.a(vpMbtDistanceKltCylinder.cpp.o) has no symbols -void dummy_vpMbtDistanceKltCylinder(){}; +void dummy_vpMbtDistanceKltCylinder() { }; #endif diff --git a/modules/tracker/mbt/src/klt/vpMbtDistanceKltPoints.cpp b/modules/tracker/mbt/src/klt/vpMbtDistanceKltPoints.cpp index 56bef81a4e..9c67083b1a 100644 --- a/modules/tracker/mbt/src/klt/vpMbtDistanceKltPoints.cpp +++ b/modules/tracker/mbt/src/klt/vpMbtDistanceKltPoints.cpp @@ -53,17 +53,16 @@ */ vpMbtDistanceKltPoints::vpMbtDistanceKltPoints() : H(), N(), N_cur(), invd0(1.), cRc0_0n(), initPoints(std::map()), - curPoints(std::map()), curPointsInd(std::map()), nbPointsCur(0), nbPointsInit(0), - minNbPoint(4), enoughPoints(false), dt(1.), d0(1.), cam(), isTrackedKltPoints(true), polygon(NULL), - hiddenface(NULL), useScanLine(false) -{ -} + curPoints(std::map()), curPointsInd(std::map()), nbPointsCur(0), nbPointsInit(0), + minNbPoint(4), enoughPoints(false), dt(1.), d0(1.), cam(), isTrackedKltPoints(true), polygon(NULL), + hiddenface(NULL), useScanLine(false) +{ } /*! Basic destructor. */ -vpMbtDistanceKltPoints::~vpMbtDistanceKltPoints() {} +vpMbtDistanceKltPoints::~vpMbtDistanceKltPoints() { } /*! Initialise the face to track. 
All the points in the map, representing all @@ -100,7 +99,8 @@ void vpMbtDistanceKltPoints::init(const vpKltOpencv &_tracker, const vpImagegetMbScanLineRenderer().getPrimitiveIDs()[(unsigned int)y_tmp][(unsigned int)x_tmp] == polygon->getIndex()) add = true; - } else if (vpPolygon::isInside(roi, y_tmp, x_tmp)) { + } + else if (vpPolygon::isInside(roi, y_tmp, x_tmp)) { add = true; } } @@ -205,7 +205,7 @@ void vpMbtDistanceKltPoints::computeInteractionMatrixAndResidu(vpColVector &_R, vpPixelMeterConversion::convertPoint(cam, iP0, x0, y0); double x0_transform, - y0_transform; // equivalent x and y in the first image (reference) + y0_transform; // equivalent x and y in the first image (reference) computeP_mu_t(x0, y0, x0_transform, y0_transform, H); double invZ = compute_1_over_Z(x_cur, y_cur); @@ -400,7 +400,8 @@ void vpMbtDistanceKltPoints::updateMask( for (size_t i = 0; i < solution[index_max].size(); i++) { roi_offset.push_back(vpImagePoint((double)(solution[index_max][i].Y), (double)(solution[index_max][i].X))); } - } else { + } + else { roi_offset = roi; } @@ -447,7 +448,8 @@ void vpMbtDistanceKltPoints::updateMask( vpPolygon::isInside(roi, i_d - shiftBorder_d, j_d - shiftBorder_d)) { mask.at(i, j) = nb; } - } else { + } + else { if (vpPolygon::isInside(roi, i, j)) { mask.at(i, j) = nb; } @@ -480,7 +482,8 @@ void vpMbtDistanceKltPoints::removeOutliers(const vpColVector &_w, const double tmp[iter->first] = vpImagePoint(iter->second.get_i(), iter->second.get_j()); tmp2[iter->first] = curPointsInd[iter->first]; nbPointsCur++; - } else { + } + else { nbSupp++; initPoints.erase(iter->first); } @@ -593,19 +596,8 @@ std::vector > vpMbtDistanceKltPoints::getFeaturesForDisplay( vpImagePoint iP2; iP2.set_i(vpMath::round(iP.get_i() + 7)); iP2.set_j(vpMath::round(iP.get_j() + 7)); - -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {1, // KLT - iP.get_i(), iP.get_j(), iP2.get_i(), iP2.get_j(), static_cast(id)}; -#else - std::vector params; - params.push_back(1); // KLT - params.push_back(iP.get_i()); - params.push_back(iP.get_j()); - params.push_back(iP2.get_i()); - params.push_back(iP2.get_j()); - params.push_back(static_cast(id)); -#endif + std::vector params = { 1, // KLT + iP.get_i(), iP.get_j(), iP2.get_i(), iP2.get_j(), static_cast(id) }; features.push_back(params); } @@ -650,18 +642,8 @@ std::vector > vpMbtDistanceKltPoints::getModelForDisplay(con linesLst[i].second.project(); vpMeterPixelConversion::convertPoint(camera, linesLst[i].first.get_x(), linesLst[i].first.get_y(), ip1); vpMeterPixelConversion::convertPoint(camera, linesLst[i].second.get_x(), linesLst[i].second.get_y(), ip2); - -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::vector params = {0, // 0 for line parameters - ip1.get_i(), ip1.get_j(), ip2.get_i(), ip2.get_j()}; -#else - std::vector params; - params.push_back(0); // 0 for line parameters - params.push_back(ip1.get_i()); - params.push_back(ip1.get_j()); - params.push_back(ip2.get_i()); - params.push_back(ip2.get_j()); -#endif + std::vector params = { 0, // 0 for line parameters + ip1.get_i(), ip1.get_j(), ip2.get_i(), ip2.get_j() }; models.push_back(params); } } @@ -674,5 +656,5 @@ std::vector > vpMbtDistanceKltPoints::getModelForDisplay(con #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_mbt.a(vpMbtDistanceKltPoints.cpp.o) // has no symbols -void dummy_vpMbtDistanceKltPoints(){}; +void dummy_vpMbtDistanceKltPoints() { }; #endif diff --git a/modules/tracker/mbt/src/vpMbGenericTracker.cpp 
b/modules/tracker/mbt/src/vpMbGenericTracker.cpp index 71b8ee65b0..d4b851ccef 100644 --- a/modules/tracker/mbt/src/vpMbGenericTracker.cpp +++ b/modules/tracker/mbt/src/vpMbGenericTracker.cpp @@ -7105,7 +7105,7 @@ void vpMbGenericTracker::TrackerWrapper::testTracking() } void vpMbGenericTracker::TrackerWrapper::track(const vpImage & -#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_PCL) I #endif ) @@ -7119,7 +7119,7 @@ void vpMbGenericTracker::TrackerWrapper::track(const vpImage & return; } -#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_PCL) track(&I, nullptr); #endif } diff --git a/modules/tracker/mbt/src/vpMbTracker.cpp b/modules/tracker/mbt/src/vpMbTracker.cpp index 0aa00ffc5f..b4beb1e3a6 100644 --- a/modules/tracker/mbt/src/vpMbTracker.cpp +++ b/modules/tracker/mbt/src/vpMbTracker.cpp @@ -90,22 +90,19 @@ #include #endif -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #include -#endif #ifndef DOXYGEN_SHOULD_SKIP_THIS namespace { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - std::mutex g_mutex_cout; -#endif +std::mutex g_mutex_cout; /*! Structure to store info about segment in CAO model files. */ -struct SegmentInfo { - SegmentInfo() : extremities(), name(), useLod(false), minLineLengthThresh(0.) {} +struct SegmentInfo +{ + SegmentInfo() : extremities(), name(), useLod(false), minLineLengthThresh(0.) { } std::vector extremities; std::string name; @@ -117,11 +114,11 @@ struct SegmentInfo { Structure to store info about a polygon face represented by a vpPolygon and by a list of vpPoint representing the corners of the polygon face in 3D. */ -struct PolygonFaceInfo { +struct PolygonFaceInfo +{ PolygonFaceInfo(double dist, const vpPolygon &poly, const std::vector &corners) : distanceToCamera(dist), polygon(poly), faceCorners(corners) - { - } + { } bool operator<(const PolygonFaceInfo &pfi) const { return distanceToCamera < pfi.distanceToCamera; } @@ -154,16 +151,19 @@ std::istream &safeGetline(std::istream &is, std::string &t) int c = sb->sbumpc(); if (c == '\n') { return is; - } else if (c == '\r') { + } + else if (c == '\r') { if (sb->sgetc() == '\n') sb->sbumpc(); return is; - } else if (c == std::streambuf::traits_type::eof()) { - // Also handle the case when the last line has no line ending + } + else if (c == std::streambuf::traits_type::eof()) { + // Also handle the case when the last line has no line ending if (t.empty()) is.setstate(std::ios::eofbit); return is; - } else { // default case + } + else { // default case t += (char)c; } } @@ -178,18 +178,18 @@ std::istream &safeGetline(std::istream &is, std::string &t) */ vpMbTracker::vpMbTracker() : m_cam(), m_cMo(), oJo(6, 6), m_isoJoIdentity(true), modelFileName(), modelInitialised(false), poseSavingFilename(), - computeCovariance(false), covarianceMatrix(), computeProjError(false), projectionError(90.0), - displayFeatures(false), m_optimizationMethod(vpMbTracker::GAUSS_NEWTON_OPT), faces(), angleAppears(vpMath::rad(89)), - angleDisappears(vpMath::rad(89)), distNearClip(0.001), distFarClip(100), clippingFlag(vpPolygon3D::NO_CLIPPING), - useOgre(false), ogreShowConfigDialog(false), useScanLine(false), nbPoints(0), nbLines(0), nbPolygonLines(0), - nbPolygonPoints(0), nbCylinders(0), nbCircles(0), useLodGeneral(false), applyLodSettingInConfig(false), - minLineLengthThresholdGeneral(50.0), minPolygonAreaThresholdGeneral(2500.0), mapOfParameterNames(), - m_computeInteraction(true), m_lambda(1.0), m_maxIter(30), 
m_stopCriteriaEpsilon(1e-8), m_initialMu(0.01), - m_projectionErrorLines(), m_projectionErrorCylinders(), m_projectionErrorCircles(), m_projectionErrorFaces(), - m_projectionErrorOgreShowConfigDialog(false), m_projectionErrorMe(), m_projectionErrorKernelSize(2), m_SobelX(5, 5), - m_SobelY(5, 5), m_projectionErrorDisplay(false), m_projectionErrorDisplayLength(20), - m_projectionErrorDisplayThickness(1), m_projectionErrorCam(), m_mask(NULL), m_I(), m_sodb_init_called(false), - m_rand() + computeCovariance(false), covarianceMatrix(), computeProjError(false), projectionError(90.0), + displayFeatures(false), m_optimizationMethod(vpMbTracker::GAUSS_NEWTON_OPT), faces(), angleAppears(vpMath::rad(89)), + angleDisappears(vpMath::rad(89)), distNearClip(0.001), distFarClip(100), clippingFlag(vpPolygon3D::NO_CLIPPING), + useOgre(false), ogreShowConfigDialog(false), useScanLine(false), nbPoints(0), nbLines(0), nbPolygonLines(0), + nbPolygonPoints(0), nbCylinders(0), nbCircles(0), useLodGeneral(false), applyLodSettingInConfig(false), + minLineLengthThresholdGeneral(50.0), minPolygonAreaThresholdGeneral(2500.0), mapOfParameterNames(), + m_computeInteraction(true), m_lambda(1.0), m_maxIter(30), m_stopCriteriaEpsilon(1e-8), m_initialMu(0.01), + m_projectionErrorLines(), m_projectionErrorCylinders(), m_projectionErrorCircles(), m_projectionErrorFaces(), + m_projectionErrorOgreShowConfigDialog(false), m_projectionErrorMe(), m_projectionErrorKernelSize(2), m_SobelX(5, 5), + m_SobelY(5, 5), m_projectionErrorDisplay(false), m_projectionErrorDisplayLength(20), + m_projectionErrorDisplayThickness(1), m_projectionErrorCam(), m_mask(NULL), m_I(), m_sodb_init_called(false), + m_rand() { oJo.eye(); // Map used to parse additional information in CAO model files, @@ -261,14 +261,16 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage finitpos.open(str_pose.c_str(), std::ios::in); ss << str_pose; - } else { + } + else { finitpos.open(poseSavingFilename.c_str(), std::ios::in); ss << poseSavingFilename; } if (finitpos.fail()) { std::cout << "Cannot read " << ss.str() << std::endl << "cMo set to identity" << std::endl; last_cMo.eye(); - } else { + } + else { for (unsigned int i = 0; i < 6; i += 1) { finitpos >> init_pos[i]; } @@ -283,7 +285,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage display(*I, last_cMo, m_cam, vpColor::green, 1, true); vpDisplay::displayFrame(*I, last_cMo, m_cam, 0.05, vpColor::green); vpDisplay::flush(*I); - } else { + } + else { vpDisplay::display(*I_color); display(*I_color, last_cMo, m_cam, vpColor::green, 1, true); vpDisplay::displayFrame(*I_color, last_cMo, m_cam, 0.05, vpColor::green); @@ -300,7 +303,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage while (!vpDisplay::getClick(*I, ip, button)) { } - } else { + } + else { vpDisplay::displayText(*I_color, 15, 10, "left click to validate, right click to modify initial pose", vpColor::red); @@ -313,13 +317,15 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (!finitpos.fail() && button == vpMouseButton::button1) { m_cMo = last_cMo; - } else { + } + else { vpDisplay *d_help = NULL; if (I) { vpDisplay::display(*I); vpDisplay::flush(*I); - } else { + } + else { vpDisplay::display(*I_color); vpDisplay::flush(*I_color); } @@ -337,17 +343,14 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage // X Y Z if (pos != std::string::npos) { ss << initFile; - } else { + } + else { ss << initFile; ss << ".init"; } std::cout << "Load 3D points from: " << ss.str() << 
std::endl; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) finit.open(ss.str()); -#else - finit.open(ss.str().c_str()); -#endif if (finit.fail()) { std::cout << "Cannot read " << ss.str() << std::endl; throw vpException(vpException::ioError, "Cannot open model-based tracker init file %s", ss.str().c_str()); @@ -357,7 +360,7 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage // Display window creation and initialisation try { if (displayHelp) { - const std::string imgExtVec[] = {".ppm", ".pgm", ".jpg", ".jpeg", ".png"}; + const std::string imgExtVec[] = { ".ppm", ".pgm", ".jpg", ".jpeg", ".png" }; std::string dispF; bool foundHelpImg = false; if (pos != std::string::npos) { @@ -365,7 +368,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage dispF = initFile.substr(0, pos) + imgExtVec[i]; foundHelpImg = vpIoTools::checkFilename(dispF); } - } else { + } + else { for (size_t i = 0; i < 5 && !foundHelpImg; i++) { dispF = initFile + imgExtVec[i]; foundHelpImg = vpIoTools::checkFilename(dispF); @@ -394,7 +398,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage #endif } } - } catch (...) { + } + catch (...) { if (d_help != NULL) { delete d_help; d_help = NULL; @@ -428,7 +433,7 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage vpColVector pt_3d_tf = T * pt_3d; std::cout << "Point " << i + 1 << " with 3D coordinates: " << pt_3d_tf[0] << " " << pt_3d_tf[1] << " " - << pt_3d_tf[2] << std::endl; + << pt_3d_tf[2] << std::endl; P[i].setWorldCoordinates(pt_3d_tf[0], pt_3d_tf[1], pt_3d_tf[2]); // (X,Y,Z) } @@ -448,7 +453,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage vpDisplay::displayCross(*I, mem_ip[k], 10, vpColor::green, 2); } vpDisplay::flush(*I); - } else { + } + else { vpDisplay::display(*I_color); vpDisplay::displayText(*I_color, 15, 10, text.str(), vpColor::red); for (unsigned int k = 0; k < mem_ip.size(); k++) { @@ -463,7 +469,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage vpDisplay::getClick(*I, ip); mem_ip.push_back(ip); vpDisplay::flush(*I); - } else { + } + else { vpDisplay::getClick(*I_color, ip); mem_ip.push_back(ip); vpDisplay::flush(*I_color); @@ -479,7 +486,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (I) { vpDisplay::flush(*I); vpDisplay::display(*I); - } else { + } + else { vpDisplay::flush(*I_color); vpDisplay::display(*I_color); } @@ -498,12 +506,14 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (button == vpMouseButton::button1) { isWellInit = true; - } else { + } + else { pose.clearPoint(); vpDisplay::display(*I); vpDisplay::flush(*I); } - } else { + } + else { display(*I_color, m_cMo, m_cam, vpColor::green, 1, true); vpDisplay::displayText(*I_color, 15, 10, "left click to validate, right click to re initialize object", vpColor::red); @@ -516,7 +526,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (button == vpMouseButton::button1) { isWellInit = true; - } else { + } + else { pose.clearPoint(); vpDisplay::display(*I_color); vpDisplay::flush(*I_color); @@ -630,7 +641,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (I) { vpDisplay::display(*I); vpDisplay::flush(*I); - } else { + } + else { vpDisplay::display(*I_color); vpDisplay::flush(*I_color); } @@ -661,14 +673,16 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (I) { d_help->init(Iref, I->display->getWindowXPosition() + (int)I->getWidth() + 80, I->display->getWindowYPosition(), 
"Where to initialize..."); - } else { + } + else { d_help->init(Iref, I_color->display->getWindowXPosition() + (int)I_color->getWidth() + 80, I_color->display->getWindowYPosition(), "Where to initialize..."); } vpDisplay::display(Iref); vpDisplay::flush(Iref); #endif - } catch (...) { + } + catch (...) { if (d_help != NULL) { delete d_help; d_help = NULL; @@ -689,7 +703,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage vpDisplay::getClick(*I, ip); vpDisplay::displayCross(*I, ip, 5, vpColor::green); vpDisplay::flush(*I); - } else { + } + else { vpDisplay::getClick(*I_color, ip); vpDisplay::displayCross(*I_color, ip, 5, vpColor::green); vpDisplay::flush(*I_color); @@ -702,14 +717,16 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (I) { vpDisplay::displayPoint(*I, ip, vpColor::green); // display target point - } else { + } + else { vpDisplay::displayPoint(*I_color, ip, vpColor::green); // display target point } pose.addPoint(P[i]); // and added to the pose computation point list } if (I) { vpDisplay::flush(*I); - } else { + } + else { vpDisplay::flush(*I_color); } @@ -727,12 +744,14 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (button == vpMouseButton::button1) { isWellInit = true; - } else { + } + else { pose.clearPoint(); vpDisplay::display(*I); vpDisplay::flush(*I); } - } else { + } + else { display(*I_color, m_cMo, m_cam, vpColor::green, 1, true); vpDisplay::displayText(*I_color, 15, 10, "left click to validate, right click to re initialize object", vpColor::red); @@ -745,7 +764,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (button == vpMouseButton::button1) { isWellInit = true; - } else { + } + else { pose.clearPoint(); vpDisplay::display(*I_color); vpDisplay::flush(*I_color); @@ -755,7 +775,8 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage if (I) { vpDisplay::displayFrame(*I, m_cMo, m_cam, 0.05, vpColor::red); - } else { + } + else { vpDisplay::displayFrame(*I_color, m_cMo, m_cam, 0.05, vpColor::red); } @@ -818,7 +839,8 @@ void vpMbTracker::initFromPoints(const vpImage *const I, const vp if (pos == initFile.size() - ext.size() && pos != 0) { ss << initFile; - } else { + } + else { ss << initFile; ss << ".init"; } @@ -929,7 +951,8 @@ void vpMbTracker::initFromPoints(const vpImage *const I, const vp if (I) { init(*I); - } else { + } + else { vpImageConvert::convert(*I_color, m_I); init(m_I); } @@ -1018,7 +1041,8 @@ void vpMbTracker::initFromPoints(const vpImage *const I, const vp if (I) { init(*I); - } else { + } + else { vpImageConvert::convert(*I_color, m_I); init(m_I); } @@ -1064,7 +1088,8 @@ void vpMbTracker::initFromPose(const vpImage *const I, const vpIm if (pos == initFile.size() - ext.size() && pos != 0) { ss << initFile; - } else { + } + else { ss << initFile; ss << ".pos"; } @@ -1083,7 +1108,8 @@ void vpMbTracker::initFromPose(const vpImage *const I, const vpIm if (I) { init(*I); - } else { + } + else { vpImageConvert::convert(*I_color, m_I); init(m_I); } @@ -1438,13 +1464,16 @@ void vpMbTracker::loadModel(const std::string &modelFile, bool verbose, const vp nbCylinders = 0; nbCircles = 0; loadCAOModel(modelFile, vectorOfModelFilename, startIdFace, verbose, true, odTo); - } else if ((*(it - 1) == 'l' && *(it - 2) == 'r' && *(it - 3) == 'w' && *(it - 4) == '.') || - (*(it - 1) == 'L' && *(it - 2) == 'R' && *(it - 3) == 'W' && *(it - 4) == '.')) { + } + else if ((*(it - 1) == 'l' && *(it - 2) == 'r' && *(it - 3) == 'w' && *(it - 4) == '.') || + (*(it - 1) == 'L' 
&& *(it - 2) == 'R' && *(it - 3) == 'W' && *(it - 4) == '.')) { loadVRMLModel(modelFile); - } else { + } + else { throw vpException(vpException::ioError, "Error: File %s doesn't contain a cao or wrl model", modelFile.c_str()); } - } else { + } + else { throw vpException(vpException::ioError, "Error: File %s doesn't exist", modelFile.c_str()); } @@ -1497,7 +1526,8 @@ void vpMbTracker::loadVRMLModel(const std::string &modelFile) sceneGraphVRML2 = tovrml2.getVRML2SceneGraph(); sceneGraphVRML2->ref(); sceneGraph->unref(); - } else { + } + else { sceneGraphVRML2 = SoDB::readAllVRML(&in); if (sceneGraphVRML2 == NULL) { /*return -1;*/ } @@ -1561,7 +1591,8 @@ std::map vpMbTracker::parseParameters(std::string &end if (pos != std::string::npos) { mapOfParams[it->first] = endLine.substr(0, pos); endLine = endLine.substr(pos + 1); - } else { + } + else { parseQuote = false; } } @@ -1662,9 +1693,10 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector> caoVersion; fileId.ignore(std::numeric_limits::max(), fileId.widen('\n')); // skip the rest of the line - } else { + } + else { std::cout << "in vpMbTracker::loadCAOModel() -> Bad parameter header " - "file : use V0, V1, ..."; + "file : use V0, V1, ..."; throw vpException(vpException::badValue, "in vpMbTracker::loadCAOModel() -> Bad parameter " "header file : use V0, V1, ..."); } @@ -1771,12 +1803,14 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector= VISP_CXX_STANDARD_11) std::lock_guard lock(g_mutex_cout); -#endif std::cout << "> " << caoNbrPoint << " points" << std::endl; } @@ -1842,9 +1874,7 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector= VISP_CXX_STANDARD_11) std::lock_guard lock(g_mutex_cout); -#endif std::cout << "> " << caoNbrLine << " lines" << std::endl; } @@ -1903,7 +1933,8 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector key(index1, index2); segmentTemporaryMap[key] = segmentInfo; - } else { + } + else { vpTRACE(" line %d has wrong coordinates.", k); } } @@ -1923,9 +1954,7 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector= VISP_CXX_STANDARD_11) std::lock_guard lock(g_mutex_cout); -#endif std::cout << "> " << caoNbrPolygonLine << " polygon lines" << std::endl; } @@ -2012,9 +2041,7 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector= VISP_CXX_STANDARD_11) std::lock_guard lock(g_mutex_cout); -#endif std::cout << "> " << caoNbrPolygonPoint << " polygon points" << std::endl; } @@ -2085,9 +2112,7 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector= VISP_CXX_STANDARD_11) std::lock_guard lock(g_mutex_cout); -#endif std::cout << "> " << caoNbCylinder << " cylinders" << std::endl; } @@ -2142,7 +2167,8 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector= VISP_CXX_STANDARD_11) std::lock_guard lock(g_mutex_cout); -#endif std::cout << "> " << caoNbCircle << " circles" << std::endl; } @@ -2217,7 +2241,8 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector= VISP_CXX_STANDARD_11) std::lock_guard lock(g_mutex_cout); -#endif std::cout << "Global information for " << vpIoTools::getName(modelFile) << " :" << std::endl; std::cout << "Total nb of points : " << nbPoints << std::endl; std::cout << "Total nb of lines : " << nbLines << std::endl; @@ -2240,10 +2263,9 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector= VISP_CXX_STANDARD_11) + } + else { std::lock_guard lock(g_mutex_cout); -#endif std::cout << "> " << nbPoints 
<< " points" << std::endl; std::cout << "> " << nbLines << " lines" << std::endl; std::cout << "> " << nbPolygonLines << " polygon lines" << std::endl; @@ -2255,7 +2277,8 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vectorget(j); if (!strncmp(face_set->getName().getString(), "cyl", 3)) { extractCylinders(face_set, transform, idFace, name); - } else { + } + else { extractFaces(face_set, transform, idFace, name); } } @@ -2382,7 +2406,8 @@ void vpMbTracker::extractFaces(SoVRMLIndexedFaceSet *face_set, vpHomogeneousMatr initProjectionErrorFaceFromCorners(*(m_projectionErrorFaces.getPolygon().back())); corners.resize(0); } - } else { + } + else { coord = (SoVRMLCoordinate *)(face_set->coord.getValue()); int index = face_set->coordIndex[i]; pointTransformed[0] = coord->point[index].getValue()[0]; @@ -2445,7 +2470,8 @@ void vpMbTracker::extractCylinders(SoVRMLIndexedFaceSet *face_set, vpHomogeneous if (i < (int)corners_c1.size()) { corners_c1[(unsigned int)i] = pt; - } else { + } + else { corners_c2[(unsigned int)i - corners_c1.size()] = pt; } } @@ -2518,7 +2544,8 @@ void vpMbTracker::extractLines(SoVRMLIndexedLineSet *line_set, int &idFace, cons initProjectionErrorFaceFromCorners(*(m_projectionErrorFaces.getPolygon().back())); corners.resize(0); } - } else { + } + else { coord = (SoVRMLCoordinate *)(line_set->coord.getValue()); int index = line_set->coordIndex[i]; point[0] = coord->point[index].getValue()[0]; @@ -2593,7 +2620,8 @@ vpMbTracker::getPolygonFaces(bool orderPolygons, bool useVisibility, bool clipPo if (clipPolygon) { faces.getPolygon()[i]->getRoiClipped(m_cam, roiPts, m_cMo); - } else { + } + else { roiPts = faces.getPolygon()[i]->getRoi(m_cam, m_cMo); } @@ -2606,7 +2634,8 @@ vpMbTracker::getPolygonFaces(bool orderPolygons, bool useVisibility, bool clipPo std::vector polyPts; if (clipPolygon) { faces.getPolygon()[i]->getPolygonClipped(polyPts); - } else { + } + else { for (unsigned int j = 0; j < faces.getPolygon()[i]->nbpt; j++) { polyPts.push_back(faces.getPolygon()[i]->p[j]); } @@ -2650,7 +2679,8 @@ vpMbTracker::getPolygonFaces(bool orderPolygons, bool useVisibility, bool clipPo pairOfPolygonFaces.first = polygonsTmp; pairOfPolygonFaces.second = roisPtTmp; - } else { + } + else { pairOfPolygonFaces.first = polygonsTmp; pairOfPolygonFaces.second = roisPtTmp; } @@ -2673,8 +2703,8 @@ void vpMbTracker::setOgreVisibilityTest(const bool &v) #ifndef VISP_HAVE_OGRE useOgre = false; std::cout << "WARNING: ViSP doesn't have Ogre3D, basic visibility test " - "will be used. setOgreVisibilityTest() set to false." - << std::endl; + "will be used. setOgreVisibilityTest() set to false." + << std::endl; #endif } } @@ -2809,7 +2839,8 @@ void vpMbTracker::computeCovarianceMatrixVVS(const bool isoJoIdentity, const vpC // computation efficiency if (isoJoIdentity) { covarianceMatrix = vpMatrix::computeCovarianceMatrixVVS(cMoPrev, error, L_true, D); - } else { + } + else { covarianceMatrix = vpMatrix::computeCovarianceMatrixVVS(cMoPrev, error, LVJ_true, D); } } @@ -2890,7 +2921,8 @@ void vpMbTracker::computeVVSPoseEstimation(const bool isoJoIdentity, unsigned in v = -m_lambda * LTL.pseudoInverse(LTL.getRows() * std::numeric_limits::epsilon()) * LTR; break; } - } else { + } + else { vpVelocityTwistMatrix cVo; cVo.buildFrom(m_cMo); vpMatrix LVJ = (L * (cVo * oJo)); @@ -2953,7 +2985,7 @@ vpColVector vpMbTracker::getEstimatedDoF() const frame that are estimated by the tracker. When set to 1, all the 6 dof are estimated. 
- Below we give the correspondance between the index of the vector and the + Below we give the correspondence between the index of the vector and the considered dof: - v[0] = 1 if translation along X is estimated, 0 otherwise; - v[1] = 1 if translation along Y is estimated, 0 otherwise; @@ -2971,7 +3003,8 @@ void vpMbTracker::setEstimatedDoF(const vpColVector &v) // if(v[i] != 0){ if (std::fabs(v[i]) > std::numeric_limits::epsilon()) { oJo[i][i] = 1.0; - } else { + } + else { oJo[i][i] = 0.0; m_isoJoIdentity = false; } @@ -3326,7 +3359,7 @@ void vpMbTracker::addProjectionErrorCircle(const vpPoint &P1, const vpPoint &P2, if ((samePoint(*(ci->p1), P1) && samePoint(*(ci->p2), P2) && samePoint(*(ci->p3), P3)) || (samePoint(*(ci->p1), P1) && samePoint(*(ci->p2), P3) && samePoint(*(ci->p3), P2))) { already_here = - (std::fabs(ci->radius - r) < std::numeric_limits::epsilon() * vpMath::maximum(ci->radius, r)); + (std::fabs(ci->radius - r) < std::numeric_limits::epsilon() * vpMath::maximum(ci->radius, r)); } } @@ -3357,7 +3390,7 @@ void vpMbTracker::addProjectionErrorCylinder(const vpPoint &P1, const vpPoint &P if ((samePoint(*(cy->p1), P1) && samePoint(*(cy->p2), P2)) || (samePoint(*(cy->p1), P2) && samePoint(*(cy->p2), P1))) { already_here = - (std::fabs(cy->radius - r) < std::numeric_limits::epsilon() * vpMath::maximum(cy->radius, r)); + (std::fabs(cy->radius - r) < std::numeric_limits::epsilon() * vpMath::maximum(cy->radius, r)); } } @@ -3566,7 +3599,8 @@ void vpMbTracker::projectionErrorVisibleFace(unsigned int width, unsigned int he if (!useOgre) { m_projectionErrorFaces.setVisible(width, height, m_projectionErrorCam, _cMo, angleAppears, angleDisappears, changed); - } else { + } + else { #ifdef VISP_HAVE_OGRE m_projectionErrorFaces.setVisibleOgre(width, height, m_projectionErrorCam, _cMo, angleAppears, angleDisappears, changed); @@ -3648,7 +3682,8 @@ void vpMbTracker::projectionErrorInitMovingEdge(const vpImage &I, l->updateTracked(); if (l->meline.empty() && l->isTracked()) l->initMovingEdge(I, _cMo, doNotTrack, m_mask); - } else { + } + else { l->setVisible(false); for (size_t a = 0; a < l->meline.size(); a++) { if (l->meline[a] != NULL) @@ -3683,7 +3718,8 @@ void vpMbTracker::projectionErrorInitMovingEdge(const vpImage &I, if (cy->isTracked()) cy->initMovingEdge(I, _cMo, doNotTrack, m_mask); } - } else { + } + else { cy->setVisible(false); if (cy->meline1 != NULL) delete cy->meline1; @@ -3716,7 +3752,8 @@ void vpMbTracker::projectionErrorInitMovingEdge(const vpImage &I, if (ci->isTracked()) ci->initMovingEdge(I, _cMo, doNotTrack, m_mask); } - } else { + } + else { ci->setVisible(false); if (ci->meEllipse != NULL) delete ci->meEllipse; @@ -3738,7 +3775,8 @@ void vpMbTracker::loadConfigFile(const std::string &configFile, bool verbose) std::cout << " *********** Parsing XML for ME projection error ************ " << std::endl; } xmlp.parse(configFile); - } catch (...) { + } + catch (...) 
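// --- Minimal usage sketch (not part of this patch) ---------------------------
// Sketch of the setEstimatedDoF() interface documented above: a 6-vector
// selects which dof are estimated (1 = estimated, 0 = frozen), in the order
// tx, ty, tz, rx, ry, rz given in the documentation. The helper name is ours.
#include <visp3/core/vpColVector.h>
#include <visp3/mbt/vpMbGenericTracker.h>

void estimateTranslationOnly(vpMbGenericTracker &tracker)
{
  vpColVector v(6, 1.0);    // start with all 6 dof estimated
  v[3] = v[4] = v[5] = 0.0; // freeze the three rotation dof
  tracker.setEstimatedDoF(v);
}
// -----------------------------------------------------------------------------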
{ throw vpException(vpException::ioError, "Cannot open XML file \"%s\"", configFile.c_str()); } diff --git a/modules/tracker/mbt/test/generic-with-dataset/perfGenericTracker.cpp b/modules/tracker/mbt/test/generic-with-dataset/perfGenericTracker.cpp index 2b3e3f35bf..da5bb48cb8 100644 --- a/modules/tracker/mbt/test/generic-with-dataset/perfGenericTracker.cpp +++ b/modules/tracker/mbt/test/generic-with-dataset/perfGenericTracker.cpp @@ -57,10 +57,8 @@ template bool read_data(const std::string &input_directory, int cpt, const vpCameraParameters &cam_depth, vpImage &I, vpImage &I_depth, std::vector &pointcloud, vpHomogeneousMatrix &cMo) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) static_assert(std::is_same::value || std::is_same::value, "Template function supports only unsigned char and vpRGBa images!"); -#endif #if VISP_HAVE_DATASET_VERSION >= 0x030600 std::string ext("png"); #else @@ -130,7 +128,7 @@ TEST_CASE("Benchmark generic tracker", "[benchmark]") vpMbGenericTracker tracker(tracker_type); const std::string input_directory = - vpIoTools::createFilePath(vpIoTools::getViSPImagesDataPath(), "mbt-depth/Castle-simu"); + vpIoTools::createFilePath(vpIoTools::getViSPImagesDataPath(), "mbt-depth/Castle-simu"); const std::string configFileCam1 = input_directory + std::string("/Config/chateau.xml"); const std::string configFileCam2 = input_directory + std::string("/Config/chateau_depth.xml"); REQUIRE(vpIoTools::checkFilename(configFileCam1)); @@ -191,16 +189,16 @@ TEST_CASE("Benchmark generic tracker", "[benchmark]") { std::vector > mapOfTrackerTypes; mapOfTrackerTypes.push_back( - {{"Camera1", vpMbGenericTracker::EDGE_TRACKER}, {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER}}); + { {"Camera1", vpMbGenericTracker::EDGE_TRACKER}, {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER} }); mapOfTrackerTypes.push_back( - {{"Camera1", vpMbGenericTracker::EDGE_TRACKER}, {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER}}); + { {"Camera1", vpMbGenericTracker::EDGE_TRACKER}, {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER} }); #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) mapOfTrackerTypes.push_back( - {{"Camera1", vpMbGenericTracker::KLT_TRACKER}, {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER}}); - mapOfTrackerTypes.push_back({{"Camera1", vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER}, - {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER}}); - mapOfTrackerTypes.push_back({{"Camera1", vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER}, - {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER}}); + { {"Camera1", vpMbGenericTracker::KLT_TRACKER}, {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER} }); + mapOfTrackerTypes.push_back({ {"Camera1", vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER}, + {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER} }); + mapOfTrackerTypes.push_back({ {"Camera1", vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER}, + {"Camera2", vpMbGenericTracker::DEPTH_DENSE_TRACKER} }); #endif std::vector benchmarkNames = { @@ -289,7 +287,7 @@ TEST_CASE("Benchmark generic tracker", "[benchmark]") return cMo; }; #else - } + } #endif vpPoseVector pose_est(cMo); @@ -304,9 +302,9 @@ TEST_CASE("Benchmark generic tracker", "[benchmark]") const double max_rotation_error = 0.03; CHECK(sqrt(t_err.sumSquare()) < max_translation_error); CHECK(sqrt(tu_err.sumSquare()) < max_rotation_error); - } } - } // if (runBenchmark) + } +} // if (runBenchmark) } int main(int 
argc, char *argv[]) @@ -316,11 +314,11 @@ int main(int argc, char *argv[]) // Build a new parser on top of Catch's using namespace Catch::clara; auto cli = session.cli() // Get Catch's composite command line parser - | Opt(runBenchmark) // bind variable to a new option, with a hint string - ["--benchmark"] // the option names it will respond to - ("run benchmark comparing naive code with ViSP implementation"); // description string for the help output + | Opt(runBenchmark) // bind variable to a new option, with a hint string + ["--benchmark"] // the option names it will respond to + ("run benchmark comparing naive code with ViSP implementation"); // description string for the help output - // Now pass the new composite back to Catch so it uses that +// Now pass the new composite back to Catch so it uses that session.cli(cli); // Let Catch (using Clara) parse the command line diff --git a/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp b/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp index 709128e746..2167f43c26 100644 --- a/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp +++ b/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp @@ -46,9 +46,7 @@ #if defined(VISP_HAVE_MODULE_MBT) && \ (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV)) -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #include -#endif #include #include @@ -187,10 +185,8 @@ template bool read_data(const std::string &input_directory, int cpt, const vpCameraParameters &cam_depth, vpImage &I, vpImage &I_depth, std::vector &pointcloud, vpHomogeneousMatrix &cMo) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) static_assert(std::is_same::value || std::is_same::value, "Template function supports only unsigned char and vpRGBa images!"); -#endif #if VISP_HAVE_DATASET_VERSION >= 0x030600 std::string ext("png"); #else @@ -258,10 +254,8 @@ template bool run(const std::string &input_directory, bool opt_click_allowed, bool opt_display, bool useScanline, int trackerType_image, int opt_lastFrame, bool use_depth, bool use_mask, bool save) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) static_assert(std::is_same::value || std::is_same::value, "Template function supports only unsigned char and vpRGBa images!"); -#endif // Initialise a display #if defined(VISP_HAVE_X11) vpDisplayX display1, display2; @@ -655,7 +649,7 @@ bool run(const std::string &input_directory, bool opt_click_allowed, bool opt_di } } // namespace -int main(int argc, const char *argv []) +int main(int argc, const char *argv[]) { try { std::string env_ipath; @@ -681,7 +675,7 @@ int main(int argc, const char *argv []) // Read the command line options if (!getOptions(argc, argv, opt_ipath, opt_click_allowed, opt_display, opt_save, useScanline, trackerType_image, - opt_lastFrame, use_depth, use_mask, use_color_image)) { + opt_lastFrame, use_depth, use_mask, use_color_image)) { return EXIT_FAILURE; } diff --git a/modules/tracker/mbt/test/generic-with-dataset/testGenericTrackerDepth.cpp b/modules/tracker/mbt/test/generic-with-dataset/testGenericTrackerDepth.cpp index 6c5501226e..bcdbe470f0 100644 --- a/modules/tracker/mbt/test/generic-with-dataset/testGenericTrackerDepth.cpp +++ b/modules/tracker/mbt/test/generic-with-dataset/testGenericTrackerDepth.cpp @@ -43,12 +43,9 @@ #include #include -#if defined(VISP_HAVE_MODULE_MBT) && \ - (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV)) +#if defined(VISP_HAVE_MODULE_MBT) && 
(defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV)) -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #include -#endif #include #include @@ -166,10 +163,8 @@ template bool read_data(const std::string &input_directory, int cpt, const vpCameraParameters &cam_depth, vpImage &I, vpImage &I_depth, std::vector &pointcloud, vpHomogeneousMatrix &cMo) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) static_assert(std::is_same::value || std::is_same::value, "Template function supports only unsigned char and vpRGBa images!"); -#endif #if VISP_HAVE_DATASET_VERSION >= 0x030600 std::string ext("png"); #else @@ -233,10 +228,8 @@ template bool run(vpImage &I, const std::string &input_directory, bool opt_click_allowed, bool opt_display, bool useScanline, int opt_lastFrame, bool use_mask) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) static_assert(std::is_same::value || std::is_same::value, "Template function supports only unsigned char and vpRGBa images!"); -#endif // Initialise a display #if defined(VISP_HAVE_X11) vpDisplayX display1, display2; @@ -258,29 +251,29 @@ bool run(vpImage &I, const std::string &input_directory, bool opt_click_al tracker.loadConfigFile(input_directory + "/Config/chateau_depth.xml"); #if 0 // Corresponding parameters manually set to have an example code - { - vpCameraParameters cam_depth; - cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0); - tracker.setCameraParameters(cam_depth); - } - // Depth - tracker.setDepthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION); - tracker.setDepthNormalPclPlaneEstimationMethod(2); - tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200); - tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001); - tracker.setDepthNormalSamplingStep(2, 2); + { + vpCameraParameters cam_depth; + cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0); + tracker.setCameraParameters(cam_depth); + } + // Depth + tracker.setDepthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION); + tracker.setDepthNormalPclPlaneEstimationMethod(2); + tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200); + tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001); + tracker.setDepthNormalSamplingStep(2, 2); - tracker.setDepthDenseSamplingStep(4, 4); + tracker.setDepthDenseSamplingStep(4, 4); #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) - tracker.setKltMaskBorder(5); + tracker.setKltMaskBorder(5); #endif - tracker.setAngleAppear(vpMath::rad(85.0)); - tracker.setAngleDisappear(vpMath::rad(89.0)); - tracker.setNearClippingDistance(0.01); - tracker.setFarClippingDistance(2.0); - tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING); + tracker.setAngleAppear(vpMath::rad(85.0)); + tracker.setAngleDisappear(vpMath::rad(89.0)); + tracker.setNearClippingDistance(0.01); + tracker.setFarClippingDistance(2.0); + tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING); #endif tracker.loadModel(input_directory + "/Models/chateau.cao"); vpHomogeneousMatrix T; @@ -391,7 +384,7 @@ bool run(vpImage &I, const std::string &input_directory, bool opt_click_al const double tu_thresh = useScanline ? 0.5 : 0.4; if (!use_mask && (t_err2 > t_thresh || tu_err2 > tu_thresh)) { // no accuracy test with mask std::cerr << "Pose estimated exceeds the threshold (t_thresh = " << t_thresh << ", tu_thresh = " << tu_thresh - << ")!" << std::endl; + << ")!" 
<< std::endl; std::cout << "t_err: " << t_err2 << " ; tu_err: " << tu_err2 << std::endl; correct_accuracy = false; } @@ -430,8 +423,8 @@ bool run(vpImage &I, const std::string &input_directory, bool opt_click_al if (!time_vec.empty()) std::cout << "Computation time, Mean: " << vpMath::getMean(time_vec) - << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec) << " ms" - << std::endl; + << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec) << " ms" + << std::endl; if (!vec_err_t.empty()) std::cout << "Max translation error: " << *std::max_element(vec_err_t.begin(), vec_err_t.end()) << std::endl; @@ -479,15 +472,15 @@ int main(int argc, const char *argv[]) usage(argv[0], NULL); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl - << " environment variable to specify the location of the " << std::endl - << " image path where test images are located." << std::endl - << std::endl; + << " environment variable to specify the location of the " << std::endl + << " image path where test images are located." << std::endl + << std::endl; return EXIT_FAILURE; } std::string input_directory = - vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/Castle-simu"); + vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/Castle-simu"); if (!vpIoTools::checkDirectory(input_directory)) { std::cerr << "ViSP-images does not contain the folder: " << input_directory << "!" << std::endl; return EXIT_SUCCESS; @@ -496,11 +489,13 @@ int main(int argc, const char *argv[]) if (use_color_image) { vpImage I_color; return run(I_color, input_directory, opt_click_allowed, opt_display, useScanline, opt_lastFrame, use_mask); - } else { + } + else { vpImage I_gray; return run(I_gray, input_directory, opt_click_allowed, opt_display, useScanline, opt_lastFrame, use_mask); } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; return EXIT_FAILURE; } diff --git a/modules/tracker/me/include/visp3/me/vpMe.h b/modules/tracker/me/include/visp3/me/vpMe.h index 5cb1b64151..a35e03254f 100644 --- a/modules/tracker/me/include/visp3/me/vpMe.h +++ b/modules/tracker/me/include/visp3/me/vpMe.h @@ -174,12 +174,10 @@ class VISP_EXPORT vpMe */ vpMe &operator=(const vpMe &me); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! * Move operator. */ vpMe &operator=(const vpMe &&me); -#endif /*! * Check sample step wrt min value. diff --git a/modules/tracker/me/include/visp3/me/vpMeEllipse.h b/modules/tracker/me/include/visp3/me/vpMeEllipse.h index cf60d79183..e71ffad56b 100644 --- a/modules/tracker/me/include/visp3/me/vpMeEllipse.h +++ b/modules/tracker/me/include/visp3/me/vpMeEllipse.h @@ -549,11 +549,7 @@ class VISP_EXPORT vpMeEllipse : public vpMeTracker * \exception vpTrackingException::initializationError : Moving edges not * initialized. */ -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) virtual void sample(const vpImage &I, bool doNotTrack = false) override; -#else - virtual void sample(const vpImage &I, bool doNotTrack = false); -#endif /*! * Compute the \f$ theta \f$ angle for each vpMeSite. 
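// --- Illustrative sketch (not part of this patch) ----------------------------
// The kind of pose-accuracy check used in the tracker tests above: the
// estimated pose is compared to the ground truth through the translation and
// theta-u parts of the pose vectors. The function name and the threshold
// parameters are ours; the thresholds used in the tests are defined there.
#include <cmath>
#include <visp3/core/vpColVector.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpPoseVector.h>

bool isPoseAccurate(const vpHomogeneousMatrix &cMo_gt, const vpHomogeneousMatrix &cMo_est,
                    double t_thresh, double tu_thresh)
{
  vpPoseVector p_gt(cMo_gt), p_est(cMo_est);
  vpColVector t_err(3), tu_err(3);
  for (unsigned int i = 0; i < 3; i++) {
    t_err[i] = p_gt[i] - p_est[i];          // translation error [m]
    tu_err[i] = p_gt[i + 3] - p_est[i + 3]; // theta-u rotation error [rad]
  }
  return (std::sqrt(t_err.sumSquare()) < t_thresh) && (std::sqrt(tu_err.sumSquare()) < tu_thresh);
}
// -----------------------------------------------------------------------------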
diff --git a/modules/tracker/me/include/visp3/me/vpMeLine.h b/modules/tracker/me/include/visp3/me/vpMeLine.h index fa03bdc306..0ba0cb1cf6 100644 --- a/modules/tracker/me/include/visp3/me/vpMeLine.h +++ b/modules/tracker/me/include/visp3/me/vpMeLine.h @@ -213,11 +213,7 @@ class VISP_EXPORT vpMeLine : public vpMeTracker * \exception vpTrackingException::initializationError : Moving edges not * initialized. */ -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) virtual void sample(const vpImage &I, bool doNotTrack = false) override; -#else - virtual void sample(const vpImage &I, bool doNotTrack = false); -#endif /*! * Resample the line if the number of sample is less than 80% of the diff --git a/modules/tracker/me/src/moving-edges/vpMe.cpp b/modules/tracker/me/src/moving-edges/vpMe.cpp index 29cd610cb8..f58df242ab 100644 --- a/modules/tracker/me/src/moving-edges/vpMe.cpp +++ b/modules/tracker/me/src/moving-edges/vpMe.cpp @@ -405,7 +405,6 @@ vpMe &vpMe::operator=(const vpMe &me) return *this; } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpMe &vpMe::operator=(const vpMe &&me) { if (m_mask != NULL) { @@ -430,7 +429,6 @@ vpMe &vpMe::operator=(const vpMe &&me) initMask(); return *this; } -#endif vpMe::~vpMe() { diff --git a/modules/tracker/me/test/testJsonMe.cpp b/modules/tracker/me/test/testJsonMe.cpp index 1874ae1a95..8d0ef208f5 100644 --- a/modules/tracker/me/test/testJsonMe.cpp +++ b/modules/tracker/me/test/testJsonMe.cpp @@ -39,7 +39,7 @@ #include -#if defined(VISP_HAVE_NLOHMANN_JSON) && defined(VISP_HAVE_CATCH2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_NLOHMANN_JSON) && defined(VISP_HAVE_CATCH2) #include #include @@ -95,7 +95,7 @@ class RandomMeGenerator : public Catch::Generators::IGenerator vpMe current; public: - RandomMeGenerator() : m_rand(std::random_device{}()), m_dist(0.0, 1.0), m_int_dist(1, 10) + RandomMeGenerator() : m_rand(std::random_device {}()), m_dist(0.0, 1.0), m_int_dist(1, 10) { static_cast(next()); } @@ -150,11 +150,11 @@ SCENARIO("Serializing and deserializing a single vpMe", "[json]") const auto testInt = [&j, &me](const std::string &key, std::function setter, std::function getter) -> void { testOptionalProperty(j, { key }, me, setter, getter, [](int v) -> int { return v - 1; }); - }; + }; const auto testDouble = [&j, &me](const std::string &key, std::function setter, std::function getter) -> void { testOptionalProperty(j, { key }, me, setter, getter, [](double v) -> double { return v + 1.0; }); - }; + }; WHEN("Removing threshold") { testDouble("threshold", &vpMe::setThreshold, &vpMe::getThreshold); } WHEN("Removing mu1 and mu2") @@ -175,7 +175,7 @@ SCENARIO("Serializing and deserializing a single vpMe", "[json]") } } -int main(int argc, char *argv []) +int main(int argc, char *argv[]) { Catch::Session session; // There must be exactly one instance session.applyCommandLine(argc, argv); diff --git a/modules/vision/include/visp3/vision/vpPose.h b/modules/vision/include/visp3/vision/vpPose.h index c45a347f39..abd2644b59 100644 --- a/modules/vision/include/visp3/vision/vpPose.h +++ b/modules/vision/include/visp3/vision/vpPose.h @@ -52,9 +52,7 @@ #include #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #include -#endif #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_17) && \ (!defined(_MSC_VER) || ((VISP_CXX_STANDARD >= VISP_CXX_STANDARD_17) && (_MSC_VER >= 1911))) @@ -617,7 +615,7 @@ class VISP_EXPORT vpPose * Match a vector p2D of 2D point (x,y) and a vector p3D of 3D points * (X,Y,Z) using the Ransac algorithm. 
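// --- Related usage sketch (not part of this patch) ---------------------------
// Not the findMatch() overload documented here, but the closely related RANSAC
// pose computation offered by vpPose once 2D/3D correspondences are registered.
// The function name and the parameter values below are illustrative only.
#include <vector>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpPoint.h>
#include <visp3/vision/vpPose.h>

void computeRansacPoseExample(const std::vector<vpPoint> &points)
{
  vpPose pose;
  for (size_t i = 0; i < points.size(); i++) {
    pose.addPoint(points[i]); // each vpPoint holds both (x,y) and (X,Y,Z)
  }
  pose.setRansacNbInliersToReachConsensus(4); // minimal consensus size (illustrative)
  pose.setRansacThreshold(0.01);              // max residual for an inlier (illustrative)
  vpHomogeneousMatrix cMo;
  pose.computePose(vpPose::RANSAC, cMo);
}
// -----------------------------------------------------------------------------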
* - * At least numberOfInlierToReachAConsensus of true correspondance are required + * At least numberOfInlierToReachAConsensus of true correspondence are required * to validate the pose * * The inliers are given in a vector of vpPoint listInliers. diff --git a/modules/vision/include/visp3/vision/vpPoseFeatures.h b/modules/vision/include/visp3/vision/vpPoseFeatures.h index 2f2ca927b4..5a70f55e9c 100644 --- a/modules/vision/include/visp3/vision/vpPoseFeatures.h +++ b/modules/vision/include/visp3/vision/vpPoseFeatures.h @@ -63,8 +63,6 @@ #include #include - -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #include #ifndef DOXYGEN_SHOULD_SKIP_THIS @@ -367,7 +365,6 @@ class vpPoseSpecificFeatureTemplateObject : public vpPoseSpecificFeature } }; #endif // #ifndef DOXYGEN_SHOULD_SKIP_THIS -#endif // (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! * \class vpPoseFeatures @@ -477,7 +474,6 @@ class VISP_EXPORT vpPoseFeatures */ void addFeatureSegment(vpPoint &, vpPoint &); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! * Add a specific feature for the pose computation. */ @@ -489,7 +485,6 @@ class VISP_EXPORT vpPoseFeatures */ template void addSpecificFeature(ObjType *obj, RetType(ObjType:: *fct_ptr)(ArgsFunc...), Args &&...args); -#endif /*! * Clear all the features @@ -616,11 +611,8 @@ class VISP_EXPORT vpPoseFeatures std::vector > m_featureLine_DuoLineInt_List; // vpFeatureSegment std::vector > m_featureSegment_DuoPoints_list; - -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) // Specific features std::vector m_featureSpecific_list; -#endif /*! * Get the error vector and L matrix from all the features. @@ -649,7 +641,6 @@ class VISP_EXPORT vpPoseFeatures void computePoseRobustVVS(vpHomogeneousMatrix &cMo); }; -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) /*! * Add a specific feature for the pose computation. * @@ -691,12 +682,10 @@ class VISP_EXPORT vpPoseFeatures * vpFeatureLine fl; * void (*ptr)(vpFeaturePoint&, const vpPoint&) = &vpFeatureBuilder::create; * - * #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) * pose.addSpecificFeature(ptr, fp, pts[0]); * pose.addSpecificFeature(&vp_createPoint, fp, pts[1]); * pose.addSpecificFeature(&vp_createTwoPoint, fp, pts[2], pts[3]); * pose.addSpecificFeature(&vp_createLine, fl, line); - * #endif * * //... Pose Computation * @@ -773,11 +762,9 @@ void vpPoseFeatures::addSpecificFeature(RetType(*fct_ptr)(ArgsFunc...), Args &&. * void (vp_createClass::*ptrClassLine)(vpFeatureLine &, const vpLine &) * = &vp_createClass::vp_createLine; * - * #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) * pose.addSpecificFeature(&cpClass, ptrClassPoint, fp, pts[0]); * pose.addSpecificFeature(&cpClass, ptrClassTwoPoint, fp, pts[1], pts[2]); * pose.addSpecificFeature(&cpClass, ptrClassLine, fl, line); - * #endif * * //... 
Pose Computation * @@ -799,7 +786,6 @@ void vpPoseFeatures::addSpecificFeature(ObjType *obj, RetType(ObjType:: *fct_ptr if (m_featureSpecific_list.size() > m_maxSize) m_maxSize = static_cast(m_featureSpecific_list.size()); } -#endif #endif //#ifdef VISP_HAVE_MODULE_VISUAL_FEATURES diff --git a/modules/vision/src/pose-estimation/vpPoseFeatures.cpp b/modules/vision/src/pose-estimation/vpPoseFeatures.cpp index 958bf2b998..2550ab73ba 100644 --- a/modules/vision/src/pose-estimation/vpPoseFeatures.cpp +++ b/modules/vision/src/pose-estimation/vpPoseFeatures.cpp @@ -81,11 +81,9 @@ void vpPoseFeatures::clear() delete m_featureSegment_DuoPoints_list[(unsigned int)i].desiredFeature; m_featureSegment_DuoPoints_list.clear(); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) for (int i = (int)m_featureSpecific_list.size() - 1; i >= 0; i--) delete m_featureSpecific_list[(unsigned int)i]; m_featureSpecific_list.clear(); -#endif m_maxSize = 0; m_totalSize = 0; @@ -305,14 +303,12 @@ void vpPoseFeatures::error_and_interaction(vpHomogeneousMatrix &cMo, vpColVector L.stack(fs.interaction()); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) //--------------Specific Feature-------------- if (i < m_featureSpecific_list.size()) { m_featureSpecific_list[i]->createCurrent(cMo); err.stack(m_featureSpecific_list[i]->error()); L.stack(m_featureSpecific_list[i]->currentInteraction()); } -#endif } } diff --git a/modules/vision/src/pose-estimation/vpPoseRansac.cpp b/modules/vision/src/pose-estimation/vpPoseRansac.cpp index 2ff1b5a4c2..cd470e4ec4 100644 --- a/modules/vision/src/pose-estimation/vpPoseRansac.cpp +++ b/modules/vision/src/pose-estimation/vpPoseRansac.cpp @@ -49,9 +49,7 @@ #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) #include -#endif #define eps 1e-6 @@ -355,15 +353,10 @@ bool vpPose::poseRansac(vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneo throw(vpPoseException(vpPoseException::notInitializedError, "Not enough point to compute the pose")); } -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) unsigned int nbThreads = 1; bool executeParallelVersion = useParallelRansac; -#else - bool executeParallelVersion = false; -#endif if (executeParallelVersion) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) if (nbParallelRansacThreads <= 0) { // Get number of CPU threads nbThreads = std::thread::hardware_concurrency(); @@ -375,13 +368,11 @@ bool vpPose::poseRansac(vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneo else { nbThreads = nbParallelRansacThreads; } -#endif } bool foundSolution = false; if (executeParallelVersion) { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) std::vector threadpool; std::vector ransacWorkers; @@ -422,7 +413,6 @@ bool vpPose::poseRansac(vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneo } foundSolution = successRansac; -#endif } else { // Sequential RANSAC diff --git a/modules/vision/test/pose/testFindMatch.cpp b/modules/vision/test/pose/testFindMatch.cpp index 4dbd410b7e..60bd02e72f 100644 --- a/modules/vision/test/pose/testFindMatch.cpp +++ b/modules/vision/test/pose/testFindMatch.cpp @@ -29,7 +29,7 @@ * * Description: * Compute the pose of a 3D object using the Dementhon method. 
Assuming that - * the correspondance between 2D points and 3D points is not done, we use + * the correspondence between 2D points and 3D points is not done, we use * the RANSAC algorithm to achieve this task */ diff --git a/modules/vision/test/pose/testPoseFeatures.cpp b/modules/vision/test/pose/testPoseFeatures.cpp index 56d6e32548..de359cc12d 100644 --- a/modules/vision/test/pose/testPoseFeatures.cpp +++ b/modules/vision/test/pose/testPoseFeatures.cpp @@ -52,13 +52,12 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) class vp_createPointClass { public: int value; - vp_createPointClass() : value(0) {} + vp_createPointClass() : value(0) { } int vp_createPoint(vpFeaturePoint &fp, const vpPoint &v) { @@ -72,7 +71,6 @@ void vp_createPoint(vpFeaturePoint &fp, const vpPoint &v) { vpFeatureBuilder::cr void vp_createLine(vpFeatureLine &fp, const vpLine &v) { vpFeatureBuilder::create(fp, v); } #endif -#endif int test_pose(bool use_robust) { @@ -153,7 +151,6 @@ int test_pose(bool use_robust) pose.addFeatureEllipse(circle); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpFeaturePoint fp; vpFeatureLine fl; vpFeatureSegment fs; @@ -163,7 +160,6 @@ int test_pose(bool use_robust) pose.addSpecificFeature(&cpClass, ptrClass, fp, pts[1]); pose.addSpecificFeature(&vp_createLine, fl, line); pose.addSpecificFeature(ptr, fs, pts[3], pts[4]); -#endif pose.setVerbose(true); pose.setLambda(0.6); @@ -191,7 +187,7 @@ int test_pose(bool use_robust) std::cout << "\nResulting covariance (Diag): " << std::endl; vpMatrix covariance = pose.getCovarianceMatrix(); std::cout << covariance[0][0] << " " << covariance[1][1] << " " << covariance[2][2] << " " << covariance[3][3] << " " - << covariance[4][4] << " " << covariance[5][5] << " " << std::endl; + << covariance[4][4] << " " << covariance[5][5] << " " << std::endl; int test_fail = 0; for (unsigned int i = 0; i < 6; i++) { @@ -215,7 +211,8 @@ int main() return EXIT_FAILURE; return EXIT_SUCCESS; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e.getStringMessage() << std::endl; return EXIT_FAILURE; } diff --git a/modules/vision/test/pose/testPoseRansac.cpp b/modules/vision/test/pose/testPoseRansac.cpp index 8c7f8213c0..9dd4cc8330 100644 --- a/modules/vision/test/pose/testPoseRansac.cpp +++ b/modules/vision/test/pose/testPoseRansac.cpp @@ -29,7 +29,7 @@ * * Description: * Compute the pose of a 3D object using the Dementhon method. Assuming that - * the correspondance between 2D points and 3D points is not done, we use + * the correspondence between 2D points and 3D points is not done, we use * the RANSAC algorithm to achieve this task */ diff --git a/tutorial/bridge/opencv/tutorial-bridge-opencv-matrix.cpp b/tutorial/bridge/opencv/tutorial-bridge-opencv-matrix.cpp index 2f4663685a..af960a6ce1 100644 --- a/tutorial/bridge/opencv/tutorial-bridge-opencv-matrix.cpp +++ b/tutorial/bridge/opencv/tutorial-bridge-opencv-matrix.cpp @@ -9,7 +9,7 @@ int main() { -#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) { std::cout << "From OpenCV to ViSP conversion" << std::endl; //! [Create OpenCV matrix] @@ -27,7 +27,7 @@ int main() { std::cout << "From ViSP to OpenCV conversion" << std::endl; //! 
[Create ViSP matrix] - vpMatrix M(3, 4, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); + vpMatrix M(3, 4, { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }); std::cout << "M: \n" << M << std::endl; //! [Create ViSP matrix] diff --git a/tutorial/computer-vision/tutorial-pose-from-points-realsense-T265.cpp b/tutorial/computer-vision/tutorial-pose-from-points-realsense-T265.cpp index f8c9564782..e7e088ad36 100644 --- a/tutorial/computer-vision/tutorial-pose-from-points-realsense-T265.cpp +++ b/tutorial/computer-vision/tutorial-pose-from-points-realsense-T265.cpp @@ -20,14 +20,16 @@ int main(int argc, char **argv) for (int i = 0; i < argc; i++) { if (std::string(argv[i]) == "--camera_index" && i + 1 < argc) { opt_camera_index = atoi(argv[i + 1]); - } else if (std::string(argv[i]) == "--square_width" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--square_width" && i + 1 < argc) { opt_square_width = atoi(argv[i + 1]); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << "\nUsage: " << argv[0] << " [--camera_index <1.Left | 2.Right> (default: 1)]" - << " [--square_width ((2 * 10000) + (31 * 100) + 0)) (void)argc; (void)argv; diff --git a/tutorial/grabber/tutorial-grabber-1394.cpp b/tutorial/grabber/tutorial-grabber-1394.cpp index 76af5f9edc..fe7bfddf5c 100644 --- a/tutorial/grabber/tutorial-grabber-1394.cpp +++ b/tutorial/grabber/tutorial-grabber-1394.cpp @@ -8,57 +8,57 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--change-settings]" - << " [--seqname ]" - << " [--record ]" - << " [--no-display]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--change-settings]" + << " [--seqname ]" + << " [--record ]" + << " [--no-display]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --change-settings" << std::endl - << " Force camera settings to acquire 640x480 images in M0NO8 at 60 fps." << std::endl - << std::endl - << " --seqname " << std::endl - << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl - << " Default: empty." << std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --change-settings" << std::endl + << " Force camera settings to acquire 640x480 images in M0NO8 at 60 fps." << std::endl + << std::endl + << " --seqname " << std::endl + << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl + << " Default: empty." << std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." 
<< std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." << std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Examples to record a sequence:" << std::endl - << " " << argv[0] << " --seqname I%04d.png" << std::endl - << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl - << std::endl - << " Examples to record single shot images:\n" - << " " << argv[0] << " --seqname I%04d.png --record 1\n" - << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Examples to record a sequence:" << std::endl + << " " << argv[0] << " --seqname I%04d.png" << std::endl + << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl + << std::endl + << " Examples to record single shot images:\n" + << " " << argv[0] << " --seqname I%04d.png --record 1\n" + << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } int main(int argc, const char *argv[]) { -#if defined(VISP_HAVE_DC1394) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_DC1394) try { std::string opt_seqname; int opt_record_mode = 0; @@ -68,18 +68,23 @@ int main(int argc, const char *argv[]) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--change-settings") { opt_change_settings = true; - } else if (std::string(argv[i]) == "--seqname") { + } + else if (std::string(argv[i]) == "--seqname") { opt_seqname = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--no-display") { + } + else if (std::string(argv[i]) == "--no-display") { opt_display = false; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -94,7 +99,7 @@ int main(int argc, const char *argv[]) std::cout << "Display : " << (opt_display ? "enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); + std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); if (!opt_seqname.empty()) { std::cout << text_record_mode << std::endl; @@ -112,8 +117,9 @@ int main(int argc, const char *argv[]) try { g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8); g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60); - } catch (...) { // If settings are not available just catch execption to - // continue with default settings + } + catch (...) 
{ // If settings are not available just catch execption to + // continue with default settings std::cout << "Warning: cannot modify camera settings" << std::endl; } } @@ -164,7 +170,8 @@ int main(int argc, const char *argv[]) if (d) { delete d; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } #else @@ -173,8 +180,5 @@ int main(int argc, const char *argv[]) #ifndef VISP_HAVE_DC1394 std::cout << "Install libdc1394, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-basler-pylon.cpp b/tutorial/grabber/tutorial-grabber-basler-pylon.cpp index 5e685b52ef..d663548c24 100644 --- a/tutorial/grabber/tutorial-grabber-basler-pylon.cpp +++ b/tutorial/grabber/tutorial-grabber-basler-pylon.cpp @@ -9,61 +9,61 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--device ]" - << " [--type ]" - << " [--seqname ]" - << " [--record ]" - << " [--no-display]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--device ]" + << " [--type ]" + << " [--seqname ]" + << " [--record ]" + << " [--no-display]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --device " << std::endl - << " Camera device index in range [0...9]. Set 0 to dial with the first camera," << std::endl - << " and 1 to dial with the second camera attached to the computer." << std::endl - << " Default: 0." << std::endl - << std::endl - << " --type " << std::endl - << " Camera device type: GigE or USB" << std::endl - << " Default: GigE" << std::endl - << std::endl - << " --seqname " << std::endl - << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl - << " Default: empty." << std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --device " << std::endl + << " Camera device index in range [0...9]. Set 0 to dial with the first camera," << std::endl + << " and 1 to dial with the second camera attached to the computer." << std::endl + << " Default: 0." << std::endl + << std::endl + << " --type " << std::endl + << " Camera device type: GigE or USB" << std::endl + << " Default: GigE" << std::endl + << std::endl + << " --seqname " << std::endl + << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl + << " Default: empty." << std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." 
<< std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." << std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Example to visualize images from a second camera GigE:" << std::endl - << " " << argv[0] << " --device 1 --type GigE" << std::endl - << std::endl - << " Examples to record a sequence:" << std::endl - << " " << argv[0] << " --seqname I%04d.png" << std::endl - << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl - << std::endl - << " Examples to record single shot images:\n" - << " " << argv[0] << " --seqname I%04d.png --record 1\n" - << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Example to visualize images from a second camera GigE:" << std::endl + << " " << argv[0] << " --device 1 --type GigE" << std::endl + << std::endl + << " Examples to record a sequence:" << std::endl + << " " << argv[0] << " --seqname I%04d.png" << std::endl + << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl + << std::endl + << " Examples to record single shot images:\n" + << " " << argv[0] << " --seqname I%04d.png --record 1\n" + << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } /*! @@ -73,7 +73,7 @@ void usage(const char *argv[], int error) */ int main(int argc, const char *argv[]) { -#if defined(VISP_HAVE_PYLON) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_PYLON) try { unsigned int opt_device = 0; std::string opt_type("GigE"); @@ -90,18 +90,23 @@ int main(int argc, const char *argv[]) if (std::string(argv[i]) == "--type") { opt_type = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--seqname") { + } + else if (std::string(argv[i]) == "--seqname") { opt_seqname = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--no-display") { + } + else if (std::string(argv[i]) == "--no-display") { opt_display = false; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -116,7 +121,7 @@ int main(int argc, const char *argv[]) std::cout << "Display : " << (opt_display ? "enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); + std::string("Record mode: ") + (opt_record_mode ? 
std::string("single") : std::string("continuous")); if (!opt_seqname.empty()) { std::cout << text_record_mode << std::endl; @@ -131,10 +136,12 @@ int main(int argc, const char *argv[]) if (opt_type == "GigE" || opt_type == "gige") { g = factory.createPylonGrabber(vpPylonFactory::BASLER_GIGE); std::cout << "Opening Basler GigE camera: " << opt_device << std::endl; - } else if (opt_type == "USB" || opt_type == "usb") { + } + else if (opt_type == "USB" || opt_type == "usb") { g = factory.createPylonGrabber(vpPylonFactory::BASLER_USB); std::cout << "Opening Basler USB camera: " << opt_device << std::endl; - } else { + } + else { std::cout << "Error: only Basler GigE or USB cameras are supported." << std::endl; return EXIT_SUCCESS; } @@ -185,7 +192,8 @@ int main(int argc, const char *argv[]) if (d) { delete d; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } #else @@ -194,8 +202,5 @@ int main(int argc, const char *argv[]) #ifndef VISP_HAVE_PYLON std::cout << "Install Basler Pylon SDK, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-bebop2.cpp b/tutorial/grabber/tutorial-grabber-bebop2.cpp index 627c9ce686..7d5af10bd6 100644 --- a/tutorial/grabber/tutorial-grabber-bebop2.cpp +++ b/tutorial/grabber/tutorial-grabber-bebop2.cpp @@ -11,59 +11,59 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--ip
]" - << " [--hd-resolution]" - << " [--seqname ]" - << " [--record ]" - << " [--no-display]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--ip
]" + << " [--hd-resolution]" + << " [--seqname ]" + << " [--record ]" + << " [--no-display]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --ip
" << std::endl - << " IP address of the drone to which you want to connect." << std::endl - << " Default: 192.168.42.1" << std::endl - << std::endl - << " --hd-resolution" << std::endl - << " Enables HD 720p images instead of default 480p." << std::endl - << " Caution : The camera settings are different depending on whether the resolution is 720p or 480p." - << std::endl - << std::endl - << " --seqname " << std::endl - << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl - << " Default: empty." << std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --ip
" << std::endl + << " IP address of the drone to which you want to connect." << std::endl + << " Default: 192.168.42.1" << std::endl + << std::endl + << " --hd-resolution" << std::endl + << " Enables HD 720p images instead of default 480p." << std::endl + << " Caution : The camera settings are different depending on whether the resolution is 720p or 480p." + << std::endl + << std::endl + << " --seqname " << std::endl + << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl + << " Default: empty." << std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." << std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." << std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Examples to record a sequence of 720p images from drone with ip different from default:" << std::endl - << " " << argv[0] << " --seqname I%04d.png --ip 192.168.42.3 --hd_resolution" << std::endl - << " " << argv[0] << " --seqname folder/I%04d.png --record 0 --ip 192.168.42.3 --hd-resolution" - << std::endl - << std::endl - << " Examples to record single shot images:" << std::endl - << " " << argv[0] << " --seqname I%04d.png --record 1" << std::endl - << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Examples to record a sequence of 720p images from drone with ip different from default:" << std::endl + << " " << argv[0] << " --seqname I%04d.png --ip 192.168.42.3 --hd_resolution" << std::endl + << " " << argv[0] << " --seqname folder/I%04d.png --record 0 --ip 192.168.42.3 --hd-resolution" + << std::endl + << std::endl + << " Examples to record single shot images:" << std::endl + << " " << argv[0] << " --seqname I%04d.png --record 1" << std::endl + << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } @@ -72,7 +72,7 @@ void usage(const char *argv[], int error) */ int main(int argc, const char **argv) { -#if defined(VISP_HAVE_ARSDK) && defined(VISP_HAVE_FFMPEG) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_ARSDK) && defined(VISP_HAVE_FFMPEG) try { std::string opt_seqname; int opt_record_mode = 0; @@ -84,20 +84,26 @@ int main(int argc, const char **argv) if (std::string(argv[i]) == "--seqname") { opt_seqname = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { + } + else if (std::string(argv[i]) == "--ip" && i + 1 < argc) { ip_address = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--hd-resolution") { + } + 
else if (std::string(argv[i]) == "--hd-resolution") { image_res = 1; - } else if (std::string(argv[i]) == "--no-display") { + } + else if (std::string(argv[i]) == "--no-display") { opt_display = false; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -111,7 +117,7 @@ int main(int argc, const char **argv) std::cout << "Display : " << (opt_display ? "enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); + std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); if (!opt_seqname.empty()) { std::cout << text_record_mode << std::endl; @@ -130,7 +136,8 @@ int main(int argc, const char **argv) drone.startStreaming(); drone.setExposure(1.5f); drone.getRGBaImage(I); - } else { + } + else { std::cout << "Error : failed to setup drone control" << std::endl; return EXIT_FAILURE; } @@ -178,7 +185,8 @@ int main(int argc, const char **argv) if (d) { delete d; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Caught an exception: " << e << std::endl; } #else @@ -190,9 +198,6 @@ int main(int argc, const char **argv) #ifndef VISP_HAVE_FFMPEG std::cout << "Install ffmpeg, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif // #if defined(VISP_HAVE_ARSDK) && defined(VISP_HAVE_FFMPEG) } #else diff --git a/tutorial/grabber/tutorial-grabber-flycapture.cpp b/tutorial/grabber/tutorial-grabber-flycapture.cpp index 53791a6edf..d9b65ea13d 100644 --- a/tutorial/grabber/tutorial-grabber-flycapture.cpp +++ b/tutorial/grabber/tutorial-grabber-flycapture.cpp @@ -9,58 +9,58 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--change-settings]" - << " [--seqname ]" - << " [--record ]" - << " [--no-display]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--change-settings]" + << " [--seqname ]" + << " [--record ]" + << " [--no-display]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --change-settings" << std::endl - << " Force camera settings to auto shutter, auto gain and 640x480 MONO8 image" << std::endl - << " acquisition at 60 fps." << std::endl - << std::endl - << " --seqname " << std::endl - << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl - << " Default: empty." << std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." 
<< std::endl - << std::endl; + << " --change-settings" << std::endl + << " Force camera settings to auto shutter, auto gain and 640x480 MONO8 image" << std::endl + << " acquisition at 60 fps." << std::endl + << std::endl + << " --seqname " << std::endl + << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl + << " Default: empty." << std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." << std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." << std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Examples to record a sequence:" << std::endl - << " " << argv[0] << " --seqname I%04d.png" << std::endl - << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl - << std::endl - << " Examples to record single shot images:\n" - << " " << argv[0] << " --seqname I%04d.png --record 1\n" - << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Examples to record a sequence:" << std::endl + << " " << argv[0] << " --seqname I%04d.png" << std::endl + << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl + << std::endl + << " Examples to record single shot images:\n" + << " " << argv[0] << " --seqname I%04d.png --record 1\n" + << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } int main(int argc, const char *argv[]) { -#if defined(VISP_HAVE_FLYCAPTURE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_FLYCAPTURE) try { std::string opt_seqname; int opt_record_mode = 0; @@ -70,18 +70,23 @@ int main(int argc, const char *argv[]) for (int i = 1; i < argc; i++) { if (std::string(argv[i]) == "--change-settings") { opt_change_settings = true; - } else if (std::string(argv[i]) == "--seqname") { + } + else if (std::string(argv[i]) == "--seqname") { opt_seqname = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--no-display") { + } + else if (std::string(argv[i]) == "--no-display") { opt_display = false; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -96,7 +101,7 @@ int main(int argc, const char *argv[]) std::cout << "Display : " << (opt_display ? "enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode: ") + (opt_record_mode ? 
std::string("single") : std::string("continuous")); + std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); if (!opt_seqname.empty()) { std::cout << text_record_mode << std::endl; @@ -114,8 +119,9 @@ int main(int argc, const char *argv[]) g.setShutter(true); // Turn auto shutter on g.setGain(true); // Turn auto gain on g.setVideoModeAndFrameRate(FlyCapture2::VIDEOMODE_640x480Y8, FlyCapture2::FRAMERATE_60); - } catch (...) { // If settings are not available just catch execption to - // continue with default settings + } + catch (...) { // If settings are not available just catch execption to + // continue with default settings std::cout << "Warning: cannot modify camera settings" << std::endl; } } @@ -170,7 +176,8 @@ int main(int argc, const char *argv[]) if (d) { delete d; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e.getStringMessage() << std::endl; } #else @@ -179,8 +186,5 @@ int main(int argc, const char *argv[]) #ifndef VISP_HAVE_FLYCAPTURE std::cout << "Install Flycapture SDK, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-ids-ueye.cpp b/tutorial/grabber/tutorial-grabber-ids-ueye.cpp index d032fdded0..7c12ec12c2 100644 --- a/tutorial/grabber/tutorial-grabber-ids-ueye.cpp +++ b/tutorial/grabber/tutorial-grabber-ids-ueye.cpp @@ -11,92 +11,92 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--device ]" - << " [--config-file ]" - << " [--fps ]" - << " [--gain ]" - << " [--shutter ]" - << " [--subsample ]" - << " [--white-balance ]" - << " [--color-mode ]" - << " [--seqname ]" - << " [--record ]" - << " [--no-display]" - << " [--verbose] [-v]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--device ]" + << " [--config-file ]" + << " [--fps ]" + << " [--gain ]" + << " [--shutter ]" + << " [--subsample ]" + << " [--white-balance ]" + << " [--color-mode ]" + << " [--seqname ]" + << " [--record ]" + << " [--no-display]" + << " [--verbose] [-v]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --device " << std::endl - << " Camera device index. Set 0 to dial with the first camera," << std::endl - << " and 1 to dial with the second camera attached to the computer." << std::endl - << " Default: 0" << std::endl - << std::endl - << " --config-file " << std::endl - << " Camera config file." << std::endl - << " Default: empty." << std::endl - << std::endl - << " --fps " << std::endl - << " \"Auto\" or a frames per second value." << std::endl - << " Default: current setting." << std::endl - << std::endl - << " --gain " << std::endl - << " \"Auto\" or manual gain with a value in 0 - 100." << std::endl - << " Default: current setting." << std::endl - << std::endl - << " --shutter " << std::endl - << " \"Auto\" or manual shutter." << std::endl - << " Default: current setting." << std::endl - << std::endl - << " --subsample " << std::endl - << " Subsample factor to reduce image size alog rows and columns." << std::endl - << " Default: 1." << std::endl - << std::endl - << " --white-balance " << std::endl - << " Possible values are 0 (disabled) or 1 (enabled)." << std::endl - << " Default: current setting." 
<< std::endl - << std::endl - << " --color-mode " << std::endl - << " Possible modes are: mono8, rgb24, rgb32, bayer8." << std::endl - << " Default: current setting." << std::endl - << std::endl - << " --seqname " << std::endl - << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl - << " Default: empty." << std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --verbose, -v" << std::endl - << " Enable extra printings." << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --device " << std::endl + << " Camera device index. Set 0 to dial with the first camera," << std::endl + << " and 1 to dial with the second camera attached to the computer." << std::endl + << " Default: 0" << std::endl + << std::endl + << " --config-file " << std::endl + << " Camera config file." << std::endl + << " Default: empty." << std::endl + << std::endl + << " --fps " << std::endl + << " \"Auto\" or a frames per second value." << std::endl + << " Default: current setting." << std::endl + << std::endl + << " --gain " << std::endl + << " \"Auto\" or manual gain with a value in 0 - 100." << std::endl + << " Default: current setting." << std::endl + << std::endl + << " --shutter " << std::endl + << " \"Auto\" or manual shutter." << std::endl + << " Default: current setting." << std::endl + << std::endl + << " --subsample " << std::endl + << " Subsample factor to reduce image size alog rows and columns." << std::endl + << " Default: 1." << std::endl + << std::endl + << " --white-balance " << std::endl + << " Possible values are 0 (disabled) or 1 (enabled)." << std::endl + << " Default: current setting." << std::endl + << std::endl + << " --color-mode " << std::endl + << " Possible modes are: mono8, rgb24, rgb32, bayer8." << std::endl + << " Default: current setting." << std::endl + << std::endl + << " --seqname " << std::endl + << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl + << " Default: empty." << std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." << std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." << std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --verbose, -v" << std::endl + << " Enable extra printings." << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." 
<< std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Examples to record a sequence of images:" << std::endl - << " " << argv[0] << " --seqname I%04d.png" << std::endl - << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl - << std::endl - << " Examples to record single shot images:\n" - << " " << argv[0] << " --seqname I%04d.png --record 1\n" - << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Examples to record a sequence of images:" << std::endl + << " " << argv[0] << " --seqname I%04d.png" << std::endl + << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl + << std::endl + << " Examples to record single shot images:\n" + << " " << argv[0] << " --seqname I%04d.png --record 1\n" + << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } @@ -107,7 +107,7 @@ void usage(const char *argv[], int error) */ int main(int argc, const char *argv[]) { -#if defined(VISP_HAVE_UEYE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_UEYE) try { unsigned int opt_device = 0; std::string opt_seqname; @@ -126,39 +126,51 @@ int main(int argc, const char *argv[]) if (std::string(argv[i]) == "--device") { opt_device = static_cast(std::atoi(argv[i + 1])); i++; - } else if (std::string(argv[i]) == "--config-file") { + } + else if (std::string(argv[i]) == "--config-file") { opt_config_file = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--fps") { + } + else if (std::string(argv[i]) == "--fps") { opt_fps = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--gain") { + } + else if (std::string(argv[i]) == "--gain") { opt_gain = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--shutter") { + } + else if (std::string(argv[i]) == "--shutter") { opt_shutter = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--subsample") { + } + else if (std::string(argv[i]) == "--subsample") { opt_subsample = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--white-balance") { + } + else if (std::string(argv[i]) == "--white-balance") { opt_white_balance = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--color-mode") { + } + else if (std::string(argv[i]) == "--color-mode") { opt_color_mode = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--seqname") { + } + else if (std::string(argv[i]) == "--seqname") { opt_seqname = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--verbose" || std::string(argv[i]) == "-v") { + } + else if (std::string(argv[i]) == "--verbose" || std::string(argv[i]) == "-v") { opt_verbose = true; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -191,7 +203,7 
@@ int main(int argc, const char *argv[]) std::cout << "Found " << cam_ids.size() << " cameras :" << std::endl; for (unsigned int i = 0; i < cam_ids.size(); i++) { std::cout << (opt_device == i ? " * Camera " : " Camera ") << i << " - ID: " << cam_ids[i] - << " Model: " << cam_models[i] << " S/N: " << cam_serials[i] << std::endl; + << " Model: " << cam_models[i] << " S/N: " << cam_serials[i] << std::endl; } //! [List camera info] @@ -203,9 +215,9 @@ int main(int argc, const char *argv[]) //! [Active camera info] std::cout << "Active camera is Model " << g.getActiveCameraModel() - << " with S/N: " << g.getActiveCameraSerialNumber() << std::endl; + << " with S/N: " << g.getActiveCameraSerialNumber() << std::endl; - //! [Open connection] +//! [Open connection] g.open(I); //! [Open connection] @@ -229,19 +241,21 @@ int main(int argc, const char *argv[]) if (!opt_gain.empty()) { if (opt_gain == "auto") { std::cout << "Auto gain : " << (g.setGain(true) ? "enabled" : "N/A") << std::endl; - } else { + } + else { std::cout << "Manual gain : " - << (g.setGain(false, std::atoi(opt_gain.c_str())) ? (std::string(opt_gain) + " %") : "N/A") - << std::endl; + << (g.setGain(false, std::atoi(opt_gain.c_str())) ? (std::string(opt_gain) + " %") : "N/A") + << std::endl; } } if (!opt_shutter.empty()) { if (opt_shutter == "auto") { std::cout << "Auto shutter : " << (g.setExposure(true) ? "enabled" : "N/A") << std::endl; - } else { + } + else { std::cout << "Manual shutter : " - << (g.setExposure(false, std::atof(opt_shutter.c_str())) ? (std::string(opt_shutter) + " ms") : "N/A") - << std::endl; + << (g.setExposure(false, std::atof(opt_shutter.c_str())) ? (std::string(opt_shutter) + " ms") : "N/A") + << std::endl; } } @@ -261,10 +275,11 @@ int main(int argc, const char *argv[]) if (!opt_fps.empty()) { if (opt_fps == "auto") { std::cout << "Auto framerate : " << (g.setFrameRate(true) ? "enabled" : "N/A") << std::endl; - } else { + } + else { std::cout << "Manual framerate : " - << (g.setFrameRate(false, std::atof(opt_fps.c_str())) ? (std::string(opt_fps) + " Hz") : "N/A") - << std::endl; + << (g.setFrameRate(false, std::atof(opt_fps.c_str())) ? (std::string(opt_fps) + " Hz") : "N/A") + << std::endl; } } @@ -272,7 +287,7 @@ int main(int argc, const char *argv[]) std::cout << "Display : " << (opt_display ? "enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode : ") + (opt_record_mode ? std::string("single") : std::string("continuous")); + std::string("Record mode : ") + (opt_record_mode ? 
std::string("single") : std::string("continuous")); if (!opt_seqname.empty()) { std::cout << text_record_mode << std::endl; @@ -344,7 +359,8 @@ int main(int argc, const char *argv[]) if (d) { delete d; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } #else @@ -353,8 +369,5 @@ int main(int argc, const char *argv[]) #ifndef VISP_HAVE_UEYE std::cout << "Install IDS uEye SDK, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-multiple-realsense.cpp b/tutorial/grabber/tutorial-grabber-multiple-realsense.cpp index 3d09ec70f3..7df532fc8f 100644 --- a/tutorial/grabber/tutorial-grabber-multiple-realsense.cpp +++ b/tutorial/grabber/tutorial-grabber-multiple-realsense.cpp @@ -12,28 +12,30 @@ int main(int argc, char **argv) { -#if defined(VISP_HAVE_REALSENSE2) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) && \ - (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE2) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) std::vector > type_serial_nb; std::vector cam_found; for (int i = 0; i < argc; i++) { if (std::string(argv[i]) == "--T265") { type_serial_nb.push_back(std::make_pair("T265", std::string(argv[i + 1]))); - } else if (std::string(argv[i]) == "--D435") { + } + else if (std::string(argv[i]) == "--D435") { type_serial_nb.push_back(std::make_pair("D435", std::string(argv[i + 1]))); - } else if (std::string(argv[i]) == "--SR300") { + } + else if (std::string(argv[i]) == "--SR300") { type_serial_nb.push_back(std::make_pair("SR300", std::string(argv[i + 1]))); - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { std::cout << "\nUsage: " << argv[0] - << " [--T265 ] [--D435 ] [--SR300 ]\n" - << "\nExample to use 2 T265 cameras:\n" - << " " << argv[0] << " --T265 11622110511 --T265 11622110433 \n" - << "\nExample to use 1 T265 and 1 D435 cameras:\n" - << " " << argv[0] << " --T265 11622110511 --D435 752112077408 \n" - << "\nExample to use 2 T265 and 1 D435 cameras:\n" - << " " << argv[0] << " --T265 11622110511 --T265 11622110433 --D435 752112070408 \n" - << std::endl; + << " [--T265 ] [--D435 ] [--SR300 ]\n" + << "\nExample to use 2 T265 cameras:\n" + << " " << argv[0] << " --T265 11622110511 --T265 11622110433 \n" + << "\nExample to use 1 T265 and 1 D435 cameras:\n" + << " " << argv[0] << " --T265 11622110511 --D435 752112077408 \n" + << "\nExample to use 2 T265 and 1 D435 cameras:\n" + << " " << argv[0] << " --T265 11622110511 --T265 11622110433 --D435 752112070408 \n" + << std::endl; return EXIT_SUCCESS; } } @@ -64,7 +66,8 @@ int main(int argc, char **argv) if (!cam_found.back()) { std::cout << "Device with ID: " << type_serial_nb[i].second << " not found." << std::endl; } - } else { // D435 or SR300 + } + else { // D435 or SR300 D435_cfg.enable_device(type_serial_nb[i].second); D435_cfg.disable_stream(RS2_STREAM_DEPTH); D435_cfg.disable_stream(RS2_STREAM_INFRARED); @@ -121,11 +124,8 @@ int main(int argc, char **argv) #endif #if !(RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) std::cout << "librealsense is detected but its version is too old. 
Install librealsense version > 2.31.0, configure " - "and build ViSP again to use this example" - << std::endl; -#endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; + "and build ViSP again to use this example" + << std::endl; #endif #endif return EXIT_SUCCESS; diff --git a/tutorial/grabber/tutorial-grabber-opencv.cpp b/tutorial/grabber/tutorial-grabber-opencv.cpp index 7c1f2b6579..7978d6e770 100644 --- a/tutorial/grabber/tutorial-grabber-opencv.cpp +++ b/tutorial/grabber/tutorial-grabber-opencv.cpp @@ -10,7 +10,7 @@ #define USE_COLOR // Comment to acquire gray level images -void usage(const char *argv [], int error) +void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl << " " << argv[0] << " [--device ]" @@ -69,7 +69,7 @@ void usage(const char *argv [], int error) // usage: binary -h // device name: 0 is the default to dial with the first camera, // 1 to dial with a second camera attached to the computer -int main(int argc, const char *argv []) +int main(int argc, const char *argv[]) { int opt_device = 0; std::string opt_seqname; @@ -118,7 +118,7 @@ int main(int argc, const char *argv []) std::cout << "Record name: " << opt_seqname << std::endl; } -#if defined(HAVE_OPENCV_VIDEOIO) && defined(HAVE_OPENCV_HIGHGUI) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(HAVE_OPENCV_VIDEOIO) && defined(HAVE_OPENCV_HIGHGUI) try { cv::VideoCapture cap(opt_device); // open the default camera if (!cap.isOpened()) { // check if we succeeded @@ -186,8 +186,5 @@ int main(int argc, const char *argv []) #if !defined(HAVE_OPENCV_VIDEOIO) std::cout << "Install OpenCV videoio module, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-realsense-T265.cpp b/tutorial/grabber/tutorial-grabber-realsense-T265.cpp index 4d3fb8ce2a..a592372027 100644 --- a/tutorial/grabber/tutorial-grabber-realsense-T265.cpp +++ b/tutorial/grabber/tutorial-grabber-realsense-T265.cpp @@ -9,45 +9,45 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--fps <6|15|30|60>]" - << " [--record ]" - << " [--no-display]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--fps <6|15|30|60>]" + << " [--record ]" + << " [--no-display]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --fps <6|15|30|60>" << std::endl - << " Frames per second." << std::endl - << " Default: 30." << std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --fps <6|15|30|60>" << std::endl + << " Frames per second." << std::endl + << " Default: 30." 
<< std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." << std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." << std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Example to record a sequence of images:" << std::endl - << " " << argv[0] << " --record 0" << std::endl - << std::endl - << " Example to record single shot images:\n" - << " " << argv[0] << " --record 1" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Example to record a sequence of images:" << std::endl + << " " << argv[0] << " --record 0" << std::endl + << std::endl + << " Example to record single shot images:\n" + << " " << argv[0] << " --record 1" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } @@ -56,8 +56,7 @@ void usage(const char *argv[], int error) */ int main(int argc, const char *argv[]) { -#if defined(VISP_HAVE_REALSENSE2) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) && \ - (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE2) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) try { std::string opt_seqname_left = "left-%04d.png", opt_seqname_right = "right-%04d.png"; int opt_record_mode = 0; @@ -68,15 +67,19 @@ int main(int argc, const char *argv[]) if (std::string(argv[i]) == "--fps") { opt_fps = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--no-display") { + } + else if (std::string(argv[i]) == "--no-display") { opt_display = false; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -90,7 +93,7 @@ int main(int argc, const char *argv[]) std::cout << "Display : " << (opt_display ? "enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); + std::string("Record mode: ") + (opt_record_mode ? 
std::string("single") : std::string("continuous")); std::cout << text_record_mode << std::endl; std::cout << "Left record name: " << opt_seqname_left << std::endl; @@ -164,7 +167,8 @@ int main(int argc, const char *argv[]) if (display_right) { delete display_right; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } #else @@ -173,8 +177,5 @@ int main(int argc, const char *argv[]) #if !(defined(VISP_HAVE_REALSENSE2) && (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0))) std::cout << "Install librealsense version > 2.31.0, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-realsense.cpp b/tutorial/grabber/tutorial-grabber-realsense.cpp index e22e621be7..6c39b2a632 100644 --- a/tutorial/grabber/tutorial-grabber-realsense.cpp +++ b/tutorial/grabber/tutorial-grabber-realsense.cpp @@ -10,63 +10,63 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--fps <6|15|30|60>]" - << " [--width ]" - << " [--height ]" - << " [--seqname ]" - << " [--record ]" - << " [--no-display]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--fps <6|15|30|60>]" + << " [--width ]" + << " [--height ]" + << " [--seqname ]" + << " [--record ]" + << " [--no-display]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --fps <6|15|30|60>" << std::endl - << " Frames per second." << std::endl - << " Default: 30." << std::endl - << std::endl - << " --width " << std::endl - << " Default: 640." << std::endl - << std::endl - << " --height " << std::endl - << " Default: 480." << std::endl - << std::endl - << " --seqname " << std::endl - << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl - << " Default: empty." << std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --fps <6|15|30|60>" << std::endl + << " Frames per second." << std::endl + << " Default: 30." << std::endl + << std::endl + << " --width " << std::endl + << " Default: 640." << std::endl + << std::endl + << " --height " << std::endl + << " Default: 480." << std::endl + << std::endl + << " --seqname " << std::endl + << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl + << " Default: empty." << std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." << std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." 
<< std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Examples to record a sequence of successive images in 640x480 resolution:" << std::endl - << " " << argv[0] << " --seqname I%04d.png" << std::endl - << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl - << std::endl - << " Examples to record single shot 640x480 images:\n" - << " " << argv[0] << " --seqname I%04d.png --record 1\n" - << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl - << std::endl - << " Examples to record single shot 1280x720 images:\n" - << " " << argv[0] << " --seqname I%04d.png --record 1 --width 1280 --height 720" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Examples to record a sequence of successive images in 640x480 resolution:" << std::endl + << " " << argv[0] << " --seqname I%04d.png" << std::endl + << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl + << std::endl + << " Examples to record single shot 640x480 images:\n" + << " " << argv[0] << " --seqname I%04d.png --record 1\n" + << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl + << std::endl + << " Examples to record single shot 1280x720 images:\n" + << " " << argv[0] << " --seqname I%04d.png --record 1 --width 1280 --height 720" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } @@ -75,7 +75,7 @@ void usage(const char *argv[], int error) */ int main(int argc, const char *argv[]) { -#if defined(VISP_HAVE_REALSENSE) || defined(VISP_HAVE_REALSENSE2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE) || defined(VISP_HAVE_REALSENSE2) try { std::string opt_seqname; int opt_record_mode = 0; @@ -88,24 +88,31 @@ int main(int argc, const char *argv[]) if (std::string(argv[i]) == "--fps") { opt_fps = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--seqname") { + } + else if (std::string(argv[i]) == "--seqname") { opt_seqname = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--width") { + } + else if (std::string(argv[i]) == "--width") { opt_width = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--height") { + } + else if (std::string(argv[i]) == "--height") { opt_height = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--no-display") { + } + else if (std::string(argv[i]) == "--no-display") { opt_display = false; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -124,7 +131,7 @@ int main(int argc, const char *argv[]) std::cout << "Display : " << (opt_display ? 
"enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); + std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); if (!opt_seqname.empty()) { std::cout << text_record_mode << std::endl; @@ -191,7 +198,8 @@ int main(int argc, const char *argv[]) if (d) { delete d; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } #else @@ -200,8 +208,5 @@ int main(int argc, const char *argv[]) #if !(defined(VISP_HAVE_REALSENSE) || defined(VISP_HAVE_REALSENSE2)) std::cout << "Install librealsense version > 2.31.0, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-rgbd-D435-structurecore.cpp b/tutorial/grabber/tutorial-grabber-rgbd-D435-structurecore.cpp index 22805544c2..0108f2002a 100644 --- a/tutorial/grabber/tutorial-grabber-rgbd-D435-structurecore.cpp +++ b/tutorial/grabber/tutorial-grabber-rgbd-D435-structurecore.cpp @@ -12,8 +12,7 @@ */ int main(int argc, char **argv) { -#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && \ - (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_OCCIPITAL_STRUCTURE) // Both cameras can stream color and depth in 640x480 resolution. unsigned int width = 640, height = 480; @@ -80,8 +79,5 @@ int main(int argc, char **argv) #if !(defined(VISP_HAVE_REALSENSE2)) std::cout << "Install librealsense, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-structure-core.cpp b/tutorial/grabber/tutorial-grabber-structure-core.cpp index cf1f1a2100..b668684ce8 100644 --- a/tutorial/grabber/tutorial-grabber-structure-core.cpp +++ b/tutorial/grabber/tutorial-grabber-structure-core.cpp @@ -9,61 +9,61 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--depth-fps <6|15|30|60>]" - << " [--depth-fps <6|15|30|60>]" - << " [--sxga]" - << " [--no-frame-sync]" - << " [--record ]" - << " [--no-display]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--depth-fps <6|15|30|60>]" + << " [--depth-fps <6|15|30|60>]" + << " [--sxga]" + << " [--no-frame-sync]" + << " [--record ]" + << " [--no-display]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --visible-fps <6|15|30|60>" << std::endl - << " Visible camera (gray or color) frames per second." << std::endl - << " Default: 30." << std::endl - << std::endl - << " --depth-fps <6|15|30|60>" << std::endl - << " Depth camera frames per second." << std::endl - << " Default: 30." << std::endl - << std::endl - << " --sxga" << std::endl - << " If available, output 1280x960 high resolution depth array." << std::endl - << std::endl - << " --no-frame-sync" << std::endl - << " If available, disable frame synchronization." 
<< std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --visible-fps <6|15|30|60>" << std::endl + << " Visible camera (gray or color) frames per second." << std::endl + << " Default: 30." << std::endl + << std::endl + << " --depth-fps <6|15|30|60>" << std::endl + << " Depth camera frames per second." << std::endl + << " Default: 30." << std::endl + << std::endl + << " --sxga" << std::endl + << " If available, output 1280x960 high resolution depth array." << std::endl + << std::endl + << " --no-frame-sync" << std::endl + << " If available, disable frame synchronization." << std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." << std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." << std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." 
<< std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Example to record a sequence of images:" << std::endl - << " " << argv[0] << " --record 0" << std::endl - << std::endl - << " Example to record a sequence of images at different frame rates:" << std::endl - << " " << argv[0] << " --record 0 --depth-fps 15 --visible-fps 10 --no-frame-sync" << std::endl - << std::endl - << " Example to record single shot images:\n" - << " " << argv[0] << " --record 1" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Example to record a sequence of images:" << std::endl + << " " << argv[0] << " --record 0" << std::endl + << std::endl + << " Example to record a sequence of images at different frame rates:" << std::endl + << " " << argv[0] << " --record 0 --depth-fps 15 --visible-fps 10 --no-frame-sync" << std::endl + << std::endl + << " Example to record single shot images:\n" + << " " << argv[0] << " --record 1" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } @@ -72,7 +72,7 @@ void usage(const char *argv[], int error) */ int main(int argc, const char *argv[]) { -#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) try { std::string opt_seqname_visible = "visible-%04d.png", opt_seqname_depth = "depth-%04d.png"; int opt_record_mode = 0; @@ -85,22 +85,29 @@ int main(int argc, const char *argv[]) if (std::string(argv[i]) == "--depth-fps") { opt_depth_fps = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--visible-fps") { + } + else if (std::string(argv[i]) == "--visible-fps") { opt_visible_fps = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--sxga") { + } + else if (std::string(argv[i]) == "--sxga") { opt_sxga = true; - } else if (std::string(argv[i]) == "--no-frame-sync") { + } + else if (std::string(argv[i]) == "--no-frame-sync") { opt_frame_sync = false; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--no-display") { + } + else if (std::string(argv[i]) == "--no-display") { opt_display = false; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -115,7 +122,7 @@ int main(int argc, const char *argv[]) std::cout << "Display : " << (opt_display ? "enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); + std::string("Record mode: ") + (opt_record_mode ? 
std::string("single") : std::string("continuous")); std::cout << text_record_mode << std::endl; std::cout << "Visible record name: " << opt_seqname_visible << std::endl; @@ -213,7 +220,8 @@ int main(int argc, const char *argv[]) delete display_depth; } } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } #else @@ -222,8 +230,5 @@ int main(int argc, const char *argv[]) #if !(defined(VISP_HAVE_OCCIPITAL_STRUCTURE)) std::cout << "Install libStructure, configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif #endif } diff --git a/tutorial/grabber/tutorial-grabber-v4l2.cpp b/tutorial/grabber/tutorial-grabber-v4l2.cpp index 3cf839d98a..245f04a2fb 100644 --- a/tutorial/grabber/tutorial-grabber-v4l2.cpp +++ b/tutorial/grabber/tutorial-grabber-v4l2.cpp @@ -10,61 +10,61 @@ void usage(const char *argv[], int error) { std::cout << "SYNOPSIS" << std::endl - << " " << argv[0] << " [--device ]" - << " [--scale ]" - << " [--seqname ]" - << " [--record ]" - << " [--no-display]" - << " [--help] [-h]" << std::endl - << std::endl; + << " " << argv[0] << " [--device ]" + << " [--scale ]" + << " [--seqname ]" + << " [--record ]" + << " [--no-display]" + << " [--help] [-h]" << std::endl + << std::endl; std::cout << "DESCRIPTION" << std::endl - << " --device " << std::endl - << " Camera device index. Set 0 to dial with the first camera," << std::endl - << " and 1 to dial with the second camera attached to the computer." << std::endl - << " Default: 0 to consider /dev/video0 device." << std::endl - << std::endl - << " --scale " << std::endl - << " Subsampling factor applied to the images captured by the device." << std::endl - << " Default: 1" << std::endl - << std::endl - << " --seqname " << std::endl - << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl - << " Default: empty." << std::endl - << std::endl - << " --record " << std::endl - << " Allowed values for mode are:" << std::endl - << " 0: record all the captures images (continuous mode)," << std::endl - << " 1: record only images selected by a user click (single shot mode)." << std::endl - << " Default mode: 0" << std::endl - << std::endl - << " --no-display" << std::endl - << " Disable displaying captured images." << std::endl - << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." - << std::endl - << std::endl - << " --help, -h" << std::endl - << " Print this helper message." << std::endl - << std::endl; + << " --device " << std::endl + << " Camera device index. Set 0 to dial with the first camera," << std::endl + << " and 1 to dial with the second camera attached to the computer." << std::endl + << " Default: 0 to consider /dev/video0 device." << std::endl + << std::endl + << " --scale " << std::endl + << " Subsampling factor applied to the images captured by the device." << std::endl + << " Default: 1" << std::endl + << std::endl + << " --seqname " << std::endl + << " Name of the sequence of image to create (ie: /tmp/image%04d.jpg)." << std::endl + << " Default: empty." << std::endl + << std::endl + << " --record " << std::endl + << " Allowed values for mode are:" << std::endl + << " 0: record all the captures images (continuous mode)," << std::endl + << " 1: record only images selected by a user click (single shot mode)." 
<< std::endl + << " Default mode: 0" << std::endl + << std::endl + << " --no-display" << std::endl + << " Disable displaying captured images." << std::endl + << " When used and sequence name specified, record mode is internally set to 1 (continuous mode)." + << std::endl + << std::endl + << " --help, -h" << std::endl + << " Print this helper message." << std::endl + << std::endl; std::cout << "USAGE" << std::endl - << " Example to visualize images:" << std::endl - << " " << argv[0] << std::endl - << std::endl - << " Example to visualize images from a second camera:" << std::endl - << " " << argv[0] << " --device 1" << std::endl - << std::endl - << " Examples to record a sequence:" << std::endl - << " " << argv[0] << " --seqname I%04d.png" << std::endl - << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl - << std::endl - << " Examples to record single shot images:\n" - << " " << argv[0] << " --seqname I%04d.png --record 1\n" - << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl - << std::endl; + << " Example to visualize images:" << std::endl + << " " << argv[0] << std::endl + << std::endl + << " Example to visualize images from a second camera:" << std::endl + << " " << argv[0] << " --device 1" << std::endl + << std::endl + << " Examples to record a sequence:" << std::endl + << " " << argv[0] << " --seqname I%04d.png" << std::endl + << " " << argv[0] << " --seqname folder/I%04d.png --record 0" << std::endl + << std::endl + << " Examples to record single shot images:\n" + << " " << argv[0] << " --seqname I%04d.png --record 1\n" + << " " << argv[0] << " --seqname folder/I%04d.png --record 1" << std::endl + << std::endl; if (error) { std::cout << "Error" << std::endl - << " " - << "Unsupported parameter " << argv[error] << std::endl; + << " " + << "Unsupported parameter " << argv[error] << std::endl; } } @@ -76,7 +76,7 @@ void usage(const char *argv[], int error) */ int main(int argc, const char *argv[]) { -#if defined(VISP_HAVE_V4L2) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_V4L2) try { int opt_device = 0; unsigned int opt_scale = 1; // Default value is 2 in the constructor. Turn @@ -89,21 +89,27 @@ int main(int argc, const char *argv[]) if (std::string(argv[i]) == "--device") { opt_device = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--scale") { + } + else if (std::string(argv[i]) == "--scale") { opt_scale = (unsigned int)atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--seqname") { + } + else if (std::string(argv[i]) == "--seqname") { opt_seqname = std::string(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--record") { + } + else if (std::string(argv[i]) == "--record") { opt_record_mode = std::atoi(argv[i + 1]); i++; - } else if (std::string(argv[i]) == "--no-display") { + } + else if (std::string(argv[i]) == "--no-display") { opt_display = false; - } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + } + else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { usage(argv, 0); return EXIT_SUCCESS; - } else { + } + else { usage(argv, i); return EXIT_FAILURE; } @@ -117,7 +123,7 @@ int main(int argc, const char *argv[]) std::cout << "Display : " << (opt_display ? "enabled" : "disabled") << std::endl; std::string text_record_mode = - std::string("Record mode: ") + (opt_record_mode ? std::string("single") : std::string("continuous")); + std::string("Record mode: ") + (opt_record_mode ? 
std::string("single") : std::string("continuous")); if (!opt_seqname.empty()) { std::cout << text_record_mode << std::endl; @@ -184,7 +190,8 @@ int main(int argc, const char *argv[]) if (d) { delete d; } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } #else @@ -193,8 +200,6 @@ int main(int argc, const char *argv[]) #ifndef VISP_HAVE_V4L2 std::cout << "Install Video 4 Linux 2 (v4l2), configure and build ViSP again to use this example" << std::endl; #endif -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) - std::cout << "This tutorial should be built with c++11 support" << std::endl; -#endif + #endif } diff --git a/tutorial/gui/pcl-visualizer/ClassUsingPclViewer.cpp b/tutorial/gui/pcl-visualizer/ClassUsingPclViewer.cpp index c7837498dd..5d1749565b 100644 --- a/tutorial/gui/pcl-visualizer/ClassUsingPclViewer.cpp +++ b/tutorial/gui/pcl-visualizer/ClassUsingPclViewer.cpp @@ -1,7 +1,7 @@ //! \example ClassUsingPclViewer.cpp #include "ClassUsingPclViewer.h" -#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_PCL) // PCL #include diff --git a/tutorial/gui/pcl-visualizer/ClassUsingPclViewer.h b/tutorial/gui/pcl-visualizer/ClassUsingPclViewer.h index cbfa0543d9..45a541348e 100644 --- a/tutorial/gui/pcl-visualizer/ClassUsingPclViewer.h +++ b/tutorial/gui/pcl-visualizer/ClassUsingPclViewer.h @@ -4,7 +4,7 @@ //! \example ClassUsingPclViewer.h #include -#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_PCL) #include #include diff --git a/tutorial/gui/pcl-visualizer/tutorial-pcl-viewer.cpp b/tutorial/gui/pcl-visualizer/tutorial-pcl-viewer.cpp index 0c016f1561..c2ddc12c67 100644 --- a/tutorial/gui/pcl-visualizer/tutorial-pcl-viewer.cpp +++ b/tutorial/gui/pcl-visualizer/tutorial-pcl-viewer.cpp @@ -4,7 +4,7 @@ // System include #include -#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) +#if defined(VISP_HAVE_PCL) // ViSP include #include @@ -91,7 +91,7 @@ std::string getAvailableDisplayMode(const std::string &prefix = "< ", const std: } //! [Enum for mode choice] -int main(int argc, char *argv []) +int main(int argc, char *argv[]) { //! [Default arguments values] const double def_addedNoise = 0.; // Standard deviation of the noise added to the points. diff --git a/tutorial/image/drawingHelpers.h b/tutorial/image/drawingHelpers.h index ad3b4e57aa..e612b20237 100644 --- a/tutorial/image/drawingHelpers.h +++ b/tutorial/image/drawingHelpers.h @@ -37,67 +37,67 @@ namespace drawingHelpers { - #if defined(VISP_HAVE_X11) - extern vpDisplayX d; - #elif defined(HAVE_OPENCV_HIGHGUI) - extern vpDisplayOpenCV d; - #elif defined(VISP_HAVE_GTK) - extern vpDisplayGTK d; - #elif defined(VISP_HAVE_GDI) - extern vpDisplayGDI d; - #elif defined(VISP_HAVE_D3D9) - extern vpDisplayD3D d; - #endif +#if defined(VISP_HAVE_X11) +extern vpDisplayX d; +#elif defined(HAVE_OPENCV_HIGHGUI) +extern vpDisplayOpenCV d; +#elif defined(VISP_HAVE_GTK) +extern vpDisplayGTK d; +#elif defined(VISP_HAVE_GDI) +extern vpDisplayGDI d; +#elif defined(VISP_HAVE_D3D9) +extern vpDisplayD3D d; +#endif - extern vpImage I_disp; /*!< Displayed image.*/ +extern vpImage I_disp; /*!< Displayed image.*/ - /** - * \brief Display a RGB image and catch the user clicks to know if - * the user wants to stop the program. - * - * \param[out] I The RGB image to display. - * \param[in] title The title of the window. 
- * \param[in] blockingMode If true, wait for a click to switch to the next image. - * \return true The user wants to continue the application. - * \return false The user wants to stop the application. - */ - bool display(vpImage &I, const std::string &title, const bool &blockingMode); +/** + * \brief Display a RGB image and catch the user clicks to know if + * the user wants to stop the program. + * + * \param[out] I The RGB image to display. + * \param[in] title The title of the window. + * \param[in] blockingMode If true, wait for a click to switch to the next image. + * \return true The user wants to continue the application. + * \return false The user wants to stop the application. + */ +bool display(vpImage &I, const std::string &title, const bool &blockingMode); - /** - * \brief Display a gray-scale image and catch the user clicks to know if - * the user wants to stop the program. - * - * \param[out] I The gray-scale image to display. - * \param[in] title The title of the window. - * \param[in] blockingMode If true, wait for a click to switch to the next image. - * \return true The user wants to continue the application. - * \return false The user wants to stop the application. - */ - bool display(vpImage &I, const std::string &title, const bool &blockingMode); +/** + * \brief Display a gray-scale image and catch the user clicks to know if + * the user wants to stop the program. + * + * \param[out] I The gray-scale image to display. + * \param[in] title The title of the window. + * \param[in] blockingMode If true, wait for a click to switch to the next image. + * \return true The user wants to continue the application. + * \return false The user wants to stop the application. + */ +bool display(vpImage &I, const std::string &title, const bool &blockingMode); - /** - * \brief Display a double precision image and catch the user clicks to know if - * the user wants to stop the program. - * - * \param[out] I The double precision image to display. - * \param[in] title The title of the window. - * \param[in] blockingMode If true, wait for a click to switch to the next image. - * \return true The user wants to continue the application. - * \return false The user wants to stop the application. - */ - bool display(vpImage &D, const std::string &title, const bool &blockingMode); +/** + * \brief Display a double precision image and catch the user clicks to know if + * the user wants to stop the program. + * + * \param[out] D The double precision image to display. + * \param[in] title The title of the window. + * \param[in] blockingMode If true, wait for a click to switch to the next image. + * \return true The user wants to continue the application. + * \return false The user wants to stop the application. + */ +bool display(vpImage &D, const std::string &title, const bool &blockingMode); - /** - * \brief Display a floating-point precision image and catch the user clicks to know if - * the user wants to stop the program. - * - * \param[out] I The floating-point precision image to display. - * \param[in] title The title of the window. - * \param[in] blockingMode If true, wait for a click to switch to the next image. - * \return true The user wants to continue the application. - * \return false The user wants to stop the application. - */ - bool display(vpImage &F, const std::string &title, const bool &blockingMode); +/** + * \brief Display a floating-point precision image and catch the user clicks to know if + * the user wants to stop the program. 
+ * + * \param[out] F The floating-point precision image to display. + * \param[in] title The title of the window. + * \param[in] blockingMode If true, wait for a click to switch to the next image. + * \return true The user wants to continue the application. + * \return false The user wants to stop the application. + */ +bool display(vpImage &F, const std::string &title, const bool &blockingMode); } #endif diff --git a/tutorial/image/tutorial-image-colormap.cpp b/tutorial/image/tutorial-image-colormap.cpp index d281bd793d..b4bc0ff063 100644 --- a/tutorial/image/tutorial-image-colormap.cpp +++ b/tutorial/image/tutorial-image-colormap.cpp @@ -10,7 +10,6 @@ int main() { -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) try { std::map colormaps_str = { {vpColormap::COLORMAP_AUTUMN, "Colormap Autumn"}, @@ -59,16 +58,16 @@ int main() vpImage Icolor(Irgbf.getHeight(), Irgbf.getWidth()); vpImage Icolor2(Irgbf.getHeight() * 2, Irgbf.getWidth() * 2); - #if defined(VISP_HAVE_X11) +#if defined(VISP_HAVE_X11) vpDisplayX d(Icolor2, 10, 10, "Memorial"); - #elif defined(VISP_HAVE_GDI) +#elif defined(VISP_HAVE_GDI) vpDisplayGDI d(Icolor2, 10, 10, "Memorial"); - #elif defined(HAVE_OPENCV_HIGHGUI) +#elif defined(HAVE_OPENCV_HIGHGUI) vpDisplayOpenCV d(Icolor2, 10, 10, "Memorial"); - #else +#else std::cout << "No image viewer is available..." << std::endl; return EXIT_SUCCESS; - #endif +#endif vpFont font(20); for (size_t i = 0; i < colormaps.size(); i++) { @@ -92,16 +91,16 @@ int main() vpImage Icolor(I.getHeight(), I.getWidth()); vpImage Icolor2(I.getHeight() * 2, I.getWidth() * 2); - #if defined(VISP_HAVE_X11) +#if defined(VISP_HAVE_X11) vpDisplayX d(Icolor2, 10, 10, "Monkey"); - #elif defined(VISP_HAVE_GDI) +#elif defined(VISP_HAVE_GDI) vpDisplayGDI d(Icolor2, 10, 10, "Monkey"); - #elif defined(HAVE_OPENCV_HIGHGUI) +#elif defined(HAVE_OPENCV_HIGHGUI) vpDisplayOpenCV d(Icolor2, 10, 10, "Monkey"); - #else +#else std::cout << "No image viewer is available..." << std::endl; return EXIT_SUCCESS; - #endif +#endif vpFont font(20); for (size_t i = 0; i < colormaps.size(); i++) { @@ -125,16 +124,16 @@ int main() vpImage Icolor(I.getHeight(), I.getWidth()); vpImage Icolor2(I.getHeight() * 2, I.getWidth() * 2); - #if defined(VISP_HAVE_X11) +#if defined(VISP_HAVE_X11) vpDisplayX d(Icolor2, 10, 10, "Monkey"); - #elif defined(VISP_HAVE_GDI) +#elif defined(VISP_HAVE_GDI) vpDisplayGDI d(Icolor2, 10, 10, "Monkey"); - #elif defined(HAVE_OPENCV_HIGHGUI) +#elif defined(HAVE_OPENCV_HIGHGUI) vpDisplayOpenCV d(Icolor2, 10, 10, "Monkey"); - #else +#else std::cout << "No image viewer is available..." << std::endl; return EXIT_SUCCESS; - #endif +#endif vpFont font(20); for (size_t i = 0; i < colormaps.size(); i++) { @@ -150,11 +149,10 @@ int main() vpDisplay::getClick(Icolor2); } } - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cerr << "Catch an exception: " << e << std::endl; } -#else - std::cout << "This tutorial needs at least cxx11 standard." << std::endl; -#endif + return EXIT_SUCCESS; } diff --git a/tutorial/imgproc/hough-transform/tutorial-circle-hough.cpp b/tutorial/imgproc/hough-transform/tutorial-circle-hough.cpp index 105c71a613..d9e4b13c2b 100644 --- a/tutorial/imgproc/hough-transform/tutorial-circle-hough.cpp +++ b/tutorial/imgproc/hough-transform/tutorial-circle-hough.cpp @@ -15,8 +15,6 @@ #include #include -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - #include "drawingHelpers.h" //! 
[Enum input] @@ -535,10 +533,3 @@ int main(int argc, char **argv) } return EXIT_SUCCESS; } -#else -int main() -{ - std::cout << "This tutorial needs to be build at least with cxx 11 standard!" << std::endl; - return EXIT_SUCCESS; -} -#endif From 370a1318a165cba9d5c80abd18a01156314116b0 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Sat, 28 Oct 2023 17:26:03 +0200 Subject: [PATCH 35/43] Use nullptr (c++11 standard) instead of NULL --- .../visp-compute-chessboard-poses.cpp | 2 +- demo/wireframe-simulator/servoSimu4Points.cpp | 8 +- .../wireframe-simulator/servoSimuCylinder.cpp | 8 +- demo/wireframe-simulator/servoSimuSphere.cpp | 8 +- .../simulateCircle2DCamVelocity.cpp | 25 +- ...mulateFourPoints2DCartesianCamVelocity.cpp | 25 +- .../simulateFourPoints2DPolarCamVelocity.cpp | 25 +- example/device/display/displayD3D.cpp | 8 +- example/device/display/displayGDI.cpp | 8 +- example/device/display/displayGTK.cpp | 8 +- example/device/display/displayOpenCV.cpp | 8 +- example/device/display/displaySequence.cpp | 43 ++-- example/device/display/displayX.cpp | 8 +- example/device/display/displayXMulti.cpp | 8 +- example/device/framegrabber/grab1394CMU.cpp | 9 +- example/device/framegrabber/grab1394Two.cpp | 59 +++-- .../device/framegrabber/grabDirectShow.cpp | 4 +- .../framegrabber/grabDirectShowMulti.cpp | 4 +- example/device/framegrabber/grabDisk.cpp | 6 +- .../device/framegrabber/grabFlyCapture.cpp | 6 +- example/device/framegrabber/grabRealSense.cpp | 4 +- .../device/framegrabber/grabRealSense2.cpp | 4 +- example/device/framegrabber/grabV4l2.cpp | 4 +- .../framegrabber/grabV4l2MultiCpp11Thread.cpp | 14 +- .../device/framegrabber/readRealSenseData.cpp | 4 +- .../device/framegrabber/saveRealSenseData.cpp | 20 +- .../device/laserscanner/SickLDMRS-Process.cpp | 26 +- example/device/light/ringLight.cpp | 4 +- .../photometricVisualServoing.cpp | 6 +- ...hotometricVisualServoingWithoutVpServo.cpp | 6 +- example/homography/homographyHLM2DObject.cpp | 4 +- example/homography/homographyHLM3DObject.cpp | 4 +- .../homographyHartleyDLT2DObject.cpp | 4 +- .../homography/homographyRansac2DObject.cpp | 4 +- example/image/imageDiskRW.cpp | 8 +- .../manual/ogre/HelloWorldOgreAdvanced.cpp | 2 +- example/manual/simulation/manSimu4Dots.cpp | 2 +- example/manual/simulation/manSimu4Points.cpp | 2 +- example/math/BSpline.cpp | 12 +- example/math/quadprog.cpp | 2 +- example/math/quadprog_eq.cpp | 2 +- example/moments/image/servoMomentImage.cpp | 2 +- example/moments/points/servoMomentPoints.cpp | 4 +- .../moments/polygon/servoMomentPolygon.cpp | 4 +- example/ogre-simulator/AROgre.cpp | 12 +- example/ogre-simulator/AROgreBasic.cpp | 6 +- example/parse-argv/parse-argv1.cpp | 4 +- example/parse-argv/parse-argv2.cpp | 18 +- example/pose-estimation/poseVirtualVS.cpp | 6 +- .../servoSimuAfma6FourPoints2DCamVelocity.cpp | 4 +- .../camera/servoSimu3D_cMcd_CamVelocity.cpp | 4 +- ...oSimu3D_cMcd_CamVelocityWithoutVpServo.cpp | 4 +- .../camera/servoSimu3D_cdMc_CamVelocity.cpp | 4 +- ...oSimu3D_cdMc_CamVelocityWithoutVpServo.cpp | 4 +- .../camera/servoSimuCircle2DCamVelocity.cpp | 4 +- .../servoSimuCircle2DCamVelocityDisplay.cpp | 4 +- .../servoSimuCylinder2DCamVelocityDisplay.cpp | 4 +- ...inder2DCamVelocityDisplaySecondaryTask.cpp | 4 +- .../servoSimuFourPoints2DCamVelocity.cpp | 4 +- ...ervoSimuFourPoints2DCamVelocityDisplay.cpp | 4 +- ...imuFourPoints2DPolarCamVelocityDisplay.cpp | 4 +- .../servoSimuLine2DCamVelocityDisplay.cpp | 4 +- .../camera/servoSimuPoint2DCamVelocity1.cpp | 4 +- 
.../camera/servoSimuPoint2DCamVelocity2.cpp | 4 +- .../camera/servoSimuPoint2DCamVelocity3.cpp | 4 +- .../servoSimuPoint2DhalfCamVelocity1.cpp | 4 +- .../servoSimuPoint2DhalfCamVelocity2.cpp | 4 +- .../servoSimuPoint2DhalfCamVelocity3.cpp | 4 +- .../camera/servoSimuPoint3DCamVelocity.cpp | 4 +- .../camera/servoSimuSphere2DCamVelocity.cpp | 4 +- .../servoSimuSphere2DCamVelocityDisplay.cpp | 4 +- ...phere2DCamVelocityDisplaySecondaryTask.cpp | 4 +- ...ervoSimuSquareLine2DCamVelocityDisplay.cpp | 4 +- .../camera/servoSimuThetaUCamVelocity.cpp | 4 +- ...rvoSimuViper850FourPoints2DCamVelocity.cpp | 4 +- example/servo-afma4/moveAfma4.cpp | 4 +- .../servoAfma4Point2DCamVelocityKalman.cpp | 4 +- example/servo-biclops/moveBiclops.cpp | 4 +- .../servoBiclopsPoint2DArtVelocity.cpp | 4 +- example/servo-pioneer/sonarPioneerReader.cpp | 8 +- .../servoViper850Point2DCamVelocityKalman.cpp | 2 +- example/tools/histogram.cpp | 8 +- example/tools/parallelPort.cpp | 4 +- example/tracking/mbtEdgeKltTracking.cpp | 6 +- example/tracking/mbtEdgeTracking.cpp | 6 +- example/tracking/mbtGenericTracking.cpp | 8 +- example/tracking/mbtGenericTracking2.cpp | 8 +- example/tracking/mbtGenericTrackingDepth.cpp | 8 +- .../tracking/mbtGenericTrackingDepthOnly.cpp | 8 +- example/tracking/mbtKltTracking.cpp | 6 +- example/tracking/templateTracker.cpp | 16 +- example/tracking/trackDot.cpp | 6 +- example/tracking/trackDot2.cpp | 6 +- .../tracking/trackDot2WithAutoDetection.cpp | 6 +- example/tracking/trackKltOpencv.cpp | 6 +- example/tracking/trackMeCircle.cpp | 6 +- example/tracking/trackMeEllipse.cpp | 10 +- example/tracking/trackMeLine.cpp | 6 +- example/tracking/trackMeNurbs.cpp | 6 +- example/video/imageSequenceReader.cpp | 6 +- example/video/videoReader.cpp | 6 +- .../wireframeSimulator.cpp | 4 +- modules/ar/include/visp3/ar/vpAROgre.h | 2 +- modules/ar/include/visp3/ar/vpSimulator.h | 2 +- modules/ar/src/coin-simulator/vpAR.cpp | 4 +- modules/ar/src/coin-simulator/vpSimulator.cpp | 88 +++---- modules/ar/src/coin-simulator/vpViewer.cpp | 8 +- modules/ar/src/ogre-simulator/vpAROgre.cpp | 4 +- modules/core/include/visp3/core/vpArray2D.h | 56 ++--- modules/core/include/visp3/core/vpColVector.h | 8 +- modules/core/include/visp3/core/vpDebug.h | 6 +- modules/core/include/visp3/core/vpException.h | 2 +- modules/core/include/visp3/core/vpImage.h | 92 ++++---- .../core/include/visp3/core/vpImageConvert.h | 2 +- .../core/include/visp3/core/vpImageTools.h | 6 +- modules/core/include/visp3/core/vpList.h | 18 +- modules/core/include/visp3/core/vpMatrix.h | 12 +- modules/core/include/visp3/core/vpMoment.h | 2 +- modules/core/include/visp3/core/vpMutex.h | 10 +- modules/core/include/visp3/core/vpNetwork.h | 4 +- modules/core/include/visp3/core/vpRansac.h | 10 +- modules/core/include/visp3/core/vpRowVector.h | 8 +- modules/core/include/visp3/core/vpThread.h | 10 +- modules/core/include/visp3/core/vpXmlParser.h | 2 +- .../src/camera/vpMeterPixelConversion.cpp | 4 +- .../src/camera/vpPixelMeterConversion.cpp | 6 +- modules/core/src/display/vpDisplay.cpp | 4 +- modules/core/src/display/vpDisplay_impl.h | 88 +++---- modules/core/src/image/private/stb_truetype.h | 80 +++---- modules/core/src/image/vpGaussianFilter.cpp | 4 +- modules/core/src/image/vpImageConvert.cpp | 28 +-- modules/core/src/math/matrix/vpColVector.cpp | 26 +- modules/core/src/math/matrix/vpMatrix.cpp | 78 +++--- modules/core/src/math/matrix/vpRowVector.cpp | 16 +- .../core/src/math/matrix/vpSubColVector.cpp | 8 +- modules/core/src/math/matrix/vpSubMatrix.cpp | 6 +- 
.../core/src/math/matrix/vpSubRowVector.cpp | 8 +- .../cpu-features/x86/cpu_x86_Windows.ipp | 2 +- modules/core/src/tools/file/vpIoTools.cpp | 6 +- .../core/src/tools/geometry/vpPolygon3D.cpp | 12 +- .../core/src/tools/histogram/vpHistogram.cpp | 34 +-- modules/core/src/tools/network/vpClient.cpp | 2 +- modules/core/src/tools/network/vpNetwork.cpp | 6 +- modules/core/src/tools/network/vpServer.cpp | 2 +- .../core/src/tools/network/vpUDPClient.cpp | 10 +- .../core/src/tools/network/vpUDPServer.cpp | 16 +- modules/core/src/tools/serial/vpSerial.cpp | 2 +- modules/core/src/tools/xml/vpXmlParser.cpp | 42 ++-- .../core/src/tracking/moments/vpMoment.cpp | 2 +- .../image-with-dataset/testConversion.cpp | 22 +- .../core/test/image-with-dataset/testCrop.cpp | 8 +- .../image-with-dataset/testCropAdvanced.cpp | 8 +- .../testImageComparison.cpp | 6 +- .../image-with-dataset/testImageFilter.cpp | 4 +- .../testImageNormalizedCorrelation.cpp | 10 +- .../testImageTemplateMatching.cpp | 8 +- .../test/image-with-dataset/testIoPGM.cpp | 8 +- .../test/image-with-dataset/testIoPPM.cpp | 8 +- .../image-with-dataset/testPerformanceLUT.cpp | 8 +- .../test/image-with-dataset/testReadImage.cpp | 4 +- .../image-with-dataset/testUndistortImage.cpp | 8 +- .../core/test/math/testMatrixDeterminant.cpp | 4 +- modules/core/test/math/testMatrixInverse.cpp | 4 +- .../test/math/testMatrixPseudoInverse.cpp | 4 +- modules/core/test/math/testRobust.cpp | 8 +- modules/core/test/math/testSvd.cpp | 4 +- .../core/test/tools/geometry/testPolygon.cpp | 4 +- .../histogram-with-dataset/testHistogram.cpp | 6 +- .../core/test/tools/threading/testThread2.cpp | 2 +- modules/core/test/tools/xml/testXmlParser.cpp | 8 +- .../visp3/detection/vpDetectorAprilTag.h | 6 +- .../src/barcode/vpDetectorDataMatrixCode.cpp | 10 +- .../src/barcode/vpDetectorQRCode.cpp | 2 +- .../detection/src/tag/vpDetectorAprilTag.cpp | 22 +- .../gui/include/visp3/gui/vpDisplayOpenCV.h | 4 +- .../gui/include/visp3/gui/vpDisplayWin32.h | 2 +- modules/gui/include/visp3/gui/vpDisplayX.h | 4 +- modules/gui/include/visp3/gui/vpPlot.h | 2 +- modules/gui/include/visp3/gui/vpPlotGraph.h | 2 +- modules/gui/include/visp3/gui/vpWin32Window.h | 2 +- modules/gui/src/display/vpDisplayGTK.cpp | 34 +-- modules/gui/src/display/vpDisplayOpenCV.cpp | 40 ++-- modules/gui/src/display/vpDisplayX.cpp | 30 +-- .../gui/src/display/windows/vpD3DRenderer.cpp | 56 ++--- .../src/display/windows/vpDisplayWin32.cpp | 4 +- .../gui/src/display/windows/vpGDIRenderer.cpp | 30 +-- .../gui/src/display/windows/vpWin32API.cpp | 10 +- .../gui/src/display/windows/vpWin32Window.cpp | 50 ++-- modules/gui/src/plot/vpPlot.cpp | 12 +- modules/gui/src/plot/vpPlotGraph.cpp | 6 +- .../test/display-with-dataset/testClick.cpp | 8 +- .../testDisplayScaled.cpp | 4 +- .../display-with-dataset/testMouseEvent.cpp | 8 +- .../display-with-dataset/testVideoDevice.cpp | 8 +- .../test/display/testDisplayPolygonLines.cpp | 4 +- modules/gui/test/display/testDisplayRoi.cpp | 4 +- modules/gui/test/display/testDisplays.cpp | 4 +- .../gui/test/display/testVideoDeviceDual.cpp | 6 +- .../include/visp3/imgproc/vpContours.h | 24 +- modules/imgproc/src/vpContours.cpp | 12 +- .../test/with-dataset/testAutoThreshold.cpp | 8 +- .../with-dataset/testConnectedComponents.cpp | 8 +- .../test/with-dataset/testContours.cpp | 8 +- .../test/with-dataset/testFloodFill.cpp | 8 +- .../imgproc/test/with-dataset/testImgproc.cpp | 8 +- modules/io/include/visp3/io/vpImageQueue.h | 8 +- modules/io/include/visp3/io/vpParseArgv.h | 14 +- 
.../io/src/image/private/vpImageIoLibjpeg.cpp | 8 +- .../io/src/image/private/vpImageIoLibpng.cpp | 44 ++-- .../src/image/private/vpImageIoPortable.cpp | 12 +- modules/io/src/image/private/vpImageIoStb.cpp | 4 +- .../io/src/image/private/vpImageIoTinyEXR.cpp | 8 +- modules/io/src/tools/vpKeyboard.cpp | 2 +- modules/io/src/tools/vpParseArgv.cpp | 36 +-- modules/io/src/video/vpVideoReader.cpp | 16 +- modules/java/generator/gen2.py | 42 ++-- modules/java/generator/gen_java.py | 4 +- modules/java/generator/hdr_parser.py | 2 +- .../java/generator/src/cpp/VpImageRGBa.cpp | 12 +- .../java/generator/src/cpp/VpImageUChar.cpp | 12 +- .../java/generator/src/cpp/listconverters.cpp | 16 +- modules/java/misc/core/gen_dict.json | 6 +- .../misc/core/src/java/core+VpImageRGBa.java | 2 +- .../misc/core/src/java/core+VpImageUChar.java | 2 +- modules/java/misc/detection/gen_dict.json | 8 +- .../imgproc/src/java/imgproc+VpContour.java | 2 +- modules/java/misc/mbt/gen_dict.json | 42 ++-- .../visp3/robot/vpRobotWireFrameSimulator.h | 2 +- .../include/visp3/robot/vpSimulatorAfma6.h | 2 +- .../src/haptic-device/virtuose/vpVirtuose.cpp | 8 +- .../src/image-simulator/vpImageSimulator.cpp | 8 +- .../src/real-robot/afma4/vpRobotAfma4.cpp | 73 +++--- .../src/real-robot/afma4/vpServolens.cpp | 2 +- .../robot/src/real-robot/afma6/vpAfma6.cpp | 4 +- .../src/real-robot/afma6/vpRobotAfma6.cpp | 18 +- .../src/real-robot/bebop2/vpRobotBebop2.cpp | 172 +++++++------- .../private/vpRobotBiclopsController_impl.cpp | 2 +- .../private/vpRobotBiclopsController_impl.h | 4 +- .../src/real-robot/biclops/vpRobotBiclops.cpp | 2 +- .../real-robot/flir-ptu/vpRobotFlirPtu.cpp | 24 +- .../src/real-robot/franka/vpRobotFranka.cpp | 16 +- .../src/real-robot/kinova/vpRobotKinova.cpp | 10 +- .../src/real-robot/mavsdk/vpRobotMavsdk.cpp | 2 +- .../vpRobotUniversalRobots.cpp | 2 +- .../src/real-robot/viper/vpRobotViper650.cpp | 16 +- .../src/real-robot/viper/vpRobotViper850.cpp | 16 +- .../robot/src/real-robot/viper/vpViper650.cpp | 4 +- .../robot/src/real-robot/viper/vpViper850.cpp | 4 +- .../src/robot-simulator/vpRobotCamera.cpp | 4 +- .../vpRobotWireFrameSimulator.cpp | 4 +- .../src/robot-simulator/vpSimulatorAfma6.cpp | 24 +- .../src/robot-simulator/vpSimulatorCamera.cpp | 4 +- .../robot-simulator/vpSimulatorPioneer.cpp | 4 +- .../robot-simulator/vpSimulatorPioneerPan.cpp | 4 +- .../robot-simulator/vpSimulatorViper850.cpp | 16 +- modules/robot/src/vpRobot.cpp | 12 +- .../include/visp3/sensor/vp1394TwoGrabber.h | 10 +- .../visp3/sensor/vpOccipitalStructure.h | 32 +-- .../include/visp3/sensor/vpPylonGrabber.h | 2 +- .../sensor/include/visp3/sensor/vpRealSense.h | 10 +- .../include/visp3/sensor/vpRealSense2.h | 32 +-- .../sensor/include/visp3/sensor/vpSickLDMRS.h | 2 +- .../include/visp3/sensor/vpUeyeGrabber.h | 4 +- .../include/visp3/sensor/vpV4l2Grabber.h | 6 +- modules/sensor/src/force-torque/vpComedi.cpp | 10 +- .../force-torque/vpForceTorqueAtiSensor.cpp | 10 +- .../framegrabber/1394/vp1394CMUGrabber.cpp | 6 +- .../framegrabber/1394/vp1394TwoGrabber.cpp | 64 ++--- .../directshow/vpDirectShowGrabberImpl.cpp | 76 +++--- .../directshow/vpDirectShowSampleGrabberI.cpp | 6 +- .../flycapture/vpFlyCaptureGrabber.cpp | 4 +- .../src/framegrabber/pylon/vpPylonFactory.cpp | 4 +- .../framegrabber/pylon/vpPylonGrabberGigE.cpp | 4 +- .../framegrabber/pylon/vpPylonGrabberUsb.cpp | 4 +- .../src/framegrabber/ueye/vpUeyeGrabber.cpp | 44 ++-- .../framegrabber/ueye/vpUeyeGrabber_impl.h | 6 +- .../src/framegrabber/v4l2/vpV4l2Grabber.cpp | 80 +++---- 
.../src/laserscanner/sick/vpSickLDMRS.cpp | 4 +- modules/sensor/src/mocap/vpMocapQualisys.cpp | 2 +- .../vpOccipitalStructure.cpp | 156 ++++++------ .../src/rgb-depth/realsense/vpRealSense.cpp | 110 ++++----- .../src/rgb-depth/realsense/vpRealSense2.cpp | 222 +++++++++--------- .../testForceTorqueAtiNetFTSensor.cpp | 2 +- .../force-torque/testForceTorqueIitSensor.cpp | 2 +- .../testOccipitalStructure_Core_pcl.cpp | 4 +- .../test/rgb-depth/testRealSense2_D435.cpp | 4 +- .../rgb-depth/testRealSense2_D435_align.cpp | 8 +- .../rgb-depth/testRealSense2_D435_pcl.cpp | 8 +- .../test/rgb-depth/testRealSense2_SR300.cpp | 8 +- .../testRealSense2_T265_images_odometry.cpp | 2 +- .../rgb-depth/testRealSense2_T265_imu.cpp | 2 +- .../testRealSense2_T265_odometry.cpp | 4 +- .../testRealSense2_T265_undistort.cpp | 2 +- .../test/rgb-depth/testRealSense_R200.cpp | 12 +- .../test/rgb-depth/testRealSense_SR300.cpp | 26 +- .../tracker/blob/include/visp3/blob/vpDot2.h | 2 +- modules/tracker/blob/src/dots/vpDot2.cpp | 10 +- .../tracking-with-dataset/testTrackDot.cpp | 6 +- .../klt/include/visp3/klt/vpKltOpencv.h | 2 +- .../include/visp3/mbt/vpMbEdgeKltTracker.h | 2 +- .../include/visp3/mbt/vpMbGenericTracker.h | 6 +- .../mbt/include/visp3/mbt/vpMbHiddenFaces.h | 26 +- .../mbt/include/visp3/mbt/vpMbTracker.h | 8 +- .../include/visp3/mbt/vpMbtDistanceCircle.h | 12 +- .../include/visp3/mbt/vpMbtDistanceCylinder.h | 14 +- .../visp3/mbt/vpMbtDistanceKltCylinder.h | 2 +- .../visp3/mbt/vpMbtDistanceKltPoints.h | 6 +- .../mbt/include/visp3/mbt/vpMbtDistanceLine.h | 10 +- .../include/visp3/mbt/vpMbtFaceDepthDense.h | 8 +- .../include/visp3/mbt/vpMbtFaceDepthNormal.h | 8 +- .../mbt/include/visp3/mbt/vpMbtMeEllipse.h | 2 +- .../mbt/src/depth/vpMbDepthDenseTracker.cpp | 6 +- .../mbt/src/depth/vpMbDepthNormalTracker.cpp | 6 +- .../mbt/src/depth/vpMbtFaceDepthDense.cpp | 4 +- .../mbt/src/depth/vpMbtFaceDepthNormal.cpp | 4 +- .../tracker/mbt/src/edge/vpMbEdgeTracker.cpp | 114 ++++----- .../mbt/src/edge/vpMbtDistanceCircle.cpp | 30 +-- .../mbt/src/edge/vpMbtDistanceCylinder.cpp | 48 ++-- .../mbt/src/edge/vpMbtDistanceLine.cpp | 38 +-- .../tracker/mbt/src/edge/vpMbtMeEllipse.cpp | 4 +- modules/tracker/mbt/src/edge/vpMbtMeLine.cpp | 2 +- .../mbt/src/hybrid/vpMbEdgeKltTracker.cpp | 38 +-- .../tracker/mbt/src/klt/vpMbKltTracker.cpp | 40 ++-- .../mbt/src/klt/vpMbtDistanceKltCylinder.cpp | 2 +- .../mbt/src/klt/vpMbtDistanceKltPoints.cpp | 8 +- .../tracker/mbt/src/vpMbGenericTracker.cpp | 84 +++---- modules/tracker/mbt/src/vpMbScanLine.cpp | 6 +- modules/tracker/mbt/src/vpMbTracker.cpp | 116 ++++----- .../tracker/mbt/src/vpMbtXmlGenericParser.cpp | 2 +- .../testGenericTracker.cpp | 6 +- .../testGenericTrackerDepth.cpp | 6 +- .../tracker/mbt/test/testTukeyEstimator.cpp | 2 +- .../tracker/me/include/visp3/me/vpMeEllipse.h | 8 +- .../tracker/me/include/visp3/me/vpMeTracker.h | 2 +- modules/tracker/me/src/moving-edges/vpMe.cpp | 18 +- .../me/src/moving-edges/vpMeEllipse.cpp | 2 +- .../tracker/me/src/moving-edges/vpMeNurbs.cpp | 38 +-- .../me/src/moving-edges/vpMeTracker.cpp | 6 +- .../tracker/me/src/moving-edges/vpNurbs.cpp | 16 +- modules/tracker/me/test/testNurbs.cpp | 12 +- .../tt/include/visp3/tt/vpTemplateTracker.h | 16 +- .../visp3/tt/vpTemplateTrackerHeader.h | 6 +- modules/tracker/tt/src/vpTemplateTracker.cpp | 48 ++-- .../include/visp3/tt_mi/vpTemplateTrackerMI.h | 8 +- .../tt_mi/src/mi/vpTemplateTrackerMI.cpp | 4 +- .../include/visp3/vision/vpHomography.h | 2 +- .../vision/include/visp3/vision/vpKeyPoint.h | 60 ++--- 
modules/vision/include/visp3/vision/vpPose.h | 26 +- .../include/visp3/vision/vpPoseFeatures.h | 4 +- .../homography-estimation/vpHomography.cpp | 2 +- .../vpHomographyRansac.cpp | 2 +- modules/vision/src/key-point/vpKeyPoint.cpp | 54 ++--- modules/vision/src/pose-estimation/vpPose.cpp | 10 +- .../src/pose-estimation/vpPoseLagrange.cpp | 6 +- .../vision/src/pose-estimation/vpPoseRGBD.cpp | 8 +- .../src/pose-estimation/vpPoseRansac.cpp | 6 +- .../keypoint-with-dataset/testKeyPoint-2.cpp | 4 +- .../keypoint-with-dataset/testKeyPoint-3.cpp | 4 +- .../keypoint-with-dataset/testKeyPoint-4.cpp | 4 +- .../keypoint-with-dataset/testKeyPoint-5.cpp | 4 +- .../keypoint-with-dataset/testKeyPoint-6.cpp | 4 +- .../keypoint-with-dataset/testKeyPoint-7.cpp | 4 +- .../keypoint-with-dataset/testKeyPoint.cpp | 4 +- .../visp3/visual_features/vpFeatureMoment.h | 14 +- .../visual_features/vpFeatureMomentAlpha.h | 2 +- .../visual_features/vpFeatureMomentArea.h | 2 +- .../vpFeatureMomentAreaNormalized.h | 4 +- .../visual_features/vpFeatureMomentBasic.h | 2 +- .../vpFeatureMomentCInvariant.h | 4 +- .../visual_features/vpFeatureMomentCentered.h | 2 +- .../visual_features/vpFeatureMomentDatabase.h | 2 +- .../vpFeatureMomentGravityCenter.h | 4 +- .../vpFeatureMomentGravityCenterNormalized.h | 4 +- .../src/visual-feature/vpBasicFeature.cpp | 12 +- .../src/visual-feature/vpFeatureDepth.cpp | 2 +- .../src/visual-feature/vpFeatureEllipse.cpp | 2 +- .../src/visual-feature/vpFeatureLine.cpp | 2 +- .../src/visual-feature/vpFeatureLuminance.cpp | 12 +- .../src/visual-feature/vpFeatureMoment.cpp | 10 +- .../src/visual-feature/vpFeaturePoint.cpp | 2 +- .../src/visual-feature/vpFeaturePoint3D.cpp | 2 +- .../visual-feature/vpFeaturePointPolar.cpp | 2 +- .../src/visual-feature/vpFeatureSegment.cpp | 2 +- .../src/visual-feature/vpFeatureThetaU.cpp | 2 +- .../visual-feature/vpFeatureTranslation.cpp | 4 +- .../vpFeatureVanishingPoint.cpp | 2 +- modules/vs/src/vpServo.cpp | 4 +- .../visual-feature/testFeatureSegment.cpp | 14 +- ...torial-pose-from-points-realsense-T265.cpp | 8 +- .../tutorial-barcode-detector-live.cpp | 2 +- .../barcode/tutorial-barcode-detector.cpp | 2 +- .../dnn/tutorial-dnn-tensorrt-live.cpp | 8 +- .../tutorial-face-detector-live-threaded.cpp | 4 +- ...-apriltag-detector-live-T265-realsense.cpp | 8 +- ...-apriltag-detector-live-rgbd-realsense.cpp | 10 +- ...ltag-detector-live-rgbd-structure-core.cpp | 6 +- .../tag/tutorial-apriltag-detector-live.cpp | 2 +- tutorial/grabber/tutorial-grabber-1394.cpp | 2 +- .../grabber/tutorial-grabber-basler-pylon.cpp | 2 +- tutorial/grabber/tutorial-grabber-bebop2.cpp | 2 +- .../grabber/tutorial-grabber-flycapture.cpp | 2 +- .../grabber/tutorial-grabber-ids-ueye.cpp | 2 +- .../tutorial-grabber-multiple-realsense.cpp | 2 +- .../tutorial-grabber-opencv-threaded.cpp | 4 +- tutorial/grabber/tutorial-grabber-opencv.cpp | 2 +- .../tutorial-grabber-realsense-T265.cpp | 4 +- .../grabber/tutorial-grabber-realsense.cpp | 2 +- ...torial-grabber-rgbd-D435-structurecore.cpp | 2 +- .../tutorial-grabber-structure-core.cpp | 4 +- .../tutorial-grabber-v4l2-threaded.cpp | 2 +- tutorial/grabber/tutorial-grabber-v4l2.cpp | 2 +- .../project.pbxproj | 8 +- .../VispHelper/ImageConversion.mm | 5 +- .../VispHelper/ImageDisplayWithContext.h | 4 +- .../GettingStarted.xcodeproj/project.pbxproj | 4 +- .../StartedAprilTag.xcodeproj/project.pbxproj | 4 +- .../StartedAprilTag/ImageConversion.mm | 5 +- .../project.pbxproj | 4 +- .../StartedImageProc/ImageConversion.mm | 5 +- tutorial/matlab/tutorial-matlab.cpp | 
2 +- .../mbot-serial-controller.ino | 16 +- .../visp/mbot-apriltag-2D-half-vs.cpp | 4 +- .../raspberry/visp/mbot-apriltag-ibvs.cpp | 4 +- .../raspberry/visp/mbot-apriltag-pbvs.cpp | 4 +- ...torial-mb-generic-tracker-apriltag-rs2.cpp | 10 +- ...ial-mb-generic-tracker-apriltag-webcam.cpp | 2 +- ...c-tracker-rgbd-realsense-json-settings.cpp | 4 +- ...rial-mb-generic-tracker-rgbd-realsense.cpp | 4 +- ...mb-generic-tracker-rgbd-structure-core.cpp | 2 +- .../tutorial-mb-generic-tracker-full.cpp | 6 +- .../tutorial-mb-generic-tracker-live.cpp | 2 +- .../generic/tutorial-mb-generic-tracker.cpp | 2 +- .../old/generic/tutorial-mb-tracker-full.cpp | 2 +- .../old/generic/tutorial-mb-tracker.cpp | 2 +- 434 files changed, 2682 insertions(+), 2656 deletions(-) diff --git a/apps/calibration/visp-compute-chessboard-poses.cpp b/apps/calibration/visp-compute-chessboard-poses.cpp index ffb68ffd81..9c095bf1b3 100644 --- a/apps/calibration/visp-compute-chessboard-poses.cpp +++ b/apps/calibration/visp-compute-chessboard-poses.cpp @@ -211,7 +211,7 @@ int main(int argc, const char **argv) #if defined(VISP_HAVE_MODULE_GUI) - vpDisplay *display = NULL; + vpDisplay *display = nullptr; if (opt_interactive) { #if defined(VISP_HAVE_X11) display = new vpDisplayX(I); diff --git a/demo/wireframe-simulator/servoSimu4Points.cpp b/demo/wireframe-simulator/servoSimu4Points.cpp index bffda5e743..39963223eb 100644 --- a/demo/wireframe-simulator/servoSimu4Points.cpp +++ b/demo/wireframe-simulator/servoSimu4Points.cpp @@ -135,7 +135,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &plot) plot = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -146,7 +146,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &plot) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -200,7 +200,7 @@ int main(int argc, const char **argv) vpDisplay::flush(Iext2); } - vpPlot *plotter = NULL; + vpPlot *plotter = nullptr; if (opt_plot) { plotter = new vpPlot(2, 480, 640, 750, 550, "Real time curves plotter"); @@ -464,7 +464,7 @@ int main(int argc, const char **argv) std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl; } - if (opt_plot && plotter != NULL) { + if (opt_plot && plotter != nullptr) { vpDisplay::display(Iint); sim.getInternalImage(Iint); vpDisplay::displayFrame(Iint, cMo, camera, 0.2, vpColor::none); diff --git a/demo/wireframe-simulator/servoSimuCylinder.cpp b/demo/wireframe-simulator/servoSimuCylinder.cpp index 3a8d589dd0..870777a0ed 100644 --- a/demo/wireframe-simulator/servoSimuCylinder.cpp +++ b/demo/wireframe-simulator/servoSimuCylinder.cpp @@ -136,7 +136,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &plot) plot = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -147,7 +147,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &plot) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -194,7 +194,7 @@ int main(int argc, const char **argv) vpDisplay::flush(Iext); } - vpPlot *plotter = NULL; + vpPlot *plotter = nullptr; vpServo task; vpSimulatorCamera robot; @@ -436,7 +436,7 @@ int 
main(int argc, const char **argv) std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl; } - if (opt_plot && plotter != NULL) { + if (opt_plot && plotter != nullptr) { vpDisplay::display(Iint); sim.getInternalImage(Iint); vpDisplay::displayFrame(Iint, cMo, camera, 0.2, vpColor::none); diff --git a/demo/wireframe-simulator/servoSimuSphere.cpp b/demo/wireframe-simulator/servoSimuSphere.cpp index 4bbee3396c..7969244b01 100644 --- a/demo/wireframe-simulator/servoSimuSphere.cpp +++ b/demo/wireframe-simulator/servoSimuSphere.cpp @@ -131,7 +131,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &plot) plot = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -142,7 +142,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &plot) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -254,7 +254,7 @@ int main(int argc, const char **argv) vpDisplay::flush(Iext2); } - vpPlot *plotter = NULL; + vpPlot *plotter = nullptr; vpServo task; vpSimulatorCamera robot; @@ -473,7 +473,7 @@ int main(int argc, const char **argv) std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl; } - if (opt_plot && plotter != NULL) { + if (opt_plot && plotter != nullptr) { vpDisplay::display(Iint); sim.getInternalImage(Iint); vpDisplay::displayFrame(Iint, cMo, camera, 0.2, vpColor::none); diff --git a/example/coin-simulator/simulateCircle2DCamVelocity.cpp b/example/coin-simulator/simulateCircle2DCamVelocity.cpp index 52b8fa09eb..060f9be702 100644 --- a/example/coin-simulator/simulateCircle2DCamVelocity.cpp +++ b/example/coin-simulator/simulateCircle2DCamVelocity.cpp @@ -136,7 +136,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &display) display = false; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -149,7 +149,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; @@ -282,7 +282,7 @@ static void *mainLoop(void *_simu) simu->closeMainApplication(); - void *a = NULL; + void *a = nullptr; return a; } @@ -318,19 +318,19 @@ int main(int argc, const char **argv) if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; std::cout << " Since -i " - << " is different from VISP_INPUT_IMAGE_PATH=" << env_ipath << std::endl - << " we skip the environment variable." << std::endl; + << " is different from VISP_INPUT_IMAGE_PATH=" << env_ipath << std::endl + << " we skip the environment variable." << std::endl; } } // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl - << " environment variable to specify the location of the " << std::endl - << " image path where test images are located." << std::endl - << std::endl; + << " environment variable to specify the location of the " << std::endl + << " image path where test images are located." 
<< std::endl + << std::endl; return EXIT_FAILURE; } @@ -358,7 +358,8 @@ int main(int argc, const char **argv) simu.mainLoop(); } return EXIT_SUCCESS; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; return EXIT_FAILURE; } @@ -370,8 +371,8 @@ int main() std::cout << "You do not have Coin3D and SoQT or SoWin or SoXt functionalities enabled..." << std::endl; std::cout << "Tip:" << std::endl; std::cout - << "- Install Coin3D and SoQT or SoWin or SoXt, configure ViSP again using cmake and build again this example" - << std::endl; + << "- Install Coin3D and SoQT or SoWin or SoXt, configure ViSP again using cmake and build again this example" + << std::endl; return EXIT_SUCCESS; } #endif diff --git a/example/coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp b/example/coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp index e0d0d6a947..7c5e547753 100644 --- a/example/coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp +++ b/example/coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp @@ -136,7 +136,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &display) display = false; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -149,7 +149,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; @@ -293,7 +293,7 @@ static void *mainLoop(void *_simu) simu->closeMainApplication(); - void *a = NULL; + void *a = nullptr; return a; } @@ -329,19 +329,19 @@ int main(int argc, const char **argv) if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; std::cout << " Since -i " - << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl - << " we skip the environment variable." << std::endl; + << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl + << " we skip the environment variable." << std::endl; } } // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl - << " environment variable to specify the location of the " << std::endl - << " image path where test images are located." << std::endl - << std::endl; + << " environment variable to specify the location of the " << std::endl + << " image path where test images are located." << std::endl + << std::endl; return EXIT_FAILURE; } @@ -368,7 +368,8 @@ int main(int argc, const char **argv) simu.mainLoop(); } return EXIT_SUCCESS; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; return EXIT_FAILURE; } @@ -380,8 +381,8 @@ int main() std::cout << "You do not have Coin3D and SoQT or SoWin or SoXt functionalities enabled..." 
<< std::endl; std::cout << "Tip:" << std::endl; std::cout - << "- Install Coin3D and SoQT or SoWin or SoXt, configure ViSP again using cmake and build again this example" - << std::endl; + << "- Install Coin3D and SoQT or SoWin or SoXt, configure ViSP again using cmake and build again this example" + << std::endl; return EXIT_SUCCESS; } #endif diff --git a/example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp b/example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp index 0a8fcc62dc..aeea07dd54 100644 --- a/example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp +++ b/example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp @@ -134,7 +134,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &display) display = false; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -147,7 +147,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; @@ -315,7 +315,7 @@ static void *mainLoop(void *_simu) simu->closeMainApplication(); - void *a = NULL; + void *a = nullptr; return a; } @@ -351,19 +351,19 @@ int main(int argc, const char **argv) if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; std::cout << " Since -i " - << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl - << " we skip the environment variable." << std::endl; + << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl + << " we skip the environment variable." << std::endl; } } // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl - << " environment variable to specify the location of the " << std::endl - << " image path where test images are located." << std::endl - << std::endl; + << " environment variable to specify the location of the " << std::endl + << " image path where test images are located." << std::endl + << std::endl; return EXIT_FAILURE; } @@ -390,7 +390,8 @@ int main(int argc, const char **argv) simu.mainLoop(); } return EXIT_SUCCESS; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; return EXIT_FAILURE; } @@ -402,8 +403,8 @@ int main() std::cout << "You do not have Coin3D and SoQT or SoWin or SoXt functionalities enabled..." 
<< std::endl; std::cout << "Tip:" << std::endl; std::cout - << "- Install Coin3D and SoQT or SoWin or SoXt, configure ViSP again using cmake and build again this example" - << std::endl; + << "- Install Coin3D and SoQT or SoWin or SoXt, configure ViSP again using cmake and build again this example" + << std::endl; return EXIT_SUCCESS; } #endif diff --git a/example/device/display/displayD3D.cpp b/example/device/display/displayD3D.cpp index 934b2cf5ae..0967e93bd0 100644 --- a/example/device/display/displayD3D.cpp +++ b/example/device/display/displayD3D.cpp @@ -162,7 +162,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -175,7 +175,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; @@ -232,7 +232,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(odirname); } catch (...) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << odirname << std::endl; std::cerr << " Check your -o " << opath << " option " << std::endl; @@ -253,7 +253,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/device/display/displayGDI.cpp b/example/device/display/displayGDI.cpp index 17bebbd21f..c3daf5eca2 100644 --- a/example/device/display/displayGDI.cpp +++ b/example/device/display/displayGDI.cpp @@ -162,7 +162,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -175,7 +175,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; @@ -232,7 +232,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(odirname); } catch (...) 
{ - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << odirname << std::endl; std::cerr << " Check your -o " << opath << " option " << std::endl; @@ -253,7 +253,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/device/display/displayGTK.cpp b/example/device/display/displayGTK.cpp index a80883c498..f14e4094bf 100644 --- a/example/device/display/displayGTK.cpp +++ b/example/device/display/displayGTK.cpp @@ -165,7 +165,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -178,7 +178,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; @@ -239,7 +239,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(odirname); } catch (...) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << odirname << std::endl; std::cerr << " Check your -o " << opath << " option " << std::endl; @@ -260,7 +260,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/device/display/displayOpenCV.cpp b/example/device/display/displayOpenCV.cpp index a0968ada52..f3dfed335f 100644 --- a/example/device/display/displayOpenCV.cpp +++ b/example/device/display/displayOpenCV.cpp @@ -166,7 +166,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -179,7 +179,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -240,7 +240,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(odirname); } catch (...) 
{ - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << odirname << std::endl; std::cerr << " Check your -o " << opath << " option " << std::endl; @@ -261,7 +261,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/device/display/displaySequence.cpp b/example/device/display/displaySequence.cpp index a6d7dd67a4..4f8cd6d4ad 100644 --- a/example/device/display/displaySequence.cpp +++ b/example/device/display/displaySequence.cpp @@ -199,7 +199,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp wait = true; break; case 'h': - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); return false; break; @@ -212,7 +212,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -269,21 +269,21 @@ int main(int argc, const char **argv) if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; std::cout << " Since -i " - << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl - << " we skip the environment variable." << std::endl; + << " is different from VISP_IMAGE_PATH=" << env_ipath << std::endl + << " we skip the environment variable." << std::endl; } } // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step); + usage(argv[0], nullptr, ipath, opt_ppath, opt_first, opt_last, opt_step); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl - << " environment variable to specify the location of the " << std::endl - << " image path where test images are located." << std::endl - << " Use -p option if you want to " << std::endl - << " use personal images." << std::endl - << std::endl; + << " environment variable to specify the location of the " << std::endl + << " image path where test images are located." << std::endl + << " Use -p option if you want to " << std::endl + << " use personal images." << std::endl + << std::endl; return EXIT_FAILURE; } @@ -306,19 +306,21 @@ int main(int argc, const char **argv) s.setf(std::ios::right, std::ios::adjustfield); s << "image." << std::setw(4) << std::setfill('0') << iter << "." << ext; filename = vpIoTools::createFilePath(dirname, s.str()); - } else { + } + else { snprintf(cfilename, FILENAME_MAX, opt_ppath.c_str(), iter); filename = cfilename; } // Read image named "filename" and put the bitmap in I try { vpImageIo::read(I, filename); - } catch (...) { + } + catch (...) 
{ std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot read " << filename << std::endl; std::cerr << " Check your -i " << ipath << " option, " << std::endl - << " or your -p " << opt_ppath << " option " << std::endl - << " or VISP_INPUT_IMAGE_PATH environment variable" << std::endl; + << " or your -p " << opt_ppath << " option " << std::endl + << " or VISP_INPUT_IMAGE_PATH environment variable" << std::endl; return EXIT_FAILURE; } @@ -353,7 +355,8 @@ int main(int argc, const char **argv) s.str(""); s << "image." << std::setw(4) << std::setfill('0') << iter << "." << ext; filename = vpIoTools::createFilePath(dirname, s.str()); - } else { + } + else { snprintf(cfilename, FILENAME_MAX, opt_ppath.c_str(), iter); filename = cfilename; } @@ -371,8 +374,9 @@ int main(int argc, const char **argv) std::cout << "A click in the image to continue..." << std::endl; // Wait for a blocking mouse click vpDisplay::getClick(I); - } else { - // Synchronise the loop to 40 ms + } + else { + // Synchronise the loop to 40 ms vpTime::wait(tms, 40); } @@ -382,7 +386,8 @@ int main(int argc, const char **argv) // double tms_total = tms_2 - tms_1 ; // std::cout << "Total Time : "<< tms_total< option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/device/display/displayXMulti.cpp b/example/device/display/displayXMulti.cpp index 0981c574f6..d1ada273a4 100644 --- a/example/device/display/displayXMulti.cpp +++ b/example/device/display/displayXMulti.cpp @@ -166,7 +166,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -179,7 +179,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -240,7 +240,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(odirname); } catch (...) 
{ - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << odirname << std::endl; std::cerr << " Check your -o " << opath << " option " << std::endl; @@ -261,7 +261,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/device/framegrabber/grab1394CMU.cpp b/example/device/framegrabber/grab1394CMU.cpp index c10578dddc..dd48c36ea9 100644 --- a/example/device/framegrabber/grab1394CMU.cpp +++ b/example/device/framegrabber/grab1394CMU.cpp @@ -96,7 +96,7 @@ OPTIONS: Default\n\ -h \n\ Print the help.\n\ \n", - nframes, opath.c_str()); +nframes, opath.c_str()); if (badparam) { fprintf(stderr, "ERROR: \n"); fprintf(stderr, "\nBad parameter [%s]\n", badparam); @@ -136,7 +136,7 @@ bool getOptions(int argc, const char **argv, bool &display, unsigned int &nframe opath = optarg_; break; case 'h': - usage(argv[0], NULL, nframes, opath); + usage(argv[0], nullptr, nframes, opath); return false; break; @@ -149,7 +149,7 @@ bool getOptions(int argc, const char **argv, bool &display, unsigned int &nframe if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, nframes, opath); + usage(argv[0], nullptr, nframes, opath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -257,7 +257,8 @@ int main(int argc, const char **argv) std::cout << "Mean loop time: " << ttotal / nframes << " ms" << std::endl; std::cout << "Mean frequency: " << 1000. 
/ (ttotal / nframes) << " fps" << std::endl; return EXIT_SUCCESS; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; return EXIT_FAILURE; } diff --git a/example/device/framegrabber/grab1394Two.cpp b/example/device/framegrabber/grab1394Two.cpp index 2de1d43a25..79d12ba46c 100644 --- a/example/device/framegrabber/grab1394Two.cpp +++ b/example/device/framegrabber/grab1394Two.cpp @@ -318,7 +318,7 @@ bool read_options(int argc, const char **argv, bool &multi, unsigned int &camera break; case 'h': case '?': - usage(argv[0], NULL, camera, nframes, opath, roi_left, roi_top, roi_width, roi_height, ringbuffersize, + usage(argv[0], nullptr, camera, nframes, opath, roi_left, roi_top, roi_width, roi_height, ringbuffersize, panControl); exit(0); break; @@ -327,7 +327,7 @@ bool read_options(int argc, const char **argv, bool &multi, unsigned int &camera if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, camera, nframes, opath, roi_left, roi_top, roi_width, roi_height, ringbuffersize, panControl); + usage(argv[0], nullptr, camera, nframes, opath, roi_left, roi_top, roi_width, roi_height, ringbuffersize, panControl); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -418,7 +418,8 @@ int main(int argc, const char **argv) if (multi) { camera = 0; // to over write a bad option usage - } else { + } + else { ncameras = 1; // acquisition from only one camera } // Offset is used to set the correspondancy between and image and the @@ -454,22 +455,22 @@ int main(int argc, const char **argv) g.getGuid(guid); std::cout << "----------------------------------------------------------" << std::endl - << "---- Video modes and framerates supported by camera " << i + offset << " ----" << std::endl - << "---- with guid 0x" << std::hex << guid << " ----" << std::endl - << "---- * is for the current settings ----" << std::endl - << "---- between ( ) you have the corresponding option ----" << std::endl - << "---- to use. ----" << std::endl - << "----------------------------------------------------------" << std::endl; + << "---- Video modes and framerates supported by camera " << i + offset << " ----" << std::endl + << "---- with guid 0x" << std::hex << guid << " ----" << std::endl + << "---- * is for the current settings ----" << std::endl + << "---- between ( ) you have the corresponding option ----" << std::endl + << "---- to use. 
----" << std::endl + << "----------------------------------------------------------" << std::endl; for (it_lmode = lmode.begin(); it_lmode != lmode.end(); ++it_lmode) { // Parse the list of supported modes vp1394TwoGrabber::vp1394TwoVideoModeType supmode = *it_lmode; if (curmode == supmode) std::cout << " * " << vp1394TwoGrabber::videoMode2string(supmode) << " (-v " << (int)supmode << ")" - << std::endl; + << std::endl; else std::cout << " " << vp1394TwoGrabber::videoMode2string(supmode) << " (-v " << (int)supmode << ")" - << std::endl; + << std::endl; if (g.isVideoModeFormat7(supmode)) { // Format 7 video mode; no framerate setting, but color @@ -480,23 +481,24 @@ int main(int argc, const char **argv) supcoding = *it_lcoding; if ((curmode == supmode) && (supcoding == curcoding)) std::cout << " * " << vp1394TwoGrabber::colorCoding2string(supcoding) << " (-g " << (int)supcoding - << ")" << std::endl; + << ")" << std::endl; else std::cout << " " << vp1394TwoGrabber::colorCoding2string(supcoding) << " (-g " << (int)supcoding - << ")" << std::endl; + << ")" << std::endl; } - } else { + } + else { - // Parse the list of supported framerates for a supported mode + // Parse the list of supported framerates for a supported mode g.getFramerateSupported(supmode, lfps); for (it_lfps = lfps.begin(); it_lfps != lfps.end(); ++it_lfps) { vp1394TwoGrabber::vp1394TwoFramerateType supfps = *it_lfps; if ((curmode == supmode) && (supfps == curfps)) std::cout << " * " << vp1394TwoGrabber::framerate2string(supfps) << " (-f " << (int)supfps << ")" - << std::endl; + << std::endl; else std::cout << " " << vp1394TwoGrabber::framerate2string(supfps) << " (-f " << (int)supfps << ")" - << std::endl; + << std::endl; } } } @@ -516,8 +518,9 @@ int main(int argc, const char **argv) if (videomode_is_set) { g.setCamera(camera); g.setVideoMode(videomode); - } else { - // get The actual video mode + } + else { + // get The actual video mode g.setCamera(camera); g.getVideoMode(videomode); } @@ -542,7 +545,7 @@ int main(int argc, const char **argv) #ifdef VISP_HAVE_X11 // allocate a display for each camera to consider - vpDisplayX *d = NULL; + vpDisplayX *d = nullptr; if (display) d = new vpDisplayX[ncameras]; #endif @@ -562,7 +565,7 @@ int main(int argc, const char **argv) if (grab_color[i]) { g.acquire(Ic[i]); std::cout << "Image size for camera " << i + offset << " : width: " << Ic[i].getWidth() - << " height: " << Ic[i].getHeight() << std::endl; + << " height: " << Ic[i].getHeight() << std::endl; #ifdef VISP_HAVE_X11 if (display) { @@ -575,10 +578,11 @@ int main(int argc, const char **argv) vpDisplay::flush(Ic[i]); } #endif - } else { + } + else { g.acquire(Ig[i]); std::cout << "Image size for camera " << i + offset << " : width: " << Ig[i].getWidth() - << " height: " << Ig[i].getHeight() << std::endl; + << " height: " << Ig[i].getHeight() << std::endl; #ifdef VISP_HAVE_X11 if (display) { @@ -615,7 +619,8 @@ int main(int argc, const char **argv) vpDisplay::flush(Ic[c]); } #endif - } else { + } + else { g.acquire(Ig[c]); #ifdef VISP_HAVE_X11 if (display) { @@ -632,7 +637,8 @@ int main(int argc, const char **argv) std::cout << "Write: " << filename << std::endl; if (grab_color[c]) { vpImageIo::write(Ic[c], filename); - } else { + } + else { vpImageIo::write(Ig[c], filename); } } @@ -661,7 +667,8 @@ int main(int argc, const char **argv) delete[] d; #endif return EXIT_SUCCESS; - } catch (const vpException &e) { + } + catch (const vpException &e) { std::cout << "Catch an exception: " << e << std::endl; return EXIT_FAILURE; } 
diff --git a/example/device/framegrabber/grabDirectShow.cpp b/example/device/framegrabber/grabDirectShow.cpp index 8c56220d09..3e53fc53c7 100644 --- a/example/device/framegrabber/grabDirectShow.cpp +++ b/example/device/framegrabber/grabDirectShow.cpp @@ -134,7 +134,7 @@ bool getOptions(int argc, const char **argv, bool &display, unsigned &nframes, b opath = optarg; break; case 'h': - usage(argv[0], NULL, nframes, opath); + usage(argv[0], nullptr, nframes, opath); return false; break; @@ -147,7 +147,7 @@ bool getOptions(int argc, const char **argv, bool &display, unsigned &nframes, b if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, nframes, opath); + usage(argv[0], nullptr, nframes, opath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; diff --git a/example/device/framegrabber/grabDirectShowMulti.cpp b/example/device/framegrabber/grabDirectShowMulti.cpp index 6fb595223f..26c707b847 100644 --- a/example/device/framegrabber/grabDirectShowMulti.cpp +++ b/example/device/framegrabber/grabDirectShowMulti.cpp @@ -209,14 +209,14 @@ void read_options(int argc, const char **argv, bool &multi, unsigned int &camera mediatypeID = atoi(optarg); break; default: - usage(argv[0], NULL, camera, nframes, opath); + usage(argv[0], nullptr, camera, nframes, opath); break; } } if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, camera, nframes, opath); + usage(argv[0], nullptr, camera, nframes, opath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; } diff --git a/example/device/framegrabber/grabDisk.cpp b/example/device/framegrabber/grabDisk.cpp index 526da60602..cb4ff2dc05 100644 --- a/example/device/framegrabber/grabDisk.cpp +++ b/example/device/framegrabber/grabDisk.cpp @@ -183,7 +183,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &ba nzero = (unsigned)atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, basename, ext, first, last, step, nzero); + usage(argv[0], nullptr, ipath, basename, ext, first, last, step, nzero); return false; break; @@ -196,7 +196,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &ba if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, basename, ext, first, last, step, nzero); + usage(argv[0], nullptr, ipath, basename, ext, first, last, step, nzero); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -265,7 +265,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_basename, opt_ext, opt_first, opt_last, opt_step, opt_nzero); + usage(argv[0], nullptr, ipath, opt_basename, opt_ext, opt_first, opt_last, opt_step, opt_nzero); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/device/framegrabber/grabFlyCapture.cpp b/example/device/framegrabber/grabFlyCapture.cpp index c68add1e12..ff218e7735 100644 --- a/example/device/framegrabber/grabFlyCapture.cpp +++ b/example/device/framegrabber/grabFlyCapture.cpp @@ -141,7 +141,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &click, bool &s opath = optarg_; break; case 'h': - usage(argv[0], NULL, icamera, 
opath); + usage(argv[0], nullptr, icamera, opath); return false; break; @@ -154,7 +154,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &click, bool &s if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, icamera, opath); + usage(argv[0], nullptr, icamera, opath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -189,7 +189,7 @@ int main(int argc, const char **argv) std::cout << "Camera serial: " << g.getCameraSerial(g.getCameraIndex()) << std::endl; std::cout << "Image size : " << I.getWidth() << " " << I.getHeight() << std::endl; - vpDisplay *display = NULL; + vpDisplay *display = nullptr; if (opt_display) { #if defined(VISP_HAVE_X11) display = new vpDisplayX(I); diff --git a/example/device/framegrabber/grabRealSense.cpp b/example/device/framegrabber/grabRealSense.cpp index 517d2b3379..1c5a6bcc99 100644 --- a/example/device/framegrabber/grabRealSense.cpp +++ b/example/device/framegrabber/grabRealSense.cpp @@ -180,10 +180,10 @@ int main() } else { #ifdef VISP_HAVE_PCL - rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, + rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud, (unsigned char *)infrared_display.bitmap); #else - rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth.bitmap, NULL, + rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth.bitmap, nullptr, (unsigned char *)infrared_display.bitmap); #endif } diff --git a/example/device/framegrabber/grabRealSense2.cpp b/example/device/framegrabber/grabRealSense2.cpp index 18f5bcd345..259285372d 100644 --- a/example/device/framegrabber/grabRealSense2.cpp +++ b/example/device/framegrabber/grabRealSense2.cpp @@ -201,12 +201,12 @@ int main() #ifdef VISP_HAVE_PCL { std::lock_guard lock(mutex); - rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, + rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud_color, (unsigned char *)infrared.bitmap); update_pointcloud = true; } #else - rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth.bitmap, NULL, (unsigned char *)infrared.bitmap); + rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth.bitmap, nullptr, (unsigned char *)infrared.bitmap); #endif vpImageConvert::createDepthHistogram(depth, depth_display); diff --git a/example/device/framegrabber/grabV4l2.cpp b/example/device/framegrabber/grabV4l2.cpp index bba25314ce..9590a57a42 100644 --- a/example/device/framegrabber/grabV4l2.cpp +++ b/example/device/framegrabber/grabV4l2.cpp @@ -212,7 +212,7 @@ bool getOptions(int argc, const char **argv, unsigned &fps, unsigned &input, uns verbose = true; break; case 'h': - usage(argv[0], NULL, fps, input, scale, niter, device, pixelformat, image_type, opath); + usage(argv[0], nullptr, fps, input, scale, niter, device, pixelformat, image_type, opath); return false; break; @@ -225,7 +225,7 @@ bool getOptions(int argc, const char **argv, unsigned &fps, unsigned &input, uns if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, fps, input, scale, niter, device, pixelformat, image_type, opath); + usage(argv[0], nullptr, fps, input, scale, niter, device, pixelformat, image_type, opath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git 
a/example/device/framegrabber/grabV4l2MultiCpp11Thread.cpp b/example/device/framegrabber/grabV4l2MultiCpp11Thread.cpp index ea1a2dc508..0753400e97 100644 --- a/example/device/framegrabber/grabV4l2MultiCpp11Thread.cpp +++ b/example/device/framegrabber/grabV4l2MultiCpp11Thread.cpp @@ -108,7 +108,7 @@ bool getOptions(int argc, char **argv, unsigned int &deviceCount, bool &saveVide saveVideo = true; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -121,7 +121,7 @@ bool getOptions(int argc, char **argv, unsigned int &deviceCount, bool &saveVide if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; @@ -251,11 +251,11 @@ class vpShareImage struct vpCancelled_t { }; - vpShareImage() : m_cancelled(false), m_cond(), m_mutex(), m_pImgData(NULL), m_totalSize(0) { } + vpShareImage() : m_cancelled(false), m_cond(), m_mutex(), m_pImgData(nullptr), m_totalSize(0) { } virtual ~vpShareImage() { - if (m_pImgData != NULL) { + if (m_pImgData != nullptr) { delete[] m_pImgData; } } @@ -302,10 +302,10 @@ class vpShareImage { std::lock_guard lock(m_mutex); - if (m_pImgData == NULL || m_totalSize != totalSize) { + if (m_pImgData == nullptr || m_totalSize != totalSize) { m_totalSize = totalSize; - if (m_pImgData != NULL) { + if (m_pImgData != nullptr) { delete[] m_pImgData; } @@ -387,7 +387,7 @@ void display(unsigned int width, unsigned int height, int win_x, int win_y, unsi vpImageConvert::convert(I_green_gaussian_double, I_green_gaussian); vpImageConvert::convert(I_blue_gaussian_double, I_blue_gaussian); - vpImageConvert::merge(&I_red_gaussian, &I_green_gaussian, &I_blue_gaussian, NULL, local_img); + vpImageConvert::merge(&I_red_gaussian, &I_green_gaussian, &I_blue_gaussian, nullptr, local_img); } t = vpTime::measureTimeMs() - t; diff --git a/example/device/framegrabber/readRealSenseData.cpp b/example/device/framegrabber/readRealSenseData.cpp index 43b160a9da..ac2860bdf3 100644 --- a/example/device/framegrabber/readRealSenseData.cpp +++ b/example/device/framegrabber/readRealSenseData.cpp @@ -124,7 +124,7 @@ bool getOptions(int argc, char **argv, std::string &input_directory, bool &click break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -137,7 +137,7 @@ bool getOptions(int argc, char **argv, std::string &input_directory, bool &click if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; diff --git a/example/device/framegrabber/saveRealSenseData.cpp b/example/device/framegrabber/saveRealSenseData.cpp index 7548872130..989510085b 100644 --- a/example/device/framegrabber/saveRealSenseData.cpp +++ b/example/device/framegrabber/saveRealSenseData.cpp @@ -159,7 +159,7 @@ bool getOptions(int argc, char **argv, bool &save, std::string &output_directory break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -172,7 +172,7 @@ bool getOptions(int argc, char **argv, bool &save, std::string &output_directory if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; @@ -535,7 +535,7 @@ int main(int argc, char *argv[]) 
d3.init(I_infrared, I_gray.getWidth() + 80, I_gray.getHeight() + 10, "RealSense infrared stream"); while (true) { - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, NULL); + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr); vpImageConvert::convert(I_color, I_gray); vpImageConvert::createDepthHistogram(I_depth_raw, I_depth); @@ -632,28 +632,28 @@ int main(int argc, char *argv[]) if (use_aligned_stream) { #ifdef USE_REALSENSE2 #ifdef VISP_HAVE_PCL - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, pointCloud, NULL, + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, pointCloud, nullptr, &align_to); #else - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointCloud, NULL, + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointCloud, nullptr, &align_to); #endif #else #ifdef VISP_HAVE_PCL - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, pointCloud, - (unsigned char *)I_infrared.bitmap, NULL, rs::stream::rectified_color, + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, pointCloud, + (unsigned char *)I_infrared.bitmap, nullptr, rs::stream::rectified_color, rs::stream::depth_aligned_to_rectified_color); #else realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointCloud, - (unsigned char *)I_infrared.bitmap, NULL, rs::stream::rectified_color, + (unsigned char *)I_infrared.bitmap, nullptr, rs::stream::rectified_color, rs::stream::depth_aligned_to_rectified_color); #endif #endif } else { #ifdef VISP_HAVE_PCL - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, pointCloud, - (unsigned char *)I_infrared.bitmap, NULL); + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, pointCloud, + (unsigned char *)I_infrared.bitmap, nullptr); #else realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointCloud, (unsigned char *)I_infrared.bitmap); diff --git a/example/device/laserscanner/SickLDMRS-Process.cpp b/example/device/laserscanner/SickLDMRS-Process.cpp index 188424f736..dc1dac47b3 100644 --- a/example/device/laserscanner/SickLDMRS-Process.cpp +++ b/example/device/laserscanner/SickLDMRS-Process.cpp @@ -116,7 +116,7 @@ void *laser_display_and_save_loop(void *) } } - vpDisplay *display = NULL; + vpDisplay *display = nullptr; #ifdef VISP_HAVE_MODULE_GUI #if defined(VISP_HAVE_X11) display = new vpDisplayX; @@ -194,7 +194,7 @@ void *laser_display_and_save_loop(void *) // std::endl; } delete display; - return NULL; + return nullptr; } void *laser_acq_loop(void *) @@ -223,7 +223,7 @@ void *laser_acq_loop(void *) std::cout << "laser acq time: " << vpTime::measureTimeMs() - t1 << std::endl; } - return NULL; + return nullptr; } void *camera_acq_and_display_loop(void *) @@ -235,7 +235,7 @@ void *camera_acq_and_display_loop(void *) // If no camera found return if (g.getNumCameras() == 0) - return NULL; + return nullptr; // g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8); // g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60); @@ -245,7 +245,7 @@ void *camera_acq_and_display_loop(void *) g.acquire(I); // Acquire an image I.quarterSizeImage(Q); - vpDisplay *display = NULL; + vpDisplay 
*display = nullptr; #ifdef VISP_HAVE_MODULE_GUI #if defined(VISP_HAVE_X11) display = new vpDisplayX; @@ -292,7 +292,7 @@ void *camera_acq_and_display_loop(void *) } catch (...) { } #endif - return NULL; + return nullptr; } int main(int argc, const char **argv) @@ -312,18 +312,18 @@ int main(int argc, const char **argv) // Parse the command line to set the variables vpParseArgv::vpArgvInfo argTable[] = { - {"-layer", vpParseArgv::ARGV_INT, (char *)NULL, (char *)&layerToDisplay, + {"-layer", vpParseArgv::ARGV_INT, (char *)nullptr, (char *)&layerToDisplay, "The layer to display:\n" "\t\t. 0x1 for layer 1.\n" "\t\t. 0x2 for layer 2.\n" "\t\t. 0x4 for layer 3.\n" "\t\t. 0x8 for layer 4.\n" "\t\tTo display all the layers you should set 0xF value."}, - {"-save", vpParseArgv::ARGV_INT, (char *)NULL, (char *)&save, "Turn to 1 in order to save data."}, - {"-h", vpParseArgv::ARGV_HELP, (char *)NULL, (char *)NULL, + {"-save", vpParseArgv::ARGV_INT, (char *)nullptr, (char *)&save, "Turn to 1 in order to save data."}, + {"-h", vpParseArgv::ARGV_HELP, (char *)nullptr, (char *)nullptr, "Display one or more measured layers form a Sick LD-MRS laser " "scanner."}, - {(char *)NULL, vpParseArgv::ARGV_END, (char *)NULL, (char *)NULL, (char *)NULL}}; + {(char *)nullptr, vpParseArgv::ARGV_END, (char *)nullptr, (char *)nullptr, (char *)nullptr}}; // Read the command line options if (vpParseArgv::parse(&argc, argv, argTable, @@ -337,9 +337,9 @@ int main(int argc, const char **argv) pthread_t thread_camera_acq; pthread_t thread_laser_acq; pthread_t thread_laser_display; - pthread_create(&thread_camera_acq, NULL, &camera_acq_and_display_loop, NULL); - pthread_create(&thread_laser_acq, NULL, &laser_acq_loop, NULL); - pthread_create(&thread_laser_display, NULL, &laser_display_and_save_loop, NULL); + pthread_create(&thread_camera_acq, nullptr, &camera_acq_and_display_loop, nullptr); + pthread_create(&thread_laser_acq, nullptr, &laser_acq_loop, nullptr); + pthread_create(&thread_laser_display, nullptr, &laser_display_and_save_loop, nullptr); pthread_join(thread_camera_acq, 0); pthread_join(thread_laser_acq, 0); pthread_join(thread_laser_display, 0); diff --git a/example/device/light/ringLight.cpp b/example/device/light/ringLight.cpp index 955a58ace0..a0c908af88 100644 --- a/example/device/light/ringLight.cpp +++ b/example/device/light/ringLight.cpp @@ -141,7 +141,7 @@ bool getOptions(int argc, const char **argv, bool &on, int &nsec, double &nmsec) nmsec = atof(optarg); break; case 'h': - usage(argv[0], NULL, nsec, nmsec); + usage(argv[0], nullptr, nsec, nmsec); return false; break; @@ -154,7 +154,7 @@ bool getOptions(int argc, const char **argv, bool &on, int &nsec, double &nmsec) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, nsec, nmsec); + usage(argv[0], nullptr, nsec, nmsec); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; diff --git a/example/direct-visual-servoing/photometricVisualServoing.cpp b/example/direct-visual-servoing/photometricVisualServoing.cpp index 6e2471f0a6..3d64ef22be 100644 --- a/example/direct-visual-servoing/photometricVisualServoing.cpp +++ b/example/direct-visual-servoing/photometricVisualServoing.cpp @@ -149,7 +149,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all niter = atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, niter); + usage(argv[0], nullptr, ipath, niter); return false; default: @@ -160,7 +160,7 @@ bool getOptions(int argc, const 
char **argv, std::string &ipath, bool &click_all if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, niter); + usage(argv[0], nullptr, ipath, niter); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -211,7 +211,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_niter); + usage(argv[0], nullptr, ipath, opt_niter); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/direct-visual-servoing/photometricVisualServoingWithoutVpServo.cpp b/example/direct-visual-servoing/photometricVisualServoingWithoutVpServo.cpp index 31e590054c..19ead6ef91 100644 --- a/example/direct-visual-servoing/photometricVisualServoingWithoutVpServo.cpp +++ b/example/direct-visual-servoing/photometricVisualServoingWithoutVpServo.cpp @@ -150,7 +150,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all niter = atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, niter); + usage(argv[0], nullptr, ipath, niter); return false; default: @@ -161,7 +161,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, niter); + usage(argv[0], nullptr, ipath, niter); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -212,7 +212,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_niter); + usage(argv[0], nullptr, ipath, opt_niter); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/homography/homographyHLM2DObject.cpp b/example/homography/homographyHLM2DObject.cpp index 8535960ad2..ef357b8043 100644 --- a/example/homography/homographyHLM2DObject.cpp +++ b/example/homography/homographyHLM2DObject.cpp @@ -115,7 +115,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -128,7 +128,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/homography/homographyHLM3DObject.cpp b/example/homography/homographyHLM3DObject.cpp index 9ead514611..ffafeab16a 100644 --- a/example/homography/homographyHLM3DObject.cpp +++ b/example/homography/homographyHLM3DObject.cpp @@ -116,7 +116,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -129,7 +129,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git 
a/example/homography/homographyHartleyDLT2DObject.cpp b/example/homography/homographyHartleyDLT2DObject.cpp index 74d5f5102d..cce2a041ba 100644 --- a/example/homography/homographyHartleyDLT2DObject.cpp +++ b/example/homography/homographyHartleyDLT2DObject.cpp @@ -115,7 +115,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -128,7 +128,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/homography/homographyRansac2DObject.cpp b/example/homography/homographyRansac2DObject.cpp index c8dda44226..184f1dcf00 100644 --- a/example/homography/homographyRansac2DObject.cpp +++ b/example/homography/homographyRansac2DObject.cpp @@ -114,7 +114,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -127,7 +127,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/image/imageDiskRW.cpp b/example/image/imageDiskRW.cpp index 10af918a45..c4f39f762a 100644 --- a/example/image/imageDiskRW.cpp +++ b/example/image/imageDiskRW.cpp @@ -134,7 +134,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -147,7 +147,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -215,7 +215,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(dirname); } catch (...) 
{ - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << dirname << std::endl; std::cerr << " Check your -o " << opath << " option " << std::endl; @@ -236,7 +236,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/manual/ogre/HelloWorldOgreAdvanced.cpp b/example/manual/ogre/HelloWorldOgreAdvanced.cpp index d301bda557..2ca3f16904 100644 --- a/example/manual/ogre/HelloWorldOgreAdvanced.cpp +++ b/example/manual/ogre/HelloWorldOgreAdvanced.cpp @@ -67,7 +67,7 @@ class vpAROgreAdvanced : public vpAROgre unsigned int height = 480) : vpAROgre(cam, width, height) { - mAnimationState = NULL; + mAnimationState = nullptr; } protected: diff --git a/example/manual/simulation/manSimu4Dots.cpp b/example/manual/simulation/manSimu4Dots.cpp index 6e0fdd876f..b2547edbfb 100644 --- a/example/manual/simulation/manSimu4Dots.cpp +++ b/example/manual/simulation/manSimu4Dots.cpp @@ -236,7 +236,7 @@ static void *mainLoop(void *_simu) task.print(); simu->closeMainApplication(); - void *a = NULL; + void *a = nullptr; return a; } diff --git a/example/manual/simulation/manSimu4Points.cpp b/example/manual/simulation/manSimu4Points.cpp index 8486eecda9..99502c3c62 100644 --- a/example/manual/simulation/manSimu4Points.cpp +++ b/example/manual/simulation/manSimu4Points.cpp @@ -187,7 +187,7 @@ static void *mainLoop(void *_simu) } simu->closeMainApplication(); - void *a = NULL; + void *a = nullptr; return a; // return (void *); } diff --git a/example/math/BSpline.cpp b/example/math/BSpline.cpp index 5056dfded9..b3507584fb 100644 --- a/example/math/BSpline.cpp +++ b/example/math/BSpline.cpp @@ -133,7 +133,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -146,7 +146,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -253,13 +253,13 @@ int main(int argc, const char **argv) unsigned int i = bSpline.findSpan(5 / 2.0); std::cout << "The knot interval number for the value u = 5/2 is : " << i << std::endl; - vpBasisFunction *N = NULL; + vpBasisFunction *N = nullptr; N = bSpline.computeBasisFuns(5 / 2.0); std::cout << "The nonvanishing basis functions N(u=5/2) are :" << std::endl; for (unsigned int j = 0; j < bSpline.get_p() + 1; j++) std::cout << N[j].value << std::endl; - vpBasisFunction **N2 = NULL; + vpBasisFunction **N2 = nullptr; N2 = bSpline.computeDersBasisFuns(5 / 2.0, 2); std::cout << "The first derivatives of the basis functions N'(u=5/2) are :" << std::endl; for (unsigned int j = 0; j < bSpline.get_p() + 1; j++) @@ -283,9 +283,9 @@ int main(int argc, const char **argv) vpDisplay::getClick(I); } - if (N != NULL) + if (N != nullptr) delete [] N; - if (N2 != NULL) { + if (N2 != nullptr) { for (unsigned int j = 0; j <= 2; 
j++) delete [] N2[j]; delete [] N2; diff --git a/example/math/quadprog.cpp b/example/math/quadprog.cpp index 116b570804..26324c8b1e 100644 --- a/example/math/quadprog.cpp +++ b/example/math/quadprog.cpp @@ -120,7 +120,7 @@ int main(int argc, char **argv) const double eps = 1e-2; #ifdef VISP_HAVE_DISPLAY - QPlot *plot = NULL; + QPlot *plot = nullptr; if (opt_display) plot = new QPlot(1, total, { "time to solveQP", "warm start" }); #endif diff --git a/example/math/quadprog_eq.cpp b/example/math/quadprog_eq.cpp index 49c20b23de..079f34a77c 100644 --- a/example/math/quadprog_eq.cpp +++ b/example/math/quadprog_eq.cpp @@ -123,7 +123,7 @@ int main(int argc, char **argv) const double eps = 1e-2; #ifdef VISP_HAVE_DISPLAY - QPlot *plot = NULL; + QPlot *plot = nullptr; if (opt_display) plot = new QPlot(2, total, { "only equalities", "pre-solving", "equalities + inequalities", "pre-solving / warm start" }); diff --git a/example/moments/image/servoMomentImage.cpp b/example/moments/image/servoMomentImage.cpp index 17bed69c0f..bd3c7045ae 100644 --- a/example/moments/image/servoMomentImage.cpp +++ b/example/moments/image/servoMomentImage.cpp @@ -94,7 +94,7 @@ class servoMoment : m_width(640), m_height(480), m_cMo(), m_cdMo(), m_robot(), m_Iint(m_height, m_width, 0), m_task(), m_cam(), m_error(0), m_imsim(), m_cur_img(m_height, m_width, 0), m_src_img(m_height, m_width, 0), m_dst_img(m_height, m_width, 0), m_start_img(m_height, m_width, 0), m_interaction_type(), m_src(6), m_dst(6), - m_moments(NULL), m_momentsDes(NULL), m_featureMoments(NULL), m_featureMomentsDes(NULL), m_displayInt(NULL) + m_moments(nullptr), m_momentsDes(nullptr), m_featureMoments(nullptr), m_featureMomentsDes(nullptr), m_displayInt(nullptr) { } ~servoMoment() { diff --git a/example/moments/points/servoMomentPoints.cpp b/example/moments/points/servoMomentPoints.cpp index 34dc30ef5e..53c0f5782c 100644 --- a/example/moments/points/servoMomentPoints.cpp +++ b/example/moments/points/servoMomentPoints.cpp @@ -91,8 +91,8 @@ class servoMoment public: servoMoment() : m_width(640), m_height(480), m_cMo(), m_cdMo(), m_robot(false), m_Iint(m_height, m_width, 255), m_task(), m_cam(), - m_error(0), m_imsim(), m_interaction_type(), m_src(6), m_dst(6), m_moments(NULL), m_momentsDes(NULL), - m_featureMoments(NULL), m_featureMomentsDes(NULL), m_displayInt(NULL) + m_error(0), m_imsim(), m_interaction_type(), m_src(6), m_dst(6), m_moments(nullptr), m_momentsDes(nullptr), + m_featureMoments(nullptr), m_featureMomentsDes(nullptr), m_displayInt(nullptr) { } ~servoMoment() { diff --git a/example/moments/polygon/servoMomentPolygon.cpp b/example/moments/polygon/servoMomentPolygon.cpp index f1637b6246..4134c33caa 100644 --- a/example/moments/polygon/servoMomentPolygon.cpp +++ b/example/moments/polygon/servoMomentPolygon.cpp @@ -90,8 +90,8 @@ class servoMoment public: servoMoment() : m_width(640), m_height(480), m_cMo(), m_cdMo(), m_robot(false), m_Iint(m_height, m_width, 255), m_task(), m_cam(), - m_error(0), m_imsim(), m_interaction_type(), m_src(6), m_dst(6), m_moments(NULL), m_momentsDes(NULL), - m_featureMoments(NULL), m_featureMomentsDes(NULL), m_displayInt(NULL) + m_error(0), m_imsim(), m_interaction_type(), m_src(6), m_dst(6), m_moments(nullptr), m_momentsDes(nullptr), + m_featureMoments(nullptr), m_featureMomentsDes(nullptr), m_displayInt(nullptr) { } ~servoMoment() { diff --git a/example/ogre-simulator/AROgre.cpp b/example/ogre-simulator/AROgre.cpp index 2ba72b250c..05b4b36f24 100644 --- a/example/ogre-simulator/AROgre.cpp +++ 
b/example/ogre-simulator/AROgre.cpp @@ -161,7 +161,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp ppath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, ppath); + usage(argv[0], nullptr, ipath, ppath); return false; break; @@ -174,7 +174,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath); + usage(argv[0], nullptr, ipath, ppath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -190,7 +190,7 @@ class vpAROgreExample : public vpAROgre public: // The constructor doesn't change here vpAROgreExample(const vpCameraParameters &cam = vpCameraParameters(), unsigned int width = 640, - unsigned int height = 480, const char *resourcePath = NULL) + unsigned int height = 480, const char *resourcePath = nullptr) : vpAROgre(cam, width, height) { // Direction vectors @@ -198,8 +198,8 @@ class vpAROgreExample : public vpAROgre mResourcePath = resourcePath; std::cout << "mResourcePath: " << mResourcePath << std::endl; vecDevant = Ogre::Vector3(0, -1, 0); - robot = NULL; - mAnimationState = NULL; + robot = nullptr; + mAnimationState = nullptr; } protected: @@ -572,7 +572,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath); + usage(argv[0], nullptr, ipath, opt_ppath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/ogre-simulator/AROgreBasic.cpp b/example/ogre-simulator/AROgreBasic.cpp index 17ecd07bbe..7f740ac9c0 100644 --- a/example/ogre-simulator/AROgreBasic.cpp +++ b/example/ogre-simulator/AROgreBasic.cpp @@ -159,7 +159,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp ppath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, ppath); + usage(argv[0], nullptr, ipath, ppath); return false; break; @@ -172,7 +172,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath); + usage(argv[0], nullptr, ipath, ppath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -436,7 +436,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath); + usage(argv[0], nullptr, ipath, opt_ppath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/parse-argv/parse-argv1.cpp b/example/parse-argv/parse-argv1.cpp index d57e596612..0764f3fa9b 100644 --- a/example/parse-argv/parse-argv1.cpp +++ b/example/parse-argv/parse-argv1.cpp @@ -129,7 +129,7 @@ bool getOptions(int argc, const char **argv, int &i_val, float &f_val, double &d i_val = atoi(optarg_); break; case 'h': - usage(argv[0], NULL, i_val, f_val, d_val); + usage(argv[0], nullptr, i_val, f_val, d_val); return false; break; @@ -142,7 +142,7 @@ bool getOptions(int argc, const char **argv, int &i_val, float &f_val, double &d if 
((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, i_val, f_val, d_val); + usage(argv[0], nullptr, i_val, f_val, d_val); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/parse-argv/parse-argv2.cpp b/example/parse-argv/parse-argv2.cpp index 07d2e414d7..bf8534c8a0 100644 --- a/example/parse-argv/parse-argv2.cpp +++ b/example/parse-argv/parse-argv2.cpp @@ -64,17 +64,17 @@ int main(int argc, const char **argv) long long_val = 33333333; float float_val = 3.14f; double double_val = 3.1415; - char *string_val = NULL; + char *string_val = nullptr; vpParseArgv::vpArgvInfo argTable[] = { {"-bool", vpParseArgv::ARGV_CONSTANT_BOOL, 0, (char *)&bool_val, "Bool enabled."}, - {"-integer", vpParseArgv::ARGV_INT, (char *)NULL, (char *)&int_val, "An integer value."}, - {"-long", vpParseArgv::ARGV_LONG, (char *)NULL, (char *)&long_val, "A long value."}, - {"-float", vpParseArgv::ARGV_FLOAT, (char *)NULL, (char *)&float_val, "A float value."}, - {"-double", vpParseArgv::ARGV_DOUBLE, (char *)NULL, (char *)&double_val, "A double value."}, - {"-string", vpParseArgv::ARGV_STRING, (char *)NULL, (char *)&string_val, "A chain value."}, - {"-h", vpParseArgv::ARGV_HELP, (char *)NULL, (char *)NULL, "Print the help."}, - {(char *)NULL, vpParseArgv::ARGV_END, (char *)NULL, (char *)NULL, (char *)NULL}}; + {"-integer", vpParseArgv::ARGV_INT, (char *)nullptr, (char *)&int_val, "An integer value."}, + {"-long", vpParseArgv::ARGV_LONG, (char *)nullptr, (char *)&long_val, "A long value."}, + {"-float", vpParseArgv::ARGV_FLOAT, (char *)nullptr, (char *)&float_val, "A float value."}, + {"-double", vpParseArgv::ARGV_DOUBLE, (char *)nullptr, (char *)&double_val, "A double value."}, + {"-string", vpParseArgv::ARGV_STRING, (char *)nullptr, (char *)&string_val, "A chain value."}, + {"-h", vpParseArgv::ARGV_HELP, (char *)nullptr, (char *)nullptr, "Print the help."}, + {(char *)nullptr, vpParseArgv::ARGV_END, (char *)nullptr, (char *)nullptr, (char *)nullptr}}; // Read the command line options if (vpParseArgv::parse(&argc, argv, argTable, vpParseArgv::ARGV_NO_DEFAULTS)) { @@ -87,7 +87,7 @@ int main(int argc, const char **argv) cout << " Long value: " << long_val << endl; cout << " Float value: " << float_val << endl; cout << " Double value: " << double_val << endl; - if (string_val != NULL) + if (string_val != nullptr) cout << " String value: " << string_val << endl; else cout << " String value: \"\"" << endl << endl; diff --git a/example/pose-estimation/poseVirtualVS.cpp b/example/pose-estimation/poseVirtualVS.cpp index 728e2156f9..74060d07fc 100644 --- a/example/pose-estimation/poseVirtualVS.cpp +++ b/example/pose-estimation/poseVirtualVS.cpp @@ -203,7 +203,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp step = (unsigned)atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); return false; break; @@ -216,7 +216,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -286,7 +286,7 @@ int main(int argc, const char **argv) } // Test if an input path is set if 
(opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step); + usage(argv[0], nullptr, ipath, opt_ppath, opt_first, opt_last, opt_step); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/robot-simulator/afma6/servoSimuAfma6FourPoints2DCamVelocity.cpp b/example/robot-simulator/afma6/servoSimuAfma6FourPoints2DCamVelocity.cpp index ba21c02177..649961326d 100644 --- a/example/robot-simulator/afma6/servoSimuAfma6FourPoints2DCamVelocity.cpp +++ b/example/robot-simulator/afma6/servoSimuAfma6FourPoints2DCamVelocity.cpp @@ -148,7 +148,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -161,7 +161,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimu3D_cMcd_CamVelocity.cpp b/example/robot-simulator/camera/servoSimu3D_cMcd_CamVelocity.cpp index 33f398967c..6af43f9c55 100644 --- a/example/robot-simulator/camera/servoSimu3D_cMcd_CamVelocity.cpp +++ b/example/robot-simulator/camera/servoSimu3D_cMcd_CamVelocity.cpp @@ -116,7 +116,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -127,7 +127,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimu3D_cMcd_CamVelocityWithoutVpServo.cpp b/example/robot-simulator/camera/servoSimu3D_cMcd_CamVelocityWithoutVpServo.cpp index 461bb81b8a..b68df9d2ee 100644 --- a/example/robot-simulator/camera/servoSimu3D_cMcd_CamVelocityWithoutVpServo.cpp +++ b/example/robot-simulator/camera/servoSimu3D_cMcd_CamVelocityWithoutVpServo.cpp @@ -140,7 +140,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -151,7 +151,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimu3D_cdMc_CamVelocity.cpp b/example/robot-simulator/camera/servoSimu3D_cdMc_CamVelocity.cpp index 0ecdff8951..d5b615f6f2 100644 --- a/example/robot-simulator/camera/servoSimu3D_cdMc_CamVelocity.cpp +++ b/example/robot-simulator/camera/servoSimu3D_cdMc_CamVelocity.cpp @@ -117,7 +117,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -128,7 +128,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); 
std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimu3D_cdMc_CamVelocityWithoutVpServo.cpp b/example/robot-simulator/camera/servoSimu3D_cdMc_CamVelocityWithoutVpServo.cpp index 12cee6a614..37cb1dfb9c 100644 --- a/example/robot-simulator/camera/servoSimu3D_cdMc_CamVelocityWithoutVpServo.cpp +++ b/example/robot-simulator/camera/servoSimu3D_cdMc_CamVelocityWithoutVpServo.cpp @@ -132,7 +132,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -143,7 +143,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuCircle2DCamVelocity.cpp b/example/robot-simulator/camera/servoSimuCircle2DCamVelocity.cpp index 11dcdcd0a5..2ab28359fc 100644 --- a/example/robot-simulator/camera/servoSimuCircle2DCamVelocity.cpp +++ b/example/robot-simulator/camera/servoSimuCircle2DCamVelocity.cpp @@ -115,7 +115,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -126,7 +126,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuCircle2DCamVelocityDisplay.cpp b/example/robot-simulator/camera/servoSimuCircle2DCamVelocityDisplay.cpp index 1c0137c4ff..e8679dc4ee 100644 --- a/example/robot-simulator/camera/servoSimuCircle2DCamVelocityDisplay.cpp +++ b/example/robot-simulator/camera/servoSimuCircle2DCamVelocityDisplay.cpp @@ -139,7 +139,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -150,7 +150,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuCylinder2DCamVelocityDisplay.cpp b/example/robot-simulator/camera/servoSimuCylinder2DCamVelocityDisplay.cpp index b218f53e9d..303387dd4a 100644 --- a/example/robot-simulator/camera/servoSimuCylinder2DCamVelocityDisplay.cpp +++ b/example/robot-simulator/camera/servoSimuCylinder2DCamVelocityDisplay.cpp @@ -136,7 +136,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -147,7 +147,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git 
a/example/robot-simulator/camera/servoSimuCylinder2DCamVelocityDisplaySecondaryTask.cpp b/example/robot-simulator/camera/servoSimuCylinder2DCamVelocityDisplaySecondaryTask.cpp index cd620bf763..7599c18c77 100644 --- a/example/robot-simulator/camera/servoSimuCylinder2DCamVelocityDisplaySecondaryTask.cpp +++ b/example/robot-simulator/camera/servoSimuCylinder2DCamVelocityDisplaySecondaryTask.cpp @@ -145,7 +145,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, new_proj_operator = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -156,7 +156,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuFourPoints2DCamVelocity.cpp b/example/robot-simulator/camera/servoSimuFourPoints2DCamVelocity.cpp index 44d4f0ad66..8f2fd160e1 100644 --- a/example/robot-simulator/camera/servoSimuFourPoints2DCamVelocity.cpp +++ b/example/robot-simulator/camera/servoSimuFourPoints2DCamVelocity.cpp @@ -116,7 +116,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -127,7 +127,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuFourPoints2DCamVelocityDisplay.cpp b/example/robot-simulator/camera/servoSimuFourPoints2DCamVelocityDisplay.cpp index 1a5fb28ab9..ed0597d062 100644 --- a/example/robot-simulator/camera/servoSimuFourPoints2DCamVelocityDisplay.cpp +++ b/example/robot-simulator/camera/servoSimuFourPoints2DCamVelocityDisplay.cpp @@ -143,7 +143,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -154,7 +154,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuFourPoints2DPolarCamVelocityDisplay.cpp b/example/robot-simulator/camera/servoSimuFourPoints2DPolarCamVelocityDisplay.cpp index 8feb9ac384..7612b05139 100644 --- a/example/robot-simulator/camera/servoSimuFourPoints2DPolarCamVelocityDisplay.cpp +++ b/example/robot-simulator/camera/servoSimuFourPoints2DPolarCamVelocityDisplay.cpp @@ -145,7 +145,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -156,7 +156,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return 
false; diff --git a/example/robot-simulator/camera/servoSimuLine2DCamVelocityDisplay.cpp b/example/robot-simulator/camera/servoSimuLine2DCamVelocityDisplay.cpp index 12522e583f..7b5df1375c 100644 --- a/example/robot-simulator/camera/servoSimuLine2DCamVelocityDisplay.cpp +++ b/example/robot-simulator/camera/servoSimuLine2DCamVelocityDisplay.cpp @@ -135,7 +135,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -146,7 +146,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuPoint2DCamVelocity1.cpp b/example/robot-simulator/camera/servoSimuPoint2DCamVelocity1.cpp index 71fd7702a0..d1c5358b75 100644 --- a/example/robot-simulator/camera/servoSimuPoint2DCamVelocity1.cpp +++ b/example/robot-simulator/camera/servoSimuPoint2DCamVelocity1.cpp @@ -106,7 +106,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -117,7 +117,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuPoint2DCamVelocity2.cpp b/example/robot-simulator/camera/servoSimuPoint2DCamVelocity2.cpp index 60b02f9a8b..a4735b704b 100644 --- a/example/robot-simulator/camera/servoSimuPoint2DCamVelocity2.cpp +++ b/example/robot-simulator/camera/servoSimuPoint2DCamVelocity2.cpp @@ -114,7 +114,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -125,7 +125,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuPoint2DCamVelocity3.cpp b/example/robot-simulator/camera/servoSimuPoint2DCamVelocity3.cpp index 68734766bc..bb4b90bfd2 100644 --- a/example/robot-simulator/camera/servoSimuPoint2DCamVelocity3.cpp +++ b/example/robot-simulator/camera/servoSimuPoint2DCamVelocity3.cpp @@ -113,7 +113,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -124,7 +124,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity1.cpp b/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity1.cpp index 195ac04bd2..a2c831ebb4 100644 --- a/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity1.cpp +++ b/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity1.cpp @@ -113,7 
+113,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -124,7 +124,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity2.cpp b/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity2.cpp index 4ce3cba734..2e3b97ae49 100644 --- a/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity2.cpp +++ b/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity2.cpp @@ -109,7 +109,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -120,7 +120,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity3.cpp b/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity3.cpp index fb6fb34024..656e9e0274 100644 --- a/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity3.cpp +++ b/example/robot-simulator/camera/servoSimuPoint2DhalfCamVelocity3.cpp @@ -109,7 +109,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -120,7 +120,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuPoint3DCamVelocity.cpp b/example/robot-simulator/camera/servoSimuPoint3DCamVelocity.cpp index 04af2bdf0c..fb36403ea4 100644 --- a/example/robot-simulator/camera/servoSimuPoint3DCamVelocity.cpp +++ b/example/robot-simulator/camera/servoSimuPoint3DCamVelocity.cpp @@ -108,7 +108,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -119,7 +119,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuSphere2DCamVelocity.cpp b/example/robot-simulator/camera/servoSimuSphere2DCamVelocity.cpp index 0f24abc76d..d0fda0cc78 100644 --- a/example/robot-simulator/camera/servoSimuSphere2DCamVelocity.cpp +++ b/example/robot-simulator/camera/servoSimuSphere2DCamVelocity.cpp @@ -108,7 +108,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -119,7 +119,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; 
return false; diff --git a/example/robot-simulator/camera/servoSimuSphere2DCamVelocityDisplay.cpp b/example/robot-simulator/camera/servoSimuSphere2DCamVelocityDisplay.cpp index 2ff819188c..c0d1dffc75 100644 --- a/example/robot-simulator/camera/servoSimuSphere2DCamVelocityDisplay.cpp +++ b/example/robot-simulator/camera/servoSimuSphere2DCamVelocityDisplay.cpp @@ -128,7 +128,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -139,7 +139,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuSphere2DCamVelocityDisplaySecondaryTask.cpp b/example/robot-simulator/camera/servoSimuSphere2DCamVelocityDisplaySecondaryTask.cpp index f138daa936..99b0da9da4 100644 --- a/example/robot-simulator/camera/servoSimuSphere2DCamVelocityDisplaySecondaryTask.cpp +++ b/example/robot-simulator/camera/servoSimuSphere2DCamVelocityDisplaySecondaryTask.cpp @@ -139,7 +139,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, new_proj_operator = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -150,7 +150,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuSquareLine2DCamVelocityDisplay.cpp b/example/robot-simulator/camera/servoSimuSquareLine2DCamVelocityDisplay.cpp index 55edc655a7..06defae5d3 100644 --- a/example/robot-simulator/camera/servoSimuSquareLine2DCamVelocityDisplay.cpp +++ b/example/robot-simulator/camera/servoSimuSquareLine2DCamVelocityDisplay.cpp @@ -136,7 +136,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -147,7 +147,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/robot-simulator/camera/servoSimuThetaUCamVelocity.cpp b/example/robot-simulator/camera/servoSimuThetaUCamVelocity.cpp index 89fcf46842..ae54cecbb5 100644 --- a/example/robot-simulator/camera/servoSimuThetaUCamVelocity.cpp +++ b/example/robot-simulator/camera/servoSimuThetaUCamVelocity.cpp @@ -108,7 +108,7 @@ bool getOptions(int argc, const char **argv) switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; default: @@ -119,7 +119,7 @@ bool getOptions(int argc, const char **argv) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git 
a/example/robot-simulator/viper850/servoSimuViper850FourPoints2DCamVelocity.cpp b/example/robot-simulator/viper850/servoSimuViper850FourPoints2DCamVelocity.cpp index f91fa8bc6f..b99dc47015 100644 --- a/example/robot-simulator/viper850/servoSimuViper850FourPoints2DCamVelocity.cpp +++ b/example/robot-simulator/viper850/servoSimuViper850FourPoints2DCamVelocity.cpp @@ -148,7 +148,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -161,7 +161,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/servo-afma4/moveAfma4.cpp b/example/servo-afma4/moveAfma4.cpp index ad8444e67e..5594c45864 100644 --- a/example/servo-afma4/moveAfma4.cpp +++ b/example/servo-afma4/moveAfma4.cpp @@ -120,7 +120,7 @@ bool getOptions(int argc, const char **argv, bool &control) control = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -133,7 +133,7 @@ bool getOptions(int argc, const char **argv, bool &control) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; diff --git a/example/servo-afma4/servoAfma4Point2DCamVelocityKalman.cpp b/example/servo-afma4/servoAfma4Point2DCamVelocityKalman.cpp index d2b3430425..b7a52a5797 100644 --- a/example/servo-afma4/servoAfma4Point2DCamVelocityKalman.cpp +++ b/example/servo-afma4/servoAfma4Point2DCamVelocityKalman.cpp @@ -154,7 +154,7 @@ bool getOptions(int argc, const char **argv, KalmanType &kalman, bool &doAdaptat lambda.initFromConstant(atof(optarg)); break; case 'h': - usage(argv[0], NULL, kalman); + usage(argv[0], nullptr, kalman); return false; break; @@ -167,7 +167,7 @@ bool getOptions(int argc, const char **argv, KalmanType &kalman, bool &doAdaptat if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, kalman); + usage(argv[0], nullptr, kalman); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; diff --git a/example/servo-biclops/moveBiclops.cpp b/example/servo-biclops/moveBiclops.cpp index d1ab30ce44..55cce1c1a8 100644 --- a/example/servo-biclops/moveBiclops.cpp +++ b/example/servo-biclops/moveBiclops.cpp @@ -102,7 +102,7 @@ bool getOptions(int argc, const char **argv, std::string &conf) conf = optarg_; break; case 'h': - usage(argv[0], NULL, conf); + usage(argv[0], nullptr, conf); return false; break; @@ -115,7 +115,7 @@ bool getOptions(int argc, const char **argv, std::string &conf) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, conf); + usage(argv[0], nullptr, conf); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/servo-biclops/servoBiclopsPoint2DArtVelocity.cpp b/example/servo-biclops/servoBiclopsPoint2DArtVelocity.cpp index dd63b1dba8..c969ba5a5a 100644 --- a/example/servo-biclops/servoBiclopsPoint2DArtVelocity.cpp +++ b/example/servo-biclops/servoBiclopsPoint2DArtVelocity.cpp @@ -120,7 +120,7 @@ bool getOptions(int 
argc, const char **argv, std::string &conf) conf = optarg_; break; case 'h': - usage(argv[0], NULL, conf); + usage(argv[0], nullptr, conf); return false; break; @@ -133,7 +133,7 @@ bool getOptions(int argc, const char **argv, std::string &conf) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, conf); + usage(argv[0], nullptr, conf); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/example/servo-pioneer/sonarPioneerReader.cpp b/example/servo-pioneer/sonarPioneerReader.cpp index 77e0e78c27..048b9f610a 100644 --- a/example/servo-pioneer/sonarPioneerReader.cpp +++ b/example/servo-pioneer/sonarPioneerReader.cpp @@ -81,11 +81,11 @@ void sonarPrinter(void) ArPose *pose; sd = (ArSonarDevice *)robot->findRangeDevice("sonar"); - if (sd != NULL) + if (sd != nullptr) { sd->lockDevice(); readings = sd->getCurrentBuffer(); - if (readings != NULL) + if (readings != nullptr) { for (it = readings->begin(); it != readings->end(); ++it) { @@ -158,7 +158,7 @@ void sonarPrinter(void) ArSensorReading *reading; for (int sensor = 0; sensor < robot->getNumSonar(); sensor++) { reading = robot->getSonarReading(sensor); - if (reading != NULL) { + if (reading != nullptr) { angle = reading->getSensorTh(); range = reading->getRange(); double sx = reading->getSensorX(); // position of the sensor in the robot frame @@ -328,7 +328,7 @@ int main(int argc, char **argv) #if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) if (isInitialized) { - if (d != NULL) + if (d != nullptr) delete d; } #endif diff --git a/example/servo-viper850/servoViper850Point2DCamVelocityKalman.cpp b/example/servo-viper850/servoViper850Point2DCamVelocityKalman.cpp index c02068f252..0cebdd1212 100644 --- a/example/servo-viper850/servoViper850Point2DCamVelocityKalman.cpp +++ b/example/servo-viper850/servoViper850Point2DCamVelocityKalman.cpp @@ -236,7 +236,7 @@ int main() double t_0, t_1, Tv; vpColVector err(2), err_1(2); vpColVector dedt_filt(2), dedt_mes(2); - dc1394video_frame_t *frame = NULL; + dc1394video_frame_t *frame = nullptr; t_1 = vpTime::measureTimeMs(); diff --git a/example/tools/histogram.cpp b/example/tools/histogram.cpp index 389a2d69a9..22b54ee5ba 100644 --- a/example/tools/histogram.cpp +++ b/example/tools/histogram.cpp @@ -137,7 +137,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -150,7 +150,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -208,7 +208,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(dirname); } catch (...) 
{ - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << dirname << std::endl; std::cerr << " Check your -o " << opath << " option " << std::endl; @@ -229,7 +229,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opath, username); + usage(argv[0], nullptr, ipath, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tools/parallelPort.cpp b/example/tools/parallelPort.cpp index 6f154bd1cc..d3b55d54e0 100644 --- a/example/tools/parallelPort.cpp +++ b/example/tools/parallelPort.cpp @@ -123,7 +123,7 @@ bool getOptions(int argc, const char **argv, unsigned char &data) break; } case 'h': - usage(argv[0], NULL, data); + usage(argv[0], nullptr, data); return false; break; @@ -136,7 +136,7 @@ bool getOptions(int argc, const char **argv, unsigned char &data) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, data); + usage(argv[0], nullptr, data); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg << std::endl << std::endl; return false; diff --git a/example/tracking/mbtEdgeKltTracking.cpp b/example/tracking/mbtEdgeKltTracking.cpp index 747b9ee928..1cee25a362 100644 --- a/example/tracking/mbtEdgeKltTracking.cpp +++ b/example/tracking/mbtEdgeKltTracking.cpp @@ -206,7 +206,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co projectionError = true; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -219,7 +219,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -276,7 +276,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tracking/mbtEdgeTracking.cpp b/example/tracking/mbtEdgeTracking.cpp index 136ffc7aa8..5381e916c9 100644 --- a/example/tracking/mbtEdgeTracking.cpp +++ b/example/tracking/mbtEdgeTracking.cpp @@ -206,7 +206,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co projectionError = true; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -219,7 +219,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -276,7 +276,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << 
" Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tracking/mbtGenericTracking.cpp b/example/tracking/mbtGenericTracking.cpp index 007fe75c44..5249b137da 100644 --- a/example/tracking/mbtGenericTracking.cpp +++ b/example/tracking/mbtGenericTracking.cpp @@ -214,7 +214,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co trackerType = atoi(optarg_); break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -227,7 +227,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -283,7 +283,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -656,7 +656,7 @@ int main(int argc, const char **argv) reader.close(); delete tracker; - tracker = NULL; + tracker = nullptr; return EXIT_SUCCESS; } diff --git a/example/tracking/mbtGenericTracking2.cpp b/example/tracking/mbtGenericTracking2.cpp index 050dc6066d..357dc9cf56 100644 --- a/example/tracking/mbtGenericTracking2.cpp +++ b/example/tracking/mbtGenericTracking2.cpp @@ -214,7 +214,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co trackerType = atoi(optarg_); break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -227,7 +227,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -283,7 +283,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -743,7 +743,7 @@ int main(int argc, const char **argv) reader.close(); delete tracker; - tracker = NULL; + tracker = nullptr; return EXIT_SUCCESS; } diff --git a/example/tracking/mbtGenericTrackingDepth.cpp b/example/tracking/mbtGenericTrackingDepth.cpp index 32dbb3cd4e..bad1cf3f8c 100644 --- a/example/tracking/mbtGenericTrackingDepth.cpp +++ b/example/tracking/mbtGenericTrackingDepth.cpp @@ -214,7 +214,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co lastFrame = atoi(optarg_); break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -227,7 +227,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -483,7 +483,7 @@ 
int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -825,7 +825,7 @@ int main(int argc, const char **argv) } delete tracker; - tracker = NULL; + tracker = nullptr; return EXIT_SUCCESS; } diff --git a/example/tracking/mbtGenericTrackingDepthOnly.cpp b/example/tracking/mbtGenericTrackingDepthOnly.cpp index 45d5804804..36228e84ca 100644 --- a/example/tracking/mbtGenericTrackingDepthOnly.cpp +++ b/example/tracking/mbtGenericTrackingDepthOnly.cpp @@ -201,7 +201,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; default: @@ -213,7 +213,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -445,7 +445,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -769,7 +769,7 @@ int main(int argc, const char **argv) } delete tracker; - tracker = NULL; + tracker = nullptr; return EXIT_SUCCESS; } catch (const vpException &e) { diff --git a/example/tracking/mbtKltTracking.cpp b/example/tracking/mbtKltTracking.cpp index cc9cc9e2df..8c6ba3f3d7 100644 --- a/example/tracking/mbtKltTracking.cpp +++ b/example/tracking/mbtKltTracking.cpp @@ -190,7 +190,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co computeCovariance = true; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -203,7 +203,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &co if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -258,7 +258,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tracking/templateTracker.cpp b/example/tracking/templateTracker.cpp index 0579e517e2..2fec809a78 100644 --- a/example/tracking/templateTracker.cpp +++ b/example/tracking/templateTracker.cpp @@ -260,7 +260,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all display = false; break; case 'h': - usage(argv[0], NULL, warp_type, tracker_type, last_frame, threshold_residual); + usage(argv[0], nullptr, warp_type, tracker_type, last_frame, threshold_residual); return false; break; case 'i': @@ -296,13 +296,13 @@ bool getOptions(int 
argc, const char **argv, std::string &ipath, bool &click_all } if (warp_type >= WARP_LAST) { - usage(argv[0], NULL, warp_type, tracker_type, last_frame, threshold_residual); + usage(argv[0], nullptr, warp_type, tracker_type, last_frame, threshold_residual); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument -w with \"warp type\"=" << (int)warp_type << std::endl << std::endl; return false; } if (tracker_type >= TRACKER_LAST) { - usage(argv[0], NULL, warp_type, tracker_type, last_frame, threshold_residual); + usage(argv[0], nullptr, warp_type, tracker_type, last_frame, threshold_residual); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument -t with \"tracker type\"=" << (int)tracker_type << std::endl << std::endl; @@ -310,7 +310,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all } if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, warp_type, tracker_type, last_frame, threshold_residual); + usage(argv[0], nullptr, warp_type, tracker_type, last_frame, threshold_residual); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -374,7 +374,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, opt_warp_type, opt_tracker_type, opt_last_frame, opt_threshold_residual); + usage(argv[0], nullptr, opt_warp_type, opt_tracker_type, opt_last_frame, opt_threshold_residual); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -408,7 +408,7 @@ int main(int argc, const char **argv) } reader.acquire(I); - vpDisplay *display = NULL; + vpDisplay *display = nullptr; if (opt_display) { // initialise a display #if defined(VISP_HAVE_X11) @@ -431,7 +431,7 @@ int main(int argc, const char **argv) vpDisplay::flush(I); } - vpTemplateTrackerWarp *warp = NULL; + vpTemplateTrackerWarp *warp = nullptr; switch (opt_warp_type) { case WARP_AFFINE: warp = new vpTemplateTrackerWarpAffine; @@ -457,7 +457,7 @@ int main(int argc, const char **argv) return EXIT_FAILURE; } - vpTemplateTracker *tracker = NULL; + vpTemplateTracker *tracker = nullptr; switch (opt_tracker_type) { case TRACKER_SSD_ESM: tracker = new vpTemplateTrackerSSDESM(warp); diff --git a/example/tracking/trackDot.cpp b/example/tracking/trackDot.cpp index 68223d8613..3e5d59141b 100644 --- a/example/tracking/trackDot.cpp +++ b/example/tracking/trackDot.cpp @@ -188,7 +188,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp step = (unsigned)atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); return false; break; @@ -201,7 +201,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -262,7 +262,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step); + usage(argv[0], nullptr, 
ipath, opt_ppath, opt_first, opt_last, opt_step); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tracking/trackDot2.cpp b/example/tracking/trackDot2.cpp index 086f50ec9b..d5172f9b09 100644 --- a/example/tracking/trackDot2.cpp +++ b/example/tracking/trackDot2.cpp @@ -187,7 +187,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp step = (unsigned)atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); return false; break; @@ -200,7 +200,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -261,7 +261,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step); + usage(argv[0], nullptr, ipath, opt_ppath, opt_first, opt_last, opt_step); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tracking/trackDot2WithAutoDetection.cpp b/example/tracking/trackDot2WithAutoDetection.cpp index ae32188fe4..a36f81e180 100644 --- a/example/tracking/trackDot2WithAutoDetection.cpp +++ b/example/tracking/trackDot2WithAutoDetection.cpp @@ -229,7 +229,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp ellipsoidShapePrecision = atof(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, ppath, first, last, step, sizePrecision, grayLevelPrecision, + usage(argv[0], nullptr, ipath, ppath, first, last, step, sizePrecision, grayLevelPrecision, ellipsoidShapePrecision); return false; break; @@ -244,7 +244,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath, first, last, step, sizePrecision, grayLevelPrecision, + usage(argv[0], nullptr, ipath, ppath, first, last, step, sizePrecision, grayLevelPrecision, ellipsoidShapePrecision); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; @@ -309,7 +309,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step, opt_sizePrecision, + usage(argv[0], nullptr, ipath, opt_ppath, opt_first, opt_last, opt_step, opt_sizePrecision, opt_grayLevelPrecision, opt_ellipsoidShapePrecision); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl diff --git a/example/tracking/trackKltOpencv.cpp b/example/tracking/trackKltOpencv.cpp index 763df34d01..1047b4d5bc 100644 --- a/example/tracking/trackKltOpencv.cpp +++ b/example/tracking/trackKltOpencv.cpp @@ -195,7 +195,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp step = 
(unsigned)atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); return false; break; @@ -208,7 +208,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -269,7 +269,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step); + usage(argv[0], nullptr, ipath, opt_ppath, opt_first, opt_last, opt_step); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tracking/trackMeCircle.cpp b/example/tracking/trackMeCircle.cpp index 753623a141..26e787f8ca 100644 --- a/example/tracking/trackMeCircle.cpp +++ b/example/tracking/trackMeCircle.cpp @@ -150,7 +150,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all ipath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -163,7 +163,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -220,7 +220,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tracking/trackMeEllipse.cpp b/example/tracking/trackMeEllipse.cpp index c3aec3bd83..3df632b198 100644 --- a/example/tracking/trackMeEllipse.cpp +++ b/example/tracking/trackMeEllipse.cpp @@ -263,7 +263,7 @@ bool getOptions(int argc, const char **argv, std::string &video_in_ipath, std::s verbose = true; break; case 'h': - usage(argv[0], NULL, video_in_ipath, video_in_ppath, video_in_first, video_in_last, video_in_step, me_range, me_sample_step, me_threshold); + usage(argv[0], nullptr, video_in_ipath, video_in_ppath, video_in_first, video_in_last, video_in_step, me_range, me_sample_step, me_threshold); return false; break; @@ -276,7 +276,7 @@ bool getOptions(int argc, const char **argv, std::string &video_in_ipath, std::s if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, video_in_ipath, video_in_ppath, video_in_first, video_in_last, video_in_step, me_range, me_sample_step, me_threshold); + usage(argv[0], nullptr, video_in_ipath, video_in_ppath, video_in_first, video_in_last, video_in_step, me_range, me_sample_step, me_threshold); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -313,7 +313,7 @@ int main(int argc, const char **argv) // read on the disk vpImage I; - vpDisplay 
*display = NULL; + vpDisplay *display = nullptr; try { // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH @@ -348,7 +348,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step, opt_me_range, opt_me_sample_step, opt_me_threshold); + usage(argv[0], nullptr, ipath, opt_ppath, opt_first, opt_last, opt_step, opt_me_range, opt_me_sample_step, opt_me_threshold); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -418,7 +418,7 @@ int main(int argc, const char **argv) vpDisplay::flush(I); } - vpVideoWriter *writer = NULL; + vpVideoWriter *writer = nullptr; vpImage O; if (!opt_save.empty()) { writer = new vpVideoWriter(); diff --git a/example/tracking/trackMeLine.cpp b/example/tracking/trackMeLine.cpp index 18a47623d1..e578375201 100644 --- a/example/tracking/trackMeLine.cpp +++ b/example/tracking/trackMeLine.cpp @@ -193,7 +193,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp step = (unsigned)atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); return false; break; @@ -206,7 +206,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath, first, last, step); + usage(argv[0], nullptr, ipath, ppath, first, last, step); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -268,7 +268,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step); + usage(argv[0], nullptr, ipath, opt_ppath, opt_first, opt_last, opt_step); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/tracking/trackMeNurbs.cpp b/example/tracking/trackMeNurbs.cpp index 3987e64b4d..9b0efefe0b 100644 --- a/example/tracking/trackMeNurbs.cpp +++ b/example/tracking/trackMeNurbs.cpp @@ -153,7 +153,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all ipath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -166,7 +166,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -221,7 +221,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/video/imageSequenceReader.cpp 
b/example/video/imageSequenceReader.cpp index eb0364a617..c697273777 100644 --- a/example/video/imageSequenceReader.cpp +++ b/example/video/imageSequenceReader.cpp @@ -162,7 +162,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp first = atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, ppath); + usage(argv[0], nullptr, ipath, ppath); return false; break; @@ -175,7 +175,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath); + usage(argv[0], nullptr, ipath, ppath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -239,7 +239,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath); + usage(argv[0], nullptr, ipath, opt_ppath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/video/videoReader.cpp b/example/video/videoReader.cpp index 9b64d22d65..9b156b9983 100644 --- a/example/video/videoReader.cpp +++ b/example/video/videoReader.cpp @@ -149,7 +149,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp ppath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, ppath); + usage(argv[0], nullptr, ipath, ppath); return false; break; @@ -162,7 +162,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath); + usage(argv[0], nullptr, ipath, ppath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -219,7 +219,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath); + usage(argv[0], nullptr, ipath, opt_ppath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/example/wireframe-simulator/wireframeSimulator.cpp b/example/wireframe-simulator/wireframeSimulator.cpp index cdaacf1a80..8638b2061c 100644 --- a/example/wireframe-simulator/wireframeSimulator.cpp +++ b/example/wireframe-simulator/wireframeSimulator.cpp @@ -122,7 +122,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &click) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -135,7 +135,7 @@ bool getOptions(int argc, const char **argv, bool &display, bool &click) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/ar/include/visp3/ar/vpAROgre.h b/modules/ar/include/visp3/ar/vpAROgre.h index 8745cfc556..038db61e0d 100644 --- a/modules/ar/include/visp3/ar/vpAROgre.h +++ b/modules/ar/include/visp3/ar/vpAROgre.h @@ -280,7 +280,7 @@ class VISP_EXPORT vpAROgre : public Ogre::FrameListener, */ inline void setWindowPosition(unsigned int win_x, 
unsigned int win_y) { - if (mWindow == NULL) { + if (mWindow == nullptr) { throw vpException(vpException::notInitialized, "Window not initialised, cannot set its position"); } mWindow->reposition(static_cast(win_x), static_cast(win_y)); diff --git a/modules/ar/include/visp3/ar/vpSimulator.h b/modules/ar/include/visp3/ar/vpSimulator.h index a81d12ca8b..512ad4e019 100644 --- a/modules/ar/include/visp3/ar/vpSimulator.h +++ b/modules/ar/include/visp3/ar/vpSimulator.h @@ -271,7 +271,7 @@ class VISP_EXPORT vpSimulator protected: SbTime *realtime; SoOffscreenRenderer *offScreenRenderer; - void offScreenRendering(vpSimulatorViewType view = vpSimulator::EXTERNAL, int *width = NULL, int *height = NULL); + void offScreenRendering(vpSimulatorViewType view = vpSimulator::EXTERNAL, int *width = nullptr, int *height = nullptr); public: //! image of the internal view diff --git a/modules/ar/src/coin-simulator/vpAR.cpp b/modules/ar/src/coin-simulator/vpAR.cpp index 450a564666..42c8dc35fc 100644 --- a/modules/ar/src/coin-simulator/vpAR.cpp +++ b/modules/ar/src/coin-simulator/vpAR.cpp @@ -85,9 +85,9 @@ void vpAR::initInternalViewer(unsigned int width, unsigned int height, vpImageTy // no image is loaded background = false; - if (image_background != NULL) { + if (image_background != nullptr) { free(image_background); - image_background = NULL; + image_background = nullptr; } typeImage = type; diff --git a/modules/ar/src/coin-simulator/vpSimulator.cpp b/modules/ar/src/coin-simulator/vpSimulator.cpp index 13d6b9b3cd..16598e938f 100644 --- a/modules/ar/src/coin-simulator/vpSimulator.cpp +++ b/modules/ar/src/coin-simulator/vpSimulator.cpp @@ -272,53 +272,53 @@ void vpSimulator::init() external_height = 200; mainWindowInitialized = false; - internalView = NULL; - externalView = NULL; - image_background = NULL; + internalView = nullptr; + externalView = nullptr; + image_background = nullptr; zoomFactor = 1; cameraPositionInitialized = false; // write image process - realtime = NULL; - offScreenRenderer = NULL; - bufferView = NULL; + realtime = nullptr; + offScreenRenderer = nullptr; + bufferView = nullptr; get = 1; typeImage = grayImage; - mainThread = NULL; - scene = NULL; - internalRoot = NULL; - externalRoot = NULL; - internalCamera = NULL; - externalCamera = NULL; - internalCameraPosition = NULL; - extrenalCameraPosition = NULL; - internalCameraObject = NULL; + mainThread = nullptr; + scene = nullptr; + internalRoot = nullptr; + externalRoot = nullptr; + internalCamera = nullptr; + externalCamera = nullptr; + internalCameraPosition = nullptr; + extrenalCameraPosition = nullptr; + internalCameraObject = nullptr; #if defined(VISP_HAVE_SOWIN) // mainWindow = ?; #elif defined(VISP_HAVE_SOQT) - mainWindow = NULL; + mainWindow = nullptr; #elif defined(VISP_HAVE_SOXT) // mainWindow = ?; #endif } void vpSimulator::kill() { - if (internalView != NULL) { + if (internalView != nullptr) { delete internalView; - internalView = NULL; + internalView = nullptr; } - if (externalView != NULL) { + if (externalView != nullptr) { delete externalView; - externalView = NULL; + externalView = nullptr; } - if (bufferView != NULL) { + if (bufferView != nullptr) { delete[] bufferView; - bufferView = NULL; + bufferView = nullptr; } - if (image_background != NULL) { + if (image_background != nullptr) { free(image_background); - image_background = NULL; + image_background = nullptr; } } @@ -327,16 +327,16 @@ vpSimulator::vpSimulator() #if defined(VISP_HAVE_SOWIN) mainWindow(), #elif defined(VISP_HAVE_SOQT) - mainWindow(NULL), + 
mainWindow(nullptr), #elif defined(VISP_HAVE_SOXT) mainWindow(), #endif - mainWindowInitialized(false), typeImage(vpSimulator::grayImage), image_background(NULL), internalView(NULL), - externalView(NULL), mainThread(NULL), internal_width(0), internal_height(0), external_width(0), external_height(0), - scene(NULL), internalRoot(NULL), externalRoot(NULL), internalCamera(NULL), externalCamera(NULL), - internalCameraPosition(NULL), extrenalCameraPosition(NULL), internalCameraObject(NULL), zoomFactor(0.), - cameraPositionInitialized(false), cMf(), internalCameraParameters(), externalCameraParameters(), realtime(NULL), - offScreenRenderer(NULL), bufferView(NULL), get(0) + mainWindowInitialized(false), typeImage(vpSimulator::grayImage), image_background(nullptr), internalView(nullptr), + externalView(nullptr), mainThread(nullptr), internal_width(0), internal_height(0), external_width(0), external_height(0), + scene(nullptr), internalRoot(nullptr), externalRoot(nullptr), internalCamera(nullptr), externalCamera(nullptr), + internalCameraPosition(nullptr), extrenalCameraPosition(nullptr), internalCameraObject(nullptr), zoomFactor(0.), + cameraPositionInitialized(false), cMf(), internalCameraParameters(), externalCameraParameters(), realtime(nullptr), + offScreenRenderer(nullptr), bufferView(nullptr), get(0) { vpSimulator::init(); } @@ -391,7 +391,7 @@ void vpSimulator::initSceneGraph() this->externalRoot->addChild(cube); - if (realtime == NULL) { + if (realtime == nullptr) { SoDB::enableRealTimeSensor(FALSE); SoSceneManager::enableRealTimeUpdate(FALSE); @@ -618,12 +618,12 @@ void vpSimulator::redraw() // if (this->cameraPositionInitialized==true) { - if (this->externalView != NULL) { + if (this->externalView != nullptr) { this->externalView->render(); // call actualRedraw() // vpHomogeneousMatrix c ; // getExternalCameraPosition(c) ; } - if (this->internalView != NULL) { + if (this->internalView != nullptr) { this->moveInternalCamera(this->cMf); this->internalView->render(); // call actualRedraw() } @@ -668,7 +668,7 @@ void vpSimulator::load(const char *file_name) SoSeparator *newscene = SoDB::readAll(&input); newscene->ref(); - if (newscene == NULL) { + if (newscene == nullptr) { vpERROR_TRACE("Error while reading %s", file_name); } @@ -744,7 +744,7 @@ void vpSimulator::load(const char *iv_filename, const vpHomogeneousMatrix &fMo) } newObject = SoDB::readAll(&in); - if (NULL == newObject) { + if (nullptr == newObject) { vpERROR_TRACE("Problem reading data for file <%s>.", iv_filename); } @@ -821,7 +821,7 @@ void vpSimulator::addObject(SoSeparator *object, const vpHomogeneousMatrix &fMo, //! init the main program thread void vpSimulator::initApplication(void *(*start_routine)(void *)) { - // pthread_create (&mainThread, NULL, start_routine, (void *)this); + // pthread_create (&mainThread, nullptr, start_routine, (void *)this); mainThread = SbThread::create(start_routine, (void *)this); } @@ -843,8 +843,8 @@ void vpSimulator::initApplication(void *(*start_routine)(void *), void *data) //! should be locate at the beginning of the main program void vpSimulator::initMainApplication() { - // pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL ); - // pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + // pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, nullptr ); + // pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, nullptr); vpTime::wait(1000); } //! 
performed some thread destruction in the main program thread @@ -852,7 +852,7 @@ void vpSimulator::initMainApplication() void vpSimulator::closeMainApplication() { vpViewer::exitMainLoop(); - // pthread_exit (NULL); + // pthread_exit (nullptr); } /* Initialise le SoOffScreenRenderer si necessaire, puis realise le rendu. @@ -883,7 +883,7 @@ void vpSimulator::offScreenRendering(vpSimulatorViewType view, int *width, int * SbViewportRegion myViewPort(size); // Creation du rendu si necessaire. - if (NULL == this->offScreenRenderer) { + if (nullptr == this->offScreenRenderer) { // Init du SoOffscreenRenderer this->offScreenRenderer = new SoOffscreenRenderer(myViewPort); } else { @@ -895,7 +895,7 @@ void vpSimulator::offScreenRendering(vpSimulatorViewType view, int *width, int * if (!this->offScreenRenderer->render(thisroot)) { vpERROR_TRACE("La scene n'a pas pu etre rendue offscreen."); delete this->offScreenRenderer; - this->offScreenRenderer = NULL; + this->offScreenRenderer = nullptr; } else { /* @@ -913,10 +913,10 @@ void vpSimulator::offScreenRendering(vpSimulatorViewType view, int *width, int * } // exit(1) ; - if (NULL != width) { + if (nullptr != width) { *width = size[0]; } - if (NULL != height) { + if (nullptr != height) { *height = size[1]; } } diff --git a/modules/ar/src/coin-simulator/vpViewer.cpp b/modules/ar/src/coin-simulator/vpViewer.cpp index d6f5dfe6cf..084f4836bd 100644 --- a/modules/ar/src/coin-simulator/vpViewer.cpp +++ b/modules/ar/src/coin-simulator/vpViewer.cpp @@ -50,13 +50,13 @@ #if defined(VISP_HAVE_SOWIN) vpViewer::vpViewer(HWND parent, vpSimulator *_simu, vpViewerType type) - : SoWinExaminerViewer(parent, (char *)NULL, false), viewerType(type), simu(_simu) + : SoWinExaminerViewer(parent, (char *)nullptr, false), viewerType(type), simu(_simu) #elif defined(VISP_HAVE_SOQT) vpViewer::vpViewer(QWidget *parent, vpSimulator *_simu, vpViewerType type) - : SoQtExaminerViewer(parent, (char *)NULL, false), viewerType(type), simu(_simu) + : SoQtExaminerViewer(parent, (char *)nullptr, false), viewerType(type), simu(_simu) #elif defined(VISP_HAVE_SOXT) vpViewer::vpViewer(Widget parent, vpSimulator *_simu, vpViewerType type) - : SoXtExaminerViewer(parent, (char *)NULL, false), viewerType(type), simu(_simu) + : SoXtExaminerViewer(parent, (char *)nullptr, false), viewerType(type), simu(_simu) #endif { // Coin should not clear the pixel-buffer, so the background image @@ -84,7 +84,7 @@ void vpViewer::actualRedraw(void) // this should be used only with the vpAR:vpSimulator // to diplay an image background - if (simu->image_background != NULL) { + if (simu->image_background != nullptr) { glPixelStorei(GL_UNPACK_ALIGNMENT, 1); if (simu->typeImage == vpSimulator::grayImage) glDrawPixels((GLsizei)simu->getInternalWidth(), (GLsizei)simu->getInternalHeight(), (GLenum)GL_LUMINANCE, diff --git a/modules/ar/src/ogre-simulator/vpAROgre.cpp b/modules/ar/src/ogre-simulator/vpAROgre.cpp index bbd4cf952c..aa4dcf39b5 100644 --- a/modules/ar/src/ogre-simulator/vpAROgre.cpp +++ b/modules/ar/src/ogre-simulator/vpAROgre.cpp @@ -78,7 +78,7 @@ vpAROgre::vpAROgre(const vpCameraParameters &cam, unsigned int width, unsigned i mInputManager(0), mKeyboard(0), #endif keepOn(true), // When created no reason to stop displaying - mImageRGBA(), mImage(), mPixelBuffer(), mBackground(NULL), mBackgroundHeight(0), mBackgroundWidth(0), + mImageRGBA(), mImage(), mPixelBuffer(), mBackground(nullptr), mBackgroundHeight(0), mBackgroundWidth(0), mWindowHeight(height), mWindowWidth(width), windowHidden(false), 
mNearClipping(0.001), mFarClipping(200), mcam(cam), mshowConfigDialog(true), mOptionalResourceLocation() { @@ -246,7 +246,7 @@ void vpAROgre::init(bool } std::cout << "######################### Load plugin file: " << pluginFile << std::endl; - if (Ogre::Root::getSingletonPtr() == NULL) { + if (Ogre::Root::getSingletonPtr() == nullptr) { mRoot = new Ogre::Root(pluginFile, "ogre.cfg", "Ogre.log"); } else { mRoot = Ogre::Root::getSingletonPtr(); diff --git a/modules/core/include/visp3/core/vpArray2D.h b/modules/core/include/visp3/core/vpArray2D.h index 595ade35ca..98192e58d3 100644 --- a/modules/core/include/visp3/core/vpArray2D.h +++ b/modules/core/include/visp3/core/vpArray2D.h @@ -142,7 +142,7 @@ template class vpArray2D Basic constructor of a 2D array. Number of columns and rows are set to zero. */ - vpArray2D() : rowNum(0), colNum(0), rowPtrs(NULL), dsize(0), data(NULL) { } + vpArray2D() : rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0), data(nullptr) { } /*! Copy constructor of a 2D array. @@ -190,9 +190,9 @@ template class vpArray2D A.rowNum = 0; A.colNum = 0; - A.rowPtrs = NULL; + A.rowPtrs = nullptr; A.dsize = 0; - A.data = NULL; + A.data = nullptr; } explicit vpArray2D(const std::initializer_list &list) : vpArray2D() @@ -202,7 +202,7 @@ template class vpArray2D } explicit vpArray2D(unsigned int nrows, unsigned int ncols, const std::initializer_list &list) - : rowNum(0), colNum(0), rowPtrs(NULL), dsize(0), data(NULL) + : rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0), data(nullptr) { if (nrows * ncols != static_cast(list.size())) { std::ostringstream oss; @@ -235,14 +235,14 @@ template class vpArray2D */ virtual ~vpArray2D() { - if (data != NULL) { + if (data != nullptr) { free(data); - data = NULL; + data = nullptr; } - if (rowPtrs != NULL) { + if (rowPtrs != nullptr) { free(rowPtrs); - rowPtrs = NULL; + rowPtrs = nullptr; } rowNum = colNum = dsize = 0; } @@ -282,19 +282,19 @@ template class vpArray2D void resize(unsigned int nrows, unsigned int ncols, bool flagNullify = true, bool recopy_ = true) { if ((nrows == rowNum) && (ncols == colNum)) { - if (flagNullify && this->data != NULL) { + if (flagNullify && this->data != nullptr) { memset(this->data, 0, this->dsize * sizeof(Type)); } } else { bool recopy = !flagNullify && recopy_; // priority to flagNullify const bool recopyNeeded = (ncols != this->colNum && this->colNum > 0 && ncols > 0 && (!flagNullify || recopy)); - Type *copyTmp = NULL; + Type *copyTmp = nullptr; unsigned int rowTmp = 0, colTmp = 0; // Recopy case per case is required if number of cols has changed; // structure of Type array is not the same in this case. 
- if (recopyNeeded && this->data != NULL) { + if (recopyNeeded && this->data != nullptr) { copyTmp = new Type[this->dsize]; memcpy(copyTmp, this->data, sizeof(Type) * this->dsize); rowTmp = this->rowNum; @@ -304,16 +304,16 @@ template class vpArray2D // Reallocation of this->data array this->dsize = nrows * ncols; this->data = (Type *)realloc(this->data, this->dsize * sizeof(Type)); - if ((NULL == this->data) && (0 != this->dsize)) { - if (copyTmp != NULL) { + if ((nullptr == this->data) && (0 != this->dsize)) { + if (copyTmp != nullptr) { delete[] copyTmp; } throw(vpException(vpException::memoryAllocationError, "Memory allocation error when allocating 2D array data")); } this->rowPtrs = (Type **)realloc(this->rowPtrs, nrows * sizeof(Type *)); - if ((NULL == this->rowPtrs) && (0 != this->dsize)) { - if (copyTmp != NULL) { + if ((nullptr == this->rowPtrs) && (0 != this->dsize)) { + if (copyTmp != nullptr) { delete[] copyTmp; } throw(vpException(vpException::memoryAllocationError, @@ -335,7 +335,7 @@ template class vpArray2D if (flagNullify) { memset(this->data, 0, (size_t)(this->dsize) * sizeof(Type)); } - else if (recopyNeeded && this->rowPtrs != NULL) { + else if (recopyNeeded && this->rowPtrs != nullptr) { // Recopy... unsigned int minRow = (this->rowNum < rowTmp) ? this->rowNum : rowTmp; unsigned int minCol = (this->colNum < colTmp) ? this->colNum : colTmp; @@ -351,7 +351,7 @@ template class vpArray2D } } - if (copyTmp != NULL) { + if (copyTmp != nullptr) { delete[] copyTmp; } } @@ -394,10 +394,10 @@ template class vpArray2D void insert(const vpArray2D &A, unsigned int r, unsigned int c) { if ((r + A.getRows()) <= rowNum && (c + A.getCols()) <= colNum) { - if (A.colNum == colNum && data != NULL && A.data != NULL && A.data != data) { + if (A.colNum == colNum && data != nullptr && A.data != nullptr && A.data != data) { memcpy(data + r * colNum, A.data, sizeof(Type) * A.size()); } - else if (data != NULL && A.data != NULL && A.data != data) { + else if (data != nullptr && A.data != nullptr && A.data != data) { for (unsigned int i = r; i < (r + A.getRows()); i++) { memcpy(data + i * colNum + c, A.data + (i - r) * A.colNum, sizeof(Type) * A.colNum); } @@ -431,7 +431,7 @@ template class vpArray2D vpArray2D &operator=(const vpArray2D &A) { resize(A.rowNum, A.colNum, false, false); - if (data != NULL && A.data != NULL && data != A.data) { + if (data != nullptr && A.data != nullptr && data != A.data) { memcpy(data, A.data, (size_t)rowNum * (size_t)colNum * sizeof(Type)); } return *this; @@ -451,9 +451,9 @@ template class vpArray2D other.rowNum = 0; other.colNum = 0; - other.rowPtrs = NULL; + other.rowPtrs = nullptr; other.dsize = 0; - other.data = NULL; + other.data = nullptr; } return *this; @@ -503,7 +503,7 @@ template class vpArray2D */ friend std::ostream &operator<<(std::ostream &s, const vpArray2D &A) { - if (A.data == NULL || A.size() == 0) { + if (A.data == nullptr || A.size() == 0) { return s; } std::ios_base::fmtflags original_flags = s.flags(); @@ -554,7 +554,7 @@ template class vpArray2D \sa save() */ - static bool load(const std::string &filename, vpArray2D &A, bool binary = false, char *header = NULL) + static bool load(const std::string &filename, vpArray2D &A, bool binary = false, char *header = nullptr) { std::fstream file; @@ -595,7 +595,7 @@ template class vpArray2D } } while (!headerIsDecoded); - if (header != NULL) { + if (header != nullptr) { #if defined(__MINGW32__) || \ !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__))) 
// UNIX snprintf(header, h.size() + 1, "%s", h.c_str()); @@ -630,7 +630,7 @@ template class vpArray2D file.read(&c, 1); h += c; } - if (header != NULL) { + if (header != nullptr) { #if defined(__MINGW32__) || \ !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__))) // UNIX snprintf(header, h.size() + 1, "%s", h.c_str()); @@ -668,7 +668,7 @@ template class vpArray2D \sa saveYAML() */ - static bool loadYAML(const std::string &filename, vpArray2D &A, char *header = NULL) + static bool loadYAML(const std::string &filename, vpArray2D &A, char *header = nullptr) { std::fstream file; @@ -726,7 +726,7 @@ template class vpArray2D } } - if (header != NULL) { + if (header != nullptr) { std::string h_ = h.substr(0, h.size() - 1); // Remove last '\n' char #if defined(__MINGW32__) || \ !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__))) // UNIX diff --git a/modules/core/include/visp3/core/vpColVector.h b/modules/core/include/visp3/core/vpColVector.h index 6f0bc5ef96..56111c54f3 100644 --- a/modules/core/include/visp3/core/vpColVector.h +++ b/modules/core/include/visp3/core/vpColVector.h @@ -256,14 +256,14 @@ class VISP_EXPORT vpColVector : public vpArray2D */ void clear() { - if (data != NULL) { + if (data != nullptr) { free(data); - data = NULL; + data = nullptr; } - if (rowPtrs != NULL) { + if (rowPtrs != nullptr) { free(rowPtrs); - rowPtrs = NULL; + rowPtrs = nullptr; } rowNum = colNum = dsize = 0; } diff --git a/modules/core/include/visp3/core/vpDebug.h b/modules/core/include/visp3/core/vpDebug.h index abce2f9162..04c99805a7 100644 --- a/modules/core/include/visp3/core/vpDebug.h +++ b/modules/core/include/visp3/core/vpDebug.h @@ -152,7 +152,7 @@ class vpTraceOutput \note Call the constructor with something like vpTraceOutput(__FILE__,__LINE__, __FUNCTION__). */ - vpTraceOutput(const char *file, int line, const char *func, bool error = false, const char *s = NULL) + vpTraceOutput(const char *file, int line, const char *func, bool error = false, const char *s = nullptr) : currentFile(file), currentFunc(func), currentLine(line), err(error), header(s) { } @@ -216,7 +216,7 @@ class vpTraceOutput // if we want to write to std::cerr/stderr if (err) { // first writes the header if there is one - if (header != NULL) + if (header != nullptr) std::cerr << header; // then writes the recorded namefile, function and line std::cerr << "!!\t" << currentFile << ": " << currentFunc << "(#" << currentLine << ") : "; @@ -227,7 +227,7 @@ class vpTraceOutput fflush(stderr); } else { // first writes the header if there is one - if (header != NULL) + if (header != nullptr) std::cout << header; // then writes the recorded namefile, function and line std::cout << currentFile << ": " << currentFunc << "(#" << currentLine << ") : "; diff --git a/modules/core/include/visp3/core/vpException.h b/modules/core/include/visp3/core/vpException.h index 6dffe6105e..917471c571 100644 --- a/modules/core/include/visp3/core/vpException.h +++ b/modules/core/include/visp3/core/vpException.h @@ -119,7 +119,7 @@ class VISP_EXPORT vpException : public std::exception /*! * Send a pointer on the array of \e char related to the error string. - * Cannot be \e NULL. + * Cannot be \e nullptr. 
*/ const char *getMessage() const; diff --git a/modules/core/include/visp3/core/vpImage.h b/modules/core/include/visp3/core/vpImage.h index 76dc22b44c..ff73fe7eca 100644 --- a/modules/core/include/visp3/core/vpImage.h +++ b/modules/core/include/visp3/core/vpImage.h @@ -190,7 +190,7 @@ template class vpImage // Look for the minumum and the maximum value within the bitmap void getMinMaxValue(Type &min, Type &max, bool onlyFiniteVal = true) const; // Look for the minumum and the maximum value within the bitmap and get their location - void getMinMaxLoc(vpImagePoint *minLoc, vpImagePoint *maxLoc, Type *minVal = NULL, Type *maxVal = NULL) const; + void getMinMaxLoc(vpImagePoint *minLoc, vpImagePoint *maxLoc, Type *minVal = nullptr, Type *maxVal = nullptr) const; /*! Get the image number of pixels which corresponds to the image @@ -352,7 +352,7 @@ template class vpImage template std::ostream &operator<<(std::ostream &s, const vpImage &I) { - if (I.bitmap == NULL) { + if (I.bitmap == nullptr) { return s; } @@ -375,7 +375,7 @@ template std::ostream &operator<<(std::ostream &s, const vpImage &I) { - if (I.bitmap == NULL) { + if (I.bitmap == nullptr) { return s; } @@ -401,7 +401,7 @@ inline std::ostream &operator<<(std::ostream &s, const vpImage &I inline std::ostream &operator<<(std::ostream &s, const vpImage &I) { - if (I.bitmap == NULL) { + if (I.bitmap == nullptr) { return s; } @@ -427,7 +427,7 @@ inline std::ostream &operator<<(std::ostream &s, const vpImage &I) inline std::ostream &operator<<(std::ostream &s, const vpImage &I) { - if (I.bitmap == NULL) { + if (I.bitmap == nullptr) { return s; } @@ -454,7 +454,7 @@ inline std::ostream &operator<<(std::ostream &s, const vpImage &I) inline std::ostream &operator<<(std::ostream &s, const vpImage &I) { - if (I.bitmap == NULL) { + if (I.bitmap == nullptr) { return s; } @@ -490,7 +490,7 @@ struct vpImageLut_Param_t unsigned char m_lut[256]; unsigned char *m_bitmap; - vpImageLut_Param_t() : m_start_index(0), m_end_index(0), m_lut(), m_bitmap(NULL) { } + vpImageLut_Param_t() : m_start_index(0), m_end_index(0), m_lut(), m_bitmap(nullptr) { } vpImageLut_Param_t(unsigned int start_index, unsigned int end_index, unsigned char *bitmap) : m_start_index(start_index), m_end_index(end_index), m_lut(), m_bitmap(bitmap) @@ -558,7 +558,7 @@ struct vpImageLutRGBa_Param_t vpRGBa m_lut[256]; unsigned char *m_bitmap; - vpImageLutRGBa_Param_t() : m_start_index(0), m_end_index(0), m_lut(), m_bitmap(NULL) { } + vpImageLutRGBa_Param_t() : m_start_index(0), m_end_index(0), m_lut(), m_bitmap(nullptr) { } vpImageLutRGBa_Param_t(unsigned int start_index, unsigned int end_index, unsigned char *bitmap) : m_start_index(start_index), m_end_index(end_index), m_lut(), m_bitmap(bitmap) @@ -637,20 +637,20 @@ template void vpImage::init(unsigned int h, unsigned int w, T template void vpImage::init(unsigned int h, unsigned int w) { if (h != this->height) { - if (row != NULL) { + if (row != nullptr) { vpDEBUG_TRACE(10, "Destruction row[]"); delete[] row; - row = NULL; + row = nullptr; } } if ((h != this->height) || (w != this->width)) { - if (bitmap != NULL) { + if (bitmap != nullptr) { vpDEBUG_TRACE(10, "Destruction bitmap[]"); if (hasOwnership) { delete[] bitmap; } - bitmap = NULL; + bitmap = nullptr; } } @@ -659,18 +659,18 @@ template void vpImage::init(unsigned int h, unsigned int w) npixels = width * height; - if (bitmap == NULL) { + if (bitmap == nullptr) { bitmap = new Type[npixels]; hasOwnership = true; } - if (bitmap == NULL) { + if (bitmap == nullptr) { 
throw(vpException(vpException::memoryAllocationError, "cannot allocate bitmap ")); } - if (row == NULL) + if (row == nullptr) row = new Type *[height]; - if (row == NULL) { + if (row == nullptr) { throw(vpException(vpException::memoryAllocationError, "cannot allocate row ")); } @@ -684,19 +684,19 @@ template void vpImage::init(unsigned int h, unsigned int w) template void vpImage::init(Type *const array, unsigned int h, unsigned int w, bool copyData) { if (h != this->height) { - if (row != NULL) { + if (row != nullptr) { delete[] row; - row = NULL; + row = nullptr; } } // Delete bitmap if copyData==false, otherwise only if the dimension differs if ((copyData && ((h != this->height) || (w != this->width))) || !copyData) { - if (bitmap != NULL) { + if (bitmap != nullptr) { if (hasOwnership) { delete[] bitmap; } - bitmap = NULL; + bitmap = nullptr; } } @@ -707,10 +707,10 @@ template void vpImage::init(Type *const array, unsigned int h npixels = width * height; if (copyData) { - if (bitmap == NULL) + if (bitmap == nullptr) bitmap = new Type[npixels]; - if (bitmap == NULL) { + if (bitmap == nullptr) { throw(vpException(vpException::memoryAllocationError, "cannot allocate bitmap ")); } @@ -722,9 +722,9 @@ template void vpImage::init(Type *const array, unsigned int h bitmap = array; } - if (row == NULL) + if (row == nullptr) row = new Type *[height]; - if (row == NULL) { + if (row == nullptr) { throw(vpException(vpException::memoryAllocationError, "cannot allocate row ")); } @@ -738,7 +738,7 @@ template void vpImage::init(Type *const array, unsigned int h */ template vpImage::vpImage(unsigned int h, unsigned int w) - : bitmap(NULL), display(NULL), npixels(0), width(0), height(0), row(NULL), hasOwnership(true) + : bitmap(nullptr), display(nullptr), npixels(0), width(0), height(0), row(nullptr), hasOwnership(true) { init(h, w, 0); } @@ -748,7 +748,7 @@ vpImage::vpImage(unsigned int h, unsigned int w) */ template vpImage::vpImage(unsigned int h, unsigned int w, Type value) - : bitmap(NULL), display(NULL), npixels(0), width(0), height(0), row(NULL), hasOwnership(true) + : bitmap(nullptr), display(nullptr), npixels(0), width(0), height(0), row(nullptr), hasOwnership(true) { init(h, w, value); } @@ -758,7 +758,7 @@ vpImage::vpImage(unsigned int h, unsigned int w, Type value) */ template vpImage::vpImage(Type *const array, unsigned int h, unsigned int w, bool copyData) - : bitmap(NULL), display(NULL), npixels(0), width(0), height(0), row(NULL), hasOwnership(true) + : bitmap(nullptr), display(nullptr), npixels(0), width(0), height(0), row(nullptr), hasOwnership(true) { init(array, h, w, copyData); } @@ -767,7 +767,7 @@ vpImage::vpImage(Type *const array, unsigned int h, unsigned int w, bool c \relates vpImage */ template -vpImage::vpImage() : bitmap(NULL), display(NULL), npixels(0), width(0), height(0), row(NULL), hasOwnership(true) +vpImage::vpImage() : bitmap(nullptr), display(nullptr), npixels(0), width(0), height(0), row(nullptr), hasOwnership(true) { } /*! 
@@ -823,20 +823,20 @@ template void vpImage::destroy() { // vpERROR_TRACE("Deallocate "); - if (bitmap != NULL) { + if (bitmap != nullptr) { // vpERROR_TRACE("Deallocate bitmap memory %p",bitmap); // vpDEBUG_TRACE(20,"Deallocate bitmap memory %p",bitmap); if (hasOwnership) { delete[] bitmap; } - bitmap = NULL; + bitmap = nullptr; } - if (row != NULL) { + if (row != nullptr) { // vpERROR_TRACE("Deallocate row memory %p",row); // vpDEBUG_TRACE(20,"Deallocate row memory %p",row); delete[] row; - row = NULL; + row = nullptr; } } @@ -853,7 +853,7 @@ template vpImage::~vpImage() { destroy(); } */ template vpImage::vpImage(const vpImage &I) - : bitmap(NULL), display(NULL), npixels(0), width(0), height(0), row(NULL), hasOwnership(true) + : bitmap(nullptr), display(nullptr), npixels(0), width(0), height(0), row(nullptr), hasOwnership(true) { resize(I.getHeight(), I.getWidth()); memcpy(static_cast(bitmap), static_cast(I.bitmap), I.npixels * sizeof(Type)); @@ -867,12 +867,12 @@ vpImage::vpImage(vpImage &&I) : bitmap(I.bitmap), display(I.display), npixels(I.npixels), width(I.width), height(I.height), row(I.row), hasOwnership(I.hasOwnership) { - I.bitmap = NULL; - I.display = NULL; + I.bitmap = nullptr; + I.display = nullptr; I.npixels = 0; I.width = 0; I.height = 0; - I.row = NULL; + I.row = nullptr; I.hasOwnership = false; } @@ -1203,13 +1203,13 @@ template <> inline void vpImage::getMinMaxValue(vpRGBf &min, vpRGBf &max //[...] Fill I vpImagePoint min_loc; double min_val = 0.0; - I.getMinMaxLoc(&min_loc, NULL, &min_val, NULL); + I.getMinMaxLoc(&min_loc, nullptr, &min_val, nullptr); \endcode - \param minLoc : Position of the pixel with minimum value if not NULL. - \param maxLoc : Position of the pixel with maximum value if not NULL. - \param minVal : Minimum pixel value if not NULL. - \param maxVal : Maximum pixel value if not NULL. + \param minLoc : Position of the pixel with minimum value if not nullptr. + \param maxLoc : Position of the pixel with maximum value if not nullptr. + \param minVal : Minimum pixel value if not nullptr. + \param maxVal : Maximum pixel value if not nullptr. 
\sa getMaxValue() \sa getMinValue() @@ -1238,16 +1238,16 @@ void vpImage::getMinMaxLoc(vpImagePoint *minLoc, vpImagePoint *maxLoc, Typ } } - if (minLoc != NULL) + if (minLoc != nullptr) *minLoc = minLoc_; - if (maxLoc != NULL) + if (maxLoc != nullptr) *maxLoc = maxLoc_; - if (minVal != NULL) + if (minVal != nullptr) *minVal = min; - if (maxVal != NULL) + if (maxVal != nullptr) *maxVal = max; } @@ -1261,7 +1261,7 @@ template vpImage &vpImage::operator=(vpImage othe // vpImage I2(480, 640); // vpDisplayX d(I2); // I2 = I1; //copy only the data - if (other.display != NULL) + if (other.display != nullptr) display = other.display; return *this; diff --git a/modules/core/include/visp3/core/vpImageConvert.h b/modules/core/include/visp3/core/vpImageConvert.h index cb045254b3..1e566d531a 100644 --- a/modules/core/include/visp3/core/vpImageConvert.h +++ b/modules/core/include/visp3/core/vpImageConvert.h @@ -135,7 +135,7 @@ class VISP_EXPORT vpImageConvert #endif static void split(const vpImage &src, vpImage *pR, vpImage *pG, - vpImage *pB, vpImage *pa = NULL); + vpImage *pB, vpImage *pa = nullptr); static void merge(const vpImage *R, const vpImage *G, const vpImage *B, const vpImage *a, vpImage &RGBa); diff --git a/modules/core/include/visp3/core/vpImageTools.h b/modules/core/include/visp3/core/vpImageTools.h index a9a12d081a..068795c12b 100644 --- a/modules/core/include/visp3/core/vpImageTools.h +++ b/modules/core/include/visp3/core/vpImageTools.h @@ -525,7 +525,7 @@ template class vpUndistortInternalType unsigned int threadid; public: - vpUndistortInternalType() : src(NULL), dst(NULL), width(0), height(0), cam(), nthreads(0), threadid(0) {} + vpUndistortInternalType() : src(nullptr), dst(nullptr), width(0), height(0), cam(), nthreads(0), threadid(0) {} vpUndistortInternalType(const vpUndistortInternalType &u) { *this = u; } vpUndistortInternalType &operator=(const vpUndistortInternalType &u) @@ -609,7 +609,7 @@ template void *vpUndistortInternalType::vpUndistort_threaded( } pthread_exit((void *)0); - return NULL; + return nullptr; } #endif // DOXYGEN_SHOULD_SKIP_THIS #endif // VISP_HAVE_PTHREAD @@ -685,7 +685,7 @@ void vpImageTools::undistort(const vpImage &I, const vpCameraParameters &c for (unsigned int i = 0; i < nthreads; i++) { // vpTRACE("join thread %d", i); - pthread_join(callThd[i], NULL); + pthread_join(callThd[i], nullptr); } delete[] callThd; diff --git a/modules/core/include/visp3/core/vpList.h b/modules/core/include/visp3/core/vpList.h index f1cd034b63..15edf87bd0 100644 --- a/modules/core/include/visp3/core/vpList.h +++ b/modules/core/include/visp3/core/vpList.h @@ -55,7 +55,7 @@ template class vpListElement { // private: // vpListElement(const vpListElement &) - // : prev(NULL), next(NULL), val() + // : prev(nullptr), next(nullptr), val() // { // throw vpException(vpException::functionNotImplementedError,"Not // implemented!"); @@ -66,7 +66,7 @@ template class vpListElement // } public: - vpListElement() : prev(NULL), next(NULL), val(){}; + vpListElement() : prev(nullptr), next(nullptr), val(){}; vpListElement *prev; ///! pointer to the previous element in the list vpListElement *next; ///! pointer to the next element in the list type val; ///! 
value of the element @@ -194,10 +194,10 @@ template void vpList::init() first = x; last = y; - x->prev = NULL; + x->prev = nullptr; x->next = y; y->prev = x; - y->next = NULL; + y->next = nullptr; cur = x; nb = 0; @@ -210,7 +210,7 @@ template void vpList::init() \endverbatim \sa init() */ -template vpList::vpList() : nb(0), first(NULL), last(NULL), cur(NULL) { init(); } +template vpList::vpList() : nb(0), first(nullptr), last(nullptr), cur(nullptr) { init(); } /*! \brief vpList destructor @@ -220,8 +220,8 @@ template vpList::~vpList() { kill(); - /*if (first != NULL) */ delete first; - /*if (last != NULL) */ delete last; + /*if (first != nullptr) */ delete first; + /*if (last != nullptr) */ delete last; } /*! @@ -601,7 +601,7 @@ template void vpList::suppress(void) x = cur; cur = cur->next; - if (x != NULL) + if (x != nullptr) delete x; nb--; @@ -673,7 +673,7 @@ template void vpList::operator+=(const type &l) \param l : the list to copy */ -template vpList::vpList(const vpList &l) : nb(0), first(NULL), last(NULL), cur(NULL) +template vpList::vpList(const vpList &l) : nb(0), first(nullptr), last(nullptr), cur(nullptr) { init(); *this = l; diff --git a/modules/core/include/visp3/core/vpMatrix.h b/modules/core/include/visp3/core/vpMatrix.h index 4048374153..3fac0d971c 100644 --- a/modules/core/include/visp3/core/vpMatrix.h +++ b/modules/core/include/visp3/core/vpMatrix.h @@ -206,14 +206,14 @@ vpMatrix M(R); */ void clear() { - if (data != NULL) { + if (data != nullptr) { free(data); - data = NULL; + data = nullptr; } - if (rowPtrs != NULL) { + if (rowPtrs != nullptr) { free(rowPtrs); - rowPtrs = NULL; + rowPtrs = nullptr; } rowNum = colNum = dsize = 0; } @@ -748,7 +748,7 @@ vpMatrix M(R); \sa saveMatrix(), saveMatrixYAML(), loadMatrixYAML() */ static inline bool loadMatrix(const std::string &filename, vpArray2D &M, bool binary = false, - char *header = NULL) + char *header = nullptr) { return vpArray2D::load(filename, M, binary, header); } @@ -822,7 +822,7 @@ vpMatrix M(R); \sa saveMatrixYAML(), saveMatrix(), loadMatrix() */ - static inline bool loadMatrixYAML(const std::string &filename, vpArray2D &M, char *header = NULL) + static inline bool loadMatrixYAML(const std::string &filename, vpArray2D &M, char *header = nullptr) { return vpArray2D::loadYAML(filename, M, header); } diff --git a/modules/core/include/visp3/core/vpMoment.h b/modules/core/include/visp3/core/vpMoment.h index 1074561c3b..bfdb378b35 100644 --- a/modules/core/include/visp3/core/vpMoment.h +++ b/modules/core/include/visp3/core/vpMoment.h @@ -120,7 +120,7 @@ class VISP_EXPORT vpMoment // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpMoment(const vpMoment &) - // : object(NULL), moments(NULL), values() + // : object(nullptr), moments(nullptr), values() // { // throw vpException(vpException::functionNotImplementedError,"Not // implemented!"); diff --git a/modules/core/include/visp3/core/vpMutex.h b/modules/core/include/visp3/core/vpMutex.h index 96a8fc0501..a9aaf12e1e 100644 --- a/modules/core/include/visp3/core/vpMutex.h +++ b/modules/core/include/visp3/core/vpMutex.h @@ -72,16 +72,16 @@ class vpMutex vpMutex() : m_mutex() { #if defined(VISP_HAVE_PTHREAD) - pthread_mutex_init(&m_mutex, NULL); + pthread_mutex_init(&m_mutex, nullptr); #elif defined(_WIN32) #ifdef WINRT_8_1 - m_mutex = CreateMutexEx(NULL, NULL, 0, NULL); + m_mutex = CreateMutexEx(nullptr, nullptr, 0, nullptr); #else - m_mutex = CreateMutex(NULL, // default security attributes + m_mutex = CreateMutex(nullptr, // default security attributes FALSE, // initially 
not owned - NULL); // unnamed mutex + nullptr); // unnamed mutex #endif - if (m_mutex == NULL) { + if (m_mutex == nullptr) { std::cout << "CreateMutex error: " << GetLastError() << std::endl; return; } diff --git a/modules/core/include/visp3/core/vpNetwork.h b/modules/core/include/visp3/core/vpNetwork.h index 0daa9a5173..45da39ad9f 100644 --- a/modules/core/include/visp3/core/vpNetwork.h +++ b/modules/core/include/visp3/core/vpNetwork.h @@ -304,7 +304,7 @@ template int vpNetwork::receive(T *object, const unsigned int &size socketMax = receptor_list[i].socketFileDescriptorReceptor; } - int value = select((int)socketMax + 1, &readFileDescriptor, NULL, NULL, &tv); + int value = select((int)socketMax + 1, &readFileDescriptor, nullptr, nullptr, &tv); int numbytes = 0; if (value == -1) { @@ -381,7 +381,7 @@ int vpNetwork::receiveFrom(T *object, const unsigned int &receptorEmitting, cons socketMax = receptor_list[receptorEmitting].socketFileDescriptorReceptor; FD_SET((unsigned int)receptor_list[receptorEmitting].socketFileDescriptorReceptor, &readFileDescriptor); - int value = select((int)socketMax + 1, &readFileDescriptor, NULL, NULL, &tv); + int value = select((int)socketMax + 1, &readFileDescriptor, nullptr, nullptr, &tv); int numbytes = 0; if (value == -1) { diff --git a/modules/core/include/visp3/core/vpRansac.h b/modules/core/include/visp3/core/vpRansac.h index fedd345f98..d1b0f83530 100644 --- a/modules/core/include/visp3/core/vpRansac.h +++ b/modules/core/include/visp3/core/vpRansac.h @@ -69,7 +69,7 @@ template class vpRansac public: static bool ransac(unsigned int npts, const vpColVector &x, unsigned int s, double t, vpColVector &model, vpColVector &inliers, int consensus = 1000, double not_used = 0.0, int maxNbumbersOfTrials = 10000, - double *residual = NULL); + double *residual = nullptr); }; /*! @@ -131,7 +131,7 @@ bool vpRansac::ransac(unsigned int npts, const vpColVector &x, int bestscore = -1; double N = 1; // Dummy initialisation for number of trials. - vpUniRand random((const long)time(NULL)); + vpUniRand random((const long)time(nullptr)); vpColVector bestinliers; unsigned int *ind = new unsigned int[s]; int ninliers = 0; @@ -171,7 +171,7 @@ bool vpRansac::ransac(unsigned int npts, const vpColVector &x, vpTransformation::computeResidual(x, M, d); // Find the indices of points that are inliers to this model. 
- if (residual != NULL) + if (residual != nullptr) *residual = 0.0; ninliers = 0; for (unsigned int i = 0; i < npts; i++) { @@ -179,7 +179,7 @@ bool vpRansac::ransac(unsigned int npts, const vpColVector &x, if (resid < t) { inliers[i] = 1; ninliers++; - if (residual != NULL) { + if (residual != nullptr) { *residual += fabs(d[i]); } } else @@ -222,7 +222,7 @@ bool vpRansac::ransac(unsigned int npts, const vpColVector &x, M = 0; } - if (residual != NULL) { + if (residual != nullptr) { if (ninliers > 0) { *residual /= ninliers; } diff --git a/modules/core/include/visp3/core/vpRowVector.h b/modules/core/include/visp3/core/vpRowVector.h index aeffe809be..830dcb5c51 100644 --- a/modules/core/include/visp3/core/vpRowVector.h +++ b/modules/core/include/visp3/core/vpRowVector.h @@ -130,14 +130,14 @@ class VISP_EXPORT vpRowVector : public vpArray2D */ void clear() { - if (data != NULL) { + if (data != nullptr) { free(data); - data = NULL; + data = nullptr; } - if (rowPtrs != NULL) { + if (rowPtrs != nullptr) { free(rowPtrs); - rowPtrs = NULL; + rowPtrs = nullptr; } rowNum = colNum = dsize = 0; } diff --git a/modules/core/include/visp3/core/vpThread.h b/modules/core/include/visp3/core/vpThread.h index f795aca60f..8420656b82 100644 --- a/modules/core/include/visp3/core/vpThread.h +++ b/modules/core/include/visp3/core/vpThread.h @@ -92,7 +92,7 @@ class vpThread arguments. \param fn : A pointer to a function. \param args : Arguments passed to the call to \e fn (if any). */ - vpThread(vpThread::Fn fn, vpThread::Args args = NULL) : m_handle(), m_isCreated(false), m_isJoinable(false) + vpThread(vpThread::Fn fn, vpThread::Args args = nullptr) : m_handle(), m_isCreated(false), m_isJoinable(false) { create(fn, args); } @@ -102,18 +102,18 @@ class vpThread execution. \param fn : A pointer to a function. \param args : Arguments passed to the call to \e fn (if any). 
*/ - void create(vpThread::Fn fn, vpThread::Args args = NULL) + void create(vpThread::Fn fn, vpThread::Args args = nullptr) { if (m_isCreated) throw vpException(vpException::fatalError, "The thread is already created"); #if defined(VISP_HAVE_PTHREAD) - int err = pthread_create(&m_handle, NULL, fn, args); + int err = pthread_create(&m_handle, nullptr, fn, args); if (err != 0) { throw vpException(vpException::cannotUseConstructorError, "Can't create thread : %s", strerror(err)); } #elif defined(_WIN32) DWORD dwThreadIdArray; - m_handle = CreateThread(NULL, // default security attributes + m_handle = CreateThread(nullptr, // default security attributes 0, // use default stack size fn, // thread function name args, // argument to thread function @@ -150,7 +150,7 @@ class vpThread { if (m_isJoinable) { #if defined(VISP_HAVE_PTHREAD) - pthread_join(m_handle, NULL); + pthread_join(m_handle, nullptr); #elif defined(_WIN32) #if defined(WINRT_8_1) WaitForSingleObjectEx(m_handle, INFINITE, FALSE); diff --git a/modules/core/include/visp3/core/vpXmlParser.h b/modules/core/include/visp3/core/vpXmlParser.h index e5970f1177..e069a59c9e 100644 --- a/modules/core/include/visp3/core/vpXmlParser.h +++ b/modules/core/include/visp3/core/vpXmlParser.h @@ -127,7 +127,7 @@ void vpDataParser::readMainClass(xmlDocPtr doc, xmlNodePtr node) { - for (xmlNodePtr tmpNode = node->xmlChildrenNode; tmpNode != NULL; tmpNode = tmpNode->next) { + for (xmlNodePtr tmpNode = node->xmlChildrenNode; tmpNode != nullptr; tmpNode = tmpNode->next) { if(tmpNode->type == XML_ELEMENT_NODE) { std::map::iterator iter = this->nodeMap.find((char*)tmpNode->name); diff --git a/modules/core/src/camera/vpMeterPixelConversion.cpp b/modules/core/src/camera/vpMeterPixelConversion.cpp index 5c0f835b42..62282e380a 100644 --- a/modules/core/src/camera/vpMeterPixelConversion.cpp +++ b/modules/core/src/camera/vpMeterPixelConversion.cpp @@ -357,7 +357,7 @@ void vpMeterPixelConversion::convertEllipse(const cv::Mat &cameraMatrix, double \param[in] cameraMatrix : Camera Matrix \f$\begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1\end{bmatrix}\f$ \param[in] distCoeffs : Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of - 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. + 4, 5, 8, 12 or 14 elements. If the vector is nullptr/empty, the zero distortion coefficients are assumed. \param[in] x : input coordinate in meter along image plane x-axis. \param[in] y : input coordinate in meter along image plane y-axis. \param[out] u : output coordinate in pixels along image horizontal axis. @@ -384,7 +384,7 @@ void vpMeterPixelConversion::convertPoint(const cv::Mat &cameraMatrix, const cv: \param[in] cameraMatrix : Camera Matrix \f$\begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1\end{bmatrix}\f$ \param[in] distCoeffs : Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of - 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. + 4, 5, 8, 12 or 14 elements. If the vector is nullptr/empty, the zero distortion coefficients are assumed. \param[in] x : input coordinate in meter along image plane x-axis. \param[in] y : input coordinate in meter along image plane y-axis. \param[out] iP : output coordinates in pixels. 
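Note on the changes in this and the surrounding files, which are a mechanical NULL to nullptr migration: the short standalone sketch below (not part of the patch, for context only) illustrates why nullptr is preferred in C++11 code. NULL is an integer-like constant that can select, or make ambiguous, an integer overload, whereas nullptr has the dedicated type std::nullptr_t and only binds to pointer parameters.

    #include <iostream>

    // Two overloads: one takes an integer, the other a pointer.
    void report(int)          { std::cout << "report(int)" << std::endl; }
    void report(const char *) { std::cout << "report(const char *)" << std::endl; }

    int main()
    {
      // report(NULL);  // NULL expands to 0 or 0L: calls report(int) or is ambiguous, depending on the platform
      report(nullptr);  // std::nullptr_t converts only to pointer types, so the pointer overload is chosen
      return 0;
    }
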
diff --git a/modules/core/src/camera/vpPixelMeterConversion.cpp b/modules/core/src/camera/vpPixelMeterConversion.cpp index a55ce1c4a0..ba0b8ac767 100644 --- a/modules/core/src/camera/vpPixelMeterConversion.cpp +++ b/modules/core/src/camera/vpPixelMeterConversion.cpp @@ -172,7 +172,7 @@ void vpPixelMeterConversion::convertMoment(const vpCameraParameters &cam, unsign * to meters \f$(x_c, y_c, n_{{20}_m}, n_{{11}_m}, n_{{02}_m})\f$ in the image plane. * \param[in] cameraMatrix : Camera Matrix \f$\begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1\end{bmatrix}\f$ * \param[in] distCoeffs : Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, - * k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, + * k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of 4, 5, 8, 12 or 14 elements. If the vector is nullptr/empty, * the zero distortion coefficients are assumed. * \param[in] center_p : Center of the ellipse (uc, vc) with pixel coordinates. * \param[in] n20_p, n11_p, n02_p : Normalized second order moments of the ellipse in pixels. @@ -281,7 +281,7 @@ void vpPixelMeterConversion::convertMoment(const cv::Mat &cameraMatrix, unsigned \param[in] cameraMatrix : Camera Matrix \f$\begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1\end{bmatrix}\f$ \param[in] distCoeffs : Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of - 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. + 4, 5, 8, 12 or 14 elements. If the vector is nullptr/empty, the zero distortion coefficients are assumed. \param[in] u : input coordinate in pixels along image horizontal axis. \param[in] v : input coordinate in pixels along image vertical axis. \param[out] x : output coordinate in meter along image plane x-axis. @@ -306,7 +306,7 @@ void vpPixelMeterConversion::convertPoint(const cv::Mat &cameraMatrix, const cv: \param[in] cameraMatrix : Camera Matrix \f$\begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1\end{bmatrix}\f$ \param[in] distCoeffs : Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of - 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. + 4, 5, 8, 12 or 14 elements. If the vector is nullptr/empty, the zero distortion coefficients are assumed. \param[in] iP : input coordinates in pixels. \param[out] x : output coordinate in meter along image plane x-axis. \param[out] y : output coordinate in meter along image plane y-axis. 
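The documentation updated above states that zero distortion coefficients are assumed when the vector is "nullptr/empty"; in practice this means passing a default-constructed (empty) cv::Mat. Below is a hedged usage sketch, with the overload name and parameter order taken from the documentation shown above and therefore to be treated as assumptions:

    #include <opencv2/core.hpp>
    #include <visp3/core/vpPixelMeterConversion.h>

    void pixelToMeterExample()
    {
      // 3x3 intrinsic matrix [fx 0 cx; 0 fy cy; 0 0 1] with illustrative values
      cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 600., 0., 320., 0., 600., 240., 0., 0., 1.);
      cv::Mat distCoeffs;   // left empty: zero distortion coefficients are assumed
      double x = 0., y = 0.;
      // Assumed parameter order, following the docs above: (cameraMatrix, distCoeffs, u, v, x, y)
      vpPixelMeterConversion::convertPoint(cameraMatrix, distCoeffs, 320., 240., x, y);
    }
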
diff --git a/modules/core/src/display/vpDisplay.cpp b/modules/core/src/display/vpDisplay.cpp index 4e88163789..51b39c381d 100644 --- a/modules/core/src/display/vpDisplay.cpp +++ b/modules/core/src/display/vpDisplay.cpp @@ -137,7 +137,7 @@ int main() */ void vpDisplay::getImage(const vpImage &Isrc, vpImage &Idest) { - if (Isrc.display != NULL) { + if (Isrc.display != nullptr) { (Isrc.display)->getImage(Idest); } else { @@ -211,7 +211,7 @@ int main() */ void vpDisplay::getImage(const vpImage &Isrc, vpImage &Idest) { - if (Isrc.display != NULL) { + if (Isrc.display != nullptr) { (Isrc.display)->getImage(Idest); } else { diff --git a/modules/core/src/display/vpDisplay_impl.h b/modules/core/src/display/vpDisplay_impl.h index 671fbfba32..33ad7819fa 100644 --- a/modules/core/src/display/vpDisplay_impl.h +++ b/modules/core/src/display/vpDisplay_impl.h @@ -39,15 +39,15 @@ template void vp_display_close(vpImage &I) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->closeDisplay(); - I.display = NULL; + I.display = nullptr; } } template void vp_display_display(const vpImage &I) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayImage(I); } } @@ -56,7 +56,7 @@ template void vp_display_display_arrow(const vpImage &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int w, unsigned int h, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayArrow(ip1, ip2, color, w, h, thickness); } } @@ -65,7 +65,7 @@ template void vp_display_display_arrow(const vpImage &I, int i1, int j1, int i2, int j2, const vpColor &color, unsigned int w, unsigned int h, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint ip1, ip2; ip1.set_i(i1); ip1.set_j(j1); @@ -106,7 +106,7 @@ template void vp_display_display_char_string(const vpImage &I, const vpImagePoint &ip, const char *string, const vpColor &color) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayCharString(ip, string, color); } } @@ -114,7 +114,7 @@ void vp_display_display_char_string(const vpImage &I, const vpImagePoint & template void vp_display_display_char_string(const vpImage &I, int i, int j, const char *string, const vpColor &color) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint ip; ip.set_i(i); ip.set_j(j); @@ -127,7 +127,7 @@ template void vp_display_display_circle(const vpImage &I, const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayCircle(center, radius, color, fill, thickness); } } @@ -136,7 +136,7 @@ template void vp_display_display_circle(const vpImage &I, int i, int j, unsigned int radius, const vpColor &color, bool fill, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint ip; ip.set_i(i); ip.set_j(j); @@ -149,7 +149,7 @@ template void vp_display_display_cross(const vpImage &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayCross(ip, size, color, thickness); } } @@ -158,7 +158,7 @@ template void vp_display_display_cross(const vpImage &I, int i, int j, unsigned int size, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint ip; ip.set_i(i); ip.set_j(j); @@ -171,7 +171,7 @@ template 
void vp_display_display_dot_line(const vpImage &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayDotLine(ip1, ip2, color, thickness); } } @@ -180,7 +180,7 @@ template void vp_display_display_dot_line(const vpImage &I, int i1, int j1, int i2, int j2, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint ip1, ip2; ip1.set_i(i1); ip1.set_j(j1); @@ -196,7 +196,7 @@ void vp_display_display_ellipse(const vpImage &I, const vpImagePoint ¢ const double &highalpha, bool use_normalized_centered_moments, const vpColor &color, unsigned int thickness, bool display_center, bool display_arc) { - if (I.display != NULL) { + if (I.display != nullptr) { double a = 0., b = 0., e = 0.; if (use_normalized_centered_moments) { @@ -399,7 +399,7 @@ template void vp_display_display_line(const vpImage &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayLine(ip1, ip2, color, thickness); } } @@ -408,7 +408,7 @@ template void vp_display_display_line(const vpImage &I, int i1, int j1, int i2, int j2, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint ip1, ip2; ip1.set_i(i1); ip1.set_j(j1); @@ -422,7 +422,7 @@ template void vp_display_display_point(const vpImage &I, const vpImagePoint &ip, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayPoint(ip, color, thickness); } } @@ -430,7 +430,7 @@ void vp_display_display_point(const vpImage &I, const vpImagePoint &ip, co template void vp_display_display_point(const vpImage &I, int i, int j, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint ip; ip.set_i(i); ip.set_j(j); @@ -442,7 +442,7 @@ template void vp_display_display_polygon(const vpImage &I, const std::vector &vip, const vpColor &color, unsigned int thickness, bool closed = true) { - if (I.display != NULL) { + if (I.display != nullptr) { if (closed) { for (unsigned int i = 0; i < vip.size(); i++) { (I.display)->displayLine(vip[i], vip[(i + 1) % vip.size()], color, thickness); @@ -460,7 +460,7 @@ template void vp_display_display_rectangle(const vpImage &I, const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayRectangle(topLeft, width, height, color, fill, thickness); } } @@ -469,7 +469,7 @@ template void vp_display_display_rectangle(const vpImage &I, const vpRect &rectangle, const vpColor &color, bool fill, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayRectangle(rectangle, color, fill, thickness); } } @@ -478,7 +478,7 @@ template void vp_display_display_rectangle(const vpImage &I, const vpImagePoint ¢er, float angle, unsigned int width, unsigned int height, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { double i = center.get_i(); double j = center.get_j(); @@ -506,7 +506,7 @@ template void vp_display_display_rectangle(const vpImage &I, const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, bool fill, unsigned int 
thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayRectangle(topLeft, bottomRight, color, fill, thickness); } } @@ -515,7 +515,7 @@ template void vp_display_display_rectangle(const vpImage &I, int i, int j, unsigned int width, unsigned int height, const vpColor &color, bool fill, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint topLeft; topLeft.set_i(i); topLeft.set_j(j); @@ -528,7 +528,7 @@ template void vp_display_display_rectangle(const vpImage &I, unsigned int i, unsigned int j, float angle, unsigned int width, unsigned int height, const vpColor &color, unsigned int thickness) { - if (I.display != NULL) { + if (I.display != nullptr) { // A, B, C, D, corners of the rectangle clockwise vpImagePoint ipa, ipb, ipc, ipd; float cosinus = cos(angle); @@ -562,28 +562,28 @@ template void vp_display_display_roi(const vpImage &I, const throw(vpException(vpException::dimensionError, "Region of interest outside of the image")); } - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayImageROI(I, vpImagePoint(top, left), (unsigned int)roiwidth, (unsigned int)roiheight); } } template void vp_display_flush(const vpImage &I) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->flushDisplay(); } } template void vp_display_flush_roi(const vpImage &I, const vpRect &roi) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->flushDisplayROI(roi.getTopLeft(), (unsigned int)roi.getWidth(), (unsigned int)roi.getHeight()); } } template bool vp_display_get_click(const vpImage &I, bool blocking) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getClick(blocking); } return false; @@ -591,7 +591,7 @@ template bool vp_display_get_click(const vpImage &I, bool blo template bool vp_display_get_click(const vpImage &I, vpImagePoint &ip, bool blocking) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getClick(ip, blocking); } return false; @@ -601,7 +601,7 @@ template bool vp_display_get_click(const vpImage &I, vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getClick(ip, button, blocking); } return false; @@ -611,7 +611,7 @@ template bool vp_display_get_click_up(const vpImage &I, vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getClickUp(ip, button, blocking); } return false; @@ -619,7 +619,7 @@ bool vp_display_get_click_up(const vpImage &I, vpImagePoint &ip, vpMouseBu template bool vp_display_get_keyboard_event(const vpImage &I, bool blocking) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getKeyboardEvent(blocking); } return false; @@ -627,7 +627,7 @@ template bool vp_display_get_keyboard_event(const vpImage &I, template bool vp_display_get_keyboard_event(const vpImage &I, std::string &key, bool blocking) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getKeyboardEvent(key, blocking); } return false; @@ -635,7 +635,7 @@ template bool vp_display_get_keyboard_event(const vpImage &I, template bool vp_display_get_keyboard_event(const vpImage &I, char *key, bool blocking) { - if (I.display != NULL) { + if (I.display != nullptr) { std::string str; bool ret = (I.display)->getKeyboardEvent(str, blocking); snprintf(key, str.size(), "%s", str.c_str()); @@ 
-646,7 +646,7 @@ template bool vp_display_get_keyboard_event(const vpImage &I, template bool vp_display_get_pointer_motion_event(const vpImage &I, vpImagePoint &ip) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getPointerMotionEvent(ip); } return false; @@ -654,7 +654,7 @@ template bool vp_display_get_pointer_motion_event(const vpImage bool vp_display_get_pointer_position(const vpImage &I, vpImagePoint &ip) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getPointerPosition(ip); } return false; @@ -662,7 +662,7 @@ template bool vp_display_get_pointer_position(const vpImage & template void vp_display_set_background(const vpImage &I, const vpColor &color) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->clearDisplay(color); } } @@ -670,7 +670,7 @@ template void vp_display_set_background(const vpImage &I, con template void vp_display_display_text(const vpImage &I, const vpImagePoint &ip, const std::string &s, const vpColor &color) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->displayCharString(ip, s.c_str(), color); } } @@ -678,7 +678,7 @@ void vp_display_display_text(const vpImage &I, const vpImagePoint &ip, con template void vp_display_display_text(const vpImage &I, int i, int j, const std::string &s, const vpColor &color) { - if (I.display != NULL) { + if (I.display != nullptr) { vpImagePoint ip; ip.set_i(i); ip.set_j(j); @@ -689,28 +689,28 @@ void vp_display_display_text(const vpImage &I, int i, int j, const std::st template void vp_display_set_font(const vpImage &I, const std::string &fontname) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->setFont(fontname); } } template void vp_display_set_title(const vpImage &I, const std::string &windowtitle) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->setTitle(windowtitle); } } template void vp_display_set_window_position(const vpImage &I, int winx, int winy) { - if (I.display != NULL) { + if (I.display != nullptr) { (I.display)->setWindowPosition(winx, winy); } } template unsigned int vp_display_get_down_scaling_factor(const vpImage &I) { - if (I.display != NULL) { + if (I.display != nullptr) { return (I.display)->getDownScalingFactor(); } else { diff --git a/modules/core/src/image/private/stb_truetype.h b/modules/core/src/image/private/stb_truetype.h index 947d323c71..7d9332ddb1 100644 --- a/modules/core/src/image/private/stb_truetype.h +++ b/modules/core/src/image/private/stb_truetype.h @@ -1210,7 +1210,7 @@ static stbtt__buf stbtt__new_buf(const void *p, size_t size) static stbtt__buf stbtt__buf_range(const stbtt__buf *b, int o, int s) { - stbtt__buf r = stbtt__new_buf(NULL, 0); + stbtt__buf r = stbtt__new_buf(nullptr, 0); if (o < 0 || s < 0 || o > b->size || s > b->size - o) return r; r.data = b->data + o; @@ -1401,11 +1401,11 @@ static stbtt__buf stbtt__get_subrs(stbtt__buf cff, stbtt__buf fontdict) stbtt__buf pdict; stbtt__dict_get_ints(&fontdict, 18, 2, private_loc); if (!private_loc[1] || !private_loc[0]) - return stbtt__new_buf(NULL, 0); + return stbtt__new_buf(nullptr, 0); pdict = stbtt__buf_range(&cff, private_loc[1], private_loc[0]); stbtt__dict_get_ints(&pdict, 19, 1, &subrsoff); if (!subrsoff) - return stbtt__new_buf(NULL, 0); + return stbtt__new_buf(nullptr, 0); stbtt__buf_seek(&cff, private_loc[1] + subrsoff); return stbtt__cff_get_index(&cff); } @@ -1433,7 +1433,7 @@ static int stbtt_InitFont_internal(stbtt_fontinfo *info, unsigned char *data, in info->data = data; 
info->fontstart = fontstart; - info->cff = stbtt__new_buf(NULL, 0); + info->cff = stbtt__new_buf(nullptr, 0); cmap = stbtt__find_table(data, fontstart, "cmap"); // required info->loca = stbtt__find_table(data, fontstart, "loca"); // required @@ -1460,8 +1460,8 @@ static int stbtt_InitFont_internal(stbtt_fontinfo *info, unsigned char *data, in if (!cff) return 0; - info->fontdicts = stbtt__new_buf(NULL, 0); - info->fdselect = stbtt__new_buf(NULL, 0); + info->fontdicts = stbtt__new_buf(nullptr, 0); + info->fdselect = stbtt__new_buf(nullptr, 0); // @TODO this should use size from table (not 512MB) info->cff = stbtt__new_buf(data + cff, 512 * 1024 * 1024); @@ -1708,7 +1708,7 @@ STBTT_DEF int stbtt_IsGlyphEmpty(const stbtt_fontinfo *info, int glyph_index) stbtt_int16 numberOfContours; int g; if (info->cff.size) - return stbtt__GetGlyphInfoT2(info, glyph_index, NULL, NULL, NULL, NULL) == 0; + return stbtt__GetGlyphInfoT2(info, glyph_index, nullptr, nullptr, nullptr, nullptr) == 0; g = stbtt__GetGlyfOffset(info, glyph_index); if (g < 0) return 1; @@ -1741,7 +1741,7 @@ static int stbtt__GetGlyphShapeTT(const stbtt_fontinfo *info, int glyph_index, s int num_vertices = 0; int g = stbtt__GetGlyfOffset(info, glyph_index); - *pvertices = NULL; + *pvertices = nullptr; if (g < 0) return 0; @@ -1987,7 +1987,7 @@ typedef struct { #define STBTT__CSCTX_INIT(bounds) \ { \ - bounds, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, 0 \ + bounds, 0, 0, 0, 0, 0, 0, 0, 0, 0, nullptr, 0 \ } static void stbtt__track_vertex(stbtt__csctx *c, stbtt_int32 x, stbtt_int32 y) @@ -2062,7 +2062,7 @@ static stbtt__buf stbtt__get_subr(stbtt__buf idx, int n) bias = 1131; n += bias; if (n < 0 || n >= count) - return stbtt__new_buf(NULL, 0); + return stbtt__new_buf(nullptr, 0); return stbtt__cff_index_get(idx, n); } @@ -2091,7 +2091,7 @@ static stbtt__buf stbtt__cid_get_glyph_subrs(const stbtt_fontinfo *info, int gly } } if (fdselector == -1) - stbtt__new_buf(NULL, 0); + stbtt__new_buf(nullptr, 0); return stbtt__get_subrs(info->cff, stbtt__cff_index_get(info->fontdicts, fdselector)); } @@ -2398,7 +2398,7 @@ static int stbtt__GetGlyphShapeT2(const stbtt_fontinfo *info, int glyph_index, s return output_ctx.num_vertices; } } - *pvertices = NULL; + *pvertices = nullptr; return 0; } @@ -2839,7 +2839,7 @@ STBTT_DEF int stbtt_GetGlyphSVG(const stbtt_fontinfo *info, int gl, const char * return 0; svg_doc = stbtt_FindSVGDoc(info, gl); - if (svg_doc != NULL) { + if (svg_doc != nullptr) { *svg = (char *)data + info->svg + ttULONG(svg_doc + 4); return ttULONG(svg_doc + 8); } else { @@ -2928,8 +2928,8 @@ static void *stbtt__hheap_alloc(stbtt__hheap *hh, size_t size, void *userdata) if (hh->num_remaining_in_head_chunk == 0) { int count = (size < 32 ? 2000 : size < 128 ? 
800 : 100); stbtt__hheap_chunk *c = (stbtt__hheap_chunk *)STBTT_malloc(sizeof(stbtt__hheap_chunk) + size * count, userdata); - if (c == NULL) - return NULL; + if (c == nullptr) + return nullptr; c->next = hh->head; hh->head = c; hh->num_remaining_in_head_chunk = count; @@ -2986,7 +2986,7 @@ static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, i { stbtt__active_edge *z = (stbtt__active_edge *)stbtt__hheap_alloc(hh, sizeof(*z), userdata); float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0); - STBTT_assert(z != NULL); + STBTT_assert(z != nullptr); if (!z) return z; @@ -3011,7 +3011,7 @@ static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, i { stbtt__active_edge *z = (stbtt__active_edge *)stbtt__hheap_alloc(hh, sizeof(*z), userdata); float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0); - STBTT_assert(z != NULL); + STBTT_assert(z != nullptr); // STBTT_assert(e->y0 <= start_point); if (!z) return z; @@ -3082,7 +3082,7 @@ static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int off_y, void *userdata) { stbtt__hheap hh = {0, 0, 0}; - stbtt__active_edge *active = NULL; + stbtt__active_edge *active = nullptr; int y, j = 0; int max_weight = (255 / vsubsample); // weight per vertical scanline int s; // vertical subsample index @@ -3142,9 +3142,9 @@ static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, while (e->y0 <= scan_y) { if (e->y1 > scan_y) { stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y, userdata); - if (z != NULL) { + if (z != nullptr) { // find insertion point - if (active == NULL) + if (active == nullptr) active = z; else if (z->x < active->x) { // insert at front @@ -3460,7 +3460,7 @@ static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int off_y, void *userdata) { stbtt__hheap hh = {0, 0, 0}; - stbtt__active_edge *active = NULL; + stbtt__active_edge *active = nullptr; int y, j = 0, i; float scanline_data[129], *scanline, *scanline2; @@ -3503,7 +3503,7 @@ static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, while (e->y0 <= scan_y_bottom) { if (e->y0 != e->y1) { stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y_top, userdata); - if (z != NULL) { + if (z != nullptr) { if (j == 0 && off_y != 0) { if (z->ey < scan_y_top) { // this can happen due to subpixel positioning and some kind of fp rounding error i think @@ -3821,7 +3821,7 @@ static stbtt__point *stbtt_FlattenCurves(stbtt_vertex *vertices, int num_verts, float x = 0, y = 0; if (pass == 1) { points = (stbtt__point *)STBTT_malloc(num_points * sizeof(points[0]), userdata); - if (points == NULL) + if (points == nullptr) goto error; } num_points = 0; @@ -3863,7 +3863,7 @@ static stbtt__point *stbtt_FlattenCurves(stbtt_vertex *vertices, int num_verts, STBTT_free(*contour_lengths, userdata); *contour_lengths = 0; *num_contours = 0; - return NULL; + return nullptr; } STBTT_DEF void stbtt_Rasterize(stbtt__bitmap *result, float flatness_in_pixels, stbtt_vertex *vertices, int num_verts, @@ -3872,7 +3872,7 @@ STBTT_DEF void stbtt_Rasterize(stbtt__bitmap *result, float flatness_in_pixels, { float scale = scale_x > scale_y ? 
scale_y : scale_x; int winding_count = 0; - int *winding_lengths = NULL; + int *winding_lengths = nullptr; stbtt__point *windings = stbtt_FlattenCurves(vertices, num_verts, flatness_in_pixels / scale, &winding_lengths, &winding_count, userdata); if (windings) { @@ -3899,7 +3899,7 @@ STBTT_DEF unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info if (scale_y == 0) { if (scale_x == 0) { STBTT_free(vertices, info->userdata); - return NULL; + return nullptr; } scale_y = scale_x; } @@ -3909,7 +3909,7 @@ STBTT_DEF unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info // now we get the size gbm.w = (ix1 - ix0); gbm.h = (iy1 - iy0); - gbm.pixels = NULL; // in case we error + gbm.pixels = nullptr; // in case we error if (width) *width = gbm.w; @@ -4019,7 +4019,7 @@ static int stbtt_BakeFontBitmap_internal(unsigned char *data, int offset, // fon float scale; int x, y, bottom_y, i; stbtt_fontinfo f; - f.userdata = NULL; + f.userdata = nullptr; if (!stbtt_InitFont(&f, data, offset)) return -1; STBTT_memset(pixels, 0, pw * ph); // background of 0 around pixels @@ -4159,10 +4159,10 @@ STBTT_DEF int stbtt_PackBegin(stbtt_pack_context *spc, unsigned char *pixels, in int num_nodes = pw - padding; stbrp_node *nodes = (stbrp_node *)STBTT_malloc(sizeof(*nodes) * num_nodes, alloc_context); - if (context == NULL || nodes == NULL) { - if (context != NULL) + if (context == nullptr || nodes == nullptr) { + if (context != nullptr) STBTT_free(context, alloc_context); - if (nodes != NULL) + if (nodes != nullptr) STBTT_free(nodes, alloc_context); return 0; } @@ -4358,7 +4358,7 @@ STBTT_DEF int stbtt_PackFontRangesGatherRects(stbtt_pack_context *spc, const stb ranges[i].v_oversample = (unsigned char)spc->v_oversample; for (j = 0; j < ranges[i].num_chars; ++j) { int x0, y0, x1, y1; - int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? ranges[i].first_unicode_codepoint_in_range + j + int codepoint = ranges[i].array_of_unicode_codepoints == nullptr ? ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j]; int glyph = stbtt_FindGlyphIndex(info, codepoint); if (glyph == 0 && (spc->skip_missing || missing_glyph_added)) { @@ -4422,7 +4422,7 @@ STBTT_DEF int stbtt_PackFontRangesRenderIntoRects(stbtt_pack_context *spc, const if (r->was_packed && r->w != 0 && r->h != 0) { stbtt_packedchar *bc = &ranges[i].chardata_for_range[j]; int advance, lsb, x0, y0, x1, y1; - int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? ranges[i].first_unicode_codepoint_in_range + j + int codepoint = ranges[i].array_of_unicode_codepoints == nullptr ? 
ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j]; int glyph = stbtt_FindGlyphIndex(info, codepoint); stbrp_coord pad = (stbrp_coord)spc->padding; @@ -4501,7 +4501,7 @@ STBTT_DEF int stbtt_PackFontRanges(stbtt_pack_context *spc, const unsigned char n += ranges[i].num_chars; rects = (stbrp_rect *)STBTT_malloc(sizeof(*rects) * n, spc->user_allocator_context); - if (rects == NULL) + if (rects == nullptr) return 0; info.userdata = spc->user_allocator_context; @@ -4523,7 +4523,7 @@ STBTT_DEF int stbtt_PackFontRange(stbtt_pack_context *spc, const unsigned char * { stbtt_pack_range range; range.first_unicode_codepoint_in_range = first_unicode_codepoint_in_range; - range.array_of_unicode_codepoints = NULL; + range.array_of_unicode_codepoints = nullptr; range.num_chars = num_chars_in_range; range.chardata_for_range = chardata_for_range; range.font_size = font_size; @@ -4766,13 +4766,13 @@ STBTT_DEF unsigned char *stbtt_GetGlyphSDF(const stbtt_fontinfo *info, float sca unsigned char *data; if (scale == 0) - return NULL; + return nullptr; stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale, scale, 0.0f, 0.0f, &ix0, &iy0, &ix1, &iy1); - // if empty, return NULL + // if empty, return nullptr if (ix0 == ix1 || iy0 == iy1) - return NULL; + return nullptr; ix0 -= padding; iy0 -= padding; @@ -5035,7 +5035,7 @@ STBTT_DEF const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *l stbtt_uint32 offset = font->fontstart; stbtt_uint32 nm = stbtt__find_table(fc, offset, "name"); if (!nm) - return NULL; + return nullptr; count = ttUSHORT(fc + nm + 2); stringOffset = nm + ttUSHORT(fc + nm + 4); @@ -5047,7 +5047,7 @@ STBTT_DEF const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *l return (const char *)(fc + stringOffset + ttUSHORT(fc + loc + 10)); } } - return NULL; + return nullptr; } static int stbtt__matchpair(stbtt_uint8 *fc, stbtt_uint32 nm, stbtt_uint8 *name, stbtt_int32 nlen, diff --git a/modules/core/src/image/vpGaussianFilter.cpp b/modules/core/src/image/vpGaussianFilter.cpp index 4e116085ab..4a37ca4183 100644 --- a/modules/core/src/image/vpGaussianFilter.cpp +++ b/modules/core/src/image/vpGaussianFilter.cpp @@ -40,7 +40,7 @@ class vpGaussianFilter::Impl { public: Impl(unsigned int width, unsigned int height, float sigma, bool deinterleave) - : m_funcPtrGray(NULL), m_funcPtrRGBa(NULL), m_deinterleave(deinterleave) + : m_funcPtrGray(nullptr), m_funcPtrRGBa(nullptr), m_deinterleave(deinterleave) { const float epsilon = 0.001f; { @@ -95,7 +95,7 @@ class vpGaussianFilter::Impl SimdGaussianBlurRun(m_funcPtrGray, m_blue.bitmap, m_blue.getWidth(), m_blueBlurred.bitmap, m_blueBlurred.getWidth()); - vpImageConvert::merge(&m_redBlurred, &m_greenBlurred, &m_blueBlurred, NULL, I_blur); + vpImageConvert::merge(&m_redBlurred, &m_greenBlurred, &m_blueBlurred, nullptr, I_blur); } } diff --git a/modules/core/src/image/vpImageConvert.cpp b/modules/core/src/image/vpImageConvert.cpp index 3fe7de60e8..cc4d74ed45 100644 --- a/modules/core/src/image/vpImageConvert.cpp +++ b/modules/core/src/image/vpImageConvert.cpp @@ -3918,10 +3918,10 @@ void vpImageConvert::YCrCbToRGBa(unsigned char *ycrcb, unsigned char *rgba, unsi /*! Split an image from vpRGBa format to monochrome channels. \param[in] src : source image. - \param[out] pR : red channel. Set as NULL if not needed. - \param[out] pG : green channel. Set as NULL if not needed. - \param[out] pB : blue channel. Set as NULL if not needed. - \param[out] pa : alpha channel. Set as NULL if not needed. 
+ \param[out] pR : red channel. Set as nullptr if not needed. + \param[out] pG : green channel. Set as nullptr if not needed. + \param[out] pB : blue channel. Set as nullptr if not needed. + \param[out] pa : alpha channel. Set as nullptr if not needed. Output channels are resized if needed. @@ -3946,7 +3946,7 @@ int main() // Split Ic color image // R and B will be resized in split function if needed - vpImageConvert::split(Ic, &R, NULL, &B, NULL); + vpImageConvert::split(Ic, &R, nullptr, &B, nullptr); // Save the the R Channel. vpImageIo::write(R, "RChannel.pgm"); @@ -4009,22 +4009,22 @@ void vpImageConvert::merge(const vpImage *R, const vpImage mapOfWidths, mapOfHeights; - if (R != NULL) { + if (R != nullptr) { mapOfWidths[R->getWidth()]++; mapOfHeights[R->getHeight()]++; } - if (G != NULL) { + if (G != nullptr) { mapOfWidths[G->getWidth()]++; mapOfHeights[G->getHeight()]++; } - if (B != NULL) { + if (B != nullptr) { mapOfWidths[B->getWidth()]++; mapOfHeights[B->getHeight()]++; } - if (a != NULL) { + if (a != nullptr) { mapOfWidths[a->getWidth()]++; mapOfHeights[a->getHeight()]++; } @@ -4035,26 +4035,26 @@ void vpImageConvert::merge(const vpImage *R, const vpImagebitmap, width, G->bitmap, width, B->bitmap, width, a->bitmap, width, width, height, reinterpret_cast(RGBa.bitmap), width * sizeof(vpRGBa)); } else { unsigned int size = width * height; for (unsigned int i = 0; i < size; i++) { - if (R != NULL) { + if (R != nullptr) { RGBa.bitmap[i].R = R->bitmap[i]; } - if (G != NULL) { + if (G != nullptr) { RGBa.bitmap[i].G = G->bitmap[i]; } - if (B != NULL) { + if (B != nullptr) { RGBa.bitmap[i].B = B->bitmap[i]; } - if (a != NULL) { + if (a != nullptr) { RGBa.bitmap[i].A = a->bitmap[i]; } } diff --git a/modules/core/src/math/matrix/vpColVector.cpp b/modules/core/src/math/matrix/vpColVector.cpp index 7a06e90e30..105f1dc77e 100644 --- a/modules/core/src/math/matrix/vpColVector.cpp +++ b/modules/core/src/math/matrix/vpColVector.cpp @@ -161,7 +161,7 @@ void vpColVector::init(const vpColVector &v, unsigned int r, unsigned int nrows) v.getRows())); resize(nrows, false); - if (this->rowPtrs == NULL) // Fix coverity scan: explicit null dereferenced + if (this->rowPtrs == nullptr) // Fix coverity scan: explicit null dereferenced return; // Nothing to do for (unsigned int i = r; i < rnrows; i++) (*this)[i - r] = v[i]; @@ -224,9 +224,9 @@ vpColVector::vpColVector(vpColVector &&v) : vpArray2D() v.rowNum = 0; v.colNum = 0; - v.rowPtrs = NULL; + v.rowPtrs = nullptr; v.dsize = 0; - v.data = NULL; + v.data = nullptr; } vpColVector vpColVector::operator-() const @@ -416,9 +416,9 @@ vpColVector &vpColVector::operator=(vpColVector &&other) other.rowNum = 0; other.colNum = 0; - other.rowPtrs = NULL; + other.rowPtrs = nullptr; other.dsize = 0; - other.data = NULL; + other.data = nullptr; } return *this; @@ -478,10 +478,10 @@ vpColVector operator*(const double &x, const vpColVector &v) double vpColVector::dotProd(const vpColVector &a, const vpColVector &b) { - if (a.data == NULL) { + if (a.data == nullptr) { throw(vpException(vpException::fatalError, "Cannot compute the dot product: first vector empty")); } - if (b.data == NULL) { + if (b.data == nullptr) { throw(vpException(vpException::fatalError, "Cannot compute the dot product: second vector empty")); } if (a.size() != b.size()) { @@ -524,7 +524,7 @@ vpColVector &vpColVector::normalize() vpColVector vpColVector::invSort(const vpColVector &v) { - if (v.data == NULL) { + if (v.data == nullptr) { throw(vpException(vpException::fatalError, "Cannot sort content of 
column vector: vector empty")); } vpColVector tab; @@ -549,7 +549,7 @@ vpColVector vpColVector::invSort(const vpColVector &v) vpColVector vpColVector::sort(const vpColVector &v) { - if (v.data == NULL) { + if (v.data == nullptr) { throw(vpException(vpException::fatalError, "Cannot sort content of column vector: vector empty")); } vpColVector tab; @@ -619,7 +619,7 @@ void vpColVector::stack(const vpColVector &A, const vpColVector &B, vpColVector double vpColVector::mean(const vpColVector &v) { - if (v.data == NULL || v.size() == 0) { + if (v.data == nullptr || v.size() == 0) { throw(vpException(vpException::dimensionError, "Cannot compute column vector mean: vector empty")); } @@ -631,7 +631,7 @@ double vpColVector::mean(const vpColVector &v) double vpColVector::median(const vpColVector &v) { - if (v.data == NULL || v.size() == 0) { + if (v.data == nullptr || v.size() == 0) { throw(vpException(vpException::dimensionError, "Cannot compute column vector median: vector empty")); } @@ -642,7 +642,7 @@ double vpColVector::median(const vpColVector &v) double vpColVector::stdev(const vpColVector &v, bool useBesselCorrection) { - if (v.data == NULL || v.size() == 0) { + if (v.data == nullptr || v.size() == 0) { throw(vpException(vpException::dimensionError, "Cannot compute column vector stdev: vector empty")); } @@ -709,7 +709,7 @@ void vpColVector::insert(unsigned int i, const vpColVector &v) if (i + v.size() > this->size()) throw(vpException(vpException::dimensionError, "Unable to insert a column vector")); - if (data != NULL && v.data != NULL && v.rowNum > 0) { + if (data != nullptr && v.data != nullptr && v.rowNum > 0) { memcpy(data + i, v.data, sizeof(double) * v.rowNum); } } diff --git a/modules/core/src/math/matrix/vpMatrix.cpp b/modules/core/src/math/matrix/vpMatrix.cpp index d238d8e998..83a55a3ba9 100644 --- a/modules/core/src/math/matrix/vpMatrix.cpp +++ b/modules/core/src/math/matrix/vpMatrix.cpp @@ -207,9 +207,9 @@ vpMatrix::vpMatrix(vpMatrix &&A) : vpArray2D() A.rowNum = 0; A.colNum = 0; - A.rowPtrs = NULL; + A.rowPtrs = nullptr; A.dsize = 0; - A.data = NULL; + A.data = nullptr; } /*! 
@@ -344,7 +344,7 @@ void vpMatrix::init(const vpMatrix &M, unsigned int r, unsigned int c, unsigned M.getCols())); resize(nrows, ncols, false, false); - if (this->rowPtrs == NULL) // Fix coverity scan: explicit null dereferenced + if (this->rowPtrs == nullptr) // Fix coverity scan: explicit null dereferenced return; // Noting to do for (unsigned int i = 0; i < nrows; i++) { memcpy((*this)[i], &M[i + r][c], ncols * sizeof(double)); @@ -642,7 +642,7 @@ vpMatrix &vpMatrix::operator=(const vpArray2D &A) { resize(A.getRows(), A.getCols(), false, false); - if (data != NULL && A.data != NULL && data != A.data) { + if (data != nullptr && A.data != nullptr && data != A.data) { memcpy(data, A.data, dsize * sizeof(double)); } @@ -653,7 +653,7 @@ vpMatrix &vpMatrix::operator=(const vpMatrix &A) { resize(A.getRows(), A.getCols(), false, false); - if (data != NULL && A.data != NULL && data != A.data) { + if (data != nullptr && A.data != nullptr && data != A.data) { memcpy(data, A.data, dsize * sizeof(double)); } @@ -674,9 +674,9 @@ vpMatrix &vpMatrix::operator=(vpMatrix &&other) other.rowNum = 0; other.colNum = 0; - other.rowPtrs = NULL; + other.rowPtrs = nullptr; other.dsize = 0; - other.data = NULL; + other.data = nullptr; } return *this; @@ -2365,7 +2365,7 @@ vpMatrix vpMatrix::pseudoInverseLapack(double svThreshold) const U.insert(*this, 0, 0); U.svdLapack(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); return Ap; } @@ -2433,7 +2433,7 @@ unsigned int vpMatrix::pseudoInverseLapack(vpMatrix &Ap, double svThreshold) con U.insert(*this, 0, 0); U.svdLapack(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); return static_cast(rank_out); } @@ -2507,7 +2507,7 @@ unsigned int vpMatrix::pseudoInverseLapack(vpMatrix &Ap, vpColVector &sv, double U.insert(*this, 0, 0); U.svdLapack(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -2643,7 +2643,7 @@ unsigned int vpMatrix::pseudoInverseLapack(vpMatrix &Ap, vpColVector &sv, double U.insert(*this, 0, 0); U.svdLapack(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, &imA, &imAt, &kerAt); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, &imA, &imAt, &kerAt); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -2712,7 +2712,7 @@ vpMatrix vpMatrix::pseudoInverseLapack(int rank_in) const U.insert(*this, 0, 0); U.svdLapack(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); return Ap; } @@ -2787,7 +2787,7 @@ int vpMatrix::pseudoInverseLapack(vpMatrix &Ap, int rank_in) const U.insert(*this, 0, 0); U.svdLapack(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, 
nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); return rank_out; } @@ -2869,7 +2869,7 @@ int vpMatrix::pseudoInverseLapack(vpMatrix &Ap, vpColVector &sv, int rank_in) co U.insert(*this, 0, 0); U.svdLapack(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -3081,7 +3081,7 @@ vpMatrix vpMatrix::pseudoInverseEigen3(double svThreshold) const U.insert(*this, 0, 0); U.svdEigen3(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); return Ap; } @@ -3149,7 +3149,7 @@ unsigned int vpMatrix::pseudoInverseEigen3(vpMatrix &Ap, double svThreshold) con U.insert(*this, 0, 0); U.svdEigen3(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); return static_cast(rank_out); } @@ -3223,7 +3223,7 @@ unsigned int vpMatrix::pseudoInverseEigen3(vpMatrix &Ap, vpColVector &sv, double U.insert(*this, 0, 0); U.svdEigen3(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -3359,7 +3359,7 @@ unsigned int vpMatrix::pseudoInverseEigen3(vpMatrix &Ap, vpColVector &sv, double U.insert(*this, 0, 0); U.svdEigen3(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, &imA, &imAt, &kerAt); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, &imA, &imAt, &kerAt); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -3428,7 +3428,7 @@ vpMatrix vpMatrix::pseudoInverseEigen3(int rank_in) const U.insert(*this, 0, 0); U.svdEigen3(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); return Ap; } @@ -3503,7 +3503,7 @@ int vpMatrix::pseudoInverseEigen3(vpMatrix &Ap, int rank_in) const U.insert(*this, 0, 0); U.svdEigen3(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); return rank_out; } @@ -3585,7 +3585,7 @@ int vpMatrix::pseudoInverseEigen3(vpMatrix &Ap, vpColVector &sv, int rank_in) co U.insert(*this, 0, 0); U.svdEigen3(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -3797,7 +3797,7 @@ vpMatrix vpMatrix::pseudoInverseOpenCV(double svThreshold) const U.insert(*this, 0, 0); 
U.svdOpenCV(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); return Ap; } @@ -3865,7 +3865,7 @@ unsigned int vpMatrix::pseudoInverseOpenCV(vpMatrix &Ap, double svThreshold) con U.insert(*this, 0, 0); U.svdOpenCV(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); return static_cast(rank_out); } @@ -3939,7 +3939,7 @@ unsigned int vpMatrix::pseudoInverseOpenCV(vpMatrix &Ap, vpColVector &sv, double U.insert(*this, 0, 0); U.svdOpenCV(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, nullptr, nullptr, nullptr); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -4075,7 +4075,7 @@ unsigned int vpMatrix::pseudoInverseOpenCV(vpMatrix &Ap, vpColVector &sv, double U.insert(*this, 0, 0); U.svdOpenCV(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, NULL, &imA, &imAt, &kerAt); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, nullptr, &imA, &imAt, &kerAt); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -4144,7 +4144,7 @@ vpMatrix vpMatrix::pseudoInverseOpenCV(int rank_in) const U.insert(*this, 0, 0); U.svdOpenCV(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); return Ap; } @@ -4219,7 +4219,7 @@ int vpMatrix::pseudoInverseOpenCV(vpMatrix &Ap, int rank_in) const U.insert(*this, 0, 0); U.svdOpenCV(sv, V); - compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); return rank_out; } @@ -4301,7 +4301,7 @@ int vpMatrix::pseudoInverseOpenCV(vpMatrix &Ap, vpColVector &sv, int rank_in) co U.insert(*this, 0, 0); U.svdOpenCV(sv_, V); - compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, NULL, NULL, NULL); + compute_pseudo_inverse(U, sv_, V, nrows, ncols, svThreshold, Ap, rank_out, &rank_in, nullptr, nullptr, nullptr); // Remove singular values equal to the one that corresponds to the lines of 0 // introduced when m < n @@ -5270,7 +5270,7 @@ vpRowVector vpMatrix::getRow(unsigned int i, unsigned int j_begin, unsigned int throw(vpException(vpException::dimensionError, "Unable to extract a row vector from the matrix")); vpRowVector r(row_size); - if (r.data != NULL && data != NULL) { + if (r.data != nullptr && data != nullptr) { memcpy(r.data, (*this)[i] + j_begin, row_size * sizeof(double)); } @@ -5370,24 +5370,24 @@ void vpMatrix::stack(const vpMatrix &A, const vpMatrix &B, vpMatrix &C) } } - if (A.data != NULL && A.data == C.data) { + if (A.data != nullptr && A.data == C.data) { std::cerr << "A and C must be two different objects!" 
<< std::endl; return; } - if (B.data != NULL && B.data == C.data) { + if (B.data != nullptr && B.data == C.data) { std::cerr << "B and C must be two different objects!" << std::endl; return; } C.resize(nra + nrb, B.getCols(), false, false); - if (C.data != NULL && A.data != NULL && A.size() > 0) { + if (C.data != nullptr && A.data != nullptr && A.size() > 0) { // Copy A in C memcpy(C.data, A.data, sizeof(double) * A.size()); } - if (C.data != NULL && B.data != NULL && B.size() > 0) { + if (C.data != nullptr && B.data != nullptr && B.size() > 0) { // Copy B in C memcpy(C.data + A.size(), B.data, sizeof(double) * B.size()); } @@ -5423,7 +5423,7 @@ vpMatrix vpMatrix::stack(const vpMatrix &A, const vpRowVector &r) */ void vpMatrix::stack(const vpMatrix &A, const vpRowVector &r, vpMatrix &C) { - if (A.data != NULL && A.data == C.data) { + if (A.data != nullptr && A.data == C.data) { std::cerr << "A and C must be two different objects!" << std::endl; return; } @@ -5462,7 +5462,7 @@ vpMatrix vpMatrix::stack(const vpMatrix &A, const vpColVector &c) */ void vpMatrix::stack(const vpMatrix &A, const vpColVector &c, vpMatrix &C) { - if (A.data != NULL && A.data == C.data) { + if (A.data != nullptr && A.data == C.data) { std::cerr << "A and C must be two different objects!" << std::endl; return; } @@ -5920,7 +5920,7 @@ void vpMatrix::stack(const vpRowVector &r) unsigned int oldSize = size(); resize(rowNum + 1, colNum, false, false); - if (data != NULL && r.data != NULL && data != r.data) { + if (data != nullptr && r.data != nullptr && data != r.data) { // Copy r in data memcpy(data + oldSize, r.data, sizeof(double) * r.size()); } @@ -5962,7 +5962,7 @@ void vpMatrix::stack(const vpColVector &c) unsigned int oldColNum = colNum; resize(rowNum, colNum + 1, false, false); - if (data != NULL && tmp.data != NULL && data != tmp.data) { + if (data != nullptr && tmp.data != nullptr && data != tmp.data) { // Copy c in data for (unsigned int i = 0; i < rowNum; i++) { memcpy(data + i * colNum, tmp.data + i * oldColNum, sizeof(double) * oldColNum); @@ -5985,10 +5985,10 @@ void vpMatrix::stack(const vpColVector &c) void vpMatrix::insert(const vpMatrix &A, unsigned int r, unsigned int c) { if ((r + A.getRows()) <= rowNum && (c + A.getCols()) <= colNum) { - if (A.colNum == colNum && data != NULL && A.data != NULL && A.data != data) { + if (A.colNum == colNum && data != nullptr && A.data != nullptr && A.data != data) { memcpy(data + r * colNum, A.data, sizeof(double) * A.size()); } - else if (data != NULL && A.data != NULL && A.data != data) { + else if (data != nullptr && A.data != nullptr && A.data != data) { for (unsigned int i = r; i < (r + A.getRows()); i++) { memcpy(data + i * colNum + c, A.data + (i - r) * A.colNum, sizeof(double) * A.colNum); } diff --git a/modules/core/src/math/matrix/vpRowVector.cpp b/modules/core/src/math/matrix/vpRowVector.cpp index 2fda61340a..54cfa3af73 100644 --- a/modules/core/src/math/matrix/vpRowVector.cpp +++ b/modules/core/src/math/matrix/vpRowVector.cpp @@ -136,9 +136,9 @@ vpRowVector &vpRowVector::operator=(vpRowVector &&other) other.rowNum = 0; other.colNum = 0; - other.rowPtrs = NULL; + other.rowPtrs = nullptr; other.dsize = 0; - other.data = NULL; + other.data = nullptr; } return *this; @@ -586,9 +586,9 @@ vpRowVector::vpRowVector(vpRowVector &&v) : vpArray2D() v.rowNum = 0; v.colNum = 0; - v.rowPtrs = NULL; + v.rowPtrs = nullptr; v.dsize = 0; - v.data = NULL; + v.data = nullptr; } /*! 
@@ -885,7 +885,7 @@ void vpRowVector::stack(const vpRowVector &A, const vpRowVector &B, vpRowVector */ double vpRowVector::mean(const vpRowVector &v) { - if (v.data == NULL || v.size() == 0) { + if (v.data == nullptr || v.size() == 0) { throw(vpException(vpException::dimensionError, "Cannot compute mean value of an empty row vector")); } @@ -902,7 +902,7 @@ double vpRowVector::mean(const vpRowVector &v) */ double vpRowVector::median(const vpRowVector &v) { - if (v.data == NULL || v.size() == 0) { + if (v.data == nullptr || v.size() == 0) { throw(vpException(vpException::dimensionError, "Cannot compute mean value of an empty row vector")); } @@ -916,7 +916,7 @@ double vpRowVector::median(const vpRowVector &v) */ double vpRowVector::stdev(const vpRowVector &v, bool useBesselCorrection) { - if (v.data == NULL || v.size() == 0) { + if (v.data == nullptr || v.size() == 0) { throw(vpException(vpException::dimensionError, "Cannot compute mean value of an empty row vector")); } @@ -1150,7 +1150,7 @@ void vpRowVector::init(const vpRowVector &v, unsigned int c, unsigned int ncols) throw(vpException(vpException::dimensionError, "Bad column dimension (%d > %d) used to initialize vpRowVector", cncols, v.getCols())); resize(ncols); - if (this->rowPtrs == NULL) // Fix coverity scan: explicit null dereferenced + if (this->rowPtrs == nullptr) // Fix coverity scan: explicit null dereferenced return; // Noting to do for (unsigned int i = 0; i < ncols; i++) (*this)[i] = v[i + c]; diff --git a/modules/core/src/math/matrix/vpSubColVector.cpp b/modules/core/src/math/matrix/vpSubColVector.cpp index 4d5875081e..32873ff54b 100644 --- a/modules/core/src/math/matrix/vpSubColVector.cpp +++ b/modules/core/src/math/matrix/vpSubColVector.cpp @@ -42,7 +42,7 @@ #include //! Default constructor that creates an empty vector. -vpSubColVector::vpSubColVector() : vpColVector(), m_pRowNum(0), m_parent(NULL) { } +vpSubColVector::vpSubColVector() : vpColVector(), m_pRowNum(0), m_parent(nullptr) { } /*! * Construct a sub-column vector from a parent column vector. @@ -51,7 +51,7 @@ vpSubColVector::vpSubColVector() : vpColVector(), m_pRowNum(0), m_parent(NULL) { * \param nrows : size of the sub-column vector. */ vpSubColVector::vpSubColVector(vpColVector &v, const unsigned int &offset, const unsigned int &nrows) - : vpColVector(), m_pRowNum(0), m_parent(NULL) + : vpColVector(), m_pRowNum(0), m_parent(nullptr) { init(v, offset, nrows); } @@ -96,9 +96,9 @@ void vpSubColVector::init(vpColVector &v, const unsigned int &offset, const unsi } /*! - * Destructor that set the pointer to the parent column vector to NULL. + * Destructor that set the pointer to the parent column vector to nullptr. */ -vpSubColVector::~vpSubColVector() { data = NULL; } +vpSubColVector::~vpSubColVector() { data = nullptr; } /*! * This method can be used to detect if the parent column vector diff --git a/modules/core/src/math/matrix/vpSubMatrix.cpp b/modules/core/src/math/matrix/vpSubMatrix.cpp index d1e8f73e7b..da9e7c7b7c 100644 --- a/modules/core/src/math/matrix/vpSubMatrix.cpp +++ b/modules/core/src/math/matrix/vpSubMatrix.cpp @@ -42,7 +42,7 @@ #include #include -vpSubMatrix::vpSubMatrix() : pRowNum(0), pColNum(0), parent(NULL) {} +vpSubMatrix::vpSubMatrix() : pRowNum(0), pColNum(0), parent(nullptr) {} /*! 
\brief Constructor @@ -54,7 +54,7 @@ vpSubMatrix::vpSubMatrix() : pRowNum(0), pColNum(0), parent(NULL) {} */ vpSubMatrix::vpSubMatrix(vpMatrix &m, const unsigned int &row_offset, const unsigned int &col_offset, const unsigned int &nrows, const unsigned int &ncols) - : pRowNum(0), pColNum(0), parent(NULL) + : pRowNum(0), pColNum(0), parent(nullptr) { init(m, row_offset, col_offset, nrows, ncols); } @@ -173,4 +173,4 @@ vpSubMatrix &vpSubMatrix::operator=(const double &x) return *this; } -vpSubMatrix::~vpSubMatrix() { data = NULL; } +vpSubMatrix::~vpSubMatrix() { data = nullptr; } diff --git a/modules/core/src/math/matrix/vpSubRowVector.cpp b/modules/core/src/math/matrix/vpSubRowVector.cpp index 6e64b314e0..6f7b10bc8e 100644 --- a/modules/core/src/math/matrix/vpSubRowVector.cpp +++ b/modules/core/src/math/matrix/vpSubRowVector.cpp @@ -37,7 +37,7 @@ #include //! Default constructor that creates an empty vector. -vpSubRowVector::vpSubRowVector() : vpRowVector(), m_pColNum(0), m_parent(NULL) { } +vpSubRowVector::vpSubRowVector() : vpRowVector(), m_pColNum(0), m_parent(nullptr) { } /*! * Construct a sub-row vector from a parent row vector. @@ -46,7 +46,7 @@ vpSubRowVector::vpSubRowVector() : vpRowVector(), m_pColNum(0), m_parent(NULL) { * \param ncols : size of the sub-row vector. */ vpSubRowVector::vpSubRowVector(vpRowVector &v, const unsigned int &offset, const unsigned int &ncols) - : vpRowVector(), m_pColNum(0), m_parent(NULL) + : vpRowVector(), m_pColNum(0), m_parent(nullptr) { init(v, offset, ncols); } @@ -89,9 +89,9 @@ void vpSubRowVector::init(vpRowVector &v, const unsigned int &offset, const unsi } /*! - * Destructor that set the pointer to the parent row vector to NULL. + * Destructor that set the pointer to the parent row vector to nullptr. */ -vpSubRowVector::~vpSubRowVector() { data = NULL; } +vpSubRowVector::~vpSubRowVector() { data = nullptr; } /*! 
* This method can be used to detect if the parent row vector diff --git a/modules/core/src/tools/cpu-features/x86/cpu_x86_Windows.ipp b/modules/core/src/tools/cpu-features/x86/cpu_x86_Windows.ipp index 3d8684263d..f4b3f8046e 100644 --- a/modules/core/src/tools/cpu-features/x86/cpu_x86_Windows.ipp +++ b/modules/core/src/tools/cpu-features/x86/cpu_x86_Windows.ipp @@ -102,7 +102,7 @@ BOOL IsWow64() LPFN_ISWOW64PROCESS fnIsWow64Process = (LPFN_ISWOW64PROCESS)GetProcAddress( GetModuleHandle(TEXT("kernel32")), "IsWow64Process"); - if (NULL != fnIsWow64Process) { + if (nullptr != fnIsWow64Process) { if (!fnIsWow64Process(GetCurrentProcess(), &bIsWow64)) { printf("Error Detecting Operating System.\n"); printf("Defaulting to 32-bit OS.\n\n"); diff --git a/modules/core/src/tools/file/vpIoTools.cpp b/modules/core/src/tools/file/vpIoTools.cpp index a5d90de5f5..7a2c66ea11 100644 --- a/modules/core/src/tools/file/vpIoTools.cpp +++ b/modules/core/src/tools/file/vpIoTools.cpp @@ -1723,7 +1723,7 @@ std::string vpIoTools::getAbsolutePathname(const std::string &pathname) #if !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__))) // UNIX std::string real_path_str = pathname; - char *real_path = realpath(pathname.c_str(), NULL); + char *real_path = realpath(pathname.c_str(), nullptr); if (real_path) { real_path_str = real_path; @@ -2030,8 +2030,8 @@ std::vector vpIoTools::getDirFiles(const std::string &pathname) #if !defined(_WIN32) && (defined(__unix__) || defined(__unix) || (defined(__APPLE__) && defined(__MACH__))) // UNIX std::vector files; - struct dirent **list = NULL; - int filesCount = scandir(dirName.c_str(), &list, NULL, NULL); + struct dirent **list = nullptr; + int filesCount = scandir(dirName.c_str(), &list, nullptr, nullptr); if (filesCount == -1) { throw(vpIoException(vpException::fatalError, "Cannot read files of directory %s", dirName.c_str())); } diff --git a/modules/core/src/tools/geometry/vpPolygon3D.cpp b/modules/core/src/tools/geometry/vpPolygon3D.cpp index 272e9e65a5..bf8f6b1e8f 100644 --- a/modules/core/src/tools/geometry/vpPolygon3D.cpp +++ b/modules/core/src/tools/geometry/vpPolygon3D.cpp @@ -51,13 +51,13 @@ Basic constructor. */ vpPolygon3D::vpPolygon3D() - : nbpt(0), nbCornersInsidePrev(0), p(NULL), polyClipped(), clippingFlag(vpPolygon3D::NO_CLIPPING), + : nbpt(0), nbCornersInsidePrev(0), p(nullptr), polyClipped(), clippingFlag(vpPolygon3D::NO_CLIPPING), distNearClip(0.001), distFarClip(100.) 
{ } vpPolygon3D::vpPolygon3D(const vpPolygon3D &mbtp) - : nbpt(mbtp.nbpt), nbCornersInsidePrev(mbtp.nbCornersInsidePrev), p(NULL), polyClipped(mbtp.polyClipped), + : nbpt(mbtp.nbpt), nbCornersInsidePrev(mbtp.nbCornersInsidePrev), p(nullptr), polyClipped(mbtp.polyClipped), clippingFlag(mbtp.clippingFlag), distNearClip(mbtp.distNearClip), distFarClip(mbtp.distFarClip) { if (p) @@ -90,9 +90,9 @@ vpPolygon3D &vpPolygon3D::operator=(const vpPolygon3D &mbtp) */ vpPolygon3D::~vpPolygon3D() { - if (p != NULL) { + if (p != nullptr) { delete[] p; - p = NULL; + p = nullptr; } } @@ -119,7 +119,7 @@ vpPoint &vpPolygon3D::getPoint(const unsigned int _index) void vpPolygon3D::setNbPoint(unsigned int nb) { nbpt = nb; - if (p != NULL) + if (p != nullptr) delete[] p; p = new vpPoint[nb]; } @@ -132,7 +132,7 @@ void vpPolygon3D::setNbPoint(unsigned int nb) */ void vpPolygon3D::addPoint(unsigned int n, const vpPoint &P) { - // if( p!NULL && n < nbpt ) + // if( p!nullptr && n < nbpt ) p[n] = P; } diff --git a/modules/core/src/tools/histogram/vpHistogram.cpp b/modules/core/src/tools/histogram/vpHistogram.cpp index de74e105c6..5fa5f6fcb2 100644 --- a/modules/core/src/tools/histogram/vpHistogram.cpp +++ b/modules/core/src/tools/histogram/vpHistogram.cpp @@ -61,15 +61,15 @@ struct vpHistogram_Param_t unsigned int *m_histogram; const vpImage *m_I; - vpHistogram_Param_t() : m_start_index(0), m_end_index(0), m_lut(), m_histogram(NULL), m_I(NULL) { } + vpHistogram_Param_t() : m_start_index(0), m_end_index(0), m_lut(), m_histogram(nullptr), m_I(nullptr) { } vpHistogram_Param_t(unsigned int start_index, unsigned int end_index, const vpImage *const I) - : m_start_index(start_index), m_end_index(end_index), m_lut(), m_histogram(NULL), m_I(I) + : m_start_index(start_index), m_end_index(end_index), m_lut(), m_histogram(nullptr), m_I(I) { } ~vpHistogram_Param_t() { - if (m_histogram != NULL) { + if (m_histogram != nullptr) { delete[] m_histogram; } } @@ -139,12 +139,12 @@ bool compare_vpHistogramPeak(vpHistogramPeak first, vpHistogramPeak second) /*! Defaut constructor for a gray level histogram. */ -vpHistogram::vpHistogram() : histogram(NULL), size(256) { init(); } +vpHistogram::vpHistogram() : histogram(nullptr), size(256) { init(); } /*! Copy constructor of a gray level histogram. 
*/ -vpHistogram::vpHistogram(const vpHistogram &h) : histogram(NULL), size(256) +vpHistogram::vpHistogram(const vpHistogram &h) : histogram(nullptr), size(256) { init(h.size); memcpy(histogram, h.histogram, size * sizeof(unsigned)); @@ -157,7 +157,7 @@ vpHistogram::vpHistogram(const vpHistogram &h) : histogram(NULL), size(256) \sa calculate() */ -vpHistogram::vpHistogram(const vpImage &I) : histogram(NULL), size(256) +vpHistogram::vpHistogram(const vpImage &I) : histogram(nullptr), size(256) { init(); @@ -169,10 +169,10 @@ vpHistogram::vpHistogram(const vpImage &I) : histogram(NULL), siz */ vpHistogram::~vpHistogram() { - if (histogram != NULL) { + if (histogram != nullptr) { // vpTRACE("free: %p", &histogram); delete[] histogram; - histogram = NULL; + histogram = nullptr; size = 0; } } @@ -205,9 +205,9 @@ vpHistogram &vpHistogram::operator=(const vpHistogram &h) */ void vpHistogram::init(unsigned size_) { - if (histogram != NULL) { + if (histogram != nullptr) { delete[] histogram; - histogram = NULL; + histogram = nullptr; } this->size = size_; @@ -229,9 +229,9 @@ void vpHistogram::init(unsigned size_) void vpHistogram::calculate(const vpImage &I, unsigned int nbins, unsigned int nbThreads) { if (size != nbins) { - if (histogram != NULL) { + if (histogram != nullptr) { delete[] histogram; - histogram = NULL; + histogram = nullptr; } size = nbins > 256 ? 256 : (nbins > 0 ? nbins : 256); @@ -371,7 +371,7 @@ void vpHistogram::equalize(const vpImage &I, vpImage &I, const vpColor &color, */ void vpHistogram::smooth(unsigned int fsize) { - if (histogram == NULL) { + if (histogram == nullptr) { vpERROR_TRACE("Histogram array not initialised\n"); throw(vpImageException(vpImageException::notInitializedError, "Histogram array not initialised")); } @@ -484,7 +484,7 @@ void vpHistogram::smooth(unsigned int fsize) */ unsigned vpHistogram::getPeaks(std::list &peaks) { - if (histogram == NULL) { + if (histogram == nullptr) { vpERROR_TRACE("Histogram array not initialised\n"); throw(vpImageException(vpImageException::notInitializedError, "Histogram array not initialised")); } @@ -757,7 +757,7 @@ bool vpHistogram::getPeaks(unsigned char dist, vpHistogramPeak &peakl, vpHistogr */ unsigned vpHistogram::getValey(std::list &valey) { - if (histogram == NULL) { + if (histogram == nullptr) { vpERROR_TRACE("Histogram array not initialised\n"); throw(vpImageException(vpImageException::notInitializedError, "Histogram array not initialised")); } @@ -1085,7 +1085,7 @@ bool vpHistogram::write(const std::string &filename) { return (this->write(filen bool vpHistogram::write(const char *filename) { FILE *fd = fopen(filename, "w"); - if (fd == NULL) + if (fd == nullptr) return false; for (unsigned i = 0; i < size; i++) fprintf(fd, "%u %u\n", i, histogram[i]); diff --git a/modules/core/src/tools/network/vpClient.cpp b/modules/core/src/tools/network/vpClient.cpp index 55561dca5b..19d0994c8e 100644 --- a/modules/core/src/tools/network/vpClient.cpp +++ b/modules/core/src/tools/network/vpClient.cpp @@ -64,7 +64,7 @@ bool vpClient::connectToHostname(const std::string &hostname, const unsigned int // get server host information from hostname struct hostent *server = gethostbyname(hostname.c_str()); - if (server == NULL) { + if (server == nullptr) { std::string noSuchHostMessage("ERROR, "); noSuchHostMessage.append(hostname); noSuchHostMessage.append(": no such host\n"); diff --git a/modules/core/src/tools/network/vpNetwork.cpp b/modules/core/src/tools/network/vpNetwork.cpp index abfaf86dee..158c5942e3 100644 --- 
a/modules/core/src/tools/network/vpNetwork.cpp +++ b/modules/core/src/tools/network/vpNetwork.cpp @@ -139,7 +139,7 @@ int vpNetwork::getReceptorIndex(const char *name) { struct hostent *server = gethostbyname(name); - if (server == NULL) { + if (server == nullptr) { std::string noSuchHostMessage("ERROR, "); noSuchHostMessage.append(name); noSuchHostMessage.append(": no such host\n"); @@ -673,7 +673,7 @@ int vpNetwork::privReceiveRequestOnce() socketMax = receptor_list[i].socketFileDescriptorReceptor; } - int value = select((int)socketMax + 1, &readFileDescriptor, NULL, NULL, &tv); + int value = select((int)socketMax + 1, &readFileDescriptor, nullptr, nullptr, &tv); int numbytes = 0; if (value == -1) { @@ -757,7 +757,7 @@ int vpNetwork::privReceiveRequestOnceFrom(const unsigned int &receptorEmitting) socketMax = receptor_list[receptorEmitting].socketFileDescriptorReceptor; FD_SET((unsigned int)receptor_list[receptorEmitting].socketFileDescriptorReceptor, &readFileDescriptor); - int value = select((int)socketMax + 1, &readFileDescriptor, NULL, NULL, &tv); + int value = select((int)socketMax + 1, &readFileDescriptor, nullptr, nullptr, &tv); int numbytes = 0; if (value == -1) { if (verboseMode) diff --git a/modules/core/src/tools/network/vpServer.cpp b/modules/core/src/tools/network/vpServer.cpp index c7b92d368c..80d68105ca 100644 --- a/modules/core/src/tools/network/vpServer.cpp +++ b/modules/core/src/tools/network/vpServer.cpp @@ -228,7 +228,7 @@ bool vpServer::checkForConnections() socketMax = receptor_list[i].socketFileDescriptorReceptor; } - int value = select((int)socketMax + 1, &readFileDescriptor, NULL, NULL, &tv); + int value = select((int)socketMax + 1, &readFileDescriptor, nullptr, nullptr, &tv); if (value == -1) { // vpERROR_TRACE( "vpServer::run(), select()" ); return false; diff --git a/modules/core/src/tools/network/vpUDPClient.cpp b/modules/core/src/tools/network/vpUDPClient.cpp index fe4880f6cd..eba00e84f4 100644 --- a/modules/core/src/tools/network/vpUDPClient.cpp +++ b/modules/core/src/tools/network/vpUDPClient.cpp @@ -142,8 +142,8 @@ void vpUDPClient::init(const std::string &hostname, int port) std::stringstream ss; ss << port; struct addrinfo hints; - struct addrinfo *result = NULL; - struct addrinfo *ptr = NULL; + struct addrinfo *result = nullptr; + struct addrinfo *ptr = nullptr; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; @@ -157,7 +157,7 @@ void vpUDPClient::init(const std::string &hostname, int port) throw vpException(vpException::fatalError, ss.str()); } - for (ptr = result; ptr != NULL; ptr = ptr->ai_next) { + for (ptr = result; ptr != nullptr; ptr = ptr->ai_next) { if (ptr->ai_family == AF_INET && ptr->ai_socktype == SOCK_DGRAM) { m_serverAddress = *(struct sockaddr_in *)ptr->ai_addr; break; @@ -205,7 +205,7 @@ int vpUDPClient::receive(std::string &msg, int timeoutMs) timeout.tv_sec = timeoutMs / 1000; timeout.tv_usec = (timeoutMs % 1000) * 1000; } - int retval = select((int)m_socketFileDescriptor + 1, &s, NULL, NULL, timeoutMs > 0 ? &timeout : NULL); + int retval = select((int)m_socketFileDescriptor + 1, &s, nullptr, nullptr, timeoutMs > 0 ? &timeout : nullptr); if (retval == -1) { std::cerr << "Error select!" << std::endl; @@ -251,7 +251,7 @@ int vpUDPClient::receive(void *msg, size_t len, int timeoutMs) timeout.tv_sec = timeoutMs / 1000; timeout.tv_usec = (timeoutMs % 1000) * 1000; } - int retval = select((int)m_socketFileDescriptor + 1, &s, NULL, NULL, timeoutMs > 0 ? 
&timeout : NULL); + int retval = select((int)m_socketFileDescriptor + 1, &s, nullptr, nullptr, timeoutMs > 0 ? &timeout : nullptr); if (retval == -1) { std::cerr << "Error select!" << std::endl; diff --git a/modules/core/src/tools/network/vpUDPServer.cpp b/modules/core/src/tools/network/vpUDPServer.cpp index b55f3efebb..42725f7db3 100644 --- a/modules/core/src/tools/network/vpUDPServer.cpp +++ b/modules/core/src/tools/network/vpUDPServer.cpp @@ -149,8 +149,8 @@ void vpUDPServer::init(const std::string &hostname, int port) std::stringstream ss; ss << port; struct addrinfo hints; - struct addrinfo *result = NULL; - struct addrinfo *ptr = NULL; + struct addrinfo *result = nullptr; + struct addrinfo *ptr = nullptr; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; @@ -164,7 +164,7 @@ void vpUDPServer::init(const std::string &hostname, int port) throw vpException(vpException::fatalError, ss.str()); } - for (ptr = result; ptr != NULL; ptr = ptr->ai_next) { + for (ptr = result; ptr != nullptr; ptr = ptr->ai_next) { if (ptr->ai_family == AF_INET && ptr->ai_socktype == SOCK_DGRAM) { m_serverAddress = *(struct sockaddr_in *)ptr->ai_addr; break; @@ -220,7 +220,7 @@ int vpUDPServer::receive(std::string &msg, std::string &hostInfo, int timeoutMs) timeout.tv_sec = timeoutMs / 1000; timeout.tv_usec = (timeoutMs % 1000) * 1000; } - int retval = select((int)m_socketFileDescriptor + 1, &s, NULL, NULL, timeoutMs > 0 ? &timeout : NULL); + int retval = select((int)m_socketFileDescriptor + 1, &s, nullptr, nullptr, timeoutMs > 0 ? &timeout : nullptr); if (retval == -1) { std::cerr << "Error select!" << std::endl; @@ -258,7 +258,7 @@ int vpUDPServer::receive(std::string &msg, std::string &hostInfo, int timeoutMs) char result[INET_ADDRSTRLEN]; const char *ptr = inet_ntop(AF_INET, (void *)&m_clientAddress.sin_addr, result, sizeof(result)); - if (ptr == NULL) { + if (ptr == nullptr) { std::cerr << "inet_ntop failed with error: " << WSAGetLastError() << std::endl; } else { hostIp = result; @@ -297,8 +297,8 @@ int vpUDPServer::send(const std::string &msg, const std::string &hostname, int p std::stringstream ss; ss << port; struct addrinfo hints; - struct addrinfo *result = NULL; - struct addrinfo *ptr = NULL; + struct addrinfo *result = nullptr; + struct addrinfo *ptr = nullptr; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; @@ -312,7 +312,7 @@ int vpUDPServer::send(const std::string &msg, const std::string &hostname, int p throw vpException(vpException::fatalError, ss.str()); } - for (ptr = result; ptr != NULL; ptr = ptr->ai_next) { + for (ptr = result; ptr != nullptr; ptr = ptr->ai_next) { if (ptr->ai_family == AF_INET && ptr->ai_socktype == SOCK_DGRAM) { m_clientAddress = *(struct sockaddr_in *)ptr->ai_addr; break; diff --git a/modules/core/src/tools/serial/vpSerial.cpp b/modules/core/src/tools/serial/vpSerial.cpp index ac6f6273fc..a36e5e16f1 100644 --- a/modules/core/src/tools/serial/vpSerial.cpp +++ b/modules/core/src/tools/serial/vpSerial.cpp @@ -267,7 +267,7 @@ bool vpSerial::read(char *c, long timeout_s) FD_ZERO(&readfds); FD_SET(static_cast(m_fd), &readfds); - int ret = select(FD_SETSIZE, &readfds, (fd_set *)NULL, (fd_set *)NULL, &timeout); + int ret = select(FD_SETSIZE, &readfds, (fd_set *)nullptr, (fd_set *)nullptr, &timeout); if (ret < 0) { throw(vpException(vpException::fatalError, "Serial i/o exception")); diff --git a/modules/core/src/tools/xml/vpXmlParser.cpp b/modules/core/src/tools/xml/vpXmlParser.cpp index 7ac5cee6cf..8c203293bb 100644 --- 
a/modules/core/src/tools/xml/vpXmlParser.cpp +++ b/modules/core/src/tools/xml/vpXmlParser.cpp @@ -96,7 +96,7 @@ vpXmlParser::vpXmlParser(const vpXmlParser &_twin) : nodeMap(_twin.nodeMap), mai */ char *vpXmlParser::xmlReadCharChild(xmlDocPtr doc, xmlNodePtr node) { - if (node->xmlChildrenNode == NULL) { + if (node->xmlChildrenNode == nullptr) { std::string errorMsg = "Empty node " + std::string((char *)node->name) + ", cannot read char*"; std::cerr << errorMsg << std::endl; throw vpException(vpException::fatalError, errorMsg); @@ -116,7 +116,7 @@ char *vpXmlParser::xmlReadCharChild(xmlDocPtr doc, xmlNodePtr node) std::string vpXmlParser::xmlReadStringChild(xmlDocPtr doc, xmlNodePtr node) { - if (node->xmlChildrenNode == NULL) { + if (node->xmlChildrenNode == nullptr) { std::string errorMsg = "Empty node " + std::string((char *)node->name) + ", cannot read std::string"; std::cerr << errorMsg << std::endl; throw vpException(vpException::fatalError, errorMsg); @@ -140,7 +140,7 @@ std::string vpXmlParser::xmlReadStringChild(xmlDocPtr doc, xmlNodePtr node) */ int vpXmlParser::xmlReadIntChild(xmlDocPtr doc, xmlNodePtr node) { - if (node->xmlChildrenNode == NULL) { + if (node->xmlChildrenNode == nullptr) { std::string errorMsg = "Empty node " + std::string((char *)node->name) + ", cannot read int"; std::cerr << errorMsg << std::endl; throw vpException(vpException::fatalError, errorMsg); @@ -174,7 +174,7 @@ int vpXmlParser::xmlReadIntChild(xmlDocPtr doc, xmlNodePtr node) */ unsigned int vpXmlParser::xmlReadUnsignedIntChild(xmlDocPtr doc, xmlNodePtr node) { - if (node->xmlChildrenNode == NULL) { + if (node->xmlChildrenNode == nullptr) { std::string errorMsg = "Empty node " + std::string((char *)node->name) + ", cannot read unsigned int"; std::cerr << errorMsg << std::endl; throw vpException(vpException::fatalError, errorMsg); @@ -208,7 +208,7 @@ unsigned int vpXmlParser::xmlReadUnsignedIntChild(xmlDocPtr doc, xmlNodePtr node */ double vpXmlParser::xmlReadDoubleChild(xmlDocPtr doc, xmlNodePtr node) { - if (node->xmlChildrenNode == NULL) { + if (node->xmlChildrenNode == nullptr) { std::string errorMsg = "Empty node " + std::string((char *)node->name) + ", cannot read double"; std::cerr << errorMsg << std::endl; throw vpException(vpException::fatalError, errorMsg); @@ -241,7 +241,7 @@ double vpXmlParser::xmlReadDoubleChild(xmlDocPtr doc, xmlNodePtr node) */ float vpXmlParser::xmlReadFloatChild(xmlDocPtr doc, xmlNodePtr node) { - if (node->xmlChildrenNode == NULL) { + if (node->xmlChildrenNode == nullptr) { std::string errorMsg = "Empty node " + std::string((char *)node->name) + ", cannot read float"; std::cerr << errorMsg << std::endl; throw vpException(vpException::fatalError, errorMsg); @@ -278,7 +278,7 @@ float vpXmlParser::xmlReadFloatChild(xmlDocPtr doc, xmlNodePtr node) */ bool vpXmlParser::xmlReadBoolChild(xmlDocPtr doc, xmlNodePtr node) { - if (node->xmlChildrenNode == NULL) { + if (node->xmlChildrenNode == nullptr) { std::string errorMsg = "Empty node " + std::string((char *)node->name) + ", cannot read bool"; std::cerr << errorMsg << std::endl; throw vpException(vpException::fatalError, errorMsg); @@ -303,7 +303,7 @@ bool vpXmlParser::xmlReadBoolChild(xmlDocPtr doc, xmlNodePtr node) void vpXmlParser::xmlWriteCharChild(xmlNodePtr node, const char *label, const char *value) { xmlNodePtr tmp; - tmp = xmlNewChild(node, NULL, (xmlChar *)label, (xmlChar *)value); + tmp = xmlNewChild(node, nullptr, (xmlChar *)label, (xmlChar *)value); xmlAddChild(node, tmp); } @@ -317,7 +317,7 @@ void 
vpXmlParser::xmlWriteCharChild(xmlNodePtr node, const char *label, const ch void vpXmlParser::xmlWriteStringChild(xmlNodePtr node, const char *label, const std::string &value) { xmlNodePtr tmp; - tmp = xmlNewChild(node, NULL, (xmlChar *)label, (xmlChar *)value.c_str()); + tmp = xmlNewChild(node, nullptr, (xmlChar *)label, (xmlChar *)value.c_str()); xmlAddChild(node, tmp); } @@ -333,7 +333,7 @@ void vpXmlParser::xmlWriteIntChild(xmlNodePtr node, const char *label, int value char str[100]; snprintf(str, 100, "%d", value); xmlNodePtr tmp; - tmp = xmlNewChild(node, NULL, (xmlChar *)label, (xmlChar *)str); + tmp = xmlNewChild(node, nullptr, (xmlChar *)label, (xmlChar *)str); xmlAddChild(node, tmp); } @@ -349,7 +349,7 @@ void vpXmlParser::xmlWriteUnsignedIntChild(xmlNodePtr node, const char *label, u char str[100]; snprintf(str, 100, "%u", value); xmlNodePtr tmp; - tmp = xmlNewChild(node, NULL, (xmlChar *)label, (xmlChar *)str); + tmp = xmlNewChild(node, nullptr, (xmlChar *)label, (xmlChar *)str); xmlAddChild(node, tmp); } @@ -365,7 +365,7 @@ void vpXmlParser::xmlWriteDoubleChild(xmlNodePtr node, const char *label, double char str[100]; snprintf(str, 100, "%lf", value); xmlNodePtr tmp; - tmp = xmlNewChild(node, NULL, (xmlChar *)label, (xmlChar *)str); + tmp = xmlNewChild(node, nullptr, (xmlChar *)label, (xmlChar *)str); xmlAddChild(node, tmp); } @@ -381,7 +381,7 @@ void vpXmlParser::xmlWriteFloatChild(xmlNodePtr node, const char *label, float v char str[100]; snprintf(str, 100, "%f", value); xmlNodePtr tmp; - tmp = xmlNewChild(node, NULL, (xmlChar *)label, (xmlChar *)str); + tmp = xmlNewChild(node, nullptr, (xmlChar *)label, (xmlChar *)str); xmlAddChild(node, tmp); } @@ -397,7 +397,7 @@ void vpXmlParser::xmlWriteBoolChild(xmlNodePtr node, const char *label, bool val char str[2]; snprintf(str, 2, "%d", (int)value); xmlNodePtr tmp; - tmp = xmlNewChild(node, NULL, (xmlChar *)label, (xmlChar *)str); + tmp = xmlNewChild(node, nullptr, (xmlChar *)label, (xmlChar *)str); xmlAddChild(node, tmp); } @@ -421,13 +421,13 @@ void vpXmlParser::parse(const std::string &filename) xmlNodePtr root_node; doc = xmlParseFile(filename.c_str()); - if (doc == NULL) { + if (doc == nullptr) { vpERROR_TRACE("cannot open file"); throw vpException(vpException::ioError, "cannot open file"); } root_node = xmlDocGetRootElement(doc); - if (root_node == NULL) { + if (root_node == nullptr) { vpERROR_TRACE("cannot get root element"); throw vpException(vpException::ioError, "cannot get root element"); } @@ -452,10 +452,10 @@ void vpXmlParser::save(const std::string &filename, bool append) xmlDocPtr doc; xmlNodePtr root_node; - doc = xmlReadFile(filename.c_str(), NULL, XML_PARSE_NOWARNING + XML_PARSE_NOERROR + XML_PARSE_NOBLANKS); - if (doc == NULL) { + doc = xmlReadFile(filename.c_str(), nullptr, XML_PARSE_NOWARNING + XML_PARSE_NOERROR + XML_PARSE_NOBLANKS); + if (doc == nullptr) { doc = xmlNewDoc((xmlChar *)"1.0"); - root_node = xmlNewNode(NULL, (xmlChar *)main_tag.c_str()); + root_node = xmlNewNode(nullptr, (xmlChar *)main_tag.c_str()); xmlDocSetRootElement(doc, root_node); } else { if (!append) { @@ -464,13 +464,13 @@ void vpXmlParser::save(const std::string &filename, bool append) throw vpException(vpException::ioError, "Cannot remove existing xml file"); doc = xmlNewDoc((xmlChar *)"1.0"); - root_node = xmlNewNode(NULL, (xmlChar *)main_tag.c_str()); + root_node = xmlNewNode(nullptr, (xmlChar *)main_tag.c_str()); xmlDocSetRootElement(doc, root_node); } } root_node = xmlDocGetRootElement(doc); - if (root_node == NULL) { + if 
(root_node == nullptr) { vpERROR_TRACE("problem to get the root node"); throw vpException(vpException::ioError, "problem to get the root node"); } diff --git a/modules/core/src/tracking/moments/vpMoment.cpp b/modules/core/src/tracking/moments/vpMoment.cpp index 866ffe60eb..dced3cf428 100644 --- a/modules/core/src/tracking/moments/vpMoment.cpp +++ b/modules/core/src/tracking/moments/vpMoment.cpp @@ -48,7 +48,7 @@ /*! Default constructor */ -vpMoment::vpMoment() : object(NULL), moments(NULL), values() {} +vpMoment::vpMoment() : object(nullptr), moments(nullptr), values() {} /*! Links the moment to a database of moment primitives. diff --git a/modules/core/test/image-with-dataset/testConversion.cpp b/modules/core/test/image-with-dataset/testConversion.cpp index a61f87d6f8..3fc667e281 100644 --- a/modules/core/test/image-with-dataset/testConversion.cpp +++ b/modules/core/test/image-with-dataset/testConversion.cpp @@ -140,7 +140,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op nbIterations = atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, opath, user, nbIterations); + usage(argv[0], nullptr, ipath, opath, user, nbIterations); return false; case 'c': @@ -155,7 +155,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user, nbIterations); + usage(argv[0], nullptr, ipath, opath, user, nbIterations); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -215,7 +215,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(opath); } catch (...) { - usage(argv[0], NULL, ipath, opt_opath, username, nbIterations); + usage(argv[0], nullptr, ipath, opt_opath, username, nbIterations); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -236,7 +236,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username, nbIterations); + usage(argv[0], nullptr, ipath, opt_opath, username, nbIterations); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -307,7 +307,7 @@ int main(int argc, const char **argv) int flags = CV_LOAD_IMAGE_COLOR; #endif imageMat = cv::imread(filename, flags); // Force to a three channel BGR color image. - if (imageMat.data == NULL) { + if (imageMat.data == nullptr) { std::cout << " Cannot read image: " << filename << std::endl; return EXIT_FAILURE; } @@ -326,7 +326,7 @@ int main(int argc, const char **argv) flags = CV_LOAD_IMAGE_GRAYSCALE; #endif imageMat = cv::imread(filename, flags); // Forced to grayscale. - if (imageMat.data == NULL) { + if (imageMat.data == nullptr) { std::cout << " Cannot read image: " << filename << std::endl; return EXIT_FAILURE; } @@ -350,7 +350,7 @@ int main(int argc, const char **argv) flags = CV_LOAD_IMAGE_COLOR; #endif imageMat = cv::imread(filename, flags); // Force to a three channel BGR color image. 
- if (imageMat.data == NULL) { + if (imageMat.data == nullptr) { std::cout << " Cannot read image: " << filename << std::endl; return EXIT_FAILURE; } @@ -370,7 +370,7 @@ int main(int argc, const char **argv) flags = CV_LOAD_IMAGE_GRAYSCALE; #endif imageMat = cv::imread(filename, flags); - if (imageMat.data == NULL) { + if (imageMat.data == nullptr) { std::cout << " Cannot read image: " << filename << std::endl; return EXIT_FAILURE; } @@ -439,10 +439,10 @@ int main(int argc, const char **argv) std::cout << " Load " << filename << std::endl; vpImageIo::read(Ic, filename); vpImage R, G, B, a; - vpImageConvert::split(Ic, &R, NULL, &B); + vpImageConvert::split(Ic, &R, nullptr, &B); chrono.start(); for (int iteration = 0; iteration < nbIterations; iteration++) { - vpImageConvert::split(Ic, &R, NULL, &B); + vpImageConvert::split(Ic, &R, nullptr, &B); } chrono.stop(); @@ -494,7 +494,7 @@ int main(int argc, const char **argv) vpImage I_saturation(&saturation.front(), h, w); vpImage I_value(&value.front(), h, w); vpImage I_HSV; - vpImageConvert::merge(&I_hue, &I_saturation, &I_value, NULL, I_HSV); + vpImageConvert::merge(&I_hue, &I_saturation, &I_value, nullptr, I_HSV); filename = vpIoTools::createFilePath(opath, "Klimt_HSV.ppm"); std::cout << " Resulting image saved in: " << filename << std::endl; diff --git a/modules/core/test/image-with-dataset/testCrop.cpp b/modules/core/test/image-with-dataset/testCrop.cpp index 9f64551516..75333030ae 100644 --- a/modules/core/test/image-with-dataset/testCrop.cpp +++ b/modules/core/test/image-with-dataset/testCrop.cpp @@ -130,7 +130,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -147,7 +147,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -205,7 +205,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) 
{ - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -226,7 +226,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/core/test/image-with-dataset/testCropAdvanced.cpp b/modules/core/test/image-with-dataset/testCropAdvanced.cpp index 224ba1485a..7e748ead18 100644 --- a/modules/core/test/image-with-dataset/testCropAdvanced.cpp +++ b/modules/core/test/image-with-dataset/testCropAdvanced.cpp @@ -122,7 +122,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -139,7 +139,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -196,7 +196,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -217,7 +217,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/core/test/image-with-dataset/testImageComparison.cpp b/modules/core/test/image-with-dataset/testImageComparison.cpp index 8aa597c41e..e5782b9651 100644 --- a/modules/core/test/image-with-dataset/testImageComparison.cpp +++ b/modules/core/test/image-with-dataset/testImageComparison.cpp @@ -107,7 +107,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath) ipath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -124,7 +124,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -177,7 +177,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" 
<< std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/core/test/image-with-dataset/testImageFilter.cpp b/modules/core/test/image-with-dataset/testImageFilter.cpp index 408144adee..51315e8b0e 100644 --- a/modules/core/test/image-with-dataset/testImageFilter.cpp +++ b/modules/core/test/image-with-dataset/testImageFilter.cpp @@ -114,7 +114,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp ppath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -131,7 +131,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/core/test/image-with-dataset/testImageNormalizedCorrelation.cpp b/modules/core/test/image-with-dataset/testImageNormalizedCorrelation.cpp index 37ce35b37b..7b49cb53db 100644 --- a/modules/core/test/image-with-dataset/testImageNormalizedCorrelation.cpp +++ b/modules/core/test/image-with-dataset/testImageNormalizedCorrelation.cpp @@ -89,7 +89,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath) ipath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -106,7 +106,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -181,7 +181,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -226,8 +226,8 @@ int main(int argc, const char **argv) vpImagePoint max_loc, max_loc_gold; double max_correlation = -1.0, max_correlation_gold = -1.0; - I_score.getMinMaxLoc(NULL, &max_loc, NULL, &max_correlation); - I_score_gold.getMinMaxLoc(NULL, &max_loc_gold, NULL, &max_correlation_gold); + I_score.getMinMaxLoc(nullptr, &max_loc, nullptr, &max_correlation); + I_score_gold.getMinMaxLoc(nullptr, &max_loc_gold, nullptr, &max_correlation_gold); std::cout << "Compare regular and SSE version of vpImageTools::normalizedCorrelation()" << std::endl; std::cout << "vpImageTools::normalizedCorrelation(): " << max_correlation << " ; " << t << " ms" << std::endl; diff --git a/modules/core/test/image-with-dataset/testImageTemplateMatching.cpp b/modules/core/test/image-with-dataset/testImageTemplateMatching.cpp index ed11ce5bc1..ffab07fb18 100644 --- a/modules/core/test/image-with-dataset/testImageTemplateMatching.cpp +++ b/modules/core/test/image-with-dataset/testImageTemplateMatching.cpp @@ -95,7 +95,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click, bo ipath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; case 't': @@ -117,7 +117,7 @@ bool getOptions(int 
argc, const char **argv, std::string &ipath, bool &click, bo if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -242,7 +242,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -297,7 +297,7 @@ int main(int argc, const char **argv) vpImagePoint max_loc; double max_correlation = -1.0; - I_score.getMinMaxLoc(NULL, &max_loc, NULL, &max_correlation); + I_score.getMinMaxLoc(nullptr, &max_loc, nullptr, &max_correlation); t_proc = vpTime::measureTimeMs() - t_proc; benchmark_vec.push_back(t_proc); diff --git a/modules/core/test/image-with-dataset/testIoPGM.cpp b/modules/core/test/image-with-dataset/testIoPGM.cpp index a1077b1b29..b6dcb78ee8 100644 --- a/modules/core/test/image-with-dataset/testIoPGM.cpp +++ b/modules/core/test/image-with-dataset/testIoPGM.cpp @@ -124,7 +124,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -141,7 +141,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -199,7 +199,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) 
{ - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -220,7 +220,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/core/test/image-with-dataset/testIoPPM.cpp b/modules/core/test/image-with-dataset/testIoPPM.cpp index d4195abfa9..363e480708 100644 --- a/modules/core/test/image-with-dataset/testIoPPM.cpp +++ b/modules/core/test/image-with-dataset/testIoPPM.cpp @@ -126,7 +126,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -143,7 +143,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -201,7 +201,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -222,7 +222,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/core/test/image-with-dataset/testPerformanceLUT.cpp b/modules/core/test/image-with-dataset/testPerformanceLUT.cpp index 1508bea06e..0705fbebf4 100644 --- a/modules/core/test/image-with-dataset/testPerformanceLUT.cpp +++ b/modules/core/test/image-with-dataset/testPerformanceLUT.cpp @@ -125,7 +125,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op nbThreads = (unsigned int)atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -142,7 +142,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -290,7 +290,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(opath); } catch (...) 
{ - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -311,7 +311,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/core/test/image-with-dataset/testReadImage.cpp b/modules/core/test/image-with-dataset/testReadImage.cpp index 0e5325085d..13fecb8b98 100644 --- a/modules/core/test/image-with-dataset/testReadImage.cpp +++ b/modules/core/test/image-with-dataset/testReadImage.cpp @@ -121,7 +121,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp ppath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -138,7 +138,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/core/test/image-with-dataset/testUndistortImage.cpp b/modules/core/test/image-with-dataset/testUndistortImage.cpp index 49690b4f9c..9f63d5687f 100644 --- a/modules/core/test/image-with-dataset/testUndistortImage.cpp +++ b/modules/core/test/image-with-dataset/testUndistortImage.cpp @@ -149,7 +149,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op scale = atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; case 'c': @@ -164,7 +164,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -230,7 +230,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) 
{ - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -251,7 +251,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/core/test/math/testMatrixDeterminant.cpp b/modules/core/test/math/testMatrixDeterminant.cpp index cdb3ab20dd..77ed348f5e 100644 --- a/modules/core/test/math/testMatrixDeterminant.cpp +++ b/modules/core/test/math/testMatrixDeterminant.cpp @@ -118,7 +118,7 @@ bool getOptions(int argc, const char **argv, unsigned int &nb_matrices, unsigned switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; case 'n': @@ -157,7 +157,7 @@ bool getOptions(int argc, const char **argv, unsigned int &nb_matrices, unsigned if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/core/test/math/testMatrixInverse.cpp b/modules/core/test/math/testMatrixInverse.cpp index 7ce8fa88ba..745fc88c98 100644 --- a/modules/core/test/math/testMatrixInverse.cpp +++ b/modules/core/test/math/testMatrixInverse.cpp @@ -126,7 +126,7 @@ bool getOptions(int argc, const char **argv, unsigned int &nb_matrices, unsigned switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; case 'n': @@ -165,7 +165,7 @@ bool getOptions(int argc, const char **argv, unsigned int &nb_matrices, unsigned if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/core/test/math/testMatrixPseudoInverse.cpp b/modules/core/test/math/testMatrixPseudoInverse.cpp index 5314f4ba7e..5ca14afa35 100644 --- a/modules/core/test/math/testMatrixPseudoInverse.cpp +++ b/modules/core/test/math/testMatrixPseudoInverse.cpp @@ -123,7 +123,7 @@ bool getOptions(int argc, const char **argv, unsigned int &nb_matrices, unsigned switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; case 'n': @@ -162,7 +162,7 @@ bool getOptions(int argc, const char **argv, unsigned int &nb_matrices, unsigned if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/core/test/math/testRobust.cpp b/modules/core/test/math/testRobust.cpp index fb63016a9f..86697b5640 100644 --- a/modules/core/test/math/testRobust.cpp +++ b/modules/core/test/math/testRobust.cpp @@ -113,7 +113,7 @@ bool getOptions(int argc, const char **argv, std::string &ofilename) ofilename = optarg_; break; case 'h': - usage(argv[0], NULL, ofilename); + usage(argv[0], nullptr, ofilename); return false; break; @@ -129,7 +129,7 @@ 
bool getOptions(int argc, const char **argv, std::string &ofilename) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ofilename); + usage(argv[0], nullptr, ofilename); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -163,7 +163,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(ofilename); } catch (...) { - usage(argv[0], NULL, ofilename); + usage(argv[0], nullptr, ofilename); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << ofilename << std::endl; std::cerr << " Check your -o " << ofilename << " option " << std::endl; @@ -186,7 +186,7 @@ int main(int argc, const char **argv) std::cout << "Create file: " << ofilename << std::endl; f.open(ofilename.c_str()); if (f.fail()) { - usage(argv[0], NULL, ofilename); + usage(argv[0], nullptr, ofilename); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create the file: " << ofilename << std::endl; std::cerr << " Check your -o " << ofilename << " option " << std::endl; diff --git a/modules/core/test/math/testSvd.cpp b/modules/core/test/math/testSvd.cpp index 546fa194f2..3c7f9420da 100644 --- a/modules/core/test/math/testSvd.cpp +++ b/modules/core/test/math/testSvd.cpp @@ -124,7 +124,7 @@ bool getOptions(int argc, const char **argv, unsigned int &nb_matrices, unsigned switch (c) { case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; case 'n': @@ -163,7 +163,7 @@ bool getOptions(int argc, const char **argv, unsigned int &nb_matrices, unsigned if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/core/test/tools/geometry/testPolygon.cpp b/modules/core/test/tools/geometry/testPolygon.cpp index 132eda4f40..784cbccfc7 100644 --- a/modules/core/test/tools/geometry/testPolygon.cpp +++ b/modules/core/test/tools/geometry/testPolygon.cpp @@ -118,7 +118,7 @@ bool getOptions(int argc, const char **argv, bool &opt_display, bool &opt_click, method = atoi(optarg_); break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -131,7 +131,7 @@ bool getOptions(int argc, const char **argv, bool &opt_display, bool &opt_click, if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/core/test/tools/histogram-with-dataset/testHistogram.cpp b/modules/core/test/tools/histogram-with-dataset/testHistogram.cpp index ef22227daf..6dfb62ce80 100644 --- a/modules/core/test/tools/histogram-with-dataset/testHistogram.cpp +++ b/modules/core/test/tools/histogram-with-dataset/testHistogram.cpp @@ -118,7 +118,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, unsigned int &n nbThreads = (unsigned int)atoi(optarg_); break; case 'h': - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); return false; break; @@ -135,7 +135,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, unsigned int &n if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << 
std::endl; return false; @@ -238,7 +238,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath); + usage(argv[0], nullptr, ipath); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/core/test/tools/threading/testThread2.cpp b/modules/core/test/tools/threading/testThread2.cpp index 5fba1d2687..10ed60cfc5 100644 --- a/modules/core/test/tools/threading/testThread2.cpp +++ b/modules/core/test/tools/threading/testThread2.cpp @@ -144,7 +144,7 @@ int main() { unsigned int nb_threads = 4; unsigned int size = 1000007; - srand((unsigned int)time(NULL)); + srand((unsigned int)time(nullptr)); vpColVector v1(size), v2(size); for (unsigned int i = 0; i < size; i++) { diff --git a/modules/core/test/tools/xml/testXmlParser.cpp b/modules/core/test/tools/xml/testXmlParser.cpp index 5dbcecbc5f..5c667d327d 100644 --- a/modules/core/test/tools/xml/testXmlParser.cpp +++ b/modules/core/test/tools/xml/testXmlParser.cpp @@ -120,7 +120,7 @@ vpExampleDataParser::vpExampleDataParser() : m_range(0.), m_step(0), m_size_filt */ void vpExampleDataParser::readMainClass(xmlDocPtr doc, xmlNodePtr node) { - for (xmlNodePtr dataNode = node->xmlChildrenNode; dataNode != NULL; dataNode = dataNode->next) { + for (xmlNodePtr dataNode = node->xmlChildrenNode; dataNode != nullptr; dataNode = dataNode->next) { if (dataNode->type == XML_ELEMENT_NODE) { std::map::iterator iter_data = this->nodeMap.find((char *)dataNode->name); if (iter_data != nodeMap.end()) { @@ -228,7 +228,7 @@ bool getOptions(int argc, const char **argv, std::string &opath, const std::stri opath = optarg_; break; case 'h': - usage(argv[0], NULL, opath, user); + usage(argv[0], nullptr, opath, user); return false; break; @@ -245,7 +245,7 @@ bool getOptions(int argc, const char **argv, std::string &opath, const std::stri if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, opath, user); + usage(argv[0], nullptr, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -301,7 +301,7 @@ int main(int argc, const char **argv) vpIoTools::makeDirectory(dirname); } catch (...) 
{ - usage(argv[0], NULL, opath, username); + usage(argv[0], nullptr, opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << dirname << std::endl; std::cerr << " Check your -o " << opath << " option " << std::endl; diff --git a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h index 0bb54ee988..e440dac32f 100644 --- a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h +++ b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h @@ -254,8 +254,8 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase bool detect(const vpImage &I) override; bool detect(const vpImage &I, double tagSize, const vpCameraParameters &cam, - std::vector &cMo_vec, std::vector *cMo_vec2 = NULL, - std::vector *projErrors = NULL, std::vector *projErrors2 = NULL); + std::vector &cMo_vec, std::vector *cMo_vec2 = nullptr, + std::vector *projErrors = nullptr, std::vector *projErrors2 = nullptr); void displayFrames(const vpImage &I, const std::vector &cMo_vec, const vpCameraParameters &cam, double size, const vpColor &color, unsigned int thickness = 1) const; @@ -268,7 +268,7 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase const vpColor &color = vpColor::none, unsigned int thickness = 1) const; bool getPose(size_t tagIndex, double tagSize, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, - vpHomogeneousMatrix *cMo2 = NULL, double *projError = NULL, double *projError2 = NULL); + vpHomogeneousMatrix *cMo2 = nullptr, double *projError = nullptr, double *projError2 = nullptr); /*! * Return the pose estimation method. diff --git a/modules/detection/src/barcode/vpDetectorDataMatrixCode.cpp b/modules/detection/src/barcode/vpDetectorDataMatrixCode.cpp index 5d3e28d47b..c71573fe15 100644 --- a/modules/detection/src/barcode/vpDetectorDataMatrixCode.cpp +++ b/modules/detection/src/barcode/vpDetectorDataMatrixCode.cpp @@ -67,7 +67,7 @@ bool vpDetectorDataMatrixCode::detect(const vpImage &I) DmtxImage *img; DmtxMessage *msg; - DmtxTime *dmtx_timeout = NULL; + DmtxTime *dmtx_timeout = nullptr; if (m_timeout_ms) { dmtx_timeout = new DmtxTime; *dmtx_timeout = dmtxTimeNow(); @@ -75,18 +75,18 @@ bool vpDetectorDataMatrixCode::detect(const vpImage &I) } img = dmtxImageCreate(I.bitmap, (int)I.getWidth(), (int)I.getHeight(), DmtxPack8bppK); - assert(img != NULL); + assert(img != nullptr); dec = dmtxDecodeCreate(img, 1); - assert(dec != NULL); + assert(dec != nullptr); bool end = false; do { reg = dmtxRegionFindNext(dec, dmtx_timeout); - if (reg != NULL) { + if (reg != nullptr) { msg = dmtxDecodeMatrixRegion(dec, reg, DmtxUndefined); - if (msg != NULL) { + if (msg != nullptr) { std::vector polygon; diff --git a/modules/detection/src/barcode/vpDetectorQRCode.cpp b/modules/detection/src/barcode/vpDetectorQRCode.cpp index fd8db87ddd..81c904b6fd 100644 --- a/modules/detection/src/barcode/vpDetectorQRCode.cpp +++ b/modules/detection/src/barcode/vpDetectorQRCode.cpp @@ -84,7 +84,7 @@ bool vpDetectorQRCode::detect(const vpImage &I) } // clean up - img.set_data(NULL, 0); + img.set_data(nullptr, 0); return detected; } diff --git a/modules/detection/src/tag/vpDetectorAprilTag.cpp b/modules/detection/src/tag/vpDetectorAprilTag.cpp index 984ce3182f..b8feb4efdc 100644 --- a/modules/detection/src/tag/vpDetectorAprilTag.cpp +++ b/modules/detection/src/tag/vpDetectorAprilTag.cpp @@ -65,7 +65,7 @@ class vpDetectorAprilTag::Impl { public: Impl(const vpAprilTagFamily &tagFamily, const 
vpPoseEstimationMethod &method) - : m_poseEstimationMethod(method), m_tagsId(), m_tagFamily(tagFamily), m_td(NULL), m_tf(NULL), m_detections(NULL), + : m_poseEstimationMethod(method), m_tagsId(), m_tagFamily(tagFamily), m_td(nullptr), m_tf(nullptr), m_detections(nullptr), m_zAlignedWithCameraFrame(false) { switch (m_tagFamily) { @@ -134,8 +134,8 @@ class vpDetectorAprilTag::Impl } Impl(const Impl &o) - : m_poseEstimationMethod(o.m_poseEstimationMethod), m_tagsId(o.m_tagsId), m_tagFamily(o.m_tagFamily), m_td(NULL), - m_tf(NULL), m_detections(NULL), m_zAlignedWithCameraFrame(o.m_zAlignedWithCameraFrame) + : m_poseEstimationMethod(o.m_poseEstimationMethod), m_tagsId(o.m_tagsId), m_tagFamily(o.m_tagFamily), m_td(nullptr), + m_tf(nullptr), m_detections(nullptr), m_zAlignedWithCameraFrame(o.m_zAlignedWithCameraFrame) { switch (m_tagFamily) { case TAG_36h11: @@ -201,7 +201,7 @@ class vpDetectorAprilTag::Impl m_mapOfCorrespondingPoseMethods[DEMENTHON_VIRTUAL_VS] = vpPose::DEMENTHON; m_mapOfCorrespondingPoseMethods[LAGRANGE_VIRTUAL_VS] = vpPose::LAGRANGE; - if (o.m_detections != NULL) { + if (o.m_detections != nullptr) { m_detections = apriltag_detections_copy(o.m_detections); } } @@ -272,7 +272,7 @@ class vpDetectorAprilTag::Impl if (m_detections) { apriltag_detections_destroy(m_detections); - m_detections = NULL; + m_detections = nullptr; } } @@ -306,7 +306,7 @@ class vpDetectorAprilTag::Impl } #endif - const bool computePose = (cMo_vec != NULL); + const bool computePose = (cMo_vec != nullptr); image_u8_t im = {/*.width =*/(int32_t)I.getWidth(), /*.height =*/(int32_t)I.getHeight(), @@ -315,7 +315,7 @@ class vpDetectorAprilTag::Impl if (m_detections) { apriltag_detections_destroy(m_detections); - m_detections = NULL; + m_detections = nullptr; } m_detections = apriltag_detector_detect(m_td, &im); @@ -359,8 +359,8 @@ class vpDetectorAprilTag::Impl if (computePose) { vpHomogeneousMatrix cMo, cMo2; double err1, err2; - if (getPose(static_cast(i), tagSize, cam, cMo, cMo_vec2 ? &cMo2 : NULL, projErrors ? &err1 : NULL, - projErrors2 ? &err2 : NULL)) { + if (getPose(static_cast(i), tagSize, cam, cMo, cMo_vec2 ? &cMo2 : nullptr, projErrors ? &err1 : nullptr, + projErrors2 ? 
&err2 : nullptr)) { cMo_vec->push_back(cMo); if (cMo_vec2) { cMo_vec2->push_back(cMo2); @@ -446,7 +446,7 @@ class vpDetectorAprilTag::Impl bool getPose(size_t tagIndex, double tagSize, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, vpHomogeneousMatrix *cMo2, double *projErrors, double *projErrors2) { - if (m_detections == NULL) { + if (m_detections == nullptr) { throw(vpException(vpException::fatalError, "Cannot get tag index=%d pose: detection empty", tagIndex)); } if (m_tagFamily == TAG_36ARTOOLKIT) { @@ -849,7 +849,7 @@ bool vpDetectorAprilTag::detect(const vpImage &I) std::vector cMo_vec; const double tagSize = 1.0; bool detected = m_impl->detect(I, tagSize, m_defaultCam, m_polygon, m_message, m_displayTag, m_displayTagColor, - m_displayTagThickness, NULL, NULL, NULL, NULL); + m_displayTagThickness, nullptr, nullptr, nullptr, nullptr); m_nb_objects = m_message.size(); return detected; diff --git a/modules/gui/include/visp3/gui/vpDisplayOpenCV.h b/modules/gui/include/visp3/gui/vpDisplayOpenCV.h index b977619fbf..ddc2cdab4b 100644 --- a/modules/gui/include/visp3/gui/vpDisplayOpenCV.h +++ b/modules/gui/include/visp3/gui/vpDisplayOpenCV.h @@ -170,9 +170,9 @@ class VISP_EXPORT vpDisplayOpenCV : public vpDisplay // vpDisplayOpenCV(const vpDisplayOpenCV &) // : vpDisplay(), // #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - // background(NULL), col(NULL), cvcolor(), font(NULL), + // background(nullptr), col(nullptr), cvcolor(), font(nullptr), // #else - // background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), + // background(), col(nullptr), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), // fontScale(0.8f), // #endif // fontHeight(10), x_move(0), y_move(0) , move(false), diff --git a/modules/gui/include/visp3/gui/vpDisplayWin32.h b/modules/gui/include/visp3/gui/vpDisplayWin32.h index 47a8d8611a..437a803b23 100644 --- a/modules/gui/include/visp3/gui/vpDisplayWin32.h +++ b/modules/gui/include/visp3/gui/vpDisplayWin32.h @@ -112,7 +112,7 @@ class VISP_EXPORT vpDisplayWin32 : public vpDisplay friend void vpCreateWindow(threadParam *param); public: - explicit vpDisplayWin32(vpWin32Renderer *rend = NULL); + explicit vpDisplayWin32(vpWin32Renderer *rend = nullptr); vpDisplayWin32(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); diff --git a/modules/gui/include/visp3/gui/vpDisplayX.h b/modules/gui/include/visp3/gui/vpDisplayX.h index feaadfc8bb..3d2b2d3b55 100644 --- a/modules/gui/include/visp3/gui/vpDisplayX.h +++ b/modules/gui/include/visp3/gui/vpDisplayX.h @@ -129,9 +129,9 @@ class VISP_EXPORT vpDisplayX : public vpDisplay // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpDisplayX(const vpDisplayX &) - // : vpDisplay(), display(NULL), window(), Ximage(NULL), lut(), + // : vpDisplay(), display(nullptr), window(), Ximage(nullptr), lut(), // context(), screen(), event(), pixmap(), - // x_color(NULL), screen_depth(8), xcolor(), values(), + // x_color(nullptr), screen_depth(8), xcolor(), values(), // ximage_data_init(false), RMask(0), GMask(0), BMask(0), RShift(0), // GShift(0), BShift(0) // { diff --git a/modules/gui/include/visp3/gui/vpPlot.h b/modules/gui/include/visp3/gui/vpPlot.h index 0ec40b738f..ffd1dbb128 100644 --- a/modules/gui/include/visp3/gui/vpPlot.h +++ b/modules/gui/include/visp3/gui/vpPlot.h @@ -125,7 +125,7 @@ class VISP_EXPORT vpPlot // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpPlot(const vpPlot &) - // : I(), display(NULL), graphNbr(0), graphList(NULL), margei(0), + // : I(), display(nullptr), graphNbr(0), graphList(nullptr), margei(0), 
// margej(0), // factori(0), factorj(0) // { diff --git a/modules/gui/include/visp3/gui/vpPlotGraph.h b/modules/gui/include/visp3/gui/vpPlotGraph.h index 4e8b0e01d8..935e4c15f9 100644 --- a/modules/gui/include/visp3/gui/vpPlotGraph.h +++ b/modules/gui/include/visp3/gui/vpPlotGraph.h @@ -143,7 +143,7 @@ class vpPlotGraph // : xorg(0.), yorg(0.), zoomx(1.), zoomy(1.), xmax(10), ymax(10), // xmin(0), ymin(-10), // xdelt(1), ydelt(1), gridx(true), gridy(true), gridColor(), - // curveNbr(1), curveList(NULL), scaleInitialized(false), + // curveNbr(1), curveList(nullptr), scaleInitialized(false), // firstPoint(true), nbDivisionx(10), nbDivisiony(10), topLeft(), // width(0), height(0), graphZone(), dTopLeft(), dWidth(0), // dHeight(0), dGraphZone(), dTopLeft3D(), dGraphZone3D(), cam(), diff --git a/modules/gui/include/visp3/gui/vpWin32Window.h b/modules/gui/include/visp3/gui/vpWin32Window.h index e36934dadc..1e97ad6e0f 100644 --- a/modules/gui/include/visp3/gui/vpWin32Window.h +++ b/modules/gui/include/visp3/gui/vpWin32Window.h @@ -112,7 +112,7 @@ class VISP_EXPORT vpWin32Window vpWin32Renderer *renderer; public: - explicit vpWin32Window(vpWin32Renderer *rend = NULL); + explicit vpWin32Window(vpWin32Renderer *rend = nullptr); virtual ~vpWin32Window(); HWND getHWnd() { return hWnd; } diff --git a/modules/gui/src/display/vpDisplayGTK.cpp b/modules/gui/src/display/vpDisplayGTK.cpp index e9a849c8d7..7e4fa7da78 100644 --- a/modules/gui/src/display/vpDisplayGTK.cpp +++ b/modules/gui/src/display/vpDisplayGTK.cpp @@ -72,10 +72,10 @@ class vpDisplayGTK::Impl { public: Impl() - : m_widget(NULL), m_background(NULL), m_gc(NULL), m_blue(), m_red(), m_yellow(), m_green(), m_cyan(), m_orange(), + : m_widget(nullptr), m_background(nullptr), m_gc(nullptr), m_blue(), m_red(), m_yellow(), m_green(), m_cyan(), m_orange(), m_white(), m_black(), m_gdkcolor(), m_lightBlue(), m_darkBlue(), m_lightRed(), m_darkRed(), m_lightGreen(), - m_darkGreen(), m_purple(), m_lightGray(), m_gray(), m_darkGray(), m_colormap(NULL), m_font(NULL), m_vectgtk(NULL), - m_col(NULL) + m_darkGreen(), m_purple(), m_lightGray(), m_gray(), m_darkGray(), m_colormap(nullptr), m_font(nullptr), m_vectgtk(nullptr), + m_col(nullptr) { } ~Impl() { } @@ -86,7 +86,7 @@ class vpDisplayGTK::Impl gint height = static_cast(win_height); /* Initialisation of the gdk et gdk_rgb library */ - int *argc = NULL; + int *argc = nullptr; char **argv; gtk_init(argc, &argv); @@ -190,9 +190,9 @@ class vpDisplayGTK::Impl // Try to load a default font m_font = gdk_font_load("-*-times-medium-r-normal-*-16-*-*-*-*-*-*-*"); - if (m_font == NULL) + if (m_font == nullptr) m_font = gdk_font_load("-*-courier-bold-r-normal-*-*-140-*-*-*-*-*-*"); - if (m_font == NULL) + if (m_font == nullptr) m_font = gdk_font_load("-*-courier 10 pitch-medium-r-normal-*-16-*-*-*-*-*-*-*"); if (!title.empty()) @@ -257,16 +257,16 @@ class vpDisplayGTK::Impl void closeDisplay() { - if (m_col != NULL) { + if (m_col != nullptr) { delete[] m_col; - m_col = NULL; + m_col = nullptr; } - if (m_widget != NULL) { + if (m_widget != nullptr) { gdk_window_hide(m_widget->window); gdk_window_destroy(m_widget->window); gtk_widget_destroy(m_widget); - m_widget = NULL; + m_widget = nullptr; } } @@ -287,7 +287,7 @@ class vpDisplayGTK::Impl gdk_colormap_alloc_color(m_colormap, &m_gdkcolor, FALSE, TRUE); gdk_gc_set_foreground(m_gc, &m_gdkcolor); } - if (m_font != NULL) + if (m_font != nullptr) gdk_draw_string(m_background, m_font, m_gc, vpMath::round(ip.get_u() / scale), vpMath::round(ip.get_v() / scale), (const gchar 
*)text); else @@ -409,7 +409,7 @@ class vpDisplayGTK::Impl { bool ret = false; do { - GdkEvent *ev = NULL; + GdkEvent *ev = nullptr; while ((ev = gdk_event_get())) { if (ev->any.window == m_widget->window && ev->type == event_type) { double u = ((GdkEventButton *)ev)->x; @@ -473,8 +473,8 @@ class vpDisplayGTK::Impl bool ret = false; int cpt = 0; do { - GdkEvent *ev = NULL; - while ((ev = gdk_event_get()) != NULL) { + GdkEvent *ev = nullptr; + while ((ev = gdk_event_get()) != nullptr) { cpt++; if (ev->any.window == m_widget->window && ev->type == GDK_KEY_PRESS) { @@ -494,7 +494,7 @@ class vpDisplayGTK::Impl bool getPointerMotionEvent(vpImagePoint &ip, unsigned int scale) { bool ret = false; - GdkEvent *ev = NULL; + GdkEvent *ev = nullptr; if ((ev = gdk_event_get())) { if (ev->any.window == m_widget->window && ev->type == GDK_MOTION_NOTIFY) { double u = ((GdkEventMotion *)ev)->x; @@ -512,7 +512,7 @@ class vpDisplayGTK::Impl void getPointerPosition(vpImagePoint &ip, unsigned int scale) { gint u, v; - gdk_window_get_pointer(m_widget->window, &u, &v, NULL); + gdk_window_get_pointer(m_widget->window, &u, &v, nullptr); ip.set_u(static_cast(u) * scale); ip.set_v(static_cast(v) * scale); } @@ -520,7 +520,7 @@ class vpDisplayGTK::Impl void getScreenSize(bool is_init, unsigned int &w, unsigned int &h) { if (!is_init) { - int *argc = NULL; + int *argc = nullptr; char **argv; gtk_init(argc, &argv); diff --git a/modules/gui/src/display/vpDisplayOpenCV.cpp b/modules/gui/src/display/vpDisplayOpenCV.cpp index 56e28f2bac..21b7ab1daa 100644 --- a/modules/gui/src/display/vpDisplayOpenCV.cpp +++ b/modules/gui/src/display/vpDisplayOpenCV.cpp @@ -102,9 +102,9 @@ unsigned int vpDisplayOpenCV::m_nbWindows = 0; vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, vpScaleType scaleType) : vpDisplay(), #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(nullptr), col(nullptr), cvcolor(), font(nullptr), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(nullptr), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), @@ -142,9 +142,9 @@ vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, int x, int y, const vpScaleType scaleType) : vpDisplay(), #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(nullptr), col(nullptr), cvcolor(), font(nullptr), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(nullptr), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), @@ -176,9 +176,9 @@ vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, int x, int y, const vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, vpScaleType scaleType) : #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(nullptr), col(nullptr), cvcolor(), font(nullptr), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(nullptr), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), 
fontScale(0.8f), #endif fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), @@ -212,9 +212,9 @@ vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, vpScaleType scaleType) vpDisplayOpenCV::vpDisplayOpenCV(vpImage &I, int x, int y, const std::string &title, vpScaleType scaleType) : #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(nullptr), col(nullptr), cvcolor(), font(nullptr), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(nullptr), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), @@ -250,9 +250,9 @@ int main() vpDisplayOpenCV::vpDisplayOpenCV(int x, int y, const std::string &title) : #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(nullptr), col(nullptr), cvcolor(), font(nullptr), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(nullptr), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), @@ -310,9 +310,9 @@ int main() vpDisplayOpenCV::vpDisplayOpenCV() : #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - m_background(NULL), col(NULL), cvcolor(), font(NULL), + m_background(nullptr), col(nullptr), cvcolor(), font(nullptr), #else - m_background(), col(NULL), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), + m_background(), col(nullptr), cvcolor(), font(cv::FONT_HERSHEY_PLAIN), fontScale(0.8f), #endif fontHeight(10), x_move(0), y_move(0), move(false), x_lbuttondown(0), y_lbuttondown(0), lbuttondown(false), x_mbuttondown(0), y_mbuttondown(0), mbuttondown(false), x_rbuttondown(0), y_rbuttondown(0), rbuttondown(false), @@ -585,7 +585,7 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) int depth = 8; int channels = 3; CvSize size = cvSize((int)this->m_width, (int)this->m_height); - if (m_background != NULL) { + if (m_background != nullptr) { if (m_background->nChannels != channels || m_background->depth != depth || m_background->height != (int)m_height || m_background->width != (int)m_width) { if (m_background->nChannels != 0) @@ -682,7 +682,7 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpI int depth = 8; int channels = 3; CvSize size = cvSize((int)this->m_width, (int)this->m_height); - if (m_background != NULL) { + if (m_background != nullptr) { if (m_background->nChannels != channels || m_background->depth != depth || m_background->height != (int)m_height || m_background->width != (int)m_width) { if (m_background->nChannels != 0) @@ -792,7 +792,7 @@ void vpDisplayOpenCV::displayImage(const vpImage &I) int depth = 8; int channels = 3; CvSize size = cvSize((int)this->m_width, (int)this->m_height); - if (m_background != NULL) { + if (m_background != nullptr) { if (m_background->nChannels != channels || m_background->depth != depth || m_background->height != (int)m_height || m_background->width 
!= (int)m_width) { if (m_background->nChannels != 0) @@ -886,7 +886,7 @@ void vpDisplayOpenCV::displayImageROI(const vpImage &I, const vpImagePoi int depth = 8; int channels = 3; CvSize size = cvSize((int)this->m_width, (int)this->m_height); - if (m_background != NULL) { + if (m_background != nullptr) { if (m_background->nChannels != channels || m_background->depth != depth || m_background->height != (int)m_height || m_background->width != (int)m_width) { if (m_background->nChannels != 0) @@ -992,14 +992,14 @@ void vpDisplayOpenCV::displayImage(const unsigned char * /* I */) { vpTRACE(" no */ void vpDisplayOpenCV::closeDisplay() { - if (col != NULL) { + if (col != nullptr) { delete[] col; - col = NULL; + col = nullptr; } #if (VISP_HAVE_OPENCV_VERSION < 0x020408) - if (font != NULL) { + if (font != nullptr) { delete font; - font = NULL; + font = nullptr; } #endif if (m_displayHasBeenInitialized) { diff --git a/modules/gui/src/display/vpDisplayX.cpp b/modules/gui/src/display/vpDisplayX.cpp index b37a26af84..17892fd412 100644 --- a/modules/gui/src/display/vpDisplayX.cpp +++ b/modules/gui/src/display/vpDisplayX.cpp @@ -75,7 +75,7 @@ class vpDisplayX::Impl { public: Impl() - : display(NULL), window(), Ximage(NULL), lut(), context(), screen(0), event(), pixmap(), x_color(NULL), + : display(nullptr), window(), Ximage(nullptr), lut(), context(), screen(0), event(), pixmap(), x_color(nullptr), screen_depth(8), xcolor(), values(), ximage_data_init(false), RMask(0), GMask(0), BMask(0), RShift(0), GShift(0), BShift(0) { @@ -108,7 +108,7 @@ class vpDisplayX::Impl if (ximage_data_init == true) free(Ximage->data); - Ximage->data = NULL; + Ximage->data = nullptr; XDestroyImage(Ximage); XFreePixmap(display, pixmap); @@ -117,9 +117,9 @@ class vpDisplayX::Impl XDestroyWindow(display, window); XCloseDisplay(display); - if (x_color != NULL) { + if (x_color != nullptr) { delete[] x_color; - x_color = NULL; + x_color = nullptr; } } @@ -363,7 +363,7 @@ class vpDisplayX::Impl /* * 32-bit source, 24/32-bit destination */ - unsigned char *dst_32 = NULL; + unsigned char *dst_32 = nullptr; dst_32 = (unsigned char *)Ximage->data; if (scale == 1) { vpRGBa *bitmap = I.bitmap; @@ -952,7 +952,7 @@ class vpDisplayX::Impl I.resize(height, width); - unsigned char *src_32 = NULL; + unsigned char *src_32 = nullptr; src_32 = (unsigned char *)xi->data; if (screen_depth == 16) { @@ -1101,9 +1101,9 @@ class vpDisplayX::Impl int screen_; unsigned int depth; - if ((display_ = XOpenDisplay(NULL)) == NULL) { + if ((display_ = XOpenDisplay(nullptr)) == nullptr) { throw(vpDisplayException(vpDisplayException::connexionError, "Can't connect display on server %s.", - XDisplayName(NULL))); + XDisplayName(nullptr))); } screen_ = DefaultScreen(display_); depth = (unsigned int)DefaultDepth(display_, screen_); @@ -1118,9 +1118,9 @@ class vpDisplayX::Impl Display *display_; int screen_; - if ((display_ = XOpenDisplay(NULL)) == NULL) { + if ((display_ = XOpenDisplay(nullptr)) == nullptr) { throw(vpDisplayException(vpDisplayException::connexionError, "Can't connect display on server %s.", - XDisplayName(NULL))); + XDisplayName(nullptr))); } screen_ = DefaultScreen(display_); w = (unsigned int)DisplayWidth(display_, screen_); @@ -1131,7 +1131,7 @@ class vpDisplayX::Impl void init(unsigned int win_width, unsigned int win_height, int win_x, int win_y, const std::string &win_title) { - if (x_color == NULL) { + if (x_color == nullptr) { // id_unknown = number of predefined colors x_color = new unsigned long[vpColor::id_unknown]; } @@ -1147,8 +1147,8 @@ 
class vpDisplayX::Impl hints.y = win_y; } - if ((display = XOpenDisplay(NULL)) == NULL) { - vpERROR_TRACE("Can't connect display on server %s.\n", XDisplayName(NULL)); + if ((display = XOpenDisplay(nullptr)) == nullptr) { + vpERROR_TRACE("Can't connect display on server %s.\n", XDisplayName(nullptr)); throw(vpDisplayException(vpDisplayException::connexionError, "Can't connect display on server.")); } @@ -1533,7 +1533,7 @@ class vpDisplayX::Impl values.background = BlackPixel(display, screen); context = XCreateGC(display, window, GCPlaneMask | GCFillStyle | GCForeground | GCBackground, &values); - if (context == NULL) { + if (context == nullptr) { vpERROR_TRACE("Can't create graphics context."); throw(vpDisplayException(vpDisplayException::XWindowsError, "Can't create graphics context")); } @@ -1547,7 +1547,7 @@ class vpDisplayX::Impl // while ( event.xany.type != Expose ); { - Ximage = XCreateImage(display, DefaultVisual(display, screen), screen_depth, ZPixmap, 0, NULL, win_width, + Ximage = XCreateImage(display, DefaultVisual(display, screen), screen_depth, ZPixmap, 0, nullptr, win_width, win_height, XBitmapPad(display), 0); Ximage->data = (char *)malloc(win_height * (unsigned int)Ximage->bytes_per_line); diff --git a/modules/gui/src/display/windows/vpD3DRenderer.cpp b/modules/gui/src/display/windows/vpD3DRenderer.cpp index 0627a086cb..fe8357fe23 100644 --- a/modules/gui/src/display/windows/vpD3DRenderer.cpp +++ b/modules/gui/src/display/windows/vpD3DRenderer.cpp @@ -73,11 +73,11 @@ */ vpD3DRenderer::vpD3DRenderer() { - pD3D = NULL; - pd3dDevice = NULL; - pSprite = NULL; - pd3dText = NULL; - pd3dVideoText = NULL; + pD3D = nullptr; + pd3dDevice = nullptr; + pSprite = nullptr; + pd3dText = nullptr; + pd3dVideoText = nullptr; textWidth = 0; // D3D palette @@ -159,7 +159,7 @@ vpD3DRenderer::vpD3DRenderer() // Creates a logical font hFont = CreateFont(18, 0, 0, 0, FW_NORMAL, false, false, false, DEFAULT_CHARSET, OUT_DEFAULT_PRECIS, - CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_DONTCARE, NULL); + CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_DONTCARE, nullptr); } /*! 
@@ -170,13 +170,13 @@ vpD3DRenderer::~vpD3DRenderer() { DeleteObject(hFont); - if (pd3dDevice != NULL) + if (pd3dDevice != nullptr) pd3dDevice->Release(); - if (pD3D != NULL) + if (pD3D != nullptr) pD3D->Release(); - if (pd3dText != NULL) + if (pd3dText != nullptr) pd3dText->Release(); - if (pd3dVideoText != NULL) + if (pd3dVideoText != nullptr) pd3dVideoText->Release(); } @@ -210,7 +210,7 @@ bool vpD3DRenderer::init(HWND hwnd, unsigned int width, unsigned int height) hWnd = hwnd; // D3D initialize - if (NULL == (pD3D = Direct3DCreate9(D3D_SDK_VERSION))) + if (nullptr == (pD3D = Direct3DCreate9(D3D_SDK_VERSION))) throw vpDisplayException(vpDisplayException::notInitializedError, "Can't initialize D3D!"); D3DDISPLAYMODE d3ddm; @@ -443,7 +443,7 @@ void vpD3DRenderer::convertROI(const vpImage &I, unsigned char *imBuffer void vpD3DRenderer::setImg(const vpImage &im) { // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { D3DLOCKED_RECT d3dLRect; RECT r; @@ -479,7 +479,7 @@ void vpD3DRenderer::setImgROI(const vpImage &im, const vpImagePoint &iP, unsigned int height) { // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { D3DLOCKED_RECT d3dLRect; int i_min = (std::max)((int)ceil(iP.get_i() / m_rscale), 0); @@ -519,7 +519,7 @@ void vpD3DRenderer::setImgROI(const vpImage &im, const vpImagePoint &iP, void vpD3DRenderer::setImg(const vpImage &im) { // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { D3DLOCKED_RECT d3dLRect; RECT r; @@ -555,7 +555,7 @@ void vpD3DRenderer::setImgROI(const vpImage &im, const vpImagePoi unsigned int height) { // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { D3DLOCKED_RECT d3dLRect; int i_min = (std::max)((int)ceil(iP.get_i() / m_rscale), 0); @@ -596,7 +596,7 @@ void vpD3DRenderer::setImgROI(const vpImage &im, const vpImagePoi bool vpD3DRenderer::render() { // Clears the back buffer to a blue color - // pd3dDevice->Clear( 0, NULL, D3DCLEAR_TARGET, + // pd3dDevice->Clear( 0, nullptr, D3DCLEAR_TARGET, // D3DCOLOR_XRGB(0,0,255), 1.0f, 0 ); // Begins the scene. @@ -617,17 +617,17 @@ bool vpD3DRenderer::render() #if (D3DX_SDK_VERSION <= 9) pSprite->Begin(); // - pSprite->Draw(pd3dVideoText, &r, NULL, NULL, NULL, NULL, 0xFFFFFFFF); + pSprite->Draw(pd3dVideoText, &r, nullptr, nullptr, nullptr, nullptr, 0xFFFFFFFF); #else pSprite->Begin(0); - pSprite->Draw(pd3dVideoText, &r, NULL, NULL, 0xFFFFFFFF); + pSprite->Draw(pd3dVideoText, &r, nullptr, nullptr, 0xFFFFFFFF); #endif pSprite->End(); // Ends the scene. 
pd3dDevice->EndScene(); // Presents the backbuffer - pd3dDevice->Present(NULL, NULL, NULL, NULL); + pd3dDevice->Present(nullptr, nullptr, nullptr, nullptr); return true; } @@ -647,7 +647,7 @@ void vpD3DRenderer::setPixel(const vpImagePoint &iP, const vpColor &color) } // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { D3DLOCKED_RECT d3dLRect; RECT r; @@ -687,7 +687,7 @@ void vpD3DRenderer::drawLine(const vpImagePoint &ip1, const vpImagePoint &ip2, c unsigned int thickness, int style) { // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { // Will contain the texture's surface drawing context HDC hDCMem; @@ -742,7 +742,7 @@ void vpD3DRenderer::drawLine(const vpImagePoint &ip1, const vpImagePoint &ip2, c double j = ip1_.get_j(); // Move to the starting point - MoveToEx(hDCMem, vpMath::round(j / m_rscale), vpMath::round(i / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(j / m_rscale), vpMath::round(i / m_rscale), nullptr); // Draw the line LineTo(hDCMem, vpMath::round(j / m_rscale), vpMath::round((i + deltai) / m_rscale)); } @@ -750,14 +750,14 @@ void vpD3DRenderer::drawLine(const vpImagePoint &ip1, const vpImagePoint &ip2, c for (unsigned int j = (unsigned int)ip1_.get_j(); j < ip2_.get_j(); j += (unsigned int)(2 * deltaj)) { double i = slope * j + orig; // Move to the starting point - MoveToEx(hDCMem, vpMath::round(j / m_rscale), vpMath::round(i / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(j / m_rscale), vpMath::round(i / m_rscale), nullptr); // Draw the line LineTo(hDCMem, vpMath::round((j + deltaj) / m_rscale), vpMath::round((i + deltai) / m_rscale)); } } } else { // move to the starting point - MoveToEx(hDCMem, vpMath::round(ip1.get_u() / m_rscale), vpMath::round(ip1.get_v() / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(ip1.get_u() / m_rscale), vpMath::round(ip1.get_v() / m_rscale), nullptr); // Draw the line LineTo(hDCMem, vpMath::round(ip2.get_u() / m_rscale), vpMath::round(ip2.get_v() / m_rscale)); } @@ -784,7 +784,7 @@ void vpD3DRenderer::drawRect(const vpImagePoint &topLeft, unsigned int width, un bool fill, unsigned int thickness) { // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { if (fill == false) { drawLine(topLeft, topLeft + vpImagePoint(0, width), color, thickness); drawLine(topLeft + vpImagePoint(0, width), topLeft + vpImagePoint(height, width), color, thickness); @@ -846,7 +846,7 @@ void vpD3DRenderer::drawRect(const vpImagePoint &topLeft, unsigned int width, un void vpD3DRenderer::clear(const vpColor &color) { // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { D3DLOCKED_RECT d3dLRect; RECT r; @@ -928,7 +928,7 @@ void vpD3DRenderer::drawCircle(const vpImagePoint ¢er, unsigned int radius, return; // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { D3DLOCKED_RECT d3dLRect; RECT rec; @@ -1146,7 +1146,7 @@ void TextureToRGBa(vpImage &I, unsigned char *imBuffer, unsigned int pit void vpD3DRenderer::getImage(vpImage &I) { // if the device has been initialized - if (pd3dDevice != NULL) { + if (pd3dDevice != nullptr) { // resize the destination image as needed I.resize(m_rheight, m_rwidth); diff --git a/modules/gui/src/display/windows/vpDisplayWin32.cpp b/modules/gui/src/display/windows/vpDisplayWin32.cpp index 1f74bab7a1..dc0ffd0e4c 100644 --- a/modules/gui/src/display/windows/vpDisplayWin32.cpp +++ 
b/modules/gui/src/display/windows/vpDisplayWin32.cpp @@ -147,10 +147,10 @@ void vpDisplayWin32::init(unsigned int width, unsigned int height, int x, int y, param->title = this->m_title; // creates the window in a separate thread - hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)vpCreateWindow, param, 0, &threadId); + hThread = CreateThread(nullptr, 0, (LPTHREAD_START_ROUTINE)vpCreateWindow, param, 0, &threadId); // the initialization worked - iStatus = (hThread != (HANDLE)NULL); + iStatus = (hThread != (HANDLE)nullptr); m_displayHasBeenInitialized = true; } diff --git a/modules/gui/src/display/windows/vpGDIRenderer.cpp b/modules/gui/src/display/windows/vpGDIRenderer.cpp index b985f795e7..120161351c 100644 --- a/modules/gui/src/display/windows/vpGDIRenderer.cpp +++ b/modules/gui/src/display/windows/vpGDIRenderer.cpp @@ -47,10 +47,10 @@ /*! Constructor. */ -vpGDIRenderer::vpGDIRenderer() : m_bmp(NULL), m_bmp_width(0), m_bmp_height(0), timelost(0) +vpGDIRenderer::vpGDIRenderer() : m_bmp(nullptr), m_bmp_width(0), m_bmp_height(0), timelost(0) { // if the screen depth is not 32bpp, throw an exception - int bpp = GetDeviceCaps(GetDC(NULL), BITSPIXEL); + int bpp = GetDeviceCaps(GetDC(nullptr), BITSPIXEL); if (bpp != 32) throw vpDisplayException(vpDisplayException::depthNotSupportedError, "vpGDIRenderer supports only 32bits depth: screen is %dbits depth!", bpp); @@ -130,7 +130,7 @@ bool vpGDIRenderer::init(HWND hWindow, unsigned int width, unsigned int height) // creates the font m_hFont = CreateFont(18, 0, 0, 0, FW_NORMAL, false, false, false, DEFAULT_CHARSET, OUT_DEFAULT_PRECIS, - CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_DONTCARE, NULL); + CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_DONTCARE, nullptr); return true; } @@ -421,12 +421,12 @@ bool vpGDIRenderer::updateBitmap(HBITMAP &hBmp, unsigned char *imBuffer, unsigne // just replace the content SetBitmapBits(hBmp, w * h * 4, imBuffer); } else { - if (hBmp != NULL) { + if (hBmp != nullptr) { // delete the old BITMAP DeleteObject(hBmp); } // create a new BITMAP from this buffer - if ((hBmp = CreateBitmap(static_cast(w), static_cast(h), 1, 32, (void *)imBuffer)) == NULL) + if ((hBmp = CreateBitmap(static_cast(w), static_cast(h), 1, 32, (void *)imBuffer)) == nullptr) return false; m_bmp_width = w; @@ -512,8 +512,8 @@ void vpGDIRenderer::setPixel(const vpImagePoint &iP, const vpColor &color) void vpGDIRenderer::drawLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness, int style) { - HDC hDCScreen = NULL, hDCMem = NULL; - HPEN hPen = NULL; + HDC hDCScreen = nullptr, hDCMem = nullptr; + HPEN hPen = nullptr; #ifdef GDI_ROBUST double start = vpTime::measureTimeMs(); while (vpTime::measureTimeMs() - start < 1000) { @@ -616,7 +616,7 @@ void vpGDIRenderer::drawLine(const vpImagePoint &ip1, const vpImagePoint &ip2, c double j = ip1_.get_j(); // Move to the starting point - MoveToEx(hDCMem, vpMath::round(j / m_rscale), vpMath::round(i / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(j / m_rscale), vpMath::round(i / m_rscale), nullptr); // Draw the line LineTo(hDCMem, vpMath::round(j / m_rscale), vpMath::round((i + deltai) / m_rscale)); } @@ -624,14 +624,14 @@ void vpGDIRenderer::drawLine(const vpImagePoint &ip1, const vpImagePoint &ip2, c for (unsigned int j = (unsigned int)ip1_.get_j(); j < ip2_.get_j(); j += (unsigned int)(2 * deltaj)) { double i = slope * j + orig; // Move to the starting point - MoveToEx(hDCMem, vpMath::round(j / m_rscale), vpMath::round(i / m_rscale), 
NULL); + MoveToEx(hDCMem, vpMath::round(j / m_rscale), vpMath::round(i / m_rscale), nullptr); // Draw the line LineTo(hDCMem, vpMath::round((j + deltaj) / m_rscale), vpMath::round((i + deltai) / m_rscale)); } } } else { // move to the starting point - MoveToEx(hDCMem, vpMath::round(ip1.get_u() / m_rscale), vpMath::round(ip1.get_v() / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(ip1.get_u() / m_rscale), vpMath::round(ip1.get_v() / m_rscale), nullptr); // Draw the line LineTo(hDCMem, vpMath::round(ip2.get_u() / m_rscale), vpMath::round(ip2.get_v() / m_rscale)); } @@ -876,12 +876,12 @@ void vpGDIRenderer::drawCross(const vpImagePoint &ip, unsigned int size, const v SelectObject(hDCMem, hPen); // move to the starting point - MoveToEx(hDCMem, vpMath::round(ip.get_u() / m_rscale) - half_size, vpMath::round(ip.get_v() / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(ip.get_u() / m_rscale) - half_size, vpMath::round(ip.get_v() / m_rscale), nullptr); // Draw the first line (horizontal) LineTo(hDCMem, vpMath::round(ip.get_u() / m_rscale) + half_size, vpMath::round(ip.get_v() / m_rscale)); // move to the starting point - MoveToEx(hDCMem, vpMath::round(ip.get_u() / m_rscale), vpMath::round(ip.get_v() / m_rscale) - half_size, NULL); + MoveToEx(hDCMem, vpMath::round(ip.get_u() / m_rscale), vpMath::round(ip.get_v() / m_rscale) - half_size, nullptr); // Draw the second line (vertical) LineTo(hDCMem, vpMath::round(ip.get_u() / m_rscale), vpMath::round(ip.get_v() / m_rscale) + half_size); @@ -957,7 +957,7 @@ void vpGDIRenderer::drawArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, ip4.set_j(ip3.get_j() + a * h); if (lg > 2 * vpImagePoint::distance(ip2 / m_rscale, ip4)) { - MoveToEx(hDCMem, vpMath::round(ip2.get_u() / m_rscale), vpMath::round(ip2.get_v() / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(ip2.get_u() / m_rscale), vpMath::round(ip2.get_v() / m_rscale), nullptr); LineTo(hDCMem, vpMath::round(ip4.get_u()), vpMath::round(ip4.get_v())); } // t+=0.1 ; @@ -970,13 +970,13 @@ void vpGDIRenderer::drawArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, ip4.set_j(ip3.get_j() - a * h); if (lg > 2 * vpImagePoint::distance(ip2 / m_rscale, ip4)) { - MoveToEx(hDCMem, vpMath::round(ip2.get_u() / m_rscale), vpMath::round(ip2.get_v() / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(ip2.get_u() / m_rscale), vpMath::round(ip2.get_v() / m_rscale), nullptr); LineTo(hDCMem, vpMath::round(ip4.get_u()), vpMath::round(ip4.get_v())); } // t-=0.1 ; } - MoveToEx(hDCMem, vpMath::round(ip1.get_u() / m_rscale), vpMath::round(ip1.get_v() / m_rscale), NULL); + MoveToEx(hDCMem, vpMath::round(ip1.get_u() / m_rscale), vpMath::round(ip1.get_v() / m_rscale), nullptr); LineTo(hDCMem, vpMath::round(ip2.get_u() / m_rscale), vpMath::round(ip2.get_v() / m_rscale)); } diff --git a/modules/gui/src/display/windows/vpWin32API.cpp b/modules/gui/src/display/windows/vpWin32API.cpp index 285562c628..105853e7f4 100644 --- a/modules/gui/src/display/windows/vpWin32API.cpp +++ b/modules/gui/src/display/windows/vpWin32API.cpp @@ -47,8 +47,8 @@ DWORD vpProcessErrors(const std::string &api_name) LPVOID lpMsgBuf; DWORD err = GetLastError(); - FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, err, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR)&lpMsgBuf, 0, NULL); + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, err, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR)&lpMsgBuf, 0, nullptr); 
std::cout << "call to " << api_name << " failed with the following error code: " << err << "(" << (LPTSTR)lpMsgBuf << ")" << std::endl; return err; @@ -91,12 +91,12 @@ void vpSelectObject(HWND hWnd, HDC hDC, HDC hDCMem, HGDIOBJ h) { HGDIOBJ ret = SelectObject(hDCMem, h); - if (ret == NULL) { + if (ret == nullptr) { vpProcessErrors("SelectObject"); double ms = vpTime::measureTimeMs(); - while (ret == NULL && vpTime::measureTimeMs() - ms < 5000) { + while (ret == nullptr && vpTime::measureTimeMs() - ms < 5000) { DeleteObject(h); DeleteDC(hDCMem); ReleaseDC(hWnd, hDC); @@ -129,7 +129,7 @@ COLORREF vpSetPixel(HDC hdc, int X, int Y, COLORREF crColor) HBITMAP vpCreateBitmap(int nWidth, int nHeight, UINT cPlanes, UINT cBitsPerPel, const VOID *lpvBits) { HBITMAP ret = CreateBitmap(nWidth, nHeight, cPlanes, cBitsPerPel, lpvBits); - if (ret == NULL) + if (ret == nullptr) vpProcessErrors("CreateBitmap"); return ret; diff --git a/modules/gui/src/display/windows/vpWin32Window.cpp b/modules/gui/src/display/windows/vpWin32Window.cpp index e09013ee77..3b2a0962b6 100644 --- a/modules/gui/src/display/windows/vpWin32Window.cpp +++ b/modules/gui/src/display/windows/vpWin32Window.cpp @@ -73,16 +73,16 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) { // the first time this callback is executed, put the window in initialized // state - if (window != NULL) { + if (window != nullptr) { if (!window->isInitialized()) { window->initialized = true; - vpReleaseSemaphore(window->semaInit, 1, NULL); + vpReleaseSemaphore(window->semaInit, 1, nullptr); } } switch (message) { case vpWM_DISPLAY: // redraw the whole window - InvalidateRect(window->getHWnd(), NULL, TRUE); + InvalidateRect(window->getHWnd(), nullptr, TRUE); UpdateWindow(window->getHWnd()); break; @@ -104,7 +104,7 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) window->clickY = GET_Y_LPARAM(lParam); window->clickButton = vpMouseButton::button1; - vpReleaseSemaphore(window->semaClick, 1, NULL); + vpReleaseSemaphore(window->semaClick, 1, nullptr); } break; case WM_MBUTTONDOWN: { @@ -112,7 +112,7 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) window->clickY = GET_Y_LPARAM(lParam); window->clickButton = vpMouseButton::button2; - vpReleaseSemaphore(window->semaClick, 1, NULL); + vpReleaseSemaphore(window->semaClick, 1, nullptr); } break; case WM_RBUTTONDOWN: { @@ -120,7 +120,7 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) window->clickY = GET_Y_LPARAM(lParam); window->clickButton = vpMouseButton::button3; - vpReleaseSemaphore(window->semaClick, 1, NULL); + vpReleaseSemaphore(window->semaClick, 1, nullptr); } break; case WM_LBUTTONUP: { @@ -128,7 +128,7 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) window->clickYUp = GET_Y_LPARAM(lParam); window->clickButtonUp = vpMouseButton::button1; - vpReleaseSemaphore(window->semaClickUp, 1, NULL); + vpReleaseSemaphore(window->semaClickUp, 1, nullptr); } break; case WM_MBUTTONUP: { @@ -136,7 +136,7 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) window->clickYUp = GET_Y_LPARAM(lParam); window->clickButtonUp = vpMouseButton::button2; - vpReleaseSemaphore(window->semaClickUp, 1, NULL); + vpReleaseSemaphore(window->semaClickUp, 1, nullptr); } break; case WM_RBUTTONUP: { @@ -144,12 +144,12 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) window->clickYUp = GET_Y_LPARAM(lParam); 
window->clickButtonUp = vpMouseButton::button3; - vpReleaseSemaphore(window->semaClickUp, 1, NULL); + vpReleaseSemaphore(window->semaClickUp, 1, nullptr); } break; case WM_MOUSEMOVE: { window->coordX = GET_X_LPARAM(lParam); window->coordY = GET_Y_LPARAM(lParam); - vpReleaseSemaphore(window->semaMove, 1, NULL); + vpReleaseSemaphore(window->semaMove, 1, nullptr); } break; case WM_SYSKEYDOWN: @@ -160,7 +160,7 @@ LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) GetKeyNameText((LONG)lParam, window->lpString, 10); // 10 is the size of lpString // window->key = MapVirtualKey(wParam, MAPVK_VK_TO_CHAR); - vpReleaseSemaphore(window->semaKey, 1, NULL); + vpReleaseSemaphore(window->semaKey, 1, nullptr); break; } @@ -204,11 +204,11 @@ vpWin32Window::vpWin32Window(vpWin32Renderer *rend) : initialized(false) // this file (registered = false) // creates the semaphores - semaInit = CreateSemaphore(NULL, 0, 1, NULL); - semaClick = CreateSemaphore(NULL, 0, MAX_SEM_COUNT, NULL); - semaClickUp = CreateSemaphore(NULL, 0, MAX_SEM_COUNT, NULL); - semaKey = CreateSemaphore(NULL, 0, MAX_SEM_COUNT, NULL); - semaMove = CreateSemaphore(NULL, 0, MAX_SEM_COUNT, NULL); + semaInit = CreateSemaphore(nullptr, 0, 1, nullptr); + semaClick = CreateSemaphore(nullptr, 0, MAX_SEM_COUNT, nullptr); + semaClickUp = CreateSemaphore(nullptr, 0, MAX_SEM_COUNT, nullptr); + semaKey = CreateSemaphore(nullptr, 0, MAX_SEM_COUNT, nullptr); + semaMove = CreateSemaphore(nullptr, 0, MAX_SEM_COUNT, nullptr); } /*! @@ -256,12 +256,12 @@ void vpWin32Window::initWindow(const char *title, int posx, int posy, unsigned i wcex.cbClsExtra = 0; wcex.cbWndExtra = 0; wcex.hInstance = hInst; - wcex.hIcon = LoadIcon(NULL, IDI_APPLICATION); - wcex.hCursor = LoadCursor(NULL, IDC_ARROW); + wcex.hIcon = LoadIcon(nullptr, IDI_APPLICATION); + wcex.hCursor = LoadCursor(nullptr, IDC_ARROW); wcex.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1); - wcex.lpszMenuName = NULL; + wcex.lpszMenuName = nullptr; wcex.lpszClassName = g_szClassName; - wcex.hIconSm = LoadIcon(NULL, IDI_APPLICATION); + wcex.hIconSm = LoadIcon(nullptr, IDI_APPLICATION); RegisterClassEx(&wcex); @@ -271,13 +271,13 @@ void vpWin32Window::initWindow(const char *title, int posx, int posy, unsigned i // creates the window hWnd = CreateWindowEx(WS_EX_APPWINDOW, g_szClassName, title, style, posx, posy, rect.right - rect.left, - rect.bottom - rect.top, NULL, NULL, hInst, NULL); - if (hWnd == NULL) { + rect.bottom - rect.top, nullptr, nullptr, hInst, nullptr); + if (hWnd == nullptr) { DWORD err = GetLastError(); std::cout << "err CreateWindowEx=" << err << std::endl; throw vpDisplayException(vpDisplayException::cannotOpenWindowError, "Can't create the window!"); } - SetWindowPos(hWnd, NULL, 0, 0, rect.right - rect.left, rect.bottom - rect.top, SWP_NOZORDER | SWP_NOMOVE); + SetWindowPos(hWnd, nullptr, 0, 0, rect.right - rect.left, rect.bottom - rect.top, SWP_NOZORDER | SWP_NOMOVE); // needed if we want to access it from the callback method (message handler) window = this; @@ -294,14 +294,14 @@ void vpWin32Window::initWindow(const char *title, int posx, int posy, unsigned i // starts the message loop while (true) { - BOOL val = GetMessage(&msg, NULL, 0, 0); + BOOL val = GetMessage(&msg, nullptr, 0, 0); if (val == -1) { std::cout << "GetMessage error:" << GetLastError() << std::endl; break; } else if (val == 0) { break; } else { - if (!TranslateAccelerator(msg.hwnd, NULL, &msg)) { + if (!TranslateAccelerator(msg.hwnd, nullptr, &msg)) { TranslateMessage(&msg); DispatchMessage(&msg); } 
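As an aside to the NULL -> nullptr sweep performed in the hunks above and below, the practical difference is that nullptr has its own type (std::nullptr_t) and can never be mistaken for an integer during overload resolution or template argument deduction, whereas NULL is usually defined as 0 or 0L. A minimal standalone sketch of that behaviour (illustration only, not taken from ViSP):

    #include <iostream>

    // Two overloads: one takes an integer, the other a pointer.
    void report(int)          { std::cout << "int overload" << std::endl; }
    void report(const char *) { std::cout << "pointer overload" << std::endl; }

    int main()
    {
        report(0);       // always resolves to the int overload
        report(nullptr); // unambiguously resolves to the pointer overload
        // report(NULL); // ambiguous or resolves to the int overload, depending on how NULL is defined
        return 0;
    }

The Win32 parameters touched here (security attributes, previous-count output pointers, name strings, window handles) are all pointer types, so substituting nullptr is behaviour preserving; only the spelling, and the type safety of the call sites, changes.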
diff --git a/modules/gui/src/plot/vpPlot.cpp b/modules/gui/src/plot/vpPlot.cpp index fdfc1db1c4..f85f67f39d 100644 --- a/modules/gui/src/plot/vpPlot.cpp +++ b/modules/gui/src/plot/vpPlot.cpp @@ -55,7 +55,7 @@ Needs then a call to init(). */ -vpPlot::vpPlot() : I(), display(NULL), graphNbr(1), graphList(NULL), margei(30), margej(40), factori(1.f), factorj(1.) +vpPlot::vpPlot() : I(), display(nullptr), graphNbr(1), graphList(nullptr), margei(30), margej(40), factori(1.f), factorj(1.) { } /*! This constructor creates a new window where the curves @@ -75,7 +75,7 @@ vpPlot::vpPlot() : I(), display(NULL), graphNbr(1), graphList(NULL), margei(30), \param title : Window title. */ vpPlot::vpPlot(unsigned int graph_nbr, unsigned int height, unsigned int width, int x, int y, const std::string &title) - : I(), display(NULL), graphNbr(1), graphList(NULL), margei(30), margej(40), factori(1.f), factorj(1.) + : I(), display(nullptr), graphNbr(1), graphList(nullptr), margei(30), margej(40), factori(1.f), factorj(1.) { init(graph_nbr, height, width, x, y, title); } @@ -124,13 +124,13 @@ void vpPlot::init(unsigned int graph_nbr, unsigned int height, unsigned int widt */ vpPlot::~vpPlot() { - if (graphList != NULL) { + if (graphList != nullptr) { delete[] graphList; - graphList = NULL; + graphList = nullptr; } - if (display != NULL) { + if (display != nullptr) { delete display; - display = NULL; + display = nullptr; } } diff --git a/modules/gui/src/plot/vpPlotGraph.cpp b/modules/gui/src/plot/vpPlotGraph.cpp index 156394d62a..4d11ea617a 100644 --- a/modules/gui/src/plot/vpPlotGraph.cpp +++ b/modules/gui/src/plot/vpPlotGraph.cpp @@ -60,7 +60,7 @@ void getGrid3DPoint(double pente, vpImagePoint &iPunit, vpImagePoint &ip1, vpIma vpPlotGraph::vpPlotGraph() : xorg(0.), yorg(0.), zoomx(1.), zoomy(1.), xmax(10), ymax(10), xmin(0), ymin(-10), xdelt(1), ydelt(1), gridx(true), - gridy(true), gridColor(), curveNbr(1), curveList(NULL), scaleInitialized(false), firstPoint(true), nbDivisionx(10), + gridy(true), gridColor(), curveNbr(1), curveList(nullptr), scaleInitialized(false), firstPoint(true), nbDivisionx(10), nbDivisiony(10), topLeft(), width(0), height(0), graphZone(), dTopLeft(), dWidth(0), dHeight(0), dGraphZone(), dTopLeft3D(), dGraphZone3D(), cam(), cMo(), cMf(), w_xval(0), w_xsize(0), w_yval(0), w_ysize(0), w_zval(0), w_zsize(0), ptXorg(0), ptYorg(0), ptZorg(0), zoomx_3D(1.), zoomy_3D(1.), zoomz_3D(1.), nbDivisionz(10), zorg(1.), @@ -77,9 +77,9 @@ vpPlotGraph::vpPlotGraph() vpPlotGraph::~vpPlotGraph() { - if (curveList != NULL) { + if (curveList != nullptr) { delete[] curveList; - curveList = NULL; + curveList = nullptr; } } diff --git a/modules/gui/test/display-with-dataset/testClick.cpp b/modules/gui/test/display-with-dataset/testClick.cpp index 8b3fe714df..3d2602b086 100644 --- a/modules/gui/test/display-with-dataset/testClick.cpp +++ b/modules/gui/test/display-with-dataset/testClick.cpp @@ -197,7 +197,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, vpDisplayType & break; case 'h': - usage(argv[0], NULL, ipath, dtype); + usage(argv[0], nullptr, ipath, dtype); return false; break; case 'c': @@ -216,7 +216,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, vpDisplayType & if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, dtype); + usage(argv[0], nullptr, ipath, dtype); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -311,7 +311,7 @@ int main(int argc, const char 
**argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_dtype); + usage(argv[0], nullptr, ipath, opt_dtype); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -329,7 +329,7 @@ int main(int argc, const char **argv) vpImageIo::read(I, filename); // Create a display for the image - vpDisplay *display = NULL; + vpDisplay *display = nullptr; switch (opt_dtype) { case vpX11: diff --git a/modules/gui/test/display-with-dataset/testDisplayScaled.cpp b/modules/gui/test/display-with-dataset/testDisplayScaled.cpp index c3c111b975..2c7457371a 100644 --- a/modules/gui/test/display-with-dataset/testDisplayScaled.cpp +++ b/modules/gui/test/display-with-dataset/testDisplayScaled.cpp @@ -61,7 +61,7 @@ template bool test(const std::string &display, vpImage &I, // backup the input image vpImage Ibackup(I); - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (display == "GDI") { #ifdef VISP_HAVE_GDI d = new vpDisplayGDI; @@ -249,7 +249,7 @@ template bool test(const std::string &display, vpImage &I, vpDisplay::close(I); - if (d != NULL) + if (d != nullptr) delete d; if (success) diff --git a/modules/gui/test/display-with-dataset/testMouseEvent.cpp b/modules/gui/test/display-with-dataset/testMouseEvent.cpp index cd4432532c..0b208897b3 100644 --- a/modules/gui/test/display-with-dataset/testMouseEvent.cpp +++ b/modules/gui/test/display-with-dataset/testMouseEvent.cpp @@ -269,7 +269,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp wait = true; break; case 'h': - usage(argv[0], NULL, ipath, ppath, first, last, step, dtype); + usage(argv[0], nullptr, ipath, ppath, first, last, step, dtype); return false; break; @@ -282,7 +282,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &pp if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, ppath, first, last, step, dtype); + usage(argv[0], nullptr, ipath, ppath, first, last, step, dtype); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -385,7 +385,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty()) { - usage(argv[0], NULL, ipath, opt_ppath, opt_first, opt_last, opt_step, opt_dtype); + usage(argv[0], nullptr, ipath, opt_ppath, opt_first, opt_last, opt_step, opt_dtype); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -431,7 +431,7 @@ int main(int argc, const char **argv) return EXIT_FAILURE; } // Create a display for the image - vpDisplay *display = NULL; + vpDisplay *display = nullptr; switch (opt_dtype) { case vpX11: diff --git a/modules/gui/test/display-with-dataset/testVideoDevice.cpp b/modules/gui/test/display-with-dataset/testVideoDevice.cpp index f9057e0d44..d73a428e71 100644 --- a/modules/gui/test/display-with-dataset/testVideoDevice.cpp +++ b/modules/gui/test/display-with-dataset/testVideoDevice.cpp @@ -195,7 +195,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, vpDisplayType & break; case 'h': - usage(argv[0], NULL, ipath, dtype); + usage(argv[0], nullptr, ipath, dtype); return false; break; case 'c': @@ -214,7 +214,7 @@ bool 
getOptions(int argc, const char **argv, std::string &ipath, vpDisplayType & if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, dtype); + usage(argv[0], nullptr, ipath, dtype); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -309,7 +309,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_dtype); + usage(argv[0], nullptr, ipath, opt_dtype); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl @@ -334,7 +334,7 @@ int main(int argc, const char **argv) vpImageIo::read(Irgba, filename); // Create a display for the image - vpDisplay *display = NULL; + vpDisplay *display = nullptr; switch (opt_dtype) { case vpX11: diff --git a/modules/gui/test/display/testDisplayPolygonLines.cpp b/modules/gui/test/display/testDisplayPolygonLines.cpp index 760c82129b..6b1d96c849 100644 --- a/modules/gui/test/display/testDisplayPolygonLines.cpp +++ b/modules/gui/test/display/testDisplayPolygonLines.cpp @@ -121,7 +121,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -134,7 +134,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/gui/test/display/testDisplayRoi.cpp b/modules/gui/test/display/testDisplayRoi.cpp index 1917eb0858..78d780cf7a 100644 --- a/modules/gui/test/display/testDisplayRoi.cpp +++ b/modules/gui/test/display/testDisplayRoi.cpp @@ -125,7 +125,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -138,7 +138,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/gui/test/display/testDisplays.cpp b/modules/gui/test/display/testDisplays.cpp index 0d60c02122..fe5c88ec35 100644 --- a/modules/gui/test/display/testDisplays.cpp +++ b/modules/gui/test/display/testDisplays.cpp @@ -122,7 +122,7 @@ static bool getOptions(int argc, const char **argv, bool &list, bool &click_allo list = true; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; case 'c': @@ -141,7 +141,7 @@ static bool getOptions(int argc, const char **argv, bool &list, bool &click_allo if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/gui/test/display/testVideoDeviceDual.cpp b/modules/gui/test/display/testVideoDeviceDual.cpp index 7357097f70..ea2a4e710d 100644 --- a/modules/gui/test/display/testVideoDeviceDual.cpp +++ 
b/modules/gui/test/display/testVideoDeviceDual.cpp @@ -170,7 +170,7 @@ bool getOptions(int argc, const char **argv, vpDisplayType &dtype, bool &list, b break; case 'h': - usage(argv[0], NULL, dtype); + usage(argv[0], nullptr, dtype); return false; break; case 'c': @@ -189,7 +189,7 @@ bool getOptions(int argc, const char **argv, vpDisplayType &dtype, bool &list, b if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, dtype); + usage(argv[0], nullptr, dtype); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -261,7 +261,7 @@ int main(int argc, const char **argv) I2 = 255; // Create 2 display - vpDisplay *d1 = NULL, *d2 = NULL; + vpDisplay *d1 = nullptr, *d2 = nullptr; // Initialize the displays switch (opt_dtype) { diff --git a/modules/imgproc/include/visp3/imgproc/vpContours.h b/modules/imgproc/include/visp3/imgproc/vpContours.h index bc3265285a..9d2d79c858 100644 --- a/modules/imgproc/include/visp3/imgproc/vpContours.h +++ b/modules/imgproc/include/visp3/imgproc/vpContours.h @@ -218,18 +218,18 @@ struct vpContour /*! * Default constructor. */ - vpContour() : m_children(), m_contourType(vp::CONTOUR_HOLE), m_parent(NULL), m_points() { } + vpContour() : m_children(), m_contourType(vp::CONTOUR_HOLE), m_parent(nullptr), m_points() { } /*! * Constructor of a given contour type. */ - vpContour(const vpContourType &type) : m_children(), m_contourType(type), m_parent(NULL), m_points() { } + vpContour(const vpContourType &type) : m_children(), m_contourType(type), m_parent(nullptr), m_points() { } /*! * Copy constructor. */ vpContour(const vpContour &contour) - : m_children(), m_contourType(contour.m_contourType), m_parent(NULL), m_points(contour.m_points) + : m_children(), m_contourType(contour.m_contourType), m_parent(nullptr), m_points(contour.m_points) { // Copy the underlying contours @@ -247,10 +247,10 @@ struct vpContour virtual ~vpContour() { for (std::vector::iterator it = m_children.begin(); it != m_children.end(); ++it) { - (*it)->m_parent = NULL; - if (*it != NULL) { + (*it)->m_parent = nullptr; + if (*it != nullptr) { delete *it; - *it = NULL; + *it = nullptr; } } } @@ -262,20 +262,20 @@ struct vpContour { m_contourType = other.m_contourType; - if (m_parent == NULL) { + if (m_parent == nullptr) { // We are a root or an uninitialized contour so delete everything for (std::vector::iterator it = m_children.begin(); it != m_children.end(); ++it) { - (*it)->m_parent = NULL; - if (*it != NULL) { + (*it)->m_parent = nullptr; + if (*it != nullptr) { delete *it; - *it = NULL; + *it = nullptr; } } } else { // Make the current contour the root contour // to avoid problem when deleting - m_parent = NULL; + m_parent = nullptr; } m_children.clear(); @@ -295,7 +295,7 @@ struct vpContour { m_parent = parent; - if (parent != NULL) { + if (parent != nullptr) { parent->m_children.push_back(this); } } diff --git a/modules/imgproc/src/vpContours.cpp b/modules/imgproc/src/vpContours.cpp index 36b86af5fc..cefb7cdf92 100644 --- a/modules/imgproc/src/vpContours.cpp +++ b/modules/imgproc/src/vpContours.cpp @@ -322,7 +322,7 @@ void findContours(const vpImage &I_original, vpContour &contours, if (isOuter || isHole) { // else (1) (c) vpContour *border = new vpContour; - vpContour *borderPrime = NULL; + vpContour *borderPrime = nullptr; vpImagePoint from(i, j); if (isOuter) { @@ -399,13 +399,13 @@ void findContours(const vpImage &I_original, vpContour &contours, if (retrievalMode == CONTOUR_RETR_EXTERNAL || 
retrievalMode == CONTOUR_RETR_LIST) { // Delete contours content - contours.m_parent = NULL; + contours.m_parent = nullptr; for (std::vector::iterator it = contours.m_children.begin(); it != contours.m_children.end(); ++it) { - (*it)->m_parent = NULL; - if (*it != NULL) { + (*it)->m_parent = nullptr; + if (*it != nullptr) { delete *it; - *it = NULL; + *it = nullptr; } } @@ -444,6 +444,6 @@ void findContours(const vpImage &I_original, vpContour &contours, } delete root; - root = NULL; + root = nullptr; } }; diff --git a/modules/imgproc/test/with-dataset/testAutoThreshold.cpp b/modules/imgproc/test/with-dataset/testAutoThreshold.cpp index e25b9a2c8e..b4eca72f85 100644 --- a/modules/imgproc/test/with-dataset/testAutoThreshold.cpp +++ b/modules/imgproc/test/with-dataset/testAutoThreshold.cpp @@ -123,7 +123,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -140,7 +140,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -204,7 +204,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -225,7 +225,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/imgproc/test/with-dataset/testConnectedComponents.cpp b/modules/imgproc/test/with-dataset/testConnectedComponents.cpp index 64d5aca7f8..970002f264 100644 --- a/modules/imgproc/test/with-dataset/testConnectedComponents.cpp +++ b/modules/imgproc/test/with-dataset/testConnectedComponents.cpp @@ -120,7 +120,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -137,7 +137,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -243,7 +243,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) 
{ - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -264,7 +264,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/imgproc/test/with-dataset/testContours.cpp b/modules/imgproc/test/with-dataset/testContours.cpp index e094dd7dc7..029e2d8ece 100644 --- a/modules/imgproc/test/with-dataset/testContours.cpp +++ b/modules/imgproc/test/with-dataset/testContours.cpp @@ -120,7 +120,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -137,7 +137,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -230,7 +230,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -251,7 +251,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/imgproc/test/with-dataset/testFloodFill.cpp b/modules/imgproc/test/with-dataset/testFloodFill.cpp index b6ecd2e7a1..e03293ac7a 100644 --- a/modules/imgproc/test/with-dataset/testFloodFill.cpp +++ b/modules/imgproc/test/with-dataset/testFloodFill.cpp @@ -119,7 +119,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -136,7 +136,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -222,7 +222,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) 
{ - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -243,7 +243,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/imgproc/test/with-dataset/testImgproc.cpp b/modules/imgproc/test/with-dataset/testImgproc.cpp index 6307069edb..c6b6a8a27d 100644 --- a/modules/imgproc/test/with-dataset/testImgproc.cpp +++ b/modules/imgproc/test/with-dataset/testImgproc.cpp @@ -120,7 +120,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op opath = optarg_; break; case 'h': - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); return false; break; @@ -137,7 +137,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, std::string &op if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, ipath, opath, user); + usage(argv[0], nullptr, ipath, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -201,7 +201,7 @@ int main(int argc, const char **argv) // Create the dirname vpIoTools::makeDirectory(opath); } catch (...) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Cannot create " << opath << std::endl; std::cerr << " Check your -o " << opt_opath << " option " << std::endl; @@ -222,7 +222,7 @@ int main(int argc, const char **argv) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL, ipath, opt_opath, username); + usage(argv[0], nullptr, ipath, opt_opath, username); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/io/include/visp3/io/vpImageQueue.h b/modules/io/include/visp3/io/vpImageQueue.h index f7ea820a87..8a8f9e318c 100644 --- a/modules/io/include/visp3/io/vpImageQueue.h +++ b/modules/io/include/visp3/io/vpImageQueue.h @@ -155,7 +155,7 @@ template class vpImageQueue m_queue_image.push(I); - if (data != NULL) { + if (data != nullptr) { m_queue_data.push(*data); } @@ -164,7 +164,7 @@ template class vpImageQueue m_queue_image.pop(); } - if (data != NULL) { + if (data != nullptr) { while (m_queue_data.size() > m_maxQueueSize) { m_queue_data.pop(); } @@ -177,12 +177,12 @@ template class vpImageQueue * Record helper that display information in the windows associated to the image, pop current image and additional * data in the queue. * \param[in] I : Image to record. - * \param[in] data : Data to record. Set to NULL when no additional data have to be considered. + * \param[in] data : Data to record. Set to nullptr when no additional data have to be considered. * \param[in] trigger_recording : External trigger to start data saving. 
* \param[in] disable_left_click : Disable left click usage to trigger data saving. * \return true when the used asked to quit using a right click in the display window. */ - bool record(const vpImage &I, std::string *data = NULL, bool trigger_recording = false, + bool record(const vpImage &I, std::string *data = nullptr, bool trigger_recording = false, bool disable_left_click = false) { if (I.display) { diff --git a/modules/io/include/visp3/io/vpParseArgv.h b/modules/io/include/visp3/io/vpParseArgv.h index 60bf1dd872..aa83214d54 100644 --- a/modules/io/include/visp3/io/vpParseArgv.h +++ b/modules/io/include/visp3/io/vpParseArgv.h @@ -60,19 +60,19 @@ int main(int argc, const char ** argv) { {"-bool", vpParseArgv::ARGV_CONSTANT_BOOL, 0, (char *) &b_val, "Flag enabled."}, - {"-int", vpParseArgv::ARGV_INT, (char*) NULL, (char *) &i_val, + {"-int", vpParseArgv::ARGV_INT, (char*) nullptr, (char *) &i_val, "An integer value."}, - {"-long", vpParseArgv::ARGV_LONG, (char*) NULL, (char *) &l_val, + {"-long", vpParseArgv::ARGV_LONG, (char*) nullptr, (char *) &l_val, "An integer value."}, - {"-float", vpParseArgv::ARGV_FLOAT, (char*) NULL, (char *) &f_val, + {"-float", vpParseArgv::ARGV_FLOAT, (char*) nullptr, (char *) &f_val, "A float value."}, - {"-double", vpParseArgv::ARGV_DOUBLE, (char*) NULL, (char *) &d_val, + {"-double", vpParseArgv::ARGV_DOUBLE, (char*) nullptr, (char *) &d_val, "A double value."}, - {"-string", vpParseArgv::ARGV_STRING, (char*) NULL, (char *) &s_val, + {"-string", vpParseArgv::ARGV_STRING, (char*) nullptr, (char *) &s_val, "A string value."}, - {"-h", vpParseArgv::ARGV_HELP, (char*) NULL, (char *) NULL, + {"-h", vpParseArgv::ARGV_HELP, (char*) nullptr, (char *) nullptr, "Print the help."}, - {(char*) NULL, vpParseArgv::ARGV_END, (char*) NULL, (char*) NULL, (char*) NULL} } ; + {(char*) nullptr, vpParseArgv::ARGV_END, (char*) nullptr, (char*) nullptr, (char*) nullptr} } ; // Read the command line options if(vpParseArgv::parse(&argc, argv, argTable, diff --git a/modules/io/src/image/private/vpImageIoLibjpeg.cpp b/modules/io/src/image/private/vpImageIoLibjpeg.cpp index 7ebf46c94b..ecd4aa39e8 100644 --- a/modules/io/src/image/private/vpImageIoLibjpeg.cpp +++ b/modules/io/src/image/private/vpImageIoLibjpeg.cpp @@ -74,7 +74,7 @@ void writeJPEGLibjpeg(const vpImage &I, const std::string &filena file = fopen(filename.c_str(), "wb"); - if (file == NULL) { + if (file == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create JPEG file \"%s\"", filename.c_str())); } @@ -133,7 +133,7 @@ void writeJPEGLibjpeg(const vpImage &I, const std::string &filename, int file = fopen(filename.c_str(), "wb"); - if (file == NULL) { + if (file == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create JPEG file \"%s\"", filename.c_str())); } @@ -204,7 +204,7 @@ void readJPEGLibjpeg(vpImage &I, const std::string &filename) file = fopen(filename.c_str(), "rb"); - if (file == NULL) { + if (file == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot read JPEG file \"%s\"", filename.c_str())); } @@ -284,7 +284,7 @@ void readJPEGLibjpeg(vpImage &I, const std::string &filename) file = fopen(filename.c_str(), "rb"); - if (file == NULL) { + if (file == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot read JPEG file \"%s\"", filename.c_str())); } diff --git a/modules/io/src/image/private/vpImageIoLibpng.cpp b/modules/io/src/image/private/vpImageIoLibpng.cpp index aed697c5b5..e3a8c4fc02 100644 --- 
a/modules/io/src/image/private/vpImageIoLibpng.cpp +++ b/modules/io/src/image/private/vpImageIoLibpng.cpp @@ -67,12 +67,12 @@ void writePNGLibpng(const vpImage &I, const std::string &filename file = fopen(filename.c_str(), "wb"); - if (file == NULL) { + if (file == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create PNG file \"%s\"", filename.c_str())); } /* create a png info struct */ - png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); + png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr); if (!png_ptr) { fclose(file); vpERROR_TRACE("Error during png_create_write_struct()\n"); @@ -82,7 +82,7 @@ void writePNGLibpng(const vpImage &I, const std::string &filename png_infop info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { fclose(file); - png_destroy_write_struct(&png_ptr, NULL); + png_destroy_write_struct(&png_ptr, nullptr); vpERROR_TRACE("Error during png_create_info_struct()\n"); throw(vpImageException(vpImageException::ioError, "PNG write error")); } @@ -134,7 +134,7 @@ void writePNGLibpng(const vpImage &I, const std::string &filename png_write_image(png_ptr, row_ptrs); - png_write_end(png_ptr, NULL); + png_write_end(png_ptr, nullptr); for (unsigned int j = 0; j < height; j++) delete[] row_ptrs[j]; @@ -164,12 +164,12 @@ void writePNGLibpng(const vpImage &I, const std::string &filename) file = fopen(filename.c_str(), "wb"); - if (file == NULL) { + if (file == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create PNG file \"%s\"", filename.c_str())); } /* create a png info struct */ - png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); + png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr); if (!png_ptr) { fclose(file); vpERROR_TRACE("Error during png_create_write_struct()\n"); @@ -179,7 +179,7 @@ void writePNGLibpng(const vpImage &I, const std::string &filename) png_infop info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { fclose(file); - png_destroy_write_struct(&png_ptr, NULL); + png_destroy_write_struct(&png_ptr, nullptr); vpERROR_TRACE("Error during png_create_info_struct()\n"); throw(vpImageException(vpImageException::ioError, "PNG write error")); } @@ -236,7 +236,7 @@ void writePNGLibpng(const vpImage &I, const std::string &filename) png_write_image(png_ptr, row_ptrs); - png_write_end(png_ptr, NULL); + png_write_end(png_ptr, nullptr); for (unsigned int j = 0; j < height; j++) delete[] row_ptrs[j]; @@ -274,7 +274,7 @@ void readPNGLibpng(vpImage &I, const std::string &filename) file = fopen(filename.c_str(), "rb"); - if (file == NULL) { + if (file == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot read file \"%s\"", filename.c_str())); } @@ -293,8 +293,8 @@ void readPNGLibpng(vpImage &I, const std::string &filename) /* create a png read struct */ // printf("version %s\n", PNG_LIBPNG_VER_STRING); - png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); - if (png_ptr == NULL) { + png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr); + if (png_ptr == nullptr) { fprintf(stderr, "error: can't create a png read structure!\n"); fclose(file); throw(vpImageException(vpImageException::ioError, "error reading png file")); @@ -302,10 +302,10 @@ void readPNGLibpng(vpImage &I, const std::string &filename) /* create a png info struct */ png_infop info_ptr = 
png_create_info_struct(png_ptr); - if (info_ptr == NULL) { + if (info_ptr == nullptr) { fprintf(stderr, "error: can't create a png info structure!\n"); fclose(file); - png_destroy_read_struct(&png_ptr, NULL, NULL); + png_destroy_read_struct(&png_ptr, nullptr, nullptr); throw(vpImageException(vpImageException::ioError, "error reading png file")); } @@ -313,7 +313,7 @@ void readPNGLibpng(vpImage &I, const std::string &filename) */ if (setjmp(png_jmpbuf(png_ptr))) { fclose(file); - png_destroy_read_struct(&png_ptr, &info_ptr, NULL); + png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); vpERROR_TRACE("Error during init io\n"); throw(vpImageException(vpImageException::ioError, "PNG read error")); } @@ -417,8 +417,8 @@ void readPNGLibpng(vpImage &I, const std::string &filename) delete[](png_bytep) rowPtrs; delete[] data; - png_read_end(png_ptr, NULL); - png_destroy_read_struct(&png_ptr, &info_ptr, NULL); + png_read_end(png_ptr, nullptr); + png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); fclose(file); } @@ -452,7 +452,7 @@ void readPNGLibpng(vpImage &I, const std::string &filename) file = fopen(filename.c_str(), "rb"); - if (file == NULL) { + if (file == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot read file \"%s\"", filename.c_str())); } @@ -470,7 +470,7 @@ void readPNGLibpng(vpImage &I, const std::string &filename) } /* create a png read struct */ - png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); + png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr); if (!png_ptr) { fclose(file); vpERROR_TRACE("Error during png_create_read_struct()\n"); @@ -481,7 +481,7 @@ void readPNGLibpng(vpImage &I, const std::string &filename) png_infop info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { fclose(file); - png_destroy_read_struct(&png_ptr, NULL, NULL); + png_destroy_read_struct(&png_ptr, nullptr, nullptr); vpERROR_TRACE("Error during png_create_info_struct()\n"); throw(vpImageException(vpImageException::ioError, "PNG read error")); } @@ -490,7 +490,7 @@ void readPNGLibpng(vpImage &I, const std::string &filename) */ if (setjmp(png_jmpbuf(png_ptr))) { fclose(file); - png_destroy_read_struct(&png_ptr, &info_ptr, NULL); + png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); vpERROR_TRACE("Error during init io\n"); throw(vpImageException(vpImageException::ioError, "PNG read error")); } @@ -594,8 +594,8 @@ void readPNGLibpng(vpImage &I, const std::string &filename) delete[](png_bytep) rowPtrs; delete[] data; - png_read_end(png_ptr, NULL); - png_destroy_read_struct(&png_ptr, &info_ptr, NULL); + png_read_end(png_ptr, nullptr); + png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); fclose(file); } #endif diff --git a/modules/io/src/image/private/vpImageIoPortable.cpp b/modules/io/src/image/private/vpImageIoPortable.cpp index 489bd1a449..7960632c44 100644 --- a/modules/io/src/image/private/vpImageIoPortable.cpp +++ b/modules/io/src/image/private/vpImageIoPortable.cpp @@ -190,7 +190,7 @@ void vp_writePFM(const vpImage &I, const std::string &filename) fd = fopen(filename.c_str(), "wb"); - if (fd == NULL) { + if (fd == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create PFM file \"%s\"", filename.c_str())); } @@ -222,7 +222,7 @@ void vp_writePFM_HDR(const vpImage &I, const std::string &filename) } FILE *fd = fopen(filename.c_str(), "wb"); - if (fd == NULL) { + if (fd == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create PFM file \"%s\"", 
filename.c_str())); } @@ -258,7 +258,7 @@ void vp_writePFM_HDR(const vpImage &I, const std::string &filename) } FILE *fd = fopen(filename.c_str(), "wb"); - if (fd == NULL) { + if (fd == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create PFM file \"%s\"", filename.c_str())); } @@ -308,7 +308,7 @@ void vp_writePGM(const vpImage &I, const std::string &filename) fd = fopen(filename.c_str(), "wb"); - if (fd == NULL) { + if (fd == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create PGM file \"%s\"", filename.c_str())); } @@ -373,7 +373,7 @@ void vp_writePGM(const vpImage &I, const std::string &filename) fd = fopen(filename.c_str(), "wb"); - if (fd == NULL) { + if (fd == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create PGM file \"%s\"", filename.c_str())); } @@ -806,7 +806,7 @@ void vp_writePPM(const vpImage &I, const std::string &filename) f = fopen(filename.c_str(), "wb"); - if (f == NULL) { + if (f == nullptr) { throw(vpImageException(vpImageException::ioError, "Cannot create PPM file \"%s\"", filename.c_str())); } diff --git a/modules/io/src/image/private/vpImageIoStb.cpp b/modules/io/src/image/private/vpImageIoStb.cpp index 3db5e29804..40d52ba5e8 100644 --- a/modules/io/src/image/private/vpImageIoStb.cpp +++ b/modules/io/src/image/private/vpImageIoStb.cpp @@ -56,7 +56,7 @@ void readStb(vpImage &I, const std::string &filename) { int width = 0, height = 0, channels = 0; unsigned char *image = stbi_load(filename.c_str(), &width, &height, &channels, STBI_grey); - if (image == NULL) { + if (image == nullptr) { throw(vpImageException(vpImageException::ioError, "Can't read the image: %s", filename.c_str())); } I.init(image, static_cast(height), static_cast(width), true); @@ -67,7 +67,7 @@ void readStb(vpImage &I, const std::string &filename) { int width = 0, height = 0, channels = 0; unsigned char *image = stbi_load(filename.c_str(), &width, &height, &channels, STBI_rgb_alpha); - if (image == NULL) { + if (image == nullptr) { throw(vpImageException(vpImageException::ioError, "Can't read the image: %s", filename.c_str())); } I.init(reinterpret_cast(image), static_cast(height), static_cast(width), true); diff --git a/modules/io/src/image/private/vpImageIoTinyEXR.cpp b/modules/io/src/image/private/vpImageIoTinyEXR.cpp index 780c903ca5..be16de13f3 100644 --- a/modules/io/src/image/private/vpImageIoTinyEXR.cpp +++ b/modules/io/src/image/private/vpImageIoTinyEXR.cpp @@ -63,7 +63,7 @@ void readEXRTiny(vpImage &I, const std::string &filename) EXRHeader exr_header; InitEXRHeader(&exr_header); - const char *err = NULL; // or `nullptr` in C++11 or later. + const char *err = nullptr; // or `nullptr` in C++11 or later. ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename.c_str(), &err); if (ret != 0) { std::string err_msg(err); @@ -136,7 +136,7 @@ void readEXRTiny(vpImage &I, const std::string &filename) EXRHeader exr_header; InitEXRHeader(&exr_header); - const char *err = NULL; // or `nullptr` in C++11 or later. + const char *err = nullptr; // or `nullptr` in C++11 or later. ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename.c_str(), &err); if (ret != 0) { std::string err_msg(err); @@ -236,7 +236,7 @@ void writeEXRTiny(const vpImage &I, const std::string &filename) header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of output image to be stored in .EXR } - const char *err = NULL; // or nullptr in C++11 or later. + const char *err = nullptr; // or nullptr in C++11 or later. 
int ret = SaveEXRImageToFile(&image, &header, filename.c_str(), &err); if (ret != TINYEXR_SUCCESS) { std::string err_msg(err); @@ -298,7 +298,7 @@ void writeEXRTiny(const vpImage &I, const std::string &filename) header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of output image to be stored in .EXR } - const char *err = NULL; // or nullptr in C++11 or later. + const char *err = nullptr; // or nullptr in C++11 or later. int ret = SaveEXRImageToFile(&image, &header, filename.c_str(), &err); if (ret != TINYEXR_SUCCESS) { std::string err_msg(err); diff --git a/modules/io/src/tools/vpKeyboard.cpp b/modules/io/src/tools/vpKeyboard.cpp index 6f4a2f5f0a..76c202b0c8 100644 --- a/modules/io/src/tools/vpKeyboard.cpp +++ b/modules/io/src/tools/vpKeyboard.cpp @@ -102,7 +102,7 @@ int vpKeyboard::kbhit() FD_ZERO(&readfds); FD_SET(STDIN_FILENO, &readfds); - return select(STDIN_FILENO + 1, &readfds, NULL, NULL, &tv) == 1; + return select(STDIN_FILENO + 1, &readfds, nullptr, nullptr, &tv) == 1; #elif defined(_WIN32) && !defined(WINRT) return _kbhit(); #elif defined(_WIN32) && defined(WINRT) diff --git a/modules/io/src/tools/vpParseArgv.cpp b/modules/io/src/tools/vpParseArgv.cpp index c15efc7b00..856646a0b3 100644 --- a/modules/io/src/tools/vpParseArgv.cpp +++ b/modules/io/src/tools/vpParseArgv.cpp @@ -38,8 +38,8 @@ */ vpParseArgv::vpArgvInfo vpParseArgv::defaultTable[2] = { - {"-help", ARGV_HELP, (char *)NULL, (char *)NULL, "Print summary of command-line options and abort.\n"}, - {NULL, ARGV_END, (char *)NULL, (char *)NULL, (char *)NULL}}; + {"-help", ARGV_HELP, (char *)nullptr, (char *)nullptr, "Print summary of command-line options and abort.\n"}, + {nullptr, ARGV_END, (char *)nullptr, (char *)nullptr, (char *)nullptr}}; int (*handlerProc1)(const char *dst, const char *key, const char *argument); int (*handlerProc2)(const char *dst, const char *key, int valargc, const char **argument); @@ -112,7 +112,7 @@ bool vpParseArgv::parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, i * matchPtr. */ - matchPtr = NULL; + matchPtr = nullptr; for (unsigned int i = 0; i < 2; i++) { if (i == 0) { infoPtr = argTable; @@ -120,7 +120,7 @@ bool vpParseArgv::parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, i infoPtr = defaultTable; } for (; infoPtr->type != ARGV_END; infoPtr++) { - if (infoPtr->key == NULL) { + if (infoPtr->key == nullptr) { continue; } if ((infoPtr->key[1] != c) || (strncmp(infoPtr->key, curArg, length) != 0)) { @@ -133,14 +133,14 @@ bool vpParseArgv::parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, i if (flags & ARGV_NO_ABBREV) { continue; } - if (matchPtr != NULL) { + if (matchPtr != nullptr) { FPRINTF(stderr, "ambiguous option \"%s\"\n", curArg); return true; } matchPtr = infoPtr; } } - if (matchPtr == NULL) { + if (matchPtr == nullptr) { /* * Unrecognized argument. 
Just copy it down, unless the caller @@ -176,7 +176,7 @@ bool vpParseArgv::parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, i if (argc == 0) { goto missingArg; } else { - char *endPtr = NULL; + char *endPtr = nullptr; *(((int *)infoPtr->dst) + i) = (int)strtol(argv[srcIndex], &endPtr, 0); if ((endPtr == argv[srcIndex]) || (*endPtr != 0)) { @@ -196,7 +196,7 @@ bool vpParseArgv::parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, i if (argc == 0) { goto missingArg; } else { - char *endPtr = NULL; + char *endPtr = nullptr; *(((long *)infoPtr->dst) + i) = strtol(argv[srcIndex], &endPtr, 0); if ((endPtr == argv[srcIndex]) || (*endPtr != 0)) { @@ -308,7 +308,7 @@ bool vpParseArgv::parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, i dstIndex++; argc--; } - argv[dstIndex] = (char *)NULL; + argv[dstIndex] = (char *)nullptr; *argcPtr = dstIndex; return false; @@ -357,7 +357,7 @@ void vpParseArgv::printUsage(vpArgvInfo *argTable, int flags) for (unsigned int i = 0; i < 2; i++) { for (infoPtr = i ? defaultTable : argTable; infoPtr->type != ARGV_END; infoPtr++) { int length; - if (infoPtr->key == NULL) { + if (infoPtr->key == nullptr) { continue; } length = (int)strlen(infoPtr->key); @@ -370,7 +370,7 @@ void vpParseArgv::printUsage(vpArgvInfo *argTable, int flags) FPRINTF(stderr, "Command-specific options:"); for (unsigned int i = 0;; i++) { for (infoPtr = i ? defaultTable : argTable; infoPtr->type != ARGV_END; infoPtr++) { - if ((infoPtr->type == ARGV_HELP) && (infoPtr->key == NULL)) { + if ((infoPtr->type == ARGV_HELP) && (infoPtr->key == nullptr)) { FPRINTF(stderr, "\n%s", infoPtr->help); continue; } @@ -433,11 +433,11 @@ void vpParseArgv::printUsage(vpArgvInfo *argTable, int flags) if (nargs < 1) nargs = 1; string = *((const char **)infoPtr->dst); - if ((nargs == 1) && (string == NULL)) + if ((nargs == 1) && (string == nullptr)) break; for (unsigned long j = 0; j < nargs; j++) { string = *(((const char **)infoPtr->dst) + j); - if (string != NULL) { + if (string != nullptr) { FPRINTF(stderr, " \"%s\"", string); } } @@ -482,7 +482,7 @@ void vpParseArgv::printUsage(vpArgvInfo *argTable, int flags) \param param: Pointer to a pointer to a string for output. \return If valid option is found, the character value of that option is - returned, and *param points to the parameter if given, or is NULL if no + returned, and *param points to the parameter if given, or is nullptr if no param. \return If standalone parameter (with no option) is found, 1 is returned, @@ -492,15 +492,15 @@ void vpParseArgv::printUsage(vpArgvInfo *argTable, int flags) is returned, and *param points to the invalid argument. \return When end of argument list is reached, 0 is returned, and *param - is NULL. + is nullptr. 
*/ int vpParseArgv::parse(int argc, const char **argv, const char *validOpts, const char **param) { static int iArg = 1; int chOpt; - const char *psz = NULL; - const char *pszParam = NULL; + const char *psz = nullptr; + const char *pszParam = nullptr; if (iArg < argc) { psz = &(argv[iArg][0]); @@ -510,7 +510,7 @@ int vpParseArgv::parse(int argc, const char **argv, const char *validOpts, const if (isalnum(chOpt) || ispunct(chOpt)) { // we have an option character psz = strchr(validOpts, chOpt); - if (psz != NULL) { + if (psz != nullptr) { // option is valid, we want to return chOpt if (psz[1] == ':') { // option can have a parameter diff --git a/modules/io/src/video/vpVideoReader.cpp b/modules/io/src/video/vpVideoReader.cpp index 134db3a30a..571a489490 100644 --- a/modules/io/src/video/vpVideoReader.cpp +++ b/modules/io/src/video/vpVideoReader.cpp @@ -49,7 +49,7 @@ Basic constructor. */ vpVideoReader::vpVideoReader() - : vpFrameGrabber(), m_imSequence(NULL), + : vpFrameGrabber(), m_imSequence(nullptr), #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) m_capture(), m_frame(), m_lastframe_unknown(false), #endif @@ -63,7 +63,7 @@ Basic destructor. */ vpVideoReader::~vpVideoReader() { - if (m_imSequence != NULL) { + if (m_imSequence != nullptr) { delete m_imSequence; } } @@ -247,7 +247,7 @@ void vpVideoReader::acquire(vpImage &I) if (!m_isOpen) { open(I); } - if (m_imSequence != NULL) { + if (m_imSequence != nullptr) { m_imSequence->setStep(m_frameStep); bool skip_frame = false; do { @@ -342,7 +342,7 @@ void vpVideoReader::acquire(vpImage &I) open(I); } - if (m_imSequence != NULL) { + if (m_imSequence != nullptr) { m_imSequence->setStep(m_frameStep); bool skip_frame = false; do { @@ -434,7 +434,7 @@ void vpVideoReader::acquire(vpImage &I) */ bool vpVideoReader::getFrame(vpImage &I, long frame_index) { - if (m_imSequence != NULL) { + if (m_imSequence != nullptr) { try { m_imSequence->acquire(I, frame_index); width = I.getWidth(); @@ -511,7 +511,7 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) */ bool vpVideoReader::getFrame(vpImage &I, long frame_index) { - if (m_imSequence != NULL) { + if (m_imSequence != nullptr) { try { m_imSequence->acquire(I, frame_index); width = I.getWidth(); @@ -702,7 +702,7 @@ void vpVideoReader::findLastFrameIndex() throw(vpException(vpException::notInitialized, "file not yet opened")); } - if (m_imSequence != NULL) { + if (m_imSequence != nullptr) { if (!m_lastFrameIndexIsSet) { std::string imageNameFormat = vpIoTools::getName(m_videoName); std::string dirName = vpIoTools::getParent(m_videoName); @@ -754,7 +754,7 @@ void vpVideoReader::findLastFrameIndex() */ void vpVideoReader::findFirstFrameIndex() { - if (m_imSequence != NULL) { + if (m_imSequence != nullptr) { if (!m_firstFrameIndexIsSet) { std::string imageNameFormat = vpIoTools::getName(m_videoName); std::string dirName = vpIoTools::getParent(m_videoName); diff --git a/modules/java/generator/gen2.py b/modules/java/generator/gen2.py index 27928bc485..0b5fbc609b 100755 --- a/modules/java/generator/gen2.py +++ b/modules/java/generator/gen2.py @@ -12,17 +12,17 @@ ignored_arg_types = ["RNG*"] -gen_template_check_self = Template(""" $cname* _self_ = NULL; +gen_template_check_self = Template(""" $cname* _self_ = nullptr; if(PyObject_TypeCheck(self, &pyopencv_${name}_Type)) _self_ = ${amp}((pyopencv_${name}_t*)self)->v${get}; - if (_self_ == NULL) + if (_self_ == nullptr) return failmsgp("Incorrect type of self (must be '${name}' or its derivative)"); """) -gen_template_check_self_algo = 
Template(""" $cname* _self_ = NULL; +gen_template_check_self_algo = Template(""" $cname* _self_ = nullptr; if(PyObject_TypeCheck(self, &pyopencv_${name}_Type)) _self_ = dynamic_cast<$cname*>(${amp}((pyopencv_${name}_t*)self)->v.get()); - if (_self_ == NULL) + if (_self_ == nullptr) return failmsgp("Incorrect type of self (must be '${name}' or its derivative)"); """) @@ -35,7 +35,7 @@ gen_template_simple_call_constructor = Template("""new (&(self->v)) ${cname}${args}""") -gen_template_parse_args = Template("""const char* keywords[] = { $kw_list, NULL }; +gen_template_parse_args = Template("""const char* keywords[] = { $kw_list, nullptr }; if( PyArg_ParseTupleAndKeywords(args, kw, "$fmtspec", (char**)keywords, $parse_arglist)$code_cvt )""") gen_template_func_body = Template("""$code_decl @@ -77,7 +77,7 @@ template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name) { - if( src == NULL || src == Py_None ) + if( src == nullptr || src == Py_None ) return true; if(!PyObject_TypeCheck(src, &pyopencv_${name}_Type)) { @@ -120,7 +120,7 @@ template<> bool pyopencv_to(PyObject* src, Ptr<${cname}>& dst, const char* name) { - if( src == NULL || src == Py_None ) + if( src == nullptr || src == Py_None ) return true; if(!PyObject_TypeCheck(src, &pyopencv_${name}_Type)) { @@ -158,7 +158,7 @@ static PyGetSetDef pyopencv_${name}_getseters[] = {${getset_inits} - {NULL} /* Sentinel */ + {nullptr} /* Sentinel */ }; ${methods_code} @@ -166,7 +166,7 @@ static PyMethodDef pyopencv_${name}_methods[] = { ${methods_inits} - {NULL, NULL} + {nullptr, nullptr} }; static void pyopencv_${name}_specials(void) @@ -192,7 +192,7 @@ static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure) { $cname* _self_ = dynamic_cast<$cname*>(p->v.get()); - if (_self_ == NULL) + if (_self_ == nullptr) return failmsgp("Incorrect type of object (must be '${name}' or its derivative)"); return pyopencv_from(_self_${access}${member}); } @@ -201,7 +201,7 @@ gen_template_set_prop = Template(""" static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure) { - if (value == NULL) + if (value == nullptr) { PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute"); return -1; @@ -213,13 +213,13 @@ gen_template_set_prop_algo = Template(""" static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value, void *closure) { - if (value == NULL) + if (value == nullptr) { PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute"); return -1; } $cname* _self_ = dynamic_cast<$cname*>(p->v.get()); - if (_self_ == NULL) + if (_self_ == nullptr) { failmsgp("Incorrect type of object (must be '${name}' or its derivative)"); return -1; @@ -230,10 +230,10 @@ gen_template_prop_init = Template(""" - {(char*)"${member}", (getter)pyopencv_${name}_get_${member}, NULL, (char*)"${member}", NULL},""") + {(char*)"${member}", (getter)pyopencv_${name}_get_${member}, nullptr, (char*)"${member}", nullptr},""") gen_template_rw_prop_init = Template(""" - {(char*)"${member}", (getter)pyopencv_${name}_get_${member}, (setter)pyopencv_${name}_set_${member}, (char*)"${member}", NULL},""") + {(char*)"${member}", (getter)pyopencv_${name}_get_${member}, (setter)pyopencv_${name}_set_${member}, (char*)"${member}", nullptr},""") simple_argtype_mapping = { "bool": ("bool", "b", "0"), @@ -350,7 +350,7 @@ def gen_code(self, codegen): methods_code.write(m.gen_code(codegen)) methods_inits.write(m.get_tab_entry()) - baseptr = "NULL" + baseptr = "nullptr" if self.base and 
self.base in all_classes: baseptr = "&pyopencv_" + all_classes[self.base].name + "_Type" @@ -674,7 +674,7 @@ def gen_code(self, codegen): parse_name = a.name if a.py_inputarg: if amapping[1] == "O": - code_decl += " PyObject* pyobj_%s = NULL;\n" % (a.name,) + code_decl += " PyObject* pyobj_%s = nullptr;\n" % (a.name,) parse_name = "pyobj_" + a.name if a.tp == 'char': code_cvt_list.append("convert_to_char(pyobj_%s, &%s, %s)"% (a.name, a.name, a.crepr())) @@ -758,7 +758,7 @@ def gen_code(self, codegen): parse_arglist = ", ".join(["&" + all_cargs[argno][1] for aname, argno in v.py_arglist]), code_cvt = " &&\n ".join(code_cvt_list)) else: - code_parse = "if(PyObject_Size(args) == 0 && (kw == NULL || PyObject_Size(kw) == 0))" + code_parse = "if(PyObject_Size(args) == 0 && (kw == nullptr || PyObject_Size(kw) == 0))" if len(v.py_outlist) == 0: code_ret = "Py_RETURN_NONE" @@ -788,7 +788,7 @@ def gen_code(self, codegen): # try to execute each signature code += " PyErr_Clear();\n\n".join([" {\n" + v + " }\n" for v in all_code_variants]) - def_ret = "NULL" + def_ret = "nullptr" if self.isconstructor: def_ret = "-1" code += "\n return %s;\n}\n\n" % def_ret @@ -949,7 +949,7 @@ def gen_namespace(self, ns_name): if func.isconstructor: continue self.code_ns_reg.write(func.get_tab_entry()) - self.code_ns_reg.write(' {NULL, NULL}\n};\n\n') + self.code_ns_reg.write(' {nullptr, nullptr}\n};\n\n') self.code_ns_reg.write('static ConstDef consts_%s[] = {\n'%wname) for name, cname in sorted(ns.consts.items()): @@ -957,7 +957,7 @@ def gen_namespace(self, ns_name): compat_name = re.sub(r"([a-z])([A-Z])", r"\1_\2", name).upper() if name != compat_name: self.code_ns_reg.write(' {"%s", %s},\n'%(compat_name, cname)) - self.code_ns_reg.write(' {NULL, 0}\n};\n\n') + self.code_ns_reg.write(' {nullptr, 0}\n};\n\n') def gen_namespaces_reg(self): self.code_ns_reg.write('static void init_submodules(PyObject * root) \n{\n') diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index d7e980ac1f..d1df15517a 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -964,7 +964,7 @@ def gen_func(self, ci, fi, prop_name=''): else: cvname = ("me->" if not self.isSmartClass(ci) else "(*me)->") + name c_prologue.append( \ - "%(cls)s* me = (%(cls)s*) self; //TODO: check for NULL" \ + "%(cls)s* me = (%(cls)s*) self; //TODO: check for nullptr" \ % {"cls": reverseCamelCase(self.smartWrap(ci, fi.fullClass(isCPP=True)))} \ ) cvargs = [] @@ -1133,7 +1133,7 @@ def gen_class(self, ci): JNIEXPORT jstring JNICALL Java_org_visp_%(module)s_%(j_cls)s_toString (JNIEnv* env, jclass, jlong self) { - %(cls)s* me = (%(cls)s*) self; //TODO: check for NULL + %(cls)s* me = (%(cls)s*) self; //TODO: check for nullptr std::stringstream ss; ss << *me; return env->NewStringUTF(ss.str().c_str()); diff --git a/modules/java/generator/hdr_parser.py b/modules/java/generator/hdr_parser.py index 15a5e27bf3..2d93598481 100755 --- a/modules/java/generator/hdr_parser.py +++ b/modules/java/generator/hdr_parser.py @@ -533,7 +533,7 @@ def parse_func_decl(self, decl_str, docstring=""): eqpos = a.find("CV_WRAP_DEFAULT") if eqpos >= 0: defval, pos3 = self.get_macro_arg(a, eqpos) - if defval == "NULL": + if defval == "nullptr": defval = "0" if eqpos >= 0: a = a[:eqpos].strip() diff --git a/modules/java/generator/src/cpp/VpImageRGBa.cpp b/modules/java/generator/src/cpp/VpImageRGBa.cpp index b75fa1c9d9..4606e8387f 100644 --- a/modules/java/generator/src/cpp/VpImageRGBa.cpp +++ 
b/modules/java/generator/src/cpp/VpImageRGBa.cpp @@ -39,7 +39,7 @@ JNIEXPORT jlong JNICALL Java_org_visp_core_VpImageRGBa_n_1VpImageRGBa__IICCCC(JN JNIEXPORT jlong JNICALL Java_org_visp_core_VpImageRGBa_n_1VpImageRGBa___3BIIZ(JNIEnv *env, jclass, jbyteArray arr, jint h, jint w, jboolean copyData) { - jbyte *array = env->GetByteArrayElements(arr, NULL); + jbyte *array = env->GetByteArrayElements(arr, nullptr); return (jlong) new vpImage((vpRGBa *const)array, (const unsigned int)h, (const unsigned int)w, copyData); @@ -51,7 +51,7 @@ JNIEXPORT jlong JNICALL Java_org_visp_core_VpImageRGBa_n_1VpImageRGBa___3BIIZ(JN JNIEXPORT jint JNICALL Java_org_visp_core_VpImageRGBa_n_1cols(JNIEnv *env, jclass, jlong address) { (void)env; - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr return me->getCols(); } @@ -59,7 +59,7 @@ JNIEXPORT jint JNICALL Java_org_visp_core_VpImageRGBa_n_1cols(JNIEnv *env, jclas JNIEXPORT jint JNICALL Java_org_visp_core_VpImageRGBa_n_1rows(JNIEnv *env, jclass, jlong address) { (void)env; - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr return me->getRows(); } @@ -67,7 +67,7 @@ JNIEXPORT jint JNICALL Java_org_visp_core_VpImageRGBa_n_1rows(JNIEnv *env, jclas JNIEXPORT jbyteArray JNICALL Java_org_visp_core_VpImageRGBa_n_1getPixel(JNIEnv *env, jclass, jlong address, jint i, jint j) { - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr vpRGBa val = (*me)(i, j); jbyteArray ret = env->NewByteArray(4); unsigned char temp[] = {val.R, val.G, val.B, val.A}; @@ -78,7 +78,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_visp_core_VpImageRGBa_n_1getPixel(JNIEnv * // Java Method: getPixels() JNIEXPORT jbyteArray JNICALL Java_org_visp_core_VpImageRGBa_n_1getPixels(JNIEnv *env, jclass, jlong address) { - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr jbyteArray ret = env->NewByteArray(me->getNumberOfPixel() * 4); env->SetByteArrayRegion(ret, 0, me->getNumberOfPixel() * 4, (jbyte *)me->bitmap); return ret; @@ -87,7 +87,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_visp_core_VpImageRGBa_n_1getPixels(JNIEnv // Java Method: dump() JNIEXPORT jstring JNICALL Java_org_visp_core_VpImageRGBa_n_1dump(JNIEnv *env, jclass, jlong address) { - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr std::stringstream ss; ss << *me; return env->NewStringUTF(ss.str().c_str()); diff --git a/modules/java/generator/src/cpp/VpImageUChar.cpp b/modules/java/generator/src/cpp/VpImageUChar.cpp index e506ed48de..8de84ce7c9 100644 --- a/modules/java/generator/src/cpp/VpImageUChar.cpp +++ b/modules/java/generator/src/cpp/VpImageUChar.cpp @@ -38,7 +38,7 @@ JNIEXPORT jlong JNICALL Java_org_visp_core_VpImageUChar_n_1VpImageUChar__IIB(JNI JNIEXPORT jlong JNICALL Java_org_visp_core_VpImageUChar_n_1VpImageUChar___3BIIZ(JNIEnv *env, jclass, jbyteArray arr, jint h, jint w, jboolean copyData) { - jbyte *array = env->GetByteArrayElements(arr, NULL); + jbyte *array = env->GetByteArrayElements(arr, nullptr); return (jlong) new vpImage((u_char *const)array, (const unsigned int)h, (const unsigned int)w, copyData); @@ -50,7 +50,7 @@ JNIEXPORT jlong JNICALL Java_org_visp_core_VpImageUChar_n_1VpImageUChar___3BIIZ( JNIEXPORT jint JNICALL Java_org_visp_core_VpImageUChar_n_1cols(JNIEnv *env, jclass, 
jlong address) { (void)env; - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr return me->getCols(); } @@ -58,7 +58,7 @@ JNIEXPORT jint JNICALL Java_org_visp_core_VpImageUChar_n_1cols(JNIEnv *env, jcla JNIEXPORT jint JNICALL Java_org_visp_core_VpImageUChar_n_1rows(JNIEnv *env, jclass, jlong address) { (void)env; - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr return me->getRows(); } @@ -66,14 +66,14 @@ JNIEXPORT jint JNICALL Java_org_visp_core_VpImageUChar_n_1rows(JNIEnv *env, jcla JNIEXPORT jint JNICALL Java_org_visp_core_VpImageUChar_n_1getPixel(JNIEnv *env, jclass, jlong address, jint i, jint j) { (void)env; - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr return (*me)(i, j); } // Java Method: getPixels() JNIEXPORT jbyteArray JNICALL Java_org_visp_core_VpImageUChar_n_1getPixels(JNIEnv *env, jclass, jlong address) { - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr jbyteArray ret = env->NewByteArray(me->getNumberOfPixel()); env->SetByteArrayRegion(ret, 0, me->getNumberOfPixel(), (jbyte *)me->bitmap); return ret; @@ -82,7 +82,7 @@ JNIEXPORT jbyteArray JNICALL Java_org_visp_core_VpImageUChar_n_1getPixels(JNIEnv // Java Method: dump() JNIEXPORT jstring JNICALL Java_org_visp_core_VpImageUChar_n_1dump(JNIEnv *env, jclass, jlong address) { - vpImage *me = (vpImage *)address; // TODO: check for NULL + vpImage *me = (vpImage *)address; // TODO: check for nullptr std::stringstream ss; ss << *me; return env->NewStringUTF(ss.str().c_str()); diff --git a/modules/java/generator/src/cpp/listconverters.cpp b/modules/java/generator/src/cpp/listconverters.cpp index 478137eb1f..4259faecd5 100644 --- a/modules/java/generator/src/cpp/listconverters.cpp +++ b/modules/java/generator/src/cpp/listconverters.cpp @@ -103,11 +103,11 @@ std::vector List_to_vector_double(JNIEnv *env, jdoubleArray arr) jobjectArray vector_vector_vpImagePoint_to_List(JNIEnv *env, const std::vector > &V) { if (V.empty()) { - return NULL; + return nullptr; } size_t outerSize = V.size(); - jobjectArray outerArray = env->NewObjectArray(outerSize, env->FindClass("java/lang/Object"), NULL); + jobjectArray outerArray = env->NewObjectArray(outerSize, env->FindClass("java/lang/Object"), nullptr); for (int i = 0; i < env->GetArrayLength(outerArray); i++) { size_t innerSize = V[i].size(); @@ -128,11 +128,11 @@ jobjectArray vector_vector_vpImagePoint_to_List(JNIEnv *env, const std::vector > &V) { if (V.empty()) { - return NULL; + return nullptr; } size_t outerSize = V.size(); - jobjectArray outerArray = env->NewObjectArray(outerSize, env->FindClass("java/lang/Object"), NULL); + jobjectArray outerArray = env->NewObjectArray(outerSize, env->FindClass("java/lang/Object"), nullptr); for (int i = 0; i < env->GetArrayLength(outerArray); i++) { size_t innerSize = V[i].size(); @@ -166,17 +166,17 @@ map_string_vector_vector_double_to_array_native(JNIEnv *env, const std::map > > &map) { if (map.empty()) { - return NULL; + return nullptr; } size_t mapSize = map.size(); - jobjectArray mapArray = env->NewObjectArray(mapSize, env->FindClass("java/lang/Object"), NULL); + jobjectArray mapArray = env->NewObjectArray(mapSize, env->FindClass("java/lang/Object"), nullptr); int idx = 0; for (std::map > >::const_iterator it = map.begin(); it != map.end(); ++it, idx++) { size_t 
outerSize = it->second.size(); - jobjectArray outerArray = env->NewObjectArray(outerSize, env->FindClass("java/lang/Object"), NULL); + jobjectArray outerArray = env->NewObjectArray(outerSize, env->FindClass("java/lang/Object"), nullptr); for (int i = 0; i < env->GetArrayLength(outerArray); i++) { size_t innerSize = it->second[i].size(); @@ -200,7 +200,7 @@ map_string_vector_vector_double_to_array_native(JNIEnv *env, jobjectArray vector_string_to_array_native(JNIEnv *env, const std::vector &V) { if (V.empty()) { - return NULL; + return nullptr; } size_t vecSize = V.size(); diff --git a/modules/java/misc/core/gen_dict.json b/modules/java/misc/core/gen_dict.json index 1770423c94..f1d493a8f0 100644 --- a/modules/java/misc/core/gen_dict.json +++ b/modules/java/misc/core/gen_dict.json @@ -53,10 +53,10 @@ " static const char method_name[] = \"core::getKannalaBrandtDistortionCoefficients()\";", " try {", " LOGD(\"%s\", method_name);", - " vpCameraParameters *cam = (vpCameraParameters*) self; //TODO: check for NULL", + " vpCameraParameters *cam = (vpCameraParameters*) self; //TODO: check for nullptr", " std::vector coefs = cam->getKannalaBrandtDistortionCoefficients();", " jdoubleArray jCoefs = env->NewDoubleArray(coefs.size());", - " jdouble *ptr_jCoefs = NULL;", + " jdouble *ptr_jCoefs = nullptr;", " ptr_jCoefs = env->GetDoubleArrayElements(jCoefs, 0);", " for (size_t i = 0; i < coefs.size(); i++) {", " ptr_jCoefs[i] = coefs[i];", @@ -97,7 +97,7 @@ " static const char method_name[] = \"core::getWorldCoordinates_12()\";", " try {", " LOGD(\"%s\", method_name);", - " vpPoint* me = (vpPoint*) self; //TODO: check for NULL", + " vpPoint* me = (vpPoint*) self; //TODO: check for nullptr", " vpColVector _retval_ = me->getWorldCoordinates( );", " return (jlong) new vpColVector(_retval_);", " } catch(const std::exception &e) {", diff --git a/modules/java/misc/core/src/java/core+VpImageRGBa.java b/modules/java/misc/core/src/java/core+VpImageRGBa.java index 3f42aa9b83..a5b19a314b 100644 --- a/modules/java/misc/core/src/java/core+VpImageRGBa.java +++ b/modules/java/misc/core/src/java/core+VpImageRGBa.java @@ -8,7 +8,7 @@ public class VpImageRGBa { public VpImageRGBa(long addr){ if (addr == 0) - throw new java.lang.UnsupportedOperationException("Native object address is NULL"); + throw new java.lang.UnsupportedOperationException("Native object address is nullptr"); nativeObj = addr; } diff --git a/modules/java/misc/core/src/java/core+VpImageUChar.java b/modules/java/misc/core/src/java/core+VpImageUChar.java index e29d952537..581bcc10da 100644 --- a/modules/java/misc/core/src/java/core+VpImageUChar.java +++ b/modules/java/misc/core/src/java/core+VpImageUChar.java @@ -8,7 +8,7 @@ public class VpImageUChar { public VpImageUChar(long addr){ if (addr == 0) - throw new java.lang.UnsupportedOperationException("Native object address is NULL"); + throw new java.lang.UnsupportedOperationException("Native object address is nullptr"); nativeObj = addr; } diff --git a/modules/java/misc/detection/gen_dict.json b/modules/java/misc/detection/gen_dict.json index bd7de1194a..bdcae347cd 100644 --- a/modules/java/misc/detection/gen_dict.json +++ b/modules/java/misc/detection/gen_dict.json @@ -36,7 +36,7 @@ " try {", " LOGD(\"%s\", method_name);", " std::vector cMo_vec_list_arr;", - " vpDetectorAprilTag* me = (vpDetectorAprilTag*) self; //TODO: check for NULL", + " vpDetectorAprilTag* me = (vpDetectorAprilTag*) self; //TODO: check for nullptr", " vpImage& I = *((vpImage*)I_nativeObj);", " vpCameraParameters& cam = 
*((vpCameraParameters*)cam_nativeObj);", " /* bool _retval_ = */ me->detect( I, (double)tagSize, cam, cMo_vec_list_arr );", @@ -46,7 +46,7 @@ " } catch (...) {", " throwJavaException(env, 0, method_name);", " }", - " return NULL;", + " return nullptr;", "}\n\n", "//", "// manual port", @@ -59,7 +59,7 @@ " static const char method_name[] = \"detection::detect_11()\";", " try {", " LOGD(\"%s\", method_name);", - " vpDetectorAprilTag* me = (vpDetectorAprilTag*) self; //TODO: check for NULL", + " vpDetectorAprilTag* me = (vpDetectorAprilTag*) self; //TODO: check for nullptr", " vpImage& I = *((vpImage*)I_nativeObj);", " bool _retval_ = me->detect( I );", " return _retval_;", @@ -267,7 +267,7 @@ " vpDetectorAprilTag *tag = (vpDetectorAprilTag*) self;", " std::vector tag_ids = tag->getTagsId();", " jintArray jIds = env->NewIntArray(tag_ids.size());", - " jint *ptr_ids = NULL;", + " jint *ptr_ids = nullptr;", " ptr_ids = env->GetIntArrayElements(jIds, 0);", " for (size_t i = 0; i < tag_ids.size(); i++) {", " ptr_ids[i] = tag_ids[i];", diff --git a/modules/java/misc/imgproc/src/java/imgproc+VpContour.java b/modules/java/misc/imgproc/src/java/imgproc+VpContour.java index 31b9c1872f..36efebcb87 100644 --- a/modules/java/misc/imgproc/src/java/imgproc+VpContour.java +++ b/modules/java/misc/imgproc/src/java/imgproc+VpContour.java @@ -6,7 +6,7 @@ public class VpContour { public VpContour(long addr){ if (addr == 0) - throw new java.lang.UnsupportedOperationException("Native object address is NULL"); + throw new java.lang.UnsupportedOperationException("Native object address is nullptr"); nativeObj = addr; } diff --git a/modules/java/misc/mbt/gen_dict.json b/modules/java/misc/mbt/gen_dict.json index 62b13dc277..9e634f20ec 100644 --- a/modules/java/misc/mbt/gen_dict.json +++ b/modules/java/misc/mbt/gen_dict.json @@ -190,7 +190,7 @@ " static const char method_name[] = \"vpMbGenericTracker::getCameraParameters_10()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " vpCameraParameters& camera = *((vpCameraParameters*)camera_nativeObj);", " me->getCameraParameters( camera );", " } catch(const std::exception &e) {", @@ -210,7 +210,7 @@ " static const char method_name[] = \"vpMbGenericTracker::getCameraParameters_11()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " vpCameraParameters& cam1 = *((vpCameraParameters*)cam1_nativeObj);", " vpCameraParameters& cam2 = *((vpCameraParameters*)cam2_nativeObj);", " me->getCameraParameters( cam1, cam2 );", @@ -231,7 +231,7 @@ " static const char method_name[] = \"vpMbGenericTracker::getCameraParameters_12()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " int cameraCount = env->GetArrayLength(cameraNames);", " std::map mapOfCameraParameters;", " jlong *jcameras = env->GetLongArrayElements(cameras, 0);", @@ -299,7 +299,7 @@ " static const char method_name[] = \"vpMbGenericTracker::getPose_10()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check 
for nullptr", " vpHomogeneousMatrix& cMo = *((vpHomogeneousMatrix*)cMo_nativeObj);", " me->getPose( cMo );", " } catch(const std::exception &e) {", @@ -319,7 +319,7 @@ " static const char method_name[] = \"vpMbGenericTracker::getPose_11()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " vpHomogeneousMatrix& c1Mo = *((vpHomogeneousMatrix*)c1Mo_nativeObj);", " vpHomogeneousMatrix& c2Mo = *((vpHomogeneousMatrix*)c2Mo_nativeObj);", " me->getPose( c1Mo, c2Mo );", @@ -340,7 +340,7 @@ " static const char method_name[] = \"vpMbGenericTracker::getPose_12()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " int cameraCount = env->GetArrayLength(cameraNames);", " std::map mapOfCameraPoses;", " jlong *jposes = env->GetLongArrayElements(poses, 0);", @@ -408,7 +408,7 @@ " static const char method_name[] = \"vpMbGenericTracker::initFromPose_10()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " vpImage& I = *((vpImage*)I_nativeObj);", " vpHomogeneousMatrix& cMo = *((vpHomogeneousMatrix*)cMo_nativeObj);", " me->initFromPose( I, cMo );", @@ -429,7 +429,7 @@ " static const char method_name[] = \"vpMbGenericTracker::initFromPose_11()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " vpImage& I1 = *((vpImage*)I1_nativeObj);", " vpImage& I2 = *((vpImage*)I2_nativeObj);", " vpHomogeneousMatrix& c1Mo = *((vpHomogeneousMatrix*)c1Mo_nativeObj);", @@ -452,7 +452,7 @@ " static const char method_name[] = \"vpMbGenericTracker::initFromPose_12()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* tracker = (vpMbGenericTracker*) address; //TODO: check for NULL", + " vpMbGenericTracker* tracker = (vpMbGenericTracker*) address; //TODO: check for nullptr", " int stringCount = env->GetArrayLength(cameraNames);", " std::map *> mapOfImages;", " std::map mapOfCameraPoses;", @@ -525,7 +525,7 @@ " static const char method_name[] = \"mbt::loadConfigFile_11()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " const char* utf_configFile = env->GetStringUTFChars(configFile, 0);", " string n_configFile( utf_configFile ? utf_configFile : \"\" );", " env->ReleaseStringUTFChars(configFile, utf_configFile);", @@ -547,7 +547,7 @@ " static const char method_name[] = \"mbt::loadConfigFile_10()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " const char* utf_configFile1 = env->GetStringUTFChars(configFile1, 0);", " string n_configFile1( utf_configFile1 ? 
utf_configFile1 : \"\" );", " env->ReleaseStringUTFChars(configFile1, utf_configFile1);", @@ -666,7 +666,7 @@ " static const char method_name[] = \"vpMbGenericTracker::loadModel()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " const char* utf_modelFile = env->GetStringUTFChars(modelFile, 0);", " string n_modelFile( utf_modelFile ? utf_modelFile : \"\" );", " env->ReleaseStringUTFChars(modelFile, utf_modelFile);", @@ -689,7 +689,7 @@ " static const char method_name[] = \"vpMbGenericTracker::loadModel()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " const char* utf_modelFile = env->GetStringUTFChars(modelFile, 0);", " string n_modelFile( utf_modelFile ? utf_modelFile : \"\" );", " env->ReleaseStringUTFChars(modelFile, utf_modelFile);", @@ -711,7 +711,7 @@ " static const char method_name[] = \"vpMbGenericTracker::loadModel()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " const char* utf_modelFile1 = env->GetStringUTFChars(modelFile1, 0);", " string n_modelFile1( utf_modelFile1 ? utf_modelFile1 : \"\" );", " env->ReleaseStringUTFChars(modelFile1, utf_modelFile1);", @@ -738,7 +738,7 @@ " static const char method_name[] = \"vpMbGenericTracker::loadModel()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " const char* utf_modelFile1 = env->GetStringUTFChars(modelFile1, 0);", " string n_modelFile1( utf_modelFile1 ? utf_modelFile1 : \"\" );", " env->ReleaseStringUTFChars(modelFile1, utf_modelFile1);", @@ -766,14 +766,14 @@ " vpMbGenericTracker *tracker = (vpMbGenericTracker*) address;", " int stringCount = env->GetArrayLength(stringKeys);", " int sizeT = env->GetArrayLength(Ts);", - " jlong *jTs = sizeT > 0 ? env->GetLongArrayElements(Ts, 0) : NULL;", + " jlong *jTs = sizeT > 0 ? 
env->GetLongArrayElements(Ts, 0) : nullptr;", " std::map map;", " std::map mapOfT;", " for (int i = 0; i < stringCount; i++) {", " jstring key = (jstring) (env->GetObjectArrayElement(stringKeys, i));", " jstring value = (jstring) (env->GetObjectArrayElement(stringValues, i));", " map[convertTo(env, key)] = convertTo(env, value);", - " if (jTs != NULL) {", + " if (jTs != nullptr) {", " vpHomogeneousMatrix& T = *((vpHomogeneousMatrix*)jTs[i]);", " mapOfT[convertTo(env, key)] = T;", " }", @@ -869,7 +869,7 @@ " static const char method_name[] = \"vpMbGenericTracker::track_10()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " vpImage& I = *((vpImage*)I_nativeObj);", " me->track( I );", " return;", @@ -890,7 +890,7 @@ " static const char method_name[] = \"vpMbGenericTracker::track_11()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* me = (vpMbGenericTracker*) self; //TODO: check for nullptr", " vpImage& I1 = *((vpImage*)I1_nativeObj);", " vpImage& I2 = *((vpImage*)I2_nativeObj);", " me->track( I1, I2 );", @@ -912,7 +912,7 @@ " static const char method_name[] = \"vpMbGenericTracker::track_12()\";", " try {", " LOGD(\"%s\", method_name);", - " vpMbGenericTracker* tracker = (vpMbGenericTracker*) self; //TODO: check for NULL", + " vpMbGenericTracker* tracker = (vpMbGenericTracker*) self; //TODO: check for nullptr", " int cameraNamesCount = env->GetArrayLength(cameraNames);", " jlong *jlong_images = env->GetLongArrayElements(images, 0);", " std::map *> mapOfImages;", @@ -1029,7 +1029,7 @@ "jn_code": [ "" ] - }, + }, "getKltMaskBorder": { "j_code": [ "" diff --git a/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h b/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h index f2fac6d886..3bb77f9b16 100644 --- a/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h +++ b/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h @@ -349,7 +349,7 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu { (reinterpret_cast(arg))->updateArticularPosition(); // pthread_exit((void*) 0); - return NULL; + return nullptr; } #endif diff --git a/modules/robot/include/visp3/robot/vpSimulatorAfma6.h b/modules/robot/include/visp3/robot/vpSimulatorAfma6.h index 3a38780376..0e2bde3196 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorAfma6.h +++ b/modules/robot/include/visp3/robot/vpSimulatorAfma6.h @@ -226,7 +226,7 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp double pos5, double pos6); void setPosition(const char *filename); void setPositioningVelocity(double vel) { positioningVelocity = vel; } - bool setPosition(const vpHomogeneousMatrix &cdMo, vpImage *Iint = NULL, const double &errMax = 0.001); + bool setPosition(const vpHomogeneousMatrix &cdMo, vpImage *Iint = nullptr, const double &errMax = 0.001); vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState) override; void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; diff --git a/modules/robot/src/haptic-device/virtuose/vpVirtuose.cpp b/modules/robot/src/haptic-device/virtuose/vpVirtuose.cpp index 71310ae2a4..de7ed643db 100644 --- a/modules/robot/src/haptic-device/virtuose/vpVirtuose.cpp +++ 
b/modules/robot/src/haptic-device/virtuose/vpVirtuose.cpp @@ -49,7 +49,7 @@ * Authorize indexing on all movements by default. */ vpVirtuose::vpVirtuose() - : m_virtContext(NULL), m_ip_port("localhost#5000"), m_verbose(false), m_apiMajorVersion(0), m_apiMinorVersion(0), + : m_virtContext(nullptr), m_ip_port("localhost#5000"), m_verbose(false), m_apiMajorVersion(0), m_apiMinorVersion(0), m_ctrlMajorVersion(0), m_ctrlMinorVersion(0), m_typeCommand(COMMAND_TYPE_IMPEDANCE), m_indexType(INDEXING_ALL), m_is_init(false), m_period(0.001f), m_njoints(6) { @@ -62,9 +62,9 @@ vpVirtuose::vpVirtuose() */ void vpVirtuose::close() { - if (m_virtContext != NULL) { + if (m_virtContext != nullptr) { virtClose(m_virtContext); - m_virtContext = NULL; + m_virtContext = nullptr; } } @@ -538,7 +538,7 @@ void vpVirtuose::init() if (!m_is_init) { m_virtContext = virtOpen(m_ip_port.c_str()); - if (m_virtContext == NULL) { + if (m_virtContext == nullptr) { int err = virtGetErrorCode(m_virtContext); throw(vpException(vpException::fatalError, "Cannot open communication with haptic device using %s: %s. Check ip and port values", diff --git a/modules/robot/src/image-simulator/vpImageSimulator.cpp b/modules/robot/src/image-simulator/vpImageSimulator.cpp index f35cdcdc13..d71babc2db 100644 --- a/modules/robot/src/image-simulator/vpImageSimulator.cpp +++ b/modules/robot/src/image-simulator/vpImageSimulator.cpp @@ -57,8 +57,8 @@ */ vpImageSimulator::vpImageSimulator(const vpColorPlan &col) : cMt(), pt(), ptClipped(), interp(SIMPLE), normal_obj(), normal_Cam(), normal_Cam_optim(), distance(1.), - visible_result(1.), visible(false), X0_2_optim(NULL), frobeniusNorm_u(0.), fronbniusNorm_v(0.), vbase_u(), - vbase_v(), vbase_u_optim(NULL), vbase_v_optim(NULL), Xinter_optim(NULL), listTriangle(), colorI(col), Ig(), Ic(), + visible_result(1.), visible(false), X0_2_optim(nullptr), frobeniusNorm_u(0.), fronbniusNorm_v(0.), vbase_u(), + vbase_v(), vbase_u_optim(nullptr), vbase_v_optim(nullptr), Xinter_optim(nullptr), listTriangle(), colorI(col), Ig(), Ic(), rect(), cleanPrevImage(false), setBackgroundTexture(false), bgColor(vpColor::white), focal(), needClipping(false) { for (int i = 0; i < 4; i++) @@ -94,8 +94,8 @@ vpImageSimulator::vpImageSimulator(const vpColorPlan &col) */ vpImageSimulator::vpImageSimulator(const vpImageSimulator &text) : cMt(), pt(), ptClipped(), interp(SIMPLE), normal_obj(), normal_Cam(), normal_Cam_optim(), distance(1.), - visible_result(1.), visible(false), X0_2_optim(NULL), frobeniusNorm_u(0.), fronbniusNorm_v(0.), vbase_u(), - vbase_v(), vbase_u_optim(NULL), vbase_v_optim(NULL), Xinter_optim(NULL), listTriangle(), colorI(GRAY_SCALED), Ig(), + visible_result(1.), visible(false), X0_2_optim(nullptr), frobeniusNorm_u(0.), fronbniusNorm_v(0.), vbase_u(), + vbase_v(), vbase_u_optim(nullptr), vbase_v_optim(nullptr), Xinter_optim(nullptr), listTriangle(), colorI(GRAY_SCALED), Ig(), Ic(), rect(), cleanPrevImage(false), setBackgroundTexture(false), bgColor(vpColor::white), focal(), needClipping(false) { diff --git a/modules/robot/src/real-robot/afma4/vpRobotAfma4.cpp b/modules/robot/src/real-robot/afma4/vpRobotAfma4.cpp index f5cc6d3f3f..c79d06eb55 100644 --- a/modules/robot/src/real-robot/afma4/vpRobotAfma4.cpp +++ b/modules/robot/src/real-robot/afma4/vpRobotAfma4.cpp @@ -161,8 +161,9 @@ vpRobotAfma4::vpRobotAfma4(bool verbose) : vpAfma4(), vpRobot() try { this->init(); this->setRobotState(vpRobot::STATE_STOP); - } catch (...) { - // vpERROR_TRACE("Error caught") ; + } + catch (...) 
{ + // vpERROR_TRACE("Error caught") ; throw; } positioningVelocity = defaultPositioningVelocity; @@ -220,7 +221,7 @@ void vpRobotAfma4::init(void) // Look if the power is on or off UInt32 HIPowerStatus; UInt32 EStopStatus; - Try(PrimitiveSTATUS_Afma4(NULL, NULL, &EStopStatus, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma4(nullptr, nullptr, &EStopStatus, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); // Print the robot status @@ -293,7 +294,7 @@ vpRobotAfma4::~vpRobotAfma4(void) // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Afma4(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma4(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); // if (HIPowerStatus == 1) { @@ -333,9 +334,10 @@ vpRobot::vpRobotStateType vpRobotAfma4::setRobotState(vpRobot::vpRobotStateType if (vpRobot::STATE_VELOCITY_CONTROL == getRobotState()) { std::cout << "Change the control mode from velocity to position control.\n"; Try(PrimitiveSTOP_Afma4()); - } else { - // std::cout << "Change the control mode from stop to position - // control.\n"; + } + else { + // std::cout << "Change the control mode from stop to position + // control.\n"; } this->powerOn(); break; @@ -400,23 +402,26 @@ void vpRobotAfma4::powerOn(void) unsigned int nitermax = 10; for (unsigned int i = 0; i < nitermax; i++) { - Try(PrimitiveSTATUS_Afma4(NULL, NULL, &EStopStatus, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma4(nullptr, nullptr, &EStopStatus, nullptr, nullptr, nullptr, &HIPowerStatus)); if (EStopStatus == ESTOP_AUTO) { break; // exit for loop - } else if (EStopStatus == ESTOP_MANUAL) { + } + else if (EStopStatus == ESTOP_MANUAL) { break; // exit for loop - } else if (EStopStatus == ESTOP_ACTIVATED) { + } + else if (EStopStatus == ESTOP_ACTIVATED) { if (firsttime) { std::cout << "Emergency stop is activated! \n" - << "Check the emergency stop button and push the yellow " - "button before continuing." - << std::endl; + << "Check the emergency stop button and push the yellow " + "button before continuing." + << std::endl; firsttime = false; } fprintf(stdout, "Remaining time %us \r", nitermax - i); fflush(stdout); CAL_Wait(1); - } else { + } + else { std::cout << "Sorry there is an error on the emergency chain." << std::endl; std::cout << "You have to call Adept for maintenance..." << std::endl; // Free allocated resources @@ -462,7 +467,7 @@ void vpRobotAfma4::powerOff(void) // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Afma4(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma4(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); if (HIPowerStatus == 1) { @@ -496,7 +501,7 @@ bool vpRobotAfma4::getPowerState(void) bool status = false; // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Afma4(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma4(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); if (HIPowerStatus == 1) { @@ -552,7 +557,8 @@ void vpRobotAfma4::get_cVf(vpVelocityTwistMatrix &cVf) const try { vpAfma4::get_cVf(q, cVf); - } catch (...) { + } + catch (...) { vpERROR_TRACE("catch exception "); throw; } @@ -598,7 +604,8 @@ void vpRobotAfma4::get_eJe(vpMatrix &eJe) try { vpAfma4::get_eJe(q, eJe); - } catch (...) { + } + catch (...) 
{ vpERROR_TRACE("catch exception "); throw; } @@ -633,7 +640,8 @@ void vpRobotAfma4::get_fJe(vpMatrix &fJe) try { vpAfma4::get_fJe(q, fJe); - } catch (...) { + } + catch (...) { vpERROR_TRACE("Error caught"); throw; } @@ -867,7 +875,8 @@ void vpRobotAfma4::setPosition(const vpRobot::vpControlFrameType frame, const do position[3] = q5; setPosition(frame, position); - } catch (...) { + } + catch (...) { vpERROR_TRACE("Error caught"); throw; } @@ -1188,10 +1197,10 @@ void vpRobotAfma4::setVelocity(const vpRobot::vpControlFrameType frame, const vp vpColVector velocity = vpRobot::saturateVelocities(vel, vel_max, true); #if 0 // ok - vpMatrix eJe(4,6); + vpMatrix eJe(4, 6); eJe = 0; eJe[2][4] = -1; - eJe[3][3] = 1; + eJe[3][3] = 1; joint_vel = eJe * velocity; // Compute the articular velocity #endif @@ -1240,17 +1249,19 @@ void vpRobotAfma4::setVelocity(const vpRobot::vpControlFrameType frame, const vp if (TryStt < 0) { if (TryStt == VelStopOnJoint) { UInt32 axisInJoint[njoint]; - PrimitiveSTATUS_Afma4(NULL, NULL, NULL, NULL, NULL, axisInJoint, NULL); + PrimitiveSTATUS_Afma4(nullptr, nullptr, nullptr, nullptr, nullptr, axisInJoint, nullptr); for (unsigned int i = 0; i < njoint; i++) { if (axisInJoint[i]) std::cout << "\nWarning: Velocity control stopped: axis " << i + 1 << " on joint limit!" << std::endl; } - } else { + } + else { printf("\n%s(%d): Error %d", __FUNCTION__, TryLine, TryStt); - if (TryString != NULL) { + if (TryString != nullptr) { // The statement is in TryString, but we need to check the validity printf(" Error sentence %s\n", TryString); // Print the TryString - } else { + } + else { printf("\n"); } } @@ -1388,7 +1399,8 @@ void vpRobotAfma4::getVelocity(const vpRobot::vpControlFrameType frame, vpColVec break; } } - } else { + } + else { first_time_getvel = false; } @@ -1625,7 +1637,7 @@ bool vpRobotAfma4::savePosFile(const std::string &filename, const vpColVector &q FILE *fd; fd = fopen(filename.c_str(), "w"); - if (fd == NULL) + if (fd == nullptr) return false; fprintf(fd, "\ @@ -1726,7 +1738,8 @@ void vpRobotAfma4::getDisplacement(vpRobot::vpControlFrameType frame, vpColVecto return; } } - } else { + } + else { first_time_getdis = false; } @@ -1743,5 +1756,5 @@ void vpRobotAfma4::getDisplacement(vpRobot::vpControlFrameType frame, vpColVecto #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_robot.a(vpRobotAfma4.cpp.o) has no // symbols -void dummy_vpRobotAfma4(){}; +void dummy_vpRobotAfma4() { }; #endif diff --git a/modules/robot/src/real-robot/afma4/vpServolens.cpp b/modules/robot/src/real-robot/afma4/vpServolens.cpp index 226746015d..f4a8bc9fa0 100644 --- a/modules/robot/src/real-robot/afma4/vpServolens.cpp +++ b/modules/robot/src/real-robot/afma4/vpServolens.cpp @@ -715,7 +715,7 @@ bool vpServolens::read(char *c, long timeout_s) const FD_ZERO(&readfds); FD_SET(static_cast(this->remfd), &readfds); - if (select(FD_SETSIZE, &readfds, (fd_set *)NULL, (fd_set *)NULL, &timeout) > 0) { + if (select(FD_SETSIZE, &readfds, (fd_set *)nullptr, (fd_set *)nullptr, &timeout) > 0) { ssize_t n = ::read(this->remfd, c, 1); /* read one character at a time */ if (n != 1) return false; diff --git a/modules/robot/src/real-robot/afma6/vpAfma6.cpp b/modules/robot/src/real-robot/afma6/vpAfma6.cpp index 2ead7b326c..600ee8e7cc 100644 --- a/modules/robot/src/real-robot/afma6/vpAfma6.cpp +++ b/modules/robot/src/real-robot/afma6/vpAfma6.cpp @@ -59,7 +59,7 @@ /* ---------------------------------------------------------------------- */ static const char *opt_Afma6[] = 
{"JOINT_MAX", "JOINT_MIN", "LONG_56", "COUPL_56", - "CAMERA", "eMc_ROT_XYZ", "eMc_TRANS_XYZ", NULL}; + "CAMERA", "eMc_ROT_XYZ", "eMc_TRANS_XYZ", nullptr}; #ifdef VISP_HAVE_AFMA6_DATA const std::string vpAfma6::CONST_AFMA6_FILENAME = @@ -1116,7 +1116,7 @@ void vpAfma6::parseConfigFile(const std::string &filename) std::string key; ss >> key; - for (code = 0; NULL != opt_Afma6[code]; ++code) { + for (code = 0; nullptr != opt_Afma6[code]; ++code) { if (key.compare(opt_Afma6[code]) == 0) { break; } diff --git a/modules/robot/src/real-robot/afma6/vpRobotAfma6.cpp b/modules/robot/src/real-robot/afma6/vpRobotAfma6.cpp index 24ed3560b5..80f1a4d1f9 100644 --- a/modules/robot/src/real-robot/afma6/vpRobotAfma6.cpp +++ b/modules/robot/src/real-robot/afma6/vpRobotAfma6.cpp @@ -250,7 +250,7 @@ void vpRobotAfma6::init(void) // Look if the power is on or off UInt32 HIPowerStatus; UInt32 EStopStatus; - Try(PrimitiveSTATUS_Afma6(NULL, NULL, &EStopStatus, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma6(nullptr, nullptr, &EStopStatus, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); // Print the robot status @@ -562,7 +562,7 @@ vpRobotAfma6::~vpRobotAfma6(void) // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Afma6(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma6(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); // if (HIPowerStatus == 1) { @@ -669,7 +669,7 @@ void vpRobotAfma6::powerOn(void) unsigned int nitermax = 10; for (unsigned int i = 0; i < nitermax; i++) { - Try(PrimitiveSTATUS_Afma6(NULL, NULL, &EStopStatus, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma6(nullptr, nullptr, &EStopStatus, nullptr, nullptr, nullptr, &HIPowerStatus)); if (EStopStatus == ESTOP_AUTO) { break; // exit for loop } else if (EStopStatus == ESTOP_MANUAL) { @@ -731,7 +731,7 @@ void vpRobotAfma6::powerOff(void) // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Afma6(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma6(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); if (HIPowerStatus == 1) { @@ -765,7 +765,7 @@ bool vpRobotAfma6::getPowerState(void) bool status = false; // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Afma6(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Afma6(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); if (HIPowerStatus == 1) { @@ -1669,14 +1669,14 @@ void vpRobotAfma6::setVelocity(const vpRobot::vpControlFrameType frame, const vp if (TryStt < 0) { if (TryStt == VelStopOnJoint) { Int32 axisInJoint[njoint]; - PrimitiveSTATUS_Afma6(NULL, NULL, NULL, NULL, NULL, axisInJoint, NULL); + PrimitiveSTATUS_Afma6(nullptr, nullptr, nullptr, nullptr, nullptr, axisInJoint, nullptr); for (unsigned int i = 0; i < njoint; i++) { if (axisInJoint[i]) std::cout << "\nWarning: Velocity control stopped: axis " << i + 1 << " on joint limit!" 
<< std::endl; } } else { printf("\n%s(%d): Error %d", __FUNCTION__, TryLine, TryStt); - if (TryString != NULL) { + if (TryString != nullptr) { // The statement is in TryString, but we need to check the validity printf(" Error sentence %s\n", TryString); // Print the TryString } else { @@ -2061,7 +2061,7 @@ bool vpRobotAfma6::savePosFile(const std::string &filename, const vpColVector &q FILE *fd; fd = fopen(filename.c_str(), "w"); - if (fd == NULL) + if (fd == nullptr) return false; fprintf(fd, "\ @@ -2273,7 +2273,7 @@ bool vpRobotAfma6::checkJointLimits(vpColVector &jointsStatus) jointsStatus.resize(6); InitTry; - Try(PrimitiveSTATUS_Afma6(NULL, NULL, NULL, NULL, NULL, axisInJoint, NULL)); + Try(PrimitiveSTATUS_Afma6(nullptr, nullptr, nullptr, nullptr, nullptr, axisInJoint, nullptr)); for (unsigned int i = 0; i < njoint; i++) { if (axisInJoint[i]) { std::cout << "\nWarning: Velocity control stopped: axis " << i + 1 << " on joint limit!" << std::endl; diff --git a/modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp b/modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp index 73ccb914a1..45d99cb07c 100644 --- a/modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp +++ b/modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp @@ -73,7 +73,7 @@ extern "C" { */ bool vpRobotBebop2::m_running = false; -ARCONTROLLER_Device_t *vpRobotBebop2::m_deviceController = NULL; +ARCONTROLLER_Device_t *vpRobotBebop2::m_deviceController = nullptr; /*! @@ -123,12 +123,12 @@ vpRobotBebop2::vpRobotBebop2(bool verbose, bool setDefaultSettings, std::string sigaction(SIGQUIT, &m_sigAct, 0); #ifdef VISP_HAVE_FFMPEG - m_codecContext = NULL; + m_codecContext = nullptr; m_packet = AVPacket(); - m_picture = NULL; - m_bgr_picture = NULL; - m_img_convert_ctx = NULL; - m_buffer = NULL; + m_picture = nullptr; + m_bgr_picture = nullptr; + m_img_convert_ctx = nullptr; + m_buffer = nullptr; m_videoDecodingStarted = false; #endif @@ -220,7 +220,7 @@ vpRobotBebop2::~vpRobotBebop2() { cleanUp(); } */ void vpRobotBebop2::doFlatTrim() { - if (isRunning() && m_deviceController != NULL && isLanded()) { + if (isRunning() && m_deviceController != nullptr && isLanded()) { m_flatTrimFinished = false; @@ -317,7 +317,7 @@ double vpRobotBebop2::getMaxCameraPan() const { return m_maxCameraPan; } */ void vpRobotBebop2::setCameraOrientation(double tilt, double pan, bool blocking) { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { m_deviceController->aRDrone3->sendCameraOrientationV2(m_deviceController->aRDrone3, static_cast(tilt), static_cast(pan)); @@ -346,7 +346,7 @@ void vpRobotBebop2::setCameraOrientation(double tilt, double pan, bool blocking) */ void vpRobotBebop2::setCameraTilt(double tilt, bool blocking) { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { m_deviceController->aRDrone3->sendCameraOrientationV2(m_deviceController->aRDrone3, static_cast(tilt), static_cast(getCurrentCameraPan())); @@ -375,7 +375,7 @@ void vpRobotBebop2::setCameraTilt(double tilt, bool blocking) */ void vpRobotBebop2::setCameraPan(double pan, bool blocking) { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { m_deviceController->aRDrone3->sendCameraOrientationV2( m_deviceController->aRDrone3, static_cast(getCurrentCameraTilt()), static_cast(pan)); @@ -397,7 +397,7 @@ void vpRobotBebop2::setCameraPan(double pan, bool blocking) */ bool vpRobotBebop2::isRunning() { - if (m_deviceController == NULL) 
{ + if (m_deviceController == nullptr) { return false; } else { return m_running; @@ -453,7 +453,7 @@ bool vpRobotBebop2::isLanded() */ void vpRobotBebop2::takeOff(bool blocking) { - if (isRunning() && isLanded() && m_deviceController != NULL) { + if (isRunning() && isLanded() && m_deviceController != nullptr) { m_deviceController->aRDrone3->sendPilotingTakeOff(m_deviceController->aRDrone3); @@ -476,7 +476,7 @@ void vpRobotBebop2::takeOff(bool blocking) */ void vpRobotBebop2::land() { - if (m_deviceController != NULL) { + if (m_deviceController != nullptr) { m_deviceController->aRDrone3->sendPilotingLanding(m_deviceController->aRDrone3); } } @@ -493,7 +493,7 @@ void vpRobotBebop2::land() */ void vpRobotBebop2::setVerticalSpeed(int value) { - if (isRunning() && m_deviceController != NULL && (isFlying() || isHovering())) { + if (isRunning() && m_deviceController != nullptr && (isFlying() || isHovering())) { m_errorController = m_deviceController->aRDrone3->setPilotingPCMDGaz(m_deviceController->aRDrone3, static_cast(value)); @@ -519,7 +519,7 @@ void vpRobotBebop2::setVerticalSpeed(int value) */ void vpRobotBebop2::setYawSpeed(int value) { - if (isRunning() && m_deviceController != NULL && (isFlying() || isHovering())) { + if (isRunning() && m_deviceController != nullptr && (isFlying() || isHovering())) { m_errorController = m_deviceController->aRDrone3->setPilotingPCMDYaw(m_deviceController->aRDrone3, static_cast(value)); @@ -546,7 +546,7 @@ void vpRobotBebop2::setYawSpeed(int value) */ void vpRobotBebop2::setPitch(int value) { - if (isRunning() && m_deviceController != NULL && (isFlying() || isHovering())) { + if (isRunning() && m_deviceController != nullptr && (isFlying() || isHovering())) { m_errorController = m_deviceController->aRDrone3->setPilotingPCMDPitch(m_deviceController->aRDrone3, static_cast(value)); @@ -574,7 +574,7 @@ void vpRobotBebop2::setPitch(int value) */ void vpRobotBebop2::setRoll(int value) { - if (isRunning() && m_deviceController != NULL && (isFlying() || isHovering())) { + if (isRunning() && m_deviceController != nullptr && (isFlying() || isHovering())) { m_errorController = m_deviceController->aRDrone3->setPilotingPCMDRoll(m_deviceController->aRDrone3, static_cast(value)); @@ -598,7 +598,7 @@ void vpRobotBebop2::setRoll(int value) */ void vpRobotBebop2::cutMotors() { - if (m_deviceController != NULL) { + if (m_deviceController != nullptr) { m_errorController = m_deviceController->aRDrone3->sendPilotingEmergency(m_deviceController->aRDrone3); } } @@ -620,7 +620,7 @@ void vpRobotBebop2::cutMotors() */ void vpRobotBebop2::setPosition(float dX, float dY, float dZ, float dPsi, bool blocking) { - if (isRunning() && m_deviceController != NULL && (isFlying() || isHovering())) { + if (isRunning() && m_deviceController != nullptr && (isFlying() || isHovering())) { m_relativeMoveEnded = false; m_deviceController->aRDrone3->sendPilotingMoveBy(m_deviceController->aRDrone3, dX, dY, dZ, dPsi); @@ -721,7 +721,7 @@ void vpRobotBebop2::setVerbose(bool verbose) */ void vpRobotBebop2::resetAllSettings() { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { m_settingsReset = false; m_deviceController->common->sendSettingsReset(m_deviceController->common); @@ -748,7 +748,7 @@ void vpRobotBebop2::resetAllSettings() */ void vpRobotBebop2::setMaxTilt(double maxTilt) { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { 
m_deviceController->aRDrone3->sendPilotingSettingsMaxTilt(m_deviceController->aRDrone3, static_cast(maxTilt)); } else { @@ -765,7 +765,7 @@ void vpRobotBebop2::setMaxTilt(double maxTilt) */ void vpRobotBebop2::stopMoving() { - if (isRunning() && !isLanded() && m_deviceController != NULL) { + if (isRunning() && !isLanded() && m_deviceController != nullptr) { m_errorController = m_deviceController->aRDrone3->setPilotingPCMD(m_deviceController->aRDrone3, 0, 0, 0, 0, 0, 0); } } @@ -788,7 +788,7 @@ void vpRobotBebop2::getGrayscaleImage(vpImage &I) { if (m_videoDecodingStarted) { - if (m_bgr_picture->data[0] != NULL) { + if (m_bgr_picture->data[0] != nullptr) { I.resize(static_cast(m_videoHeight), static_cast(m_videoWidth)); m_bgr_picture_mutex.lock(); @@ -796,7 +796,7 @@ void vpRobotBebop2::getGrayscaleImage(vpImage &I) I.getHeight()); m_bgr_picture_mutex.unlock(); } else { - ARSAL_PRINT(ARSAL_PRINT_ERROR, "ERROR", "Error while getting current grayscale image : image data is NULL"); + ARSAL_PRINT(ARSAL_PRINT_ERROR, "ERROR", "Error while getting current grayscale image : image data is nullptr"); } } else { @@ -816,7 +816,7 @@ void vpRobotBebop2::getRGBaImage(vpImage &I) { if (m_videoDecodingStarted) { - if (m_bgr_picture->data[0] != NULL) { + if (m_bgr_picture->data[0] != nullptr) { I.resize(static_cast(m_videoHeight), static_cast(m_videoWidth)); m_bgr_picture_mutex.lock(); @@ -824,7 +824,7 @@ void vpRobotBebop2::getRGBaImage(vpImage &I) I.getHeight()); m_bgr_picture_mutex.unlock(); } else { - ARSAL_PRINT(ARSAL_PRINT_ERROR, "ERROR", "Error while getting current RGBa image : image data is NULL"); + ARSAL_PRINT(ARSAL_PRINT_ERROR, "ERROR", "Error while getting current RGBa image : image data is nullptr"); } } else { @@ -855,7 +855,7 @@ int vpRobotBebop2::getVideoWidth() { return m_videoWidth; } */ void vpRobotBebop2::setExposure(float expo) { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { expo = std::min(1.5f, std::max(-1.5f, expo)); m_exposureSet = false; @@ -886,7 +886,7 @@ void vpRobotBebop2::setExposure(float expo) */ void vpRobotBebop2::setStreamingMode(int mode) { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { if (!isStreaming() && isLanded()) { eARCOMMANDS_ARDRONE3_MEDIASTREAMING_VIDEOSTREAMMODE_MODE cmd_mode = @@ -935,7 +935,7 @@ void vpRobotBebop2::setStreamingMode(int mode) */ void vpRobotBebop2::setVideoResolution(int mode) { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { if (!isStreaming() && isLanded()) { @@ -988,7 +988,7 @@ void vpRobotBebop2::setVideoResolution(int mode) */ void vpRobotBebop2::setVideoStabilisationMode(int mode) { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { eARCOMMANDS_ARDRONE3_PICTURESETTINGS_VIDEOSTABILIZATIONMODE_MODE cmd_mode = ARCOMMANDS_ARDRONE3_PICTURESETTINGS_VIDEOSTABILIZATIONMODE_MODE_NONE; @@ -1027,7 +1027,7 @@ void vpRobotBebop2::setVideoStabilisationMode(int mode) */ void vpRobotBebop2::startStreaming() { - if (isRunning() && m_deviceController != NULL) { + if (isRunning() && m_deviceController != nullptr) { ARSAL_PRINT(ARSAL_PRINT_INFO, TAG, "- Starting video streaming ... 
"); // Sending command to the drone to start the video stream @@ -1062,7 +1062,7 @@ void vpRobotBebop2::startStreaming() */ void vpRobotBebop2::stopStreaming() { - if (m_videoDecodingStarted && m_deviceController != NULL) { + if (m_videoDecodingStarted && m_deviceController != nullptr) { ARSAL_PRINT(ARSAL_PRINT_INFO, TAG, "- Stopping video streaming ... "); // Sending command to the drone to stop the video stream @@ -1122,7 +1122,7 @@ void vpRobotBebop2::sighandler(int signo) vpRobotBebop2::m_running = false; // Landing the drone - if (m_deviceController != NULL) { + if (m_deviceController != nullptr) { m_deviceController->aRDrone3->sendPilotingLanding(m_deviceController->aRDrone3); } std::exit(EXIT_FAILURE); @@ -1134,7 +1134,7 @@ void vpRobotBebop2::sighandler(int signo) */ eARCOMMANDS_ARDRONE3_PILOTINGSTATE_FLYINGSTATECHANGED_STATE vpRobotBebop2::getFlyingState() { - if (m_deviceController != NULL) { + if (m_deviceController != nullptr) { eARCOMMANDS_ARDRONE3_PILOTINGSTATE_FLYINGSTATECHANGED_STATE flyingState = ARCOMMANDS_ARDRONE3_PILOTINGSTATE_FLYINGSTATECHANGED_STATE_MAX; eARCONTROLLER_ERROR error; @@ -1142,19 +1142,19 @@ eARCOMMANDS_ARDRONE3_PILOTINGSTATE_FLYINGSTATECHANGED_STATE vpRobotBebop2::getFl ARCONTROLLER_DICTIONARY_ELEMENT_t *elementDictionary = ARCONTROLLER_ARDrone3_GetCommandElements( m_deviceController->aRDrone3, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_PILOTINGSTATE_FLYINGSTATECHANGED, &error); - if (error == ARCONTROLLER_OK && elementDictionary != NULL) { - ARCONTROLLER_DICTIONARY_ARG_t *arg = NULL; - ARCONTROLLER_DICTIONARY_ELEMENT_t *element = NULL; + if (error == ARCONTROLLER_OK && elementDictionary != nullptr) { + ARCONTROLLER_DICTIONARY_ARG_t *arg = nullptr; + ARCONTROLLER_DICTIONARY_ELEMENT_t *element = nullptr; HASH_FIND_STR(elementDictionary, ARCONTROLLER_DICTIONARY_SINGLE_KEY, element); - if (element != NULL) { + if (element != nullptr) { // Suppress warnings // Get the value HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_PILOTINGSTATE_FLYINGSTATECHANGED_STATE, arg); - if (arg != NULL) { + if (arg != nullptr) { // Enums are stored as I32 flyingState = static_cast(arg->value.I32); } @@ -1173,7 +1173,7 @@ eARCOMMANDS_ARDRONE3_PILOTINGSTATE_FLYINGSTATECHANGED_STATE vpRobotBebop2::getFl */ eARCOMMANDS_ARDRONE3_MEDIASTREAMINGSTATE_VIDEOENABLECHANGED_ENABLED vpRobotBebop2::getStreamingState() { - if (m_deviceController != NULL) { + if (m_deviceController != nullptr) { eARCOMMANDS_ARDRONE3_MEDIASTREAMINGSTATE_VIDEOENABLECHANGED_ENABLED streamingState = ARCOMMANDS_ARDRONE3_MEDIASTREAMINGSTATE_VIDEOENABLECHANGED_ENABLED_MAX; eARCONTROLLER_ERROR error; @@ -1182,18 +1182,18 @@ eARCOMMANDS_ARDRONE3_MEDIASTREAMINGSTATE_VIDEOENABLECHANGED_ENABLED vpRobotBebop m_deviceController->aRDrone3, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_MEDIASTREAMINGSTATE_VIDEOENABLECHANGED, &error); - if (error == ARCONTROLLER_OK && elementDictionary != NULL) { - ARCONTROLLER_DICTIONARY_ARG_t *arg = NULL; - ARCONTROLLER_DICTIONARY_ELEMENT_t *element = NULL; + if (error == ARCONTROLLER_OK && elementDictionary != nullptr) { + ARCONTROLLER_DICTIONARY_ARG_t *arg = nullptr; + ARCONTROLLER_DICTIONARY_ELEMENT_t *element = nullptr; HASH_FIND_STR(elementDictionary, ARCONTROLLER_DICTIONARY_SINGLE_KEY, element); - if (element != NULL) { + if (element != nullptr) { // Get the value HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_MEDIASTREAMINGSTATE_VIDEOENABLECHANGED_ENABLED, arg); - if (arg != NULL) { + if (arg != nullptr) { // Enums are stored as I32 streamingState = 
static_cast(arg->value.I32); @@ -1267,7 +1267,7 @@ void vpRobotBebop2::setupCallbacks() #ifdef VISP_HAVE_FFMPEG // Adding frame received callback, called when a streaming frame has been received from the device m_errorController = ARCONTROLLER_Device_SetVideoStreamCallbacks(m_deviceController, decoderConfigCallback, - didReceiveFrameCallback, NULL, this); + didReceiveFrameCallback, nullptr, this); if (m_errorController != ARCONTROLLER_OK) { ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "- error: %s", ARCONTROLLER_Error_ToString(m_errorController)); @@ -1348,7 +1348,7 @@ void vpRobotBebop2::initCodec() m_codecContext->flags2 |= AV_CODEC_FLAG2_CHUNKS; // Opens the codec - if (avcodec_open2(m_codecContext, codec, NULL) < 0) { + if (avcodec_open2(m_codecContext, codec, nullptr) < 0) { ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "Failed to open codec."); return; } @@ -1365,8 +1365,8 @@ void vpRobotBebop2::initCodec() m_bgr_picture_mutex.unlock(); m_img_convert_ctx = sws_getContext(m_codecContext->width, m_codecContext->height, m_codecContext->pix_fmt, - m_codecContext->width, m_codecContext->height, pFormat, SWS_BICUBIC, NULL, NULL, - NULL); // Used to rescale frame received from the decoder + m_codecContext->width, m_codecContext->height, pFormat, SWS_BICUBIC, nullptr, nullptr, + nullptr); // Used to rescale frame received from the decoder } /*! @@ -1524,7 +1524,7 @@ void vpRobotBebop2::computeFrame(ARCONTROLLER_Frame_t *frame) */ void vpRobotBebop2::cleanUp() { - if (m_deviceController != NULL) { + if (m_deviceController != nullptr) { // Lands the drone if not landed land(); @@ -1646,14 +1646,14 @@ eARCONTROLLER_ERROR vpRobotBebop2::didReceiveFrameCallback(ARCONTROLLER_Frame_t { vpRobotBebop2 *drone = static_cast(customData); - if (frame != NULL) { + if (frame != nullptr) { if (drone->m_videoDecodingStarted) { drone->computeFrame(frame); } } else { - ARSAL_PRINT(ARSAL_PRINT_WARNING, TAG, "frame is NULL."); + ARSAL_PRINT(ARSAL_PRINT_WARNING, TAG, "frame is nullptr."); } return ARCONTROLLER_OK; @@ -1672,19 +1672,19 @@ eARCONTROLLER_ERROR vpRobotBebop2::didReceiveFrameCallback(ARCONTROLLER_Frame_t void vpRobotBebop2::cmdBatteryStateChangedRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t *elementDictionary, vpRobotBebop2 *drone) { - ARCONTROLLER_DICTIONARY_ARG_t *arg = NULL; - ARCONTROLLER_DICTIONARY_ELEMENT_t *singleElement = NULL; + ARCONTROLLER_DICTIONARY_ARG_t *arg = nullptr; + ARCONTROLLER_DICTIONARY_ELEMENT_t *singleElement = nullptr; - if (elementDictionary == NULL) { - ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "elements is NULL"); + if (elementDictionary == nullptr) { + ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "elements is nullptr"); return; } // Get the command received in the device controller HASH_FIND_STR(elementDictionary, ARCONTROLLER_DICTIONARY_SINGLE_KEY, singleElement); - if (singleElement == NULL) { - ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "singleElement is NULL"); + if (singleElement == nullptr) { + ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "singleElement is nullptr"); return; } @@ -1692,8 +1692,8 @@ void vpRobotBebop2::cmdBatteryStateChangedRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t HASH_FIND_STR(singleElement->arguments, ARCONTROLLER_DICTIONARY_KEY_COMMON_COMMONSTATE_BATTERYSTATECHANGED_PERCENT, arg); - if (arg == NULL) { - ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "arg is NULL"); + if (arg == nullptr) { + ARSAL_PRINT(ARSAL_PRINT_ERROR, TAG, "arg is nullptr"); return; } drone->m_batteryLevel = arg->value.U8; @@ -1719,18 +1719,18 @@ void vpRobotBebop2::cmdBatteryStateChangedRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t void 
vpRobotBebop2::cmdCameraOrientationChangedRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t *elementDictionary, vpRobotBebop2 *drone) { - ARCONTROLLER_DICTIONARY_ARG_t *arg = NULL; - ARCONTROLLER_DICTIONARY_ELEMENT_t *element = NULL; + ARCONTROLLER_DICTIONARY_ARG_t *arg = nullptr; + ARCONTROLLER_DICTIONARY_ELEMENT_t *element = nullptr; HASH_FIND_STR(elementDictionary, ARCONTROLLER_DICTIONARY_SINGLE_KEY, element); - if (element != NULL) { + if (element != nullptr) { HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_CAMERASTATE_ORIENTATIONV2_TILT, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_currentCameraTilt = static_cast(arg->value.Float); } HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_CAMERASTATE_ORIENTATIONV2_PAN, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_currentCameraPan = static_cast(arg->value.Float); } } @@ -1749,41 +1749,41 @@ void vpRobotBebop2::cmdCameraOrientationChangedRcv(ARCONTROLLER_DICTIONARY_ELEME */ void vpRobotBebop2::cmdCameraSettingsRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t *elementDictionary, vpRobotBebop2 *drone) { - ARCONTROLLER_DICTIONARY_ARG_t *arg = NULL; - ARCONTROLLER_DICTIONARY_ELEMENT_t *element = NULL; + ARCONTROLLER_DICTIONARY_ARG_t *arg = nullptr; + ARCONTROLLER_DICTIONARY_ELEMENT_t *element = nullptr; HASH_FIND_STR(elementDictionary, ARCONTROLLER_DICTIONARY_SINGLE_KEY, element); - if (element != NULL) { + if (element != nullptr) { HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_COMMON_CAMERASETTINGSSTATE_CAMERASETTINGSCHANGED_FOV, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_cameraHorizontalFOV = static_cast(arg->value.Float); ARSAL_PRINT(ARSAL_PRINT_INFO, TAG, " - Camera horizontal FOV : %f degrees.", static_cast(drone->m_cameraHorizontalFOV)); } HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_COMMON_CAMERASETTINGSSTATE_CAMERASETTINGSCHANGED_PANMAX, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_maxCameraPan = static_cast(arg->value.Float); ARSAL_PRINT(ARSAL_PRINT_INFO, TAG, " - Max camera pan : %f degrees.", static_cast(drone->m_maxCameraPan)); } HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_COMMON_CAMERASETTINGSSTATE_CAMERASETTINGSCHANGED_PANMIN, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_minCameraPan = static_cast(arg->value.Float); ARSAL_PRINT(ARSAL_PRINT_INFO, TAG, " - Min camera pan : %f degrees.", static_cast(drone->m_minCameraPan)); } HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_COMMON_CAMERASETTINGSSTATE_CAMERASETTINGSCHANGED_TILTMAX, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_maxCameraTilt = static_cast(arg->value.Float); ARSAL_PRINT(ARSAL_PRINT_INFO, TAG, " - Max camera tilt : %f degrees.", static_cast(drone->m_maxCameraTilt)); } HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_COMMON_CAMERASETTINGSSTATE_CAMERASETTINGSCHANGED_TILTMIN, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_minCameraTilt = static_cast(arg->value.Float); ARSAL_PRINT(ARSAL_PRINT_INFO, TAG, " - Min camera tilt : %f degrees.", static_cast(drone->m_minCameraTilt)); @@ -1804,14 +1804,14 @@ void vpRobotBebop2::cmdCameraSettingsRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t *elem void vpRobotBebop2::cmdMaxPitchRollChangedRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t *elementDictionary, vpRobotBebop2 *drone) { - ARCONTROLLER_DICTIONARY_ARG_t *arg = NULL; - ARCONTROLLER_DICTIONARY_ELEMENT_t *element = NULL; + ARCONTROLLER_DICTIONARY_ARG_t *arg = nullptr; + 
ARCONTROLLER_DICTIONARY_ELEMENT_t *element = nullptr; HASH_FIND_STR(elementDictionary, ARCONTROLLER_DICTIONARY_SINGLE_KEY, element); - if (element != NULL) { + if (element != nullptr) { HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_PILOTINGSETTINGSSTATE_MAXTILTCHANGED_CURRENT, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_maxTilt = static_cast(arg->value.Float); } } @@ -1829,15 +1829,15 @@ void vpRobotBebop2::cmdMaxPitchRollChangedRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t */ void vpRobotBebop2::cmdRelativeMoveEndedRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t *elementDictionary, vpRobotBebop2 *drone) { - ARCONTROLLER_DICTIONARY_ARG_t *arg = NULL; - ARCONTROLLER_DICTIONARY_ELEMENT_t *element = NULL; + ARCONTROLLER_DICTIONARY_ARG_t *arg = nullptr; + ARCONTROLLER_DICTIONARY_ELEMENT_t *element = nullptr; HASH_FIND_STR(elementDictionary, ARCONTROLLER_DICTIONARY_SINGLE_KEY, element); - if (element != NULL) { + if (element != nullptr) { HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_PILOTINGEVENT_MOVEBYEND_ERROR, arg); - if (arg != NULL) { + if (arg != nullptr) { eARCOMMANDS_ARDRONE3_PILOTINGEVENT_MOVEBYEND_ERROR error = static_cast(arg->value.I32); if ((error != ARCOMMANDS_ARDRONE3_PILOTINGEVENT_MOVEBYEND_ERROR_OK) && @@ -1861,17 +1861,17 @@ void vpRobotBebop2::cmdRelativeMoveEndedRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t *e */ void vpRobotBebop2::cmdExposureSetRcv(ARCONTROLLER_DICTIONARY_ELEMENT_t *elementDictionary, vpRobotBebop2 *drone) { - ARCONTROLLER_DICTIONARY_ARG_t *arg = NULL; - ARCONTROLLER_DICTIONARY_ELEMENT_t *element = NULL; + ARCONTROLLER_DICTIONARY_ARG_t *arg = nullptr; + ARCONTROLLER_DICTIONARY_ELEMENT_t *element = nullptr; HASH_FIND_STR(elementDictionary, ARCONTROLLER_DICTIONARY_SINGLE_KEY, element); - if (element != NULL) { + if (element != nullptr) { HASH_FIND_STR(element->arguments, ARCONTROLLER_DICTIONARY_KEY_ARDRONE3_PICTURESETTINGSSTATE_EXPOSITIONCHANGED_VALUE, arg); - if (arg != NULL) { + if (arg != nullptr) { drone->m_exposureSet = true; } } @@ -1891,7 +1891,7 @@ void vpRobotBebop2::commandReceivedCallback(eARCONTROLLER_DICTIONARY_KEY command { vpRobotBebop2 *drone = static_cast(customData); - if (drone == NULL) + if (drone == nullptr) return; switch (commandKey) { diff --git a/modules/robot/src/real-robot/biclops/private/vpRobotBiclopsController_impl.cpp b/modules/robot/src/real-robot/biclops/private/vpRobotBiclopsController_impl.cpp index 957f688d08..bf9231b1ac 100644 --- a/modules/robot/src/real-robot/biclops/private/vpRobotBiclopsController_impl.cpp +++ b/modules/robot/src/real-robot/biclops/private/vpRobotBiclopsController_impl.cpp @@ -53,7 +53,7 @@ #include vpRobotBiclops::vpRobotBiclopsController::vpRobotBiclopsController() - : m_biclops(), m_axisMask(0), m_panAxis(NULL), m_tiltAxis(NULL), m_vergeAxis(NULL), m_panProfile(), m_tiltProfile(), + : m_biclops(), m_axisMask(0), m_panAxis(nullptr), m_tiltAxis(nullptr), m_vergeAxis(nullptr), m_panProfile(), m_tiltProfile(), m_vergeProfile(), m_shm(), m_stopControllerThread(false) { m_axisMask = Biclops::PanMask + Biclops::TiltMask; //+ Biclops::VergeMask*/; // add this if you want verge. 
diff --git a/modules/robot/src/real-robot/biclops/private/vpRobotBiclopsController_impl.h b/modules/robot/src/real-robot/biclops/private/vpRobotBiclopsController_impl.h index e44bf2296e..abdcff0d92 100644 --- a/modules/robot/src/real-robot/biclops/private/vpRobotBiclopsController_impl.h +++ b/modules/robot/src/real-robot/biclops/private/vpRobotBiclopsController_impl.h @@ -96,8 +96,8 @@ class VISP_EXPORT vpRobotBiclops::vpRobotBiclopsController // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpRobotBiclopsController(const vpRobotBiclopsController &) - // : m_biclops(), m_axisMask(0), m_panAxis(NULL), m_tiltAxis(NULL), - // m_vergeAxis(NULL), + // : m_biclops(), m_axisMask(0), m_panAxis(nullptr), m_tiltAxis(nullptr), + // m_vergeAxis(nullptr), // m_panProfile(), m_tiltProfile(), m_vergeProfile(), m_shm(), // m_stopControllerThread(false) // { diff --git a/modules/robot/src/real-robot/biclops/vpRobotBiclops.cpp b/modules/robot/src/real-robot/biclops/vpRobotBiclops.cpp index 288e6ec518..893258a983 100644 --- a/modules/robot/src/real-robot/biclops/vpRobotBiclops.cpp +++ b/modules/robot/src/real-robot/biclops/vpRobotBiclops.cpp @@ -116,7 +116,7 @@ void vpRobotBiclops::init() { // test if the config file exists FILE *fd = fopen(m_configfile.c_str(), "r"); - if (fd == NULL) { + if (fd == nullptr) { vpCERROR << "Cannot open Biclops config file: " << m_configfile << std::endl; throw vpRobotException(vpRobotException::constructionError, "Cannot open connection with biclops"); } diff --git a/modules/robot/src/real-robot/flir-ptu/vpRobotFlirPtu.cpp b/modules/robot/src/real-robot/flir-ptu/vpRobotFlirPtu.cpp index 1122542422..d427549854 100644 --- a/modules/robot/src/real-robot/flir-ptu/vpRobotFlirPtu.cpp +++ b/modules/robot/src/real-robot/flir-ptu/vpRobotFlirPtu.cpp @@ -107,7 +107,7 @@ void vpRobotFlirPtu::init() Default constructor. */ vpRobotFlirPtu::vpRobotFlirPtu() - : m_eMc(), m_cer(NULL), m_status(0), m_pos_max_tics(2), m_pos_min_tics(2), m_vel_max_tics(2), m_res(2), + : m_eMc(), m_cer(nullptr), m_status(0), m_pos_max_tics(2), m_pos_min_tics(2), m_vel_max_tics(2), m_res(2), m_connected(false), m_njoints(2), m_positioning_velocity(20.) 
{ signal(SIGINT, vpRobotFlirPtu::emergencyStop); @@ -499,8 +499,8 @@ void vpRobotFlirPtu::setPosition(const vpRobot::vpControlFrameType frame, const vpMath::deg(q[1]))); } - if (cpi_block_until(m_cer, NULL, NULL, OP_PAN_CURRENT_POS_GET, pos_tics[0]) || - cpi_block_until(m_cer, NULL, NULL, OP_TILT_CURRENT_POS_GET, pos_tics[1])) { + if (cpi_block_until(m_cer, nullptr, nullptr, OP_PAN_CURRENT_POS_GET, pos_tics[0]) || + cpi_block_until(m_cer, nullptr, nullptr, OP_TILT_CURRENT_POS_GET, pos_tics[1])) { disconnect(); throw(vpException(vpException::fatalError, "FLIR PTU failed to wait until position %d, %d reached (deg)", vpMath::deg(q[0]), vpMath::deg(q[1]))); @@ -538,7 +538,7 @@ void vpRobotFlirPtu::connect(const std::string &portname, int baudrate) throw(vpException(vpException::fatalError, "Port name is required to connect to FLIR PTU.")); } - if ((m_cer = (struct cerial *)malloc(sizeof(struct cerial))) == NULL) { + if ((m_cer = (struct cerial *)malloc(sizeof(struct cerial))) == nullptr) { disconnect(); throw(vpException(vpException::fatalError, "Out of memory during FLIR PTU connection.")); } @@ -559,7 +559,7 @@ void vpRobotFlirPtu::connect(const std::string &portname, int baudrate) cerioctl(m_cer, CERIAL_IOCTL_BAUDRATE_SET, &baudrate); // Flush any characters already buffered - cerioctl(m_cer, CERIAL_IOCTL_FLUSH_INPUT, NULL); + cerioctl(m_cer, CERIAL_IOCTL_FLUSH_INPUT, nullptr); // Set two second timeout */ int timeout = 2000; @@ -596,10 +596,10 @@ void vpRobotFlirPtu::connect(const std::string &portname, int baudrate) */ void vpRobotFlirPtu::disconnect() { - if (m_cer != NULL) { + if (m_cer != nullptr) { cerclose(m_cer); free(m_cer); - m_cer = NULL; + m_cer = nullptr; m_connected = false; } } @@ -832,7 +832,7 @@ void vpRobotFlirPtu::reset() return; } - if (cpi_ptcmd(m_cer, &m_status, OP_RESET, NULL)) { + if (cpi_ptcmd(m_cer, &m_status, OP_RESET, nullptr)) { throw(vpException(vpException::fatalError, "Unable to reset PTU.")); } } @@ -850,7 +850,7 @@ void vpRobotFlirPtu::stopMotion() return; } - if (cpi_ptcmd(m_cer, &m_status, OP_HALT, NULL)) { + if (cpi_ptcmd(m_cer, &m_status, OP_HALT, nullptr)) { throw(vpException(vpException::fatalError, "Unable to stop PTU.")); } } @@ -868,7 +868,7 @@ std::string vpRobotFlirPtu::getNetworkIP() } char str[64]; - if (cpi_ptcmd(m_cer, &m_status, OP_NET_IP_GET, (int)sizeof(str), NULL, &str)) { + if (cpi_ptcmd(m_cer, &m_status, OP_NET_IP_GET, (int)sizeof(str), nullptr, &str)) { throw(vpException(vpException::fatalError, "Unable to get Network IP.")); } @@ -888,7 +888,7 @@ std::string vpRobotFlirPtu::getNetworkGateway() } char str[64]; - if (cpi_ptcmd(m_cer, &m_status, OP_NET_GATEWAY_GET, (int)sizeof(str), NULL, &str)) { + if (cpi_ptcmd(m_cer, &m_status, OP_NET_GATEWAY_GET, (int)sizeof(str), nullptr, &str)) { throw(vpException(vpException::fatalError, "Unable to get Network Gateway.")); } @@ -908,7 +908,7 @@ std::string vpRobotFlirPtu::getNetworkHostName() } char str[64]; - if (cpi_ptcmd(m_cer, &m_status, OP_NET_HOSTNAME_GET, (int)sizeof(str), NULL, &str)) { + if (cpi_ptcmd(m_cer, &m_status, OP_NET_HOSTNAME_GET, (int)sizeof(str), nullptr, &str)) { throw(vpException(vpException::fatalError, "Unable to get Network hostname.")); } diff --git a/modules/robot/src/real-robot/franka/vpRobotFranka.cpp b/modules/robot/src/real-robot/franka/vpRobotFranka.cpp index 5579d4348e..66b0067f2b 100644 --- a/modules/robot/src/real-robot/franka/vpRobotFranka.cpp +++ b/modules/robot/src/real-robot/franka/vpRobotFranka.cpp @@ -51,7 +51,7 @@ */ vpRobotFranka::vpRobotFranka() - : 
vpRobot(), m_handler(NULL), m_gripper(NULL), m_model(NULL), m_positioningVelocity(20.), m_velControlThread(), + : vpRobot(), m_handler(nullptr), m_gripper(nullptr), m_model(nullptr), m_positioningVelocity(20.), m_velControlThread(), m_velControlThreadIsRunning(false), m_velControlThreadStopAsked(false), m_dq_des(), m_v_cart_des(), m_ftControlThread(), m_ftControlThreadIsRunning(false), m_ftControlThreadStopAsked(false), m_tau_J_des(), m_ft_cart_des(), m_q_min(), m_q_max(), m_dq_max(), m_ddq_max(), m_robot_state(), m_mutex(), m_eMc(), m_log_folder(), @@ -67,7 +67,7 @@ vpRobotFranka::vpRobotFranka() * be set when required. Setting realtime_config to kIgnore disables this behavior. */ vpRobotFranka::vpRobotFranka(const std::string &franka_address, franka::RealtimeConfig realtime_config) - : vpRobot(), m_handler(NULL), m_gripper(NULL), m_model(NULL), m_positioningVelocity(20.), m_velControlThread(), + : vpRobot(), m_handler(nullptr), m_gripper(nullptr), m_model(nullptr), m_positioningVelocity(20.), m_velControlThread(), m_velControlThreadIsRunning(false), m_velControlThreadStopAsked(false), m_dq_des(), m_v_cart_des(), m_ftControlThread(), m_ftControlThreadIsRunning(false), m_ftControlThreadStopAsked(false), m_tau_J_des(), m_ft_cart_des(), m_q_min(), m_q_max(), m_dq_max(), m_ddq_max(), m_robot_state(), m_mutex(), m_eMc(), m_log_folder(), @@ -1151,7 +1151,7 @@ bool vpRobotFranka::savePosFile(const std::string &filename, const vpColVector & FILE *fd; fd = fopen(filename.c_str(), "w"); - if (fd == NULL) + if (fd == nullptr) return false; fprintf(fd, "#PANDA - Joint position file\n" @@ -1195,7 +1195,7 @@ void vpRobotFranka::gripperHoming() if (m_franka_address.empty()) { throw(vpException(vpException::fatalError, "Cannot perform franka gripper homing without ip address")); } - if (m_gripper == NULL) + if (m_gripper == nullptr) m_gripper = new franka::Gripper(m_franka_address); m_gripper->homing(); @@ -1215,7 +1215,7 @@ int vpRobotFranka::gripperMove(double width) if (m_franka_address.empty()) { throw(vpException(vpException::fatalError, "Cannot open franka gripper without ip address")); } - if (m_gripper == NULL) + if (m_gripper == nullptr) m_gripper = new franka::Gripper(m_franka_address); // Check for the maximum grasping width. @@ -1254,7 +1254,7 @@ int vpRobotFranka::gripperOpen() if (m_franka_address.empty()) { throw(vpException(vpException::fatalError, "Cannot open franka gripper without ip address")); } - if (m_gripper == NULL) + if (m_gripper == nullptr) m_gripper = new franka::Gripper(m_franka_address); // Check for the maximum grasping width. @@ -1275,7 +1275,7 @@ void vpRobotFranka::gripperRelease() if (m_franka_address.empty()) { throw(vpException(vpException::fatalError, "Cannot release franka gripper without ip address")); } - if (m_gripper == NULL) + if (m_gripper == nullptr) m_gripper = new franka::Gripper(m_franka_address); m_gripper->stop(); @@ -1297,7 +1297,7 @@ void vpRobotFranka::gripperRelease() */ int vpRobotFranka::gripperGrasp(double grasping_width, double force) { - if (m_gripper == NULL) + if (m_gripper == nullptr) m_gripper = new franka::Gripper(m_franka_address); // Check for the maximum grasping width. 
diff --git a/modules/robot/src/real-robot/kinova/vpRobotKinova.cpp b/modules/robot/src/real-robot/kinova/vpRobotKinova.cpp index 991c18b15c..0b82a0095e 100644 --- a/modules/robot/src/real-robot/kinova/vpRobotKinova.cpp +++ b/modules/robot/src/real-robot/kinova/vpRobotKinova.cpp @@ -53,7 +53,7 @@ */ vpRobotKinova::vpRobotKinova() : m_eMc(), m_plugin_location("./"), m_verbose(false), m_plugin_loaded(false), m_devices_count(0), - m_devices_list(NULL), m_active_device(-1), m_command_layer(CMD_LAYER_UNSET), m_command_layer_handle() + m_devices_list(nullptr), m_active_device(-1), m_command_layer(CMD_LAYER_UNSET), m_command_layer_handle() { init(); } @@ -761,10 +761,10 @@ void vpRobotKinova::loadPlugin() #endif // Verify that all functions has been loaded correctly - if ((KinovaCloseAPI == NULL) || (KinovaGetAngularCommand == NULL) || (KinovaGetAngularCommand == NULL) || - (KinovaGetCartesianCommand == NULL) || (KinovaGetDevices == NULL) || (KinovaInitAPI == NULL) || - (KinovaInitFingers == NULL) || (KinovaMoveHome == NULL) || (KinovaSendBasicTrajectory == NULL) || - (KinovaSetActiveDevice == NULL) || (KinovaSetAngularControl == NULL) || (KinovaSetCartesianControl == NULL)) { + if ((KinovaCloseAPI == nullptr) || (KinovaGetAngularCommand == nullptr) || (KinovaGetAngularCommand == nullptr) || + (KinovaGetCartesianCommand == nullptr) || (KinovaGetDevices == nullptr) || (KinovaInitAPI == nullptr) || + (KinovaInitFingers == nullptr) || (KinovaMoveHome == nullptr) || (KinovaSendBasicTrajectory == nullptr) || + (KinovaSetActiveDevice == nullptr) || (KinovaSetAngularControl == nullptr) || (KinovaSetCartesianControl == nullptr)) { throw(vpException(vpException::fatalError, "Cannot load plugin from \"%s\" folder", m_plugin_location.c_str())); } if (m_verbose) { diff --git a/modules/robot/src/real-robot/mavsdk/vpRobotMavsdk.cpp b/modules/robot/src/real-robot/mavsdk/vpRobotMavsdk.cpp index cb6d242e6a..c94b8322ba 100644 --- a/modules/robot/src/real-robot/mavsdk/vpRobotMavsdk.cpp +++ b/modules/robot/src/real-robot/mavsdk/vpRobotMavsdk.cpp @@ -251,7 +251,7 @@ class vpRobotMavsdk::vpRobotMavsdkImpl bool isRunning() const { - if (m_system == NULL) { + if (m_system == nullptr) { return false; } else { diff --git a/modules/robot/src/real-robot/universal-robots/vpRobotUniversalRobots.cpp b/modules/robot/src/real-robot/universal-robots/vpRobotUniversalRobots.cpp index 950c6e5b0c..64e5cd3b8f 100644 --- a/modules/robot/src/real-robot/universal-robots/vpRobotUniversalRobots.cpp +++ b/modules/robot/src/real-robot/universal-robots/vpRobotUniversalRobots.cpp @@ -813,7 +813,7 @@ bool vpRobotUniversalRobots::savePosFile(const std::string &filename, const vpCo FILE *fd; fd = fopen(filename.c_str(), "w"); - if (fd == NULL) + if (fd == nullptr) return false; fprintf(fd, "#UR - Joint position file\n" diff --git a/modules/robot/src/real-robot/viper/vpRobotViper650.cpp b/modules/robot/src/real-robot/viper/vpRobotViper650.cpp index b252cc2ed4..0955609b51 100644 --- a/modules/robot/src/real-robot/viper/vpRobotViper650.cpp +++ b/modules/robot/src/real-robot/viper/vpRobotViper650.cpp @@ -276,7 +276,7 @@ void vpRobotViper650::init(void) // Look if the power is on or off UInt32 HIPowerStatus; UInt32 EStopStatus; - Try(PrimitiveSTATUS_Viper650(NULL, NULL, &EStopStatus, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper650(nullptr, nullptr, &EStopStatus, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); // Print the robot status @@ -660,7 +660,7 @@ vpRobotViper650::~vpRobotViper650(void) // Look if the power is on 
or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Viper650(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper650(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); // if (HIPowerStatus == 1) { @@ -772,7 +772,7 @@ void vpRobotViper650::powerOn(void) unsigned int nitermax = 10; for (unsigned int i = 0; i < nitermax; i++) { - Try(PrimitiveSTATUS_Viper650(NULL, NULL, &EStopStatus, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper650(nullptr, nullptr, &EStopStatus, nullptr, nullptr, nullptr, &HIPowerStatus)); if (EStopStatus == ESTOP_AUTO) { m_controlMode = AUTO; break; // exit for loop @@ -837,7 +837,7 @@ void vpRobotViper650::powerOff(void) // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Viper650(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper650(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); if (HIPowerStatus == 1) { @@ -871,7 +871,7 @@ bool vpRobotViper650::getPowerState(void) const bool status = false; // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Viper650(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper650(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); if (HIPowerStatus == 1) { @@ -1731,14 +1731,14 @@ void vpRobotViper650::setVelocity(const vpRobot::vpControlFrameType frame, const if (TryStt < 0) { if (TryStt == VelStopOnJoint) { UInt32 axisInJoint[njoint]; - PrimitiveSTATUS_Viper650(NULL, NULL, NULL, NULL, NULL, axisInJoint, NULL); + PrimitiveSTATUS_Viper650(nullptr, nullptr, nullptr, nullptr, nullptr, axisInJoint, nullptr); for (unsigned int i = 0; i < njoint; i++) { if (axisInJoint[i]) std::cout << "\nWarning: Velocity control stopped: axis " << i + 1 << " on joint limit!" 
<< std::endl; } } else { printf("\n%s(%d): Error %d", __FUNCTION__, TryLine, TryStt); - if (TryString != NULL) { + if (TryString != nullptr) { // The statement is in TryString, but we need to check the validity printf(" Error sentence %s\n", TryString); // Print the TryString } else { @@ -2162,7 +2162,7 @@ bool vpRobotViper650::savePosFile(const std::string &filename, const vpColVector FILE *fd; fd = fopen(filename.c_str(), "w"); - if (fd == NULL) + if (fd == nullptr) return false; fprintf(fd, "\ diff --git a/modules/robot/src/real-robot/viper/vpRobotViper850.cpp b/modules/robot/src/real-robot/viper/vpRobotViper850.cpp index c22d028a18..792279ee55 100644 --- a/modules/robot/src/real-robot/viper/vpRobotViper850.cpp +++ b/modules/robot/src/real-robot/viper/vpRobotViper850.cpp @@ -292,7 +292,7 @@ void vpRobotViper850::init(void) // Look if the power is on or off UInt32 HIPowerStatus; UInt32 EStopStatus; - Try(PrimitiveSTATUS_Viper850(NULL, NULL, &EStopStatus, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper850(nullptr, nullptr, &EStopStatus, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); // Print the robot status @@ -680,7 +680,7 @@ vpRobotViper850::~vpRobotViper850(void) // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Viper850(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper850(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); // if (HIPowerStatus == 1) { @@ -792,7 +792,7 @@ void vpRobotViper850::powerOn(void) unsigned int nitermax = 10; for (unsigned int i = 0; i < nitermax; i++) { - Try(PrimitiveSTATUS_Viper850(NULL, NULL, &EStopStatus, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper850(nullptr, nullptr, &EStopStatus, nullptr, nullptr, nullptr, &HIPowerStatus)); if (EStopStatus == ESTOP_AUTO) { m_controlMode = AUTO; break; // exit for loop @@ -857,7 +857,7 @@ void vpRobotViper850::powerOff(void) // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Viper850(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper850(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); if (HIPowerStatus == 1) { @@ -891,7 +891,7 @@ bool vpRobotViper850::getPowerState(void) const bool status = false; // Look if the power is on or off UInt32 HIPowerStatus; - Try(PrimitiveSTATUS_Viper850(NULL, NULL, NULL, NULL, NULL, NULL, &HIPowerStatus)); + Try(PrimitiveSTATUS_Viper850(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &HIPowerStatus)); CAL_Wait(0.1); if (HIPowerStatus == 1) { @@ -1761,14 +1761,14 @@ void vpRobotViper850::setVelocity(const vpRobot::vpControlFrameType frame, const if (TryStt < 0) { if (TryStt == VelStopOnJoint) { UInt32 axisInJoint[njoint]; - PrimitiveSTATUS_Viper850(NULL, NULL, NULL, NULL, NULL, axisInJoint, NULL); + PrimitiveSTATUS_Viper850(nullptr, nullptr, nullptr, nullptr, nullptr, axisInJoint, nullptr); for (unsigned int i = 0; i < njoint; i++) { if (axisInJoint[i]) std::cout << "\nWarning: Velocity control stopped: axis " << i + 1 << " on joint limit!" 
<< std::endl; } } else { printf("\n%s(%d): Error %d", __FUNCTION__, TryLine, TryStt); - if (TryString != NULL) { + if (TryString != nullptr) { // The statement is in TryString, but we need to check the validity printf(" Error sentence %s\n", TryString); // Print the TryString } else { @@ -2192,7 +2192,7 @@ bool vpRobotViper850::savePosFile(const std::string &filename, const vpColVector FILE *fd; fd = fopen(filename.c_str(), "w"); - if (fd == NULL) + if (fd == nullptr) return false; fprintf(fd, "\ diff --git a/modules/robot/src/real-robot/viper/vpViper650.cpp b/modules/robot/src/real-robot/viper/vpViper650.cpp index 704de86e7d..2531b8d986 100644 --- a/modules/robot/src/real-robot/viper/vpViper650.cpp +++ b/modules/robot/src/real-robot/viper/vpViper650.cpp @@ -46,7 +46,7 @@ #include #include -static const char *opt_viper650[] = {"CAMERA", "eMc_ROT_XYZ", "eMc_TRANS_XYZ", NULL}; +static const char *opt_viper650[] = {"CAMERA", "eMc_ROT_XYZ", "eMc_TRANS_XYZ", nullptr}; #ifdef VISP_HAVE_VIPER650_DATA const std::string vpViper650::CONST_EMC_MARLIN_F033C_WITHOUT_DISTORTION_FILENAME = @@ -438,7 +438,7 @@ void vpViper650::parseConfigFile(const std::string &filename) std::string key; ss >> key; - for (code = 0; NULL != opt_viper650[code]; ++code) { + for (code = 0; nullptr != opt_viper650[code]; ++code) { if (key.compare(opt_viper650[code]) == 0) { break; } diff --git a/modules/robot/src/real-robot/viper/vpViper850.cpp b/modules/robot/src/real-robot/viper/vpViper850.cpp index 53eac839cf..d5951a1714 100644 --- a/modules/robot/src/real-robot/viper/vpViper850.cpp +++ b/modules/robot/src/real-robot/viper/vpViper850.cpp @@ -46,7 +46,7 @@ #include #include -static const char *opt_viper850[] = {"CAMERA", "eMc_ROT_XYZ", "eMc_TRANS_XYZ", NULL}; +static const char *opt_viper850[] = {"CAMERA", "eMc_ROT_XYZ", "eMc_TRANS_XYZ", nullptr}; #ifdef VISP_HAVE_VIPER850_DATA const std::string vpViper850::CONST_EMC_MARLIN_F033C_WITHOUT_DISTORTION_FILENAME = @@ -439,7 +439,7 @@ void vpViper850::parseConfigFile(const std::string &filename) std::string key; ss >> key; - for (code = 0; NULL != opt_viper850[code]; ++code) { + for (code = 0; nullptr != opt_viper850[code]; ++code) { if (key.compare(opt_viper850[code]) == 0) { break; } diff --git a/modules/robot/src/robot-simulator/vpRobotCamera.cpp b/modules/robot/src/robot-simulator/vpRobotCamera.cpp index e5ac10c3d1..d9eecf8d4a 100644 --- a/modules/robot/src/robot-simulator/vpRobotCamera.cpp +++ b/modules/robot/src/robot-simulator/vpRobotCamera.cpp @@ -82,8 +82,8 @@ void vpRobotCamera::init() eJeAvailable = true; fJeAvailable = false; areJointLimitsAvailable = false; - qmin = NULL; - qmax = NULL; + qmin = nullptr; + qmax = nullptr; setMaxTranslationVelocity(1.); // vx, vy and vz max set to 1 m/s setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s diff --git a/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp b/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp index e64efb01a9..f45e033005 100644 --- a/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp +++ b/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp @@ -47,7 +47,7 @@ Basic constructor */ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator() - : vpWireFrameSimulator(), vpRobotSimulator(), I(), tcur(0), tprev(0), robotArms(NULL), size_fMi(8), fMi(NULL), + : vpWireFrameSimulator(), vpRobotSimulator(), I(), tcur(0), tprev(0), robotArms(nullptr), size_fMi(8), fMi(nullptr), artCoord(), artVel(), velocity(), #if defined(_WIN32) #elif 
defined(VISP_HAVE_PTHREAD) @@ -76,7 +76,7 @@ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator() \param do_display : When true, enables the display of the external view. */ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator(bool do_display) - : vpWireFrameSimulator(), vpRobotSimulator(), I(), tcur(0), tprev(0), robotArms(NULL), size_fMi(8), fMi(NULL), + : vpWireFrameSimulator(), vpRobotSimulator(), I(), tcur(0), tprev(0), robotArms(nullptr), size_fMi(8), fMi(nullptr), artCoord(), artVel(), velocity(), #if defined(_WIN32) #elif defined(VISP_HAVE_PTHREAD) diff --git a/modules/robot/src/robot-simulator/vpSimulatorAfma6.cpp b/modules/robot/src/robot-simulator/vpSimulatorAfma6.cpp index d6c1d38888..ae1d7c870d 100644 --- a/modules/robot/src/robot-simulator/vpSimulatorAfma6.cpp +++ b/modules/robot/src/robot-simulator/vpSimulatorAfma6.cpp @@ -67,7 +67,7 @@ vpSimulatorAfma6::vpSimulatorAfma6() #if defined(_WIN32) DWORD dwThreadIdArray; - hThread = CreateThread(NULL, // default security attributes + hThread = CreateThread(nullptr, // default security attributes 0, // use default stack size launcher, // thread function name this, // argument to thread function @@ -77,7 +77,7 @@ vpSimulatorAfma6::vpSimulatorAfma6() pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); - pthread_create(&thread, NULL, launcher, (void *)this); + pthread_create(&thread, nullptr, launcher, (void *)this); #endif compute_fMi(); @@ -100,7 +100,7 @@ vpSimulatorAfma6::vpSimulatorAfma6(bool do_display) #if defined(_WIN32) DWORD dwThreadIdArray; - hThread = CreateThread(NULL, // default security attributes + hThread = CreateThread(nullptr, // default security attributes 0, // use default stack size launcher, // thread function name this, // argument to thread function @@ -110,7 +110,7 @@ vpSimulatorAfma6::vpSimulatorAfma6(bool do_display) pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); - pthread_create(&thread, NULL, launcher, (void *)this); + pthread_create(&thread, nullptr, launcher, (void *)this); #endif compute_fMi(); @@ -134,10 +134,10 @@ vpSimulatorAfma6::~vpSimulatorAfma6() CloseHandle(hThread); #elif defined(VISP_HAVE_PTHREAD) pthread_attr_destroy(&attr); - pthread_join(thread, NULL); + pthread_join(thread, nullptr); #endif - if (robotArms != NULL) { + if (robotArms != nullptr) { for (int i = 0; i < 6; i++) free_Bound_scene(&(robotArms[i])); } @@ -225,7 +225,7 @@ void vpSimulatorAfma6::init() */ void vpSimulatorAfma6::initDisplay() { - robotArms = NULL; + robotArms = nullptr; robotArms = new Bound_scene[6]; initArms(); setExternalCameraPosition(vpHomogeneousMatrix(0, 0, 0, 0, 0, vpMath::rad(180)) * @@ -276,7 +276,7 @@ void vpSimulatorAfma6::init(vpAfma6::vpAfma6ToolType tool, vpCameraParameters::v setCameraParameters(vpCameraParameters(1109.5735473989, 1112.1520168160, 320, 240)); - if (robotArms != NULL) { + if (robotArms != nullptr) { while (get_displayBusy()) vpTime::wait(2); free_Bound_scene(&(robotArms[5])); @@ -299,7 +299,7 @@ void vpSimulatorAfma6::init(vpAfma6::vpAfma6ToolType tool, vpCameraParameters::v setCameraParameters(vpCameraParameters(852.6583228197, 854.8084224761, 320, 240)); - if (robotArms != NULL) { + if (robotArms != nullptr) { while (get_displayBusy()) vpTime::wait(2); free_Bound_scene(&(robotArms[5])); @@ -322,7 +322,7 @@ void vpSimulatorAfma6::init(vpAfma6::vpAfma6ToolType tool, vpCameraParameters::v setCameraParameters(vpCameraParameters(853.4876600807, 856.0339170706, 320, 240)); - if (robotArms != NULL) { + if 
(robotArms != nullptr) { while (get_displayBusy()) vpTime::wait(2); free_Bound_scene(&(robotArms[5])); @@ -2027,7 +2027,7 @@ bool vpSimulatorAfma6::savePosFile(const std::string &filename, const vpColVecto { FILE *fd; fd = fopen(filename.c_str(), "w"); - if (fd == NULL) + if (fd == nullptr) return false; fprintf(fd, "\ @@ -2450,7 +2450,7 @@ bool vpSimulatorAfma6::setPosition(const vpHomogeneousMatrix &cdMo_, vpImage - // libdc1394-2.0.0-rc7 , d(NULL), - // list(NULL) + // libdc1394-2.0.0-rc7 , d(nullptr), + // list(nullptr) // #endif // { // throw vpException(vpException::functionNotImplementedError,"Not diff --git a/modules/sensor/include/visp3/sensor/vpOccipitalStructure.h b/modules/sensor/include/visp3/sensor/vpOccipitalStructure.h index 4f06577a7a..48173e26dd 100644 --- a/modules/sensor/include/visp3/sensor/vpOccipitalStructure.h +++ b/modules/sensor/include/visp3/sensor/vpOccipitalStructure.h @@ -163,7 +163,7 @@ pcl::PointCloud::Ptr pointcloud(new pcl::PointCloud); - sc.acquire(NULL, NULL, NULL, pointcloud); + sc.acquire(nullptr, nullptr, nullptr, pointcloud); pcl::visualization::PCLVisualizer::Ptr viewer (new pcl::visualization::PCLVisualizer ("3D Viewer")); pcl::visualization::PointCloudColorHandlerRGBField rgb(pointcloud); @@ -172,7 +172,7 @@ viewer->setCameraPosition(0, 0, -0.5, 0, -1, 0); while (true) { - sc.acquire(NULL, NULL, NULL, pointcloud); + sc.acquire(nullptr, nullptr, nullptr, pointcloud); static bool update = false; if (!update) { @@ -284,33 +284,33 @@ class VISP_EXPORT vpOccipitalStructure vpOccipitalStructure(); ~vpOccipitalStructure(); - void acquire(vpImage &gray, bool undistorted = false, double *ts = NULL); - void acquire(vpImage &rgb, bool undistorted = false, double *ts = NULL); + void acquire(vpImage &gray, bool undistorted = false, double *ts = nullptr); + void acquire(vpImage &rgb, bool undistorted = false, double *ts = nullptr); - void acquire(vpImage *rgb, vpImage *depth, vpColVector *acceleration_data = NULL, - vpColVector *gyroscope_data = NULL, bool undistorted = false, double *ts = NULL); - void acquire(vpImage *gray, vpImage *depth, vpColVector *acceleration_data = NULL, - vpColVector *gyroscope_data = NULL, bool undistorted = false, double *ts = NULL); + void acquire(vpImage *rgb, vpImage *depth, vpColVector *acceleration_data = nullptr, + vpColVector *gyroscope_data = nullptr, bool undistorted = false, double *ts = nullptr); + void acquire(vpImage *gray, vpImage *depth, vpColVector *acceleration_data = nullptr, + vpColVector *gyroscope_data = nullptr, bool undistorted = false, double *ts = nullptr); void acquire(unsigned char *const data_image, unsigned char *const data_depth, - std::vector *const data_pointCloud = NULL, unsigned char *const data_infrared = NULL, - vpColVector *acceleration_data = NULL, vpColVector *gyroscope_data = NULL, bool undistorted = true, - double *ts = NULL); + std::vector *const data_pointCloud = nullptr, unsigned char *const data_infrared = nullptr, + vpColVector *acceleration_data = nullptr, vpColVector *gyroscope_data = nullptr, bool undistorted = true, + double *ts = nullptr); #ifdef VISP_HAVE_PCL void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, - unsigned char *const data_infrared = NULL, vpColVector *acceleration_data = NULL, - vpColVector *gyroscope_data = NULL, bool undistorted = true, double *ts = NULL); + unsigned char *const data_infrared = nullptr, vpColVector *acceleration_data = nullptr, + vpColVector 
*gyroscope_data = nullptr, bool undistorted = true, double *ts = nullptr); void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, - unsigned char *const data_infrared = NULL, vpColVector *acceleration_data = NULL, - vpColVector *gyroscope_data = NULL, bool undistorted = true, double *ts = NULL); + unsigned char *const data_infrared = nullptr, vpColVector *acceleration_data = nullptr, + vpColVector *gyroscope_data = nullptr, bool undistorted = true, double *ts = nullptr); #endif void getIMUVelocity(vpColVector *imu_vel, double *ts); void getIMUAcceleration(vpColVector *imu_acc, double *ts); - void getIMUData(vpColVector *imu_vel, vpColVector *imu_acc, double *ts = NULL); + void getIMUData(vpColVector *imu_vel, vpColVector *imu_acc, double *ts = nullptr); bool open(const ST::CaptureSessionSettings &settings); void close(); diff --git a/modules/sensor/include/visp3/sensor/vpPylonGrabber.h b/modules/sensor/include/visp3/sensor/vpPylonGrabber.h index 2842d9584c..726b7064ba 100644 --- a/modules/sensor/include/visp3/sensor/vpPylonGrabber.h +++ b/modules/sensor/include/visp3/sensor/vpPylonGrabber.h @@ -165,7 +165,7 @@ class VISP_EXPORT vpPylonGrabber : public vpFrameGrabber */ virtual std::ostream &getCameraInfo(std::ostream &os) = 0; /*! - Return the handler to the active camera or NULL if the camera is not + Return the handler to the active camera or nullptr if the camera is not connected. This function was designed to provide a direct access to the Pylon SDK to get access to advanced functionalities that are not implemented in this class. diff --git a/modules/sensor/include/visp3/sensor/vpRealSense.h b/modules/sensor/include/visp3/sensor/vpRealSense.h index d64c5ed770..05975be0b9 100644 --- a/modules/sensor/include/visp3/sensor/vpRealSense.h +++ b/modules/sensor/include/visp3/sensor/vpRealSense.h @@ -212,7 +212,7 @@ rs.getIntrinsics(rs::stream::infrared).width); #endif while (1) { - rs.acquire((unsigned char *) Ic.bitmap, NULL, NULL, Ii.bitmap); + rs.acquire((unsigned char *) Ic.bitmap, nullptr, nullptr, Ii.bitmap); vpDisplay::display(Ic); vpDisplay::display(Ii); vpDisplay::flush(Ic); @@ -254,7 +254,7 @@ int main() { #endif while (1) { - rs.acquire((unsigned char *) Ic.bitmap, (unsigned char *) Id_raw.bitmap, NULL, NULL, NULL, + rs.acquire((unsigned char *) Ic.bitmap, (unsigned char *) Id_raw.bitmap, nullptr, nullptr, nullptr, rs::stream::color, rs::stream::depth_aligned_to_color); vpImageConvert::createDepthHistogram(Id_raw, Id); vpDisplay::display(Ic); @@ -358,7 +358,7 @@ class VISP_EXPORT vpRealSense void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, unsigned char *const data_infrared, - unsigned char *const data_infrared2 = NULL, const rs::stream &stream_color = rs::stream::color, + unsigned char *const data_infrared2 = nullptr, const rs::stream &stream_color = rs::stream::color, const rs::stream &stream_depth = rs::stream::depth, const rs::stream &stream_infrared = rs::stream::infrared, const rs::stream &stream_infrared2 = rs::stream::infrared2); @@ -372,13 +372,13 @@ class VISP_EXPORT vpRealSense void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, - unsigned char *const data_infrared, unsigned char *const data_infrared2 = NULL, + unsigned char *const data_infrared, unsigned char *const data_infrared2 = nullptr, const rs::stream 
&stream_color = rs::stream::color, const rs::stream &stream_depth = rs::stream::depth, const rs::stream &stream_infrared = rs::stream::infrared, const rs::stream &stream_infrared2 = rs::stream::infrared2); void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, - unsigned char *const data_infrared, unsigned char *const data_infrared2 = NULL, + unsigned char *const data_infrared, unsigned char *const data_infrared2 = nullptr, const rs::stream &stream_color = rs::stream::color, const rs::stream &stream_depth = rs::stream::depth, const rs::stream &stream_infrared = rs::stream::infrared, const rs::stream &stream_infrared2 = rs::stream::infrared2); diff --git a/modules/sensor/include/visp3/sensor/vpRealSense2.h b/modules/sensor/include/visp3/sensor/vpRealSense2.h index 40f5c9c0f5..3e40c7ed14 100644 --- a/modules/sensor/include/visp3/sensor/vpRealSense2.h +++ b/modules/sensor/include/visp3/sensor/vpRealSense2.h @@ -146,7 +146,7 @@ pcl::PointCloud::Ptr pointcloud(new pcl::PointCloud); - rs.acquire(NULL, NULL, NULL, pointcloud); + rs.acquire(nullptr, nullptr, nullptr, pointcloud); pcl::visualization::PCLVisualizer::Ptr viewer (new pcl::visualization::PCLVisualizer ("3D Viewer")); pcl::visualization::PointCloudColorHandlerRGBField rgb(pointcloud); @@ -155,7 +155,7 @@ viewer->setCameraPosition(0, 0, -0.5, 0, -1, 0); while (true) { - rs.acquire(NULL, NULL, NULL, pointcloud); + rs.acquire(nullptr, nullptr, nullptr, pointcloud); static bool update = false; if (!update) { @@ -202,7 +202,7 @@ #endif while (true) { - rs.acquire((unsigned char *) Ic.bitmap, NULL, NULL, Ii.bitmap); + rs.acquire((unsigned char *) Ic.bitmap, nullptr, nullptr, Ii.bitmap); vpDisplay::display(Ic); vpDisplay::display(Ii); vpDisplay::flush(Ic); @@ -243,7 +243,7 @@ rs2::align align_to(RS2_STREAM_COLOR); while (true) { - rs.acquire((unsigned char *) Ic.bitmap, (unsigned char *) Id_raw.bitmap, NULL, NULL, &align_to); + rs.acquire((unsigned char *) Ic.bitmap, (unsigned char *) Id_raw.bitmap, nullptr, nullptr, &align_to); vpImageConvert::createDepthHistogram(Id_raw, Id); vpDisplay::display(Ic); vpDisplay::display(Id); @@ -290,39 +290,39 @@ class VISP_EXPORT vpRealSense2 vpRealSense2(); virtual ~vpRealSense2(); - void acquire(vpImage &grey, double *ts = NULL); - void acquire(vpImage &color, double *ts = NULL); + void acquire(vpImage &grey, double *ts = nullptr); + void acquire(vpImage &color, double *ts = nullptr); void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, unsigned char *const data_infrared, - rs2::align *const align_to = NULL, double *ts = NULL); + rs2::align *const align_to = nullptr, double *ts = nullptr); void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, unsigned char *const data_infrared1, - unsigned char *const data_infrared2, rs2::align *const align_to, double *ts = NULL); + unsigned char *const data_infrared2, rs2::align *const align_to, double *ts = nullptr); #if (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) - void acquire(vpImage *left, vpImage *right, double *ts = NULL); + void acquire(vpImage *left, vpImage *right, double *ts = nullptr); void acquire(vpImage *left, vpImage *right, vpHomogeneousMatrix *cMw, - vpColVector *odo_vel, vpColVector *odo_acc, unsigned int *confidence = NULL, double *ts = NULL); + vpColVector *odo_vel, vpColVector *odo_acc, unsigned int *confidence = nullptr, double 
*ts = nullptr); void acquire(vpImage *left, vpImage *right, vpHomogeneousMatrix *cMw, vpColVector *odo_vel, vpColVector *odo_acc, vpColVector *imu_vel, vpColVector *imu_acc, - unsigned int *tracker_confidence = NULL, double *ts = NULL); + unsigned int *tracker_confidence = nullptr, double *ts = nullptr); #endif #ifdef VISP_HAVE_PCL void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, - unsigned char *const data_infrared = NULL, rs2::align *const align_to = NULL, double *ts = NULL); + unsigned char *const data_infrared = nullptr, rs2::align *const align_to = nullptr, double *ts = nullptr); void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, unsigned char *const data_infrared1, unsigned char *const data_infrared2, rs2::align *const align_to, - double *ts = NULL); + double *ts = nullptr); void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, - unsigned char *const data_infrared = NULL, rs2::align *const align_to = NULL, double *ts = NULL); + unsigned char *const data_infrared = nullptr, rs2::align *const align_to = nullptr, double *ts = nullptr); void acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, unsigned char *const data_infrared1, unsigned char *const data_infrared2, rs2::align *const align_to, - double *ts = NULL); + double *ts = nullptr); #endif void close(); @@ -352,7 +352,7 @@ class VISP_EXPORT vpRealSense2 inline float getMaxZ() const { return m_max_Z; } #if (RS2_API_VERSION > ((2 * 10000) + (31 * 100) + 0)) - unsigned int getOdometryData(vpHomogeneousMatrix *cMw, vpColVector *odo_vel, vpColVector *odo_acc, double *ts = NULL); + unsigned int getOdometryData(vpHomogeneousMatrix *cMw, vpColVector *odo_vel, vpColVector *odo_acc, double *ts = nullptr); #endif //! Get a reference to `rs2::pipeline`. diff --git a/modules/sensor/include/visp3/sensor/vpSickLDMRS.h b/modules/sensor/include/visp3/sensor/vpSickLDMRS.h index de42458366..650e6aec4d 100644 --- a/modules/sensor/include/visp3/sensor/vpSickLDMRS.h +++ b/modules/sensor/include/visp3/sensor/vpSickLDMRS.h @@ -116,7 +116,7 @@ class VISP_EXPORT vpSickLDMRS : public vpLaserScanner /*! Copy constructor. 
*/ vpSickLDMRS(const vpSickLDMRS &sick) - : vpLaserScanner(sick), socket_fd(-1), body(NULL), vAngle(), time_offset(0), isFirstMeasure(true), + : vpLaserScanner(sick), socket_fd(-1), body(nullptr), vAngle(), time_offset(0), isFirstMeasure(true), maxlen_body(104000) { *this = sick; diff --git a/modules/sensor/include/visp3/sensor/vpUeyeGrabber.h b/modules/sensor/include/visp3/sensor/vpUeyeGrabber.h index 30e1870bec..2a0370c23b 100644 --- a/modules/sensor/include/visp3/sensor/vpUeyeGrabber.h +++ b/modules/sensor/include/visp3/sensor/vpUeyeGrabber.h @@ -86,8 +86,8 @@ class VISP_EXPORT vpUeyeGrabber vpUeyeGrabber(); virtual ~vpUeyeGrabber(); - void acquire(vpImage &I, double *timestamp_camera = NULL, std::string *timestamp_system = NULL); - void acquire(vpImage &I, double *timestamp_camera = NULL, std::string *timestamp_system = NULL); + void acquire(vpImage &I, double *timestamp_camera = nullptr, std::string *timestamp_system = nullptr); + void acquire(vpImage &I, double *timestamp_camera = nullptr, std::string *timestamp_system = nullptr); std::string getActiveCameraModel() const; std::string getActiveCameraSerialNumber() const; diff --git a/modules/sensor/include/visp3/sensor/vpV4l2Grabber.h b/modules/sensor/include/visp3/sensor/vpV4l2Grabber.h index 0a5ace7663..9e02200464 100644 --- a/modules/sensor/include/visp3/sensor/vpV4l2Grabber.h +++ b/modules/sensor/include/visp3/sensor/vpV4l2Grabber.h @@ -188,9 +188,9 @@ class VISP_EXPORT vpV4l2Grabber : public vpFrameGrabber // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpV4l2Grabber(const vpV4l2Grabber &) - // : fd(-1), device(), cap(), streamparm(), inp(NULL), std(NULL), - // fmt(NULL), ctl(NULL), - // fmt_v4l2(), fmt_me(), reqbufs(), buf_v4l2(NULL), buf_me(NULL), + // : fd(-1), device(), cap(), streamparm(), inp(nullptr), std(nullptr), + // fmt(nullptr), ctl(nullptr), + // fmt_v4l2(), fmt_me(), reqbufs(), buf_v4l2(nullptr), buf_me(nullptr), // queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), // m_nbuffers(3), field(0), streaming(false), // m_input(vpV4l2Grabber::DEFAULT_INPUT), diff --git a/modules/sensor/src/force-torque/vpComedi.cpp b/modules/sensor/src/force-torque/vpComedi.cpp index b52ae080fd..c4bee5e8a3 100644 --- a/modules/sensor/src/force-torque/vpComedi.cpp +++ b/modules/sensor/src/force-torque/vpComedi.cpp @@ -46,7 +46,7 @@ Default constructor. */ vpComedi::vpComedi() - : m_device("/dev/comedi0"), m_handler(NULL), m_subdevice(0), m_range(0), m_aref(AREF_DIFF), m_nchannel(6), + : m_device("/dev/comedi0"), m_handler(nullptr), m_subdevice(0), m_range(0), m_aref(AREF_DIFF), m_nchannel(6), m_range_info(6), m_maxdata(6), m_chanlist(6) { } @@ -91,7 +91,7 @@ void vpComedi::close() { if (m_handler) { comedi_close(m_handler); - m_handler = NULL; + m_handler = nullptr; } } @@ -104,7 +104,7 @@ void vpComedi::close() */ std::vector vpComedi::getRawData() const { - if (m_handler == NULL) { + if (m_handler == nullptr) { throw vpException(vpException::fatalError, "Comedi device not open"); } // Get raw data @@ -132,7 +132,7 @@ std::vector vpComedi::getRawData() const */ vpColVector vpComedi::getPhyData() const { - if (m_handler == NULL) { + if (m_handler == nullptr) { throw vpException(vpException::fatalError, "Comedi device not open"); } // Get raw data @@ -154,7 +154,7 @@ vpColVector vpComedi::getPhyData() const //! getPhyDataAsync(). 
std::string vpComedi::getPhyDataUnits() const { - if (m_handler == NULL) { + if (m_handler == nullptr) { throw vpException(vpException::fatalError, "Comedi device not open"); } std::string units; diff --git a/modules/sensor/src/force-torque/vpForceTorqueAtiSensor.cpp b/modules/sensor/src/force-torque/vpForceTorqueAtiSensor.cpp index bd5cc76534..b65ca6e197 100644 --- a/modules/sensor/src/force-torque/vpForceTorqueAtiSensor.cpp +++ b/modules/sensor/src/force-torque/vpForceTorqueAtiSensor.cpp @@ -42,7 +42,7 @@ #include #include -static Calibration *s_calibinfo = NULL; //!< Struct containing calibration information +static Calibration *s_calibinfo = nullptr; //!< Struct containing calibration information /*! * Default constructor. @@ -120,10 +120,10 @@ void vpForceTorqueAtiSensor::unbias() */ void vpForceTorqueAtiSensor::close() { - if (s_calibinfo != NULL) { + if (s_calibinfo != nullptr) { // free memory allocated to calibration structure destroyCalibration(s_calibinfo); - s_calibinfo = NULL; + s_calibinfo = nullptr; } vpComedi::close(); } @@ -202,7 +202,7 @@ void vpForceTorqueAtiSensor::setCalibrationFile(const std::string &calibfile, un // Create calibration struct s_calibinfo = createCalibration(m_calibfile.c_str(), m_index); - if (s_calibinfo == NULL) { + if (s_calibinfo == nullptr) { throw vpException(vpException::fatalError, "Calibration file %s couldn't be loaded", m_calibfile.c_str()); } @@ -231,7 +231,7 @@ int main() */ std::ostream &operator<<(std::ostream &os, const vpForceTorqueAtiSensor &ati) { - if (s_calibinfo == NULL) { + if (s_calibinfo == nullptr) { os << "Calibration Information is not available" << std::endl; return os; } diff --git a/modules/sensor/src/framegrabber/1394/vp1394CMUGrabber.cpp b/modules/sensor/src/framegrabber/1394/vp1394CMUGrabber.cpp index f6aec9b132..a2364259b1 100644 --- a/modules/sensor/src/framegrabber/1394/vp1394CMUGrabber.cpp +++ b/modules/sensor/src/framegrabber/1394/vp1394CMUGrabber.cpp @@ -69,7 +69,7 @@ vp1394CMUGrabber::~vp1394CMUGrabber() // delete camera instance if (camera) { delete camera; - camera = NULL; + camera = nullptr; } } @@ -206,7 +206,7 @@ void vp1394CMUGrabber::acquire(vpImage &I) { // get image data unsigned long length; - unsigned char *rawdata = NULL; + unsigned char *rawdata = nullptr; int dropped; unsigned int size; @@ -268,7 +268,7 @@ void vp1394CMUGrabber::acquire(vpImage &I) { // get image data unsigned long length; - unsigned char *rawdata = NULL; + unsigned char *rawdata = nullptr; int dropped; unsigned int size; diff --git a/modules/sensor/src/framegrabber/1394/vp1394TwoGrabber.cpp b/modules/sensor/src/framegrabber/1394/vp1394TwoGrabber.cpp index 512e4fa2ab..9f1cab44b0 100644 --- a/modules/sensor/src/framegrabber/1394/vp1394TwoGrabber.cpp +++ b/modules/sensor/src/framegrabber/1394/vp1394TwoGrabber.cpp @@ -118,12 +118,12 @@ int main() */ vp1394TwoGrabber::vp1394TwoGrabber(bool reset) - : camera(NULL), cameras(NULL), num_cameras(0), camera_id(0), verbose(false), camIsOpen(NULL), + : camera(nullptr), cameras(nullptr), num_cameras(0), camera_id(0), verbose(false), camIsOpen(nullptr), num_buffers(4), // ring buffer size - isDataModified(NULL), initialShutterMode(NULL), dataCam(NULL) + isDataModified(nullptr), initialShutterMode(nullptr), dataCam(nullptr) #ifdef VISP_HAVE_DC1394_CAMERA_ENUMERATE // new API > libdc1394-2.0.0-rc7 , - d(NULL), list(NULL) + d(nullptr), list(nullptr) #endif { // protected members @@ -1289,7 +1289,7 @@ void vp1394TwoGrabber::initialize(bool reset) if (init == false) { // Find cameras #ifdef 
VISP_HAVE_DC1394_CAMERA_ENUMERATE // new API > libdc1394-2.0.0-rc7 - if (d != NULL) + if (d != nullptr) dc1394_free(d); d = dc1394_new(); @@ -1307,7 +1307,7 @@ void vp1394TwoGrabber::initialize(bool reset) throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "No cameras found")); } - if (cameras != NULL) + if (cameras != nullptr) delete[] cameras; cameras = new dc1394camera_t *[list->num]; @@ -1331,14 +1331,14 @@ void vp1394TwoGrabber::initialize(bool reset) dc1394_reset_bus(cameras[0]); } - // if (list != NULL) + // if (list != nullptr) dc1394_camera_free_list(list); - list = NULL; + list = nullptr; #elif defined VISP_HAVE_DC1394_FIND_CAMERAS // old API <= libdc1394-2.0.0-rc7 - if (cameras != NULL) + if (cameras != nullptr) free(cameras); - cameras = NULL; + cameras = nullptr; int err = dc1394_find_cameras(&cameras, &num_cameras); if (err != DC1394_SUCCESS && err != DC1394_NO_CAMERA) { @@ -1379,7 +1379,7 @@ void vp1394TwoGrabber::initialize(bool reset) std::cout << "-----------------------------" << std::endl; } - if (camIsOpen != NULL) + if (camIsOpen != nullptr) delete[] camIsOpen; camIsOpen = new bool[num_cameras]; for (unsigned int i = 0; i < num_cameras; i++) { @@ -1486,43 +1486,43 @@ void vp1394TwoGrabber::close() #endif } } - if (camIsOpen != NULL) { + if (camIsOpen != nullptr) { delete[] camIsOpen; - camIsOpen = NULL; + camIsOpen = nullptr; } #ifdef VISP_HAVE_DC1394_CAMERA_ENUMERATE // new API > libdc1394-2.0.0-rc7 - if (cameras != NULL) { + if (cameras != nullptr) { delete[] cameras; - cameras = NULL; + cameras = nullptr; } - if (d != NULL) { + if (d != nullptr) { dc1394_free(d); - d = NULL; + d = nullptr; } #elif defined VISP_HAVE_DC1394_FIND_CAMERAS // old API <= libdc1394-2.0.0-rc7 - if (cameras != NULL) { + if (cameras != nullptr) { free(cameras); - cameras = NULL; + cameras = nullptr; } #endif - camIsOpen = NULL; + camIsOpen = nullptr; num_cameras = 0; // remove data for the parameters - if (isDataModified != NULL) { + if (isDataModified != nullptr) { delete[] isDataModified; - isDataModified = NULL; + isDataModified = nullptr; } - if (initialShutterMode != NULL) { + if (initialShutterMode != nullptr) { delete[] initialShutterMode; - initialShutterMode = NULL; + initialShutterMode = nullptr; } - if (dataCam != NULL) { + if (dataCam != nullptr) { delete[] dataCam; - dataCam = NULL; + dataCam = nullptr; } init = false; @@ -2120,7 +2120,7 @@ dc1394video_frame_t *vp1394TwoGrabber::dequeue() throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "No camera found")); } - dc1394video_frame_t *frame = NULL; + dc1394video_frame_t *frame = nullptr; if (dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_WAIT, &frame) != DC1394_SUCCESS) { vpERROR_TRACE("Error: Failed to capture from camera %d\n", camera_id); @@ -2983,10 +2983,10 @@ void vp1394TwoGrabber::resetBus() dc1394_camera_free(camera); dc1394_free(d); - d = NULL; - // if (cameras != NULL) + d = nullptr; + // if (cameras != nullptr) delete[] cameras; - cameras = NULL; + cameras = nullptr; #elif defined VISP_HAVE_DC1394_FIND_CAMERAS // old API <= libdc1394-2.0.0-rc7 setCamera(camera_id); @@ -2996,15 +2996,15 @@ void vp1394TwoGrabber::resetBus() dc1394_free_camera(cameras[i]); } free(cameras); - cameras = NULL; + cameras = nullptr; dc1394_reset_bus(camera); dc1394_free_camera(camera); #endif - if (camIsOpen != NULL) + if (camIsOpen != nullptr) delete[] camIsOpen; - camIsOpen = NULL; + camIsOpen = nullptr; num_cameras = 0; diff --git 
a/modules/sensor/src/framegrabber/directshow/vpDirectShowGrabberImpl.cpp b/modules/sensor/src/framegrabber/directshow/vpDirectShowGrabberImpl.cpp index 1657dd1695..a1603b4244 100644 --- a/modules/sensor/src/framegrabber/directshow/vpDirectShowGrabberImpl.cpp +++ b/modules/sensor/src/framegrabber/directshow/vpDirectShowGrabberImpl.cpp @@ -40,7 +40,7 @@ #include -vpDirectShowDevice *vpDirectShowGrabberImpl::deviceList = NULL; +vpDirectShowDevice *vpDirectShowGrabberImpl::deviceList = nullptr; unsigned int vpDirectShowGrabberImpl::nbDevices; /*! @@ -68,7 +68,7 @@ vpDirectShowGrabberImpl::vpDirectShowGrabberImpl() init = false; initCo = false; // COM initialization - if (FAILED(hr = CoInitializeEx(NULL, COINIT_MULTITHREADED))) { + if (FAILED(hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED))) { std::string err; HRtoStr(err); throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "Can't initialize COM\n" + err)); @@ -76,8 +76,8 @@ vpDirectShowGrabberImpl::vpDirectShowGrabberImpl() initCo = true; // create the device list - if (deviceList == NULL) { - CComPtr pVideoInputEnum = NULL; + if (deviceList == nullptr) { + CComPtr pVideoInputEnum = nullptr; if (enumerate(pVideoInputEnum)) { createDeviceList(pVideoInputEnum); @@ -94,8 +94,8 @@ vpDirectShowGrabberImpl::vpDirectShowGrabberImpl() void vpDirectShowGrabberImpl::open() { // create the device list - if (deviceList == NULL) { - CComPtr pVideoInputEnum = NULL; + if (deviceList == nullptr) { + CComPtr pVideoInputEnum = nullptr; if (enumerate(pVideoInputEnum)) { createDeviceList(pVideoInputEnum); @@ -168,7 +168,7 @@ bool vpDirectShowGrabberImpl::initDirectShow() pGraph->QueryInterface(IID_IMediaControl, reinterpret_cast(&pControl)); pGraph->QueryInterface(IID_IMediaEvent, (void **)&pEvent); - pMediaFilter->SetSyncSource(NULL); + pMediaFilter->SetSyncSource(nullptr); pMediaFilter.Release(); return true; @@ -186,11 +186,11 @@ vpDirectShowGrabberImpl::~vpDirectShowGrabberImpl() { close(); } */ bool vpDirectShowGrabberImpl::enumerate(CComPtr &ppVideoInputEnum) { - CComPtr pDevEnum = NULL; + CComPtr pDevEnum = nullptr; bool res = false; // Enumerate system devices - hr = pDevEnum.CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER); + hr = pDevEnum.CoCreateInstance(CLSID_SystemDeviceEnum, nullptr, CLSCTX_INPROC_SERVER); // if it is a success if (SUCCEEDED(hr)) { @@ -272,15 +272,15 @@ bool vpDirectShowGrabberImpl::getDevice(unsigned int n, CComPtr &pp return false; // if we can't enumerate the devices, quit - CComPtr pVideoInputEnum = NULL; + CComPtr pVideoInputEnum = nullptr; if (!enumerate(pVideoInputEnum)) return false; - CComPtr pMoniker = NULL; + CComPtr pMoniker = nullptr; bool deviceFound = false; // Enumerates the different inputs - while (pVideoInputEnum->Next(1, &pMoniker, NULL) == S_OK && !deviceFound) { + while (pVideoInputEnum->Next(1, &pMoniker, nullptr) == S_OK && !deviceFound) { // implicit conversion should work ... 
if (deviceList[n] == vpDirectShowDevice(pMoniker)) { // we get the filter @@ -358,7 +358,7 @@ bool vpDirectShowGrabberImpl::createGraph() bool vpDirectShowGrabberImpl::createSampleGrabber(CComPtr &ppGrabberFilter) { // Creates the sample grabber - hr = ppGrabberFilter.CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER); + hr = ppGrabberFilter.CoCreateInstance(CLSID_SampleGrabber, nullptr, CLSCTX_INPROC_SERVER); if (FAILED(hr)) return false; @@ -376,7 +376,7 @@ bool vpDirectShowGrabberImpl::createSampleGrabber(CComPtr &ppGrabbe mt.majortype = MEDIATYPE_Video; // ask for a connection mt.subtype = MEDIATYPE_NULL; if (FAILED(hr = pGrabberI->SetMediaType(&mt))) return false; @@ -482,12 +482,12 @@ bool vpDirectShowGrabberImpl::connectSourceToGrabber(CComPtr &_pCap /* //get the capture source's output pin CComPtr pCapSourcePin; - if(FAILED(pBuild->FindPin(_pCapSource, PINDIR_OUTPUT, NULL, NULL, false, 0, + if(FAILED(pBuild->FindPin(_pCapSource, PINDIR_OUTPUT, nullptr, nullptr, false, 0, &pCapSourcePin))) return false; //get the grabber's input pin CComPtr pGrabberInputPin; - if(FAILED(pBuild->FindPin(_pGrabberFilter, PINDIR_INPUT, NULL, NULL, false, + if(FAILED(pBuild->FindPin(_pGrabberFilter, PINDIR_INPUT, nullptr, nullptr, false, 0, &pGrabberInputPin))) return false; //connect the two of them @@ -497,21 +497,21 @@ bool vpDirectShowGrabberImpl::connectSourceToGrabber(CComPtr &_pCap //not used anymore, we can release it pGrabberInputPin.Release(); */ - if (FAILED(hr = pBuild->RenderStream(NULL, NULL, _pCapSource, NULL, _pGrabberFilter))) + if (FAILED(hr = pBuild->RenderStream(nullptr, nullptr, _pCapSource, nullptr, _pGrabberFilter))) return false; // get the Null renderer - CComPtr pNull = NULL; - if (FAILED(pNull.CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER))) + CComPtr pNull = nullptr; + if (FAILED(pNull.CoCreateInstance(CLSID_NullRenderer, nullptr, CLSCTX_INPROC_SERVER))) return false; if (FAILED(pGraph->AddFilter(pNull, L"NullRenderer")) || - FAILED(pBuild->RenderStream(NULL, NULL, _pGrabberFilter, NULL, pNull))) + FAILED(pBuild->RenderStream(nullptr, nullptr, _pGrabberFilter, nullptr, pNull))) return false; // get the capture source's output pin CComPtr pCapSourcePin; - if (FAILED(pBuild->FindPin(_pCapSource, PINDIR_OUTPUT, NULL, NULL, false, 0, &pCapSourcePin))) + if (FAILED(pBuild->FindPin(_pCapSource, PINDIR_OUTPUT, nullptr, nullptr, false, 0, &pCapSourcePin))) return false; // checks the media type of the capture filter // and if the image needs to be inverted @@ -532,7 +532,7 @@ bool vpDirectShowGrabberImpl::connectSourceToGrabber(CComPtr &_pCap */ bool vpDirectShowGrabberImpl::removeAll() { - CComPtr pEnum = NULL; + CComPtr pEnum = nullptr; CComPtr pFilter; ULONG cFetched; @@ -684,7 +684,7 @@ bool vpDirectShowGrabberImpl::setDevice(unsigned int id) */ void vpDirectShowGrabberImpl::displayDevices() { - if (deviceList == NULL) { + if (deviceList == nullptr) { throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "Initialization not done")); } @@ -721,7 +721,7 @@ bool vpDirectShowGrabberImpl::setImageSize(unsigned int width, unsigned int heig throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "Initialization not done")); } - return setFormat(width, height, NULL); + return setFormat(width, height, nullptr); } /*!
@@ -750,7 +750,7 @@ bool vpDirectShowGrabberImpl::setFormat(unsigned int width, unsigned int height, bool found = false; // gets the stream config interface - IAMStreamConfig *pConfig = NULL; + IAMStreamConfig *pConfig = nullptr; if (FAILED(hr = pBuild->FindInterface(&LOOK_UPSTREAM_ONLY, // Capture pin. / Preview pin 0, // Any media type. @@ -759,7 +759,7 @@ bool vpDirectShowGrabberImpl::setFormat(unsigned int width, unsigned int height, return false; // gets the video control interface - IAMVideoControl *pVideoControl = NULL; + IAMVideoControl *pVideoControl = nullptr; if (FAILED(hr = pBuild->FindInterface(&LOOK_UPSTREAM_ONLY, // Capture pin. / Preview pin 0, // Any media type. @@ -769,7 +769,7 @@ bool vpDirectShowGrabberImpl::setFormat(unsigned int width, unsigned int height, // get the grabber's input pin CComPtr pCapSourcePin; - if (FAILED(pBuild->FindPin(pCapSource, PINDIR_OUTPUT, NULL, NULL, false, 0, &pCapSourcePin))) + if (FAILED(pBuild->FindPin(pCapSource, PINDIR_OUTPUT, nullptr, nullptr, false, 0, &pCapSourcePin))) return false; int iCount = 0, iSize = 0; @@ -790,12 +790,12 @@ bool vpDirectShowGrabberImpl::setFormat(unsigned int width, unsigned int height, if ((pmtConfig->majortype == sgCB.connectedMediaType.majortype) && (pmtConfig->subtype == sgCB.connectedMediaType.subtype) && (pmtConfig->formattype == sgCB.connectedMediaType.formattype) && - (pmtConfig->cbFormat >= sizeof(VIDEOINFOHEADER)) && (pmtConfig->pbFormat != NULL)) { + (pmtConfig->cbFormat >= sizeof(VIDEOINFOHEADER)) && (pmtConfig->pbFormat != nullptr)) { VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER *)pmtConfig->pbFormat; LONG lWidth = pVih->bmiHeader.biWidth; LONG lHeight = pVih->bmiHeader.biHeight; - if (framerate != NULL) { + if (framerate != nullptr) { if ((unsigned int)lWidth == width && (unsigned int)lHeight == height) { pVih->AvgTimePerFrame = (LONGLONG)(10000000 / framerate); @@ -845,7 +845,7 @@ bool vpDirectShowGrabberImpl::setFormat(unsigned int width, unsigned int height, } } if (!found) - if (framerate != NULL) + if (framerate != nullptr) std::cout << "The " << width << " x " << height << " at " << framerate << " fps source image format is not available. " << std::endl << std::endl; @@ -883,7 +883,7 @@ bool vpDirectShowGrabberImpl::getStreamCapabilities() } // gets the stream config interface - IAMStreamConfig *pConfig = NULL; + IAMStreamConfig *pConfig = nullptr; if (FAILED(hr = pBuild->FindInterface(&LOOK_UPSTREAM_ONLY, // Capture pin. / Preview pin 0, // Any media type. @@ -988,7 +988,7 @@ bool vpDirectShowGrabberImpl::setMediaType(int mediaTypeID) } // gets the stream config interface - IAMStreamConfig *pConfig = NULL; + IAMStreamConfig *pConfig = nullptr; if (FAILED(hr = pBuild->FindInterface(&LOOK_UPSTREAM_ONLY, // Capture pin. / Preview pin 0, // Any media type. @@ -1030,7 +1030,7 @@ int vpDirectShowGrabberImpl::getMediaType() VIDEOINFOHEADER *pVihConnected = (VIDEOINFOHEADER *)sgCB.connectedMediaType.pbFormat; // gets the stream config interface - IAMStreamConfig *pConfig = NULL; + IAMStreamConfig *pConfig = nullptr; if (FAILED(hr = pBuild->FindInterface(&LOOK_UPSTREAM_ONLY, // Capture pin. / Preview pin 0, // Any media type. 
@@ -1055,7 +1055,7 @@ int vpDirectShowGrabberImpl::getMediaType() if ((pmtConfig->majortype == sgCB.connectedMediaType.majortype) && (pmtConfig->subtype == sgCB.connectedMediaType.subtype) && (pmtConfig->formattype == sgCB.connectedMediaType.formattype) && - (pmtConfig->cbFormat >= sizeof(VIDEOINFOHEADER)) && (pmtConfig->pbFormat != NULL)) { + (pmtConfig->cbFormat >= sizeof(VIDEOINFOHEADER)) && (pmtConfig->pbFormat != nullptr)) { VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER *)pmtConfig->pbFormat; if (pVih->bmiHeader.biWidth == pVihConnected->bmiHeader.biWidth && pVih->bmiHeader.biHeight == pVihConnected->bmiHeader.biHeight) @@ -1075,7 +1075,7 @@ int vpDirectShowGrabberImpl::getMediaType() */ void vpDirectShowGrabberImpl::MyDeleteMediaType(AM_MEDIA_TYPE *pmt) { - if (pmt != NULL) { + if (pmt != nullptr) { MyFreeMediaType(*pmt); // See FreeMediaType for the implementation. CoTaskMemFree(pmt); } @@ -1089,12 +1089,12 @@ void vpDirectShowGrabberImpl::MyFreeMediaType(AM_MEDIA_TYPE &mt) if (mt.cbFormat != 0) { CoTaskMemFree((PVOID)mt.pbFormat); mt.cbFormat = 0; - mt.pbFormat = NULL; + mt.pbFormat = nullptr; } - if (mt.pUnk != NULL) { + if (mt.pUnk != nullptr) { // Unecessary because pUnk should not be used, but safest. mt.pUnk->Release(); - mt.pUnk = NULL; + mt.pUnk = nullptr; } } diff --git a/modules/sensor/src/framegrabber/directshow/vpDirectShowSampleGrabberI.cpp b/modules/sensor/src/framegrabber/directshow/vpDirectShowSampleGrabberI.cpp index 6d6e89a78b..0307a84319 100644 --- a/modules/sensor/src/framegrabber/directshow/vpDirectShowSampleGrabberI.cpp +++ b/modules/sensor/src/framegrabber/directshow/vpDirectShowSampleGrabberI.cpp @@ -51,7 +51,7 @@ vpDirectShowSampleGrabberI::vpDirectShowSampleGrabberI() : acqGrayDemand(false), acqRGBaDemand(false), specialMediaType(false), invertedSource(false) { // semaphore(0), max value = 1 - copySem = CreateSemaphore(NULL, 0, 1, NULL); + copySem = CreateSemaphore(nullptr, 0, 1, nullptr); } /*! @@ -65,7 +65,7 @@ vpDirectShowSampleGrabberI::~vpDirectShowSampleGrabberI() STDMETHODIMP vpDirectShowSampleGrabberI::QueryInterface(REFIID riid, void **ppvObject) { - if (NULL == ppvObject) + if (nullptr == ppvObject) return E_POINTER; if (riid == __uuidof(IUnknown)) { *ppvObject = static_cast(this); @@ -270,7 +270,7 @@ STDMETHODIMP vpDirectShowSampleGrabberI::BufferCB(double Time, BYTE *pBuffer, lo } // increment the semaphore - allows acquire to continue execution - ReleaseSemaphore(copySem, 1, NULL); + ReleaseSemaphore(copySem, 1, nullptr); } return S_OK; } diff --git a/modules/sensor/src/framegrabber/flycapture/vpFlyCaptureGrabber.cpp b/modules/sensor/src/framegrabber/flycapture/vpFlyCaptureGrabber.cpp index 69f442832c..a982ee310c 100644 --- a/modules/sensor/src/framegrabber/flycapture/vpFlyCaptureGrabber.cpp +++ b/modules/sensor/src/framegrabber/flycapture/vpFlyCaptureGrabber.cpp @@ -100,7 +100,7 @@ std::ostream &vpFlyCaptureGrabber::getCameraInfo(std::ostream &os) } /*! - Return the handler to the active camera or NULL if the camera is not + Return the handler to the active camera or nullptr if the camera is not connected. This function was designed to provide a direct access to the FlyCapture SDK to get access to advanced functionalities that are not implemented in this class. 
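The hunk above documents that vpFlyCaptureGrabber::getCameraHandler() exposes the native FlyCapture2 SDK object and returns nullptr when no camera is connected. Below is a minimal, hypothetical sketch of that contract; the connect() call, the VISP_HAVE_FLYCAPTURE guard and the header path are assumptions based on the usual ViSP sensor-module layout and are not part of this patch.

#include <iostream>
#include <visp3/sensor/vpFlyCaptureGrabber.h>

int main()
{
#ifdef VISP_HAVE_FLYCAPTURE
  vpFlyCaptureGrabber g;
  g.connect(); // assumed API: the native handler is only valid once the camera is connected

  FlyCapture2::Camera *handler = g.getCameraHandler();
  if (handler != nullptr) {
    // Direct FlyCapture2 SDK calls on *handler would go here, for features not wrapped by ViSP.
    std::cout << "Native FlyCapture2 handler available" << std::endl;
  }
  else {
    std::cout << "No camera connected: getCameraHandler() returned nullptr" << std::endl;
  }
#endif
  return 0;
}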
@@ -185,7 +185,7 @@ FlyCapture2::Camera *vpFlyCaptureGrabber::getCameraHandler() if (m_connected == true) { return &m_camera; } else { - return NULL; + return nullptr; } } diff --git a/modules/sensor/src/framegrabber/pylon/vpPylonFactory.cpp b/modules/sensor/src/framegrabber/pylon/vpPylonFactory.cpp index d08d237f46..68320a1fe5 100644 --- a/modules/sensor/src/framegrabber/pylon/vpPylonFactory.cpp +++ b/modules/sensor/src/framegrabber/pylon/vpPylonFactory.cpp @@ -64,7 +64,7 @@ vpPylonFactory &vpPylonFactory::instance() \param dev_class The device class. See vpPylonFactory::DeviceClass for valid values. \return The pointer towards the vpPylonGrabber object. It's the - caller's responsibility to destroy the object. NULL pointer will be + caller's responsibility to destroy the object. nullptr pointer will be returned if requested object can't be properly created. */ vpPylonGrabber *vpPylonFactory::createPylonGrabber(DeviceClass dev_class) @@ -77,7 +77,7 @@ vpPylonGrabber *vpPylonFactory::createPylonGrabber(DeviceClass dev_class) return new vpPylonGrabberUsb(); break; default: - return NULL; + return nullptr; break; } } diff --git a/modules/sensor/src/framegrabber/pylon/vpPylonGrabberGigE.cpp b/modules/sensor/src/framegrabber/pylon/vpPylonGrabberGigE.cpp index 126ce0bf12..88217022fe 100644 --- a/modules/sensor/src/framegrabber/pylon/vpPylonGrabberGigE.cpp +++ b/modules/sensor/src/framegrabber/pylon/vpPylonGrabberGigE.cpp @@ -105,7 +105,7 @@ std::ostream &vpPylonGrabberGigE::getCameraInfo(std::ostream &os) } /*! - Return the handler to the active camera or NULL if the camera is not + Return the handler to the active camera or nullptr if the camera is not connected. This function was designed to provide a direct access to the Pylon SDK to get access to advanced functionalities that are not implemented in this class. @@ -117,7 +117,7 @@ Pylon::CInstantCamera *vpPylonGrabberGigE::getCameraHandler() if (m_connected == true) { return &m_camera; } else { - return NULL; + return nullptr; } } diff --git a/modules/sensor/src/framegrabber/pylon/vpPylonGrabberUsb.cpp b/modules/sensor/src/framegrabber/pylon/vpPylonGrabberUsb.cpp index 74ca4c0377..114d1780d2 100644 --- a/modules/sensor/src/framegrabber/pylon/vpPylonGrabberUsb.cpp +++ b/modules/sensor/src/framegrabber/pylon/vpPylonGrabberUsb.cpp @@ -105,7 +105,7 @@ std::ostream &vpPylonGrabberUsb::getCameraInfo(std::ostream &os) } /*! - Return the handler to the active camera or NULL if the camera is not + Return the handler to the active camera or nullptr if the camera is not connected. This function was designed to provide a direct access to the Pylon SDK to get access to advanced functionalities that are not implemented in this class. 
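The vpPylonFactory::createPylonGrabber() documentation touched above makes two points worth illustrating: the returned pointer may be nullptr when the requested grabber cannot be created, and the caller owns (and must delete) the object. The following is a short sketch of that ownership contract; the BASLER_GIGE enumerator, the VISP_HAVE_PYLON guard and the header path are assumptions that do not appear in this patch.

#include <cstdlib>
#include <iostream>
#include <visp3/sensor/vpPylonFactory.h>

int main()
{
#ifdef VISP_HAVE_PYLON
  vpPylonFactory &factory = vpPylonFactory::instance();

  // BASLER_GIGE is an assumed enumerator name; see vpPylonFactory::DeviceClass for the exact values.
  vpPylonGrabber *grabber = factory.createPylonGrabber(vpPylonFactory::BASLER_GIGE);
  if (grabber == nullptr) {
    std::cout << "Requested Pylon grabber could not be created" << std::endl;
    return EXIT_FAILURE;
  }

  // ... use the grabber here ...

  delete grabber; // the documentation makes the caller responsible for destroying the object
#endif
  return EXIT_SUCCESS;
}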
@@ -117,7 +117,7 @@ Pylon::CInstantCamera *vpPylonGrabberUsb::getCameraHandler() if (m_connected == true) { return &m_camera; } else { - return NULL; + return nullptr; } } diff --git a/modules/sensor/src/framegrabber/ueye/vpUeyeGrabber.cpp b/modules/sensor/src/framegrabber/ueye/vpUeyeGrabber.cpp index b0c0b64034..b90ac24d7b 100644 --- a/modules/sensor/src/framegrabber/ueye/vpUeyeGrabber.cpp +++ b/modules/sensor/src/framegrabber/ueye/vpUeyeGrabber.cpp @@ -95,7 +95,7 @@ class vpUeyeGrabber::vpUeyeGrabberImpl { public: vpUeyeGrabberImpl() - : m_hCamera((HIDS)0), m_activeCameraSelected(-1), m_pLastBuffer(NULL), m_cameraList(NULL), m_bLive(true), + : m_hCamera((HIDS)0), m_activeCameraSelected(-1), m_pLastBuffer(nullptr), m_cameraList(nullptr), m_bLive(true), m_bLiveStarted(false), m_verbose(false), m_I_temp() { ZeroMemory(&m_SensorInfo, sizeof(SENSORINFO)); @@ -140,7 +140,7 @@ class vpUeyeGrabber::vpUeyeGrabberImpl if (ret == IS_SUCCESS) { INT dummy = 0; - char *pLast = NULL, *pMem = NULL; + char *pLast = nullptr, *pMem = nullptr; is_GetActSeqBuf(m_hCamera, &dummy, &pMem, &pLast); m_pLastBuffer = pLast; @@ -149,14 +149,14 @@ class vpUeyeGrabber::vpUeyeGrabberImpl return; int nNum = getImageNum(m_pLastBuffer); - if (timestamp_camera != NULL || timestamp_system != NULL) { + if (timestamp_camera != nullptr || timestamp_system != nullptr) { int nImageID = getImageID(m_pLastBuffer); UEYEIMAGEINFO ImageInfo; if (is_GetImageInfo(m_hCamera, nImageID, &ImageInfo, sizeof(ImageInfo)) == IS_SUCCESS) { - if (timestamp_camera != NULL) { + if (timestamp_camera != nullptr) { *timestamp_camera = static_cast(ImageInfo.u64TimestampDevice) / 10000.; } - if (timestamp_system != NULL) { + if (timestamp_system != nullptr) { std::stringstream ss; ss << ImageInfo.TimestampSystem.wYear << ":" << std::setfill('0') << std::setw(2) << ImageInfo.TimestampSystem.wMonth << ":" << std::setfill('0') << std::setw(2) @@ -243,7 +243,7 @@ class vpUeyeGrabber::vpUeyeGrabberImpl if (ret == IS_SUCCESS) { INT dummy = 0; - char *pLast = NULL, *pMem = NULL; + char *pLast = nullptr, *pMem = nullptr; is_GetActSeqBuf(m_hCamera, &dummy, &pMem, &pLast); m_pLastBuffer = pLast; @@ -252,14 +252,14 @@ class vpUeyeGrabber::vpUeyeGrabberImpl return; int nNum = getImageNum(m_pLastBuffer); - if (timestamp_camera != NULL || timestamp_system != NULL) { + if (timestamp_camera != nullptr || timestamp_system != nullptr) { int nImageID = getImageID(m_pLastBuffer); UEYEIMAGEINFO ImageInfo; if (is_GetImageInfo(m_hCamera, nImageID, &ImageInfo, sizeof(ImageInfo)) == IS_SUCCESS) { - if (timestamp_camera != NULL) { + if (timestamp_camera != nullptr) { *timestamp_camera = static_cast(ImageInfo.u64TimestampDevice) / 10000.; } - if (timestamp_system != NULL) { + if (timestamp_system != nullptr) { std::stringstream ss; ss << ImageInfo.TimestampSystem.wYear << ":" << std::setfill('0') << std::setw(2) << ImageInfo.TimestampSystem.wMonth << ":" << std::setfill('0') << std::setw(2) @@ -321,7 +321,7 @@ class vpUeyeGrabber::vpUeyeGrabberImpl bool allocImages() { - m_pLastBuffer = NULL; + m_pLastBuffer = nullptr; int nWidth = 0; int nHeight = 0; @@ -424,7 +424,7 @@ class vpUeyeGrabber::vpUeyeGrabberImpl enableEvent(IS_SET_EVENT_FRAME); } - m_pLastBuffer = NULL; + m_pLastBuffer = nullptr; return ret; } @@ -471,8 +471,8 @@ class vpUeyeGrabber::vpUeyeGrabberImpl int ret = 0; m_event = event; #ifndef __linux__ - m_hEvent = CreateEvent(NULL, FALSE, FALSE, NULL); - if (m_hEvent == NULL) { + m_hEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr); + if (m_hEvent == nullptr) { 
return -1; } ret = is_InitEvent(m_hCamera, m_hEvent, m_event); @@ -497,14 +497,14 @@ class vpUeyeGrabber::vpUeyeGrabberImpl void freeImages() { - m_pLastBuffer = NULL; + m_pLastBuffer = nullptr; // printf ("freeing image buffers\n"); for (unsigned int i = 0; i < sizeof(m_Images) / sizeof(m_Images[0]); i++) { - if (NULL != m_Images[i].pBuf) { + if (nullptr != m_Images[i].pBuf) { is_FreeImageMem(m_hCamera, m_Images[i].pBuf, m_Images[i].nImageID); } - m_Images[i].pBuf = NULL; + m_Images[i].pBuf = nullptr; m_Images[i].nImageID = 0; m_Images[i].nImageSeqNum = 0; } @@ -1027,14 +1027,14 @@ class vpUeyeGrabber::vpUeyeGrabberImpl if (auto_wb) { dblAutoWb = 0.0; - is_SetAutoParameter(m_hCamera, IS_SET_AUTO_WB_ONCE, &dblAutoWb, NULL); + is_SetAutoParameter(m_hCamera, IS_SET_AUTO_WB_ONCE, &dblAutoWb, nullptr); dblAutoWb = 1.0; - is_SetAutoParameter(m_hCamera, IS_SET_ENABLE_AUTO_WHITEBALANCE, &dblAutoWb, NULL); + is_SetAutoParameter(m_hCamera, IS_SET_ENABLE_AUTO_WHITEBALANCE, &dblAutoWb, nullptr); } else { dblAutoWb = 0.0; - is_SetAutoParameter(m_hCamera, IS_SET_AUTO_WB_ONCE, &dblAutoWb, NULL); - is_SetAutoParameter(m_hCamera, IS_SET_ENABLE_AUTO_WHITEBALANCE, &dblAutoWb, NULL); + is_SetAutoParameter(m_hCamera, IS_SET_AUTO_WB_ONCE, &dblAutoWb, nullptr); + is_SetAutoParameter(m_hCamera, IS_SET_ENABLE_AUTO_WHITEBALANCE, &dblAutoWb, nullptr); } } @@ -1124,7 +1124,7 @@ vpUeyeGrabber::~vpUeyeGrabber() { delete m_impl; } * Capture a new grayscale image. * * \param[out] I : Captured image. - * \param[out] timestamp_camera : Time of image capture in milli-seconds with a resolution of 0.1 μs, or NULL if not + * \param[out] timestamp_camera : Time of image capture in milli-seconds with a resolution of 0.1 μs, or nullptr if not * wanted. The time of image capture is defined as: * - The time when a (hardware or software) trigger event is received by the camera in trigger mode. * The delay between the receipt of the trigger signal and the start of exposure depends on the sensor. @@ -1149,7 +1149,7 @@ void vpUeyeGrabber::acquire(vpImage &I, double *timestamp_camera, /*! * Capture a new color image. * \param[out] I : Captured image. - * \param[out] timestamp_camera : Time of image capture in milli-seconds with a resolution of 0.1 μs, or NULL if not + * \param[out] timestamp_camera : Time of image capture in milli-seconds with a resolution of 0.1 μs, or nullptr if not * wanted. The time of image capture is defined as: * - The time when a (hardware or software) trigger event is received by the camera in trigger mode. * The delay between the receipt of the trigger signal and the start of exposure depends on the sensor. 
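The vpUeyeGrabber::acquire() documentation above explains that both timestamp outputs default to nullptr and can be omitted individually. A minimal usage sketch follows, requesting only the camera timestamp; the open() call, the VISP_HAVE_UEYE guard and the exact device-selection behaviour are assumptions, since only the acquire() prototypes and documentation appear in this patch.

#include <iostream>
#include <visp3/core/vpImage.h>
#include <visp3/sensor/vpUeyeGrabber.h>

int main()
{
#ifdef VISP_HAVE_UEYE
  vpUeyeGrabber g;
  vpImage<unsigned char> I;
  g.open(I); // assumed API: opens the first available camera and resizes I to the sensor format

  double timestamp_camera = 0.;
  for (int i = 0; i < 10; ++i) {
    // Only the camera timestamp is requested; timestamp_system keeps its nullptr default.
    g.acquire(I, &timestamp_camera);
    std::cout << "Frame " << i << " captured at " << timestamp_camera << " ms (camera clock)" << std::endl;
  }
#endif
  return 0;
}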
diff --git a/modules/sensor/src/framegrabber/ueye/vpUeyeGrabber_impl.h b/modules/sensor/src/framegrabber/ueye/vpUeyeGrabber_impl.h index 3d161fac99..7f6e1c85de 100644 --- a/modules/sensor/src/framegrabber/ueye/vpUeyeGrabber_impl.h +++ b/modules/sensor/src/framegrabber/ueye/vpUeyeGrabber_impl.h @@ -38,7 +38,7 @@ class CameraList { public: - CameraList() : m_pCamList(NULL), m_CamInfo() + CameraList() : m_pCamList(nullptr), m_CamInfo() { // init the internal camera info structure ZeroMemory(&m_CamInfo, sizeof(UEYE_CAMERA_INFO)); @@ -73,7 +73,7 @@ class CameraList if (is_GetCameraList(m_pCamList) == IS_SUCCESS) { DWORD dw = m_pCamList->dwCount; delete m_pCamList; - m_pCamList = NULL; + m_pCamList = nullptr; if (dw) { // Reallocate the required camera list size @@ -101,7 +101,7 @@ class CameraList { if (m_pCamList) delete m_pCamList; - m_pCamList = NULL; + m_pCamList = nullptr; ZeroMemory(&m_CamInfo, sizeof(UEYE_CAMERA_INFO)); } diff --git a/modules/sensor/src/framegrabber/v4l2/vpV4l2Grabber.cpp b/modules/sensor/src/framegrabber/v4l2/vpV4l2Grabber.cpp index 4047d0122c..543dd82d12 100644 --- a/modules/sensor/src/framegrabber/v4l2/vpV4l2Grabber.cpp +++ b/modules/sensor/src/framegrabber/v4l2/vpV4l2Grabber.cpp @@ -144,8 +144,8 @@ int main() */ vpV4l2Grabber::vpV4l2Grabber() - : fd(-1), device(), cap(), streamparm(), inp(NULL), std(NULL), fmt(NULL), ctl(NULL), fmt_v4l2(), fmt_me(), reqbufs(), - buf_v4l2(NULL), buf_me(NULL), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), m_nbuffers(3), field(0), + : fd(-1), device(), cap(), streamparm(), inp(nullptr), std(nullptr), fmt(nullptr), ctl(nullptr), fmt_v4l2(), fmt_me(), reqbufs(), + buf_v4l2(nullptr), buf_me(nullptr), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), m_nbuffers(3), field(0), streaming(false), m_input(vpV4l2Grabber::DEFAULT_INPUT), m_framerate(vpV4l2Grabber::framerate_25fps), m_frameformat(vpV4l2Grabber::V4L2_FRAME_FORMAT), m_pixelformat(vpV4l2Grabber::V4L2_YUYV_FORMAT) { @@ -200,8 +200,8 @@ vpV4l2Grabber::vpV4l2Grabber() */ vpV4l2Grabber::vpV4l2Grabber(bool verbose) - : fd(-1), device(), cap(), streamparm(), inp(NULL), std(NULL), fmt(NULL), ctl(NULL), fmt_v4l2(), fmt_me(), reqbufs(), - buf_v4l2(NULL), buf_me(NULL), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(verbose), m_nbuffers(3), field(0), + : fd(-1), device(), cap(), streamparm(), inp(nullptr), std(nullptr), fmt(nullptr), ctl(nullptr), fmt_v4l2(), fmt_me(), reqbufs(), + buf_v4l2(nullptr), buf_me(nullptr), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(verbose), m_nbuffers(3), field(0), streaming(false), m_input(vpV4l2Grabber::DEFAULT_INPUT), m_framerate(vpV4l2Grabber::framerate_25fps), m_frameformat(vpV4l2Grabber::V4L2_FRAME_FORMAT), m_pixelformat(vpV4l2Grabber::V4L2_YUYV_FORMAT) { @@ -246,8 +246,8 @@ vpV4l2Grabber::vpV4l2Grabber(bool verbose) \endcode */ vpV4l2Grabber::vpV4l2Grabber(unsigned input, unsigned scale) - : fd(-1), device(), cap(), streamparm(), inp(NULL), std(NULL), fmt(NULL), ctl(NULL), fmt_v4l2(), fmt_me(), reqbufs(), - buf_v4l2(NULL), buf_me(NULL), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), m_nbuffers(3), field(0), + : fd(-1), device(), cap(), streamparm(), inp(nullptr), std(nullptr), fmt(nullptr), ctl(nullptr), fmt_v4l2(), fmt_me(), reqbufs(), + buf_v4l2(nullptr), buf_me(nullptr), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), m_nbuffers(3), field(0), streaming(false), m_input(vpV4l2Grabber::DEFAULT_INPUT), m_framerate(vpV4l2Grabber::framerate_25fps), m_frameformat(vpV4l2Grabber::V4L2_FRAME_FORMAT), 
m_pixelformat(vpV4l2Grabber::V4L2_YUYV_FORMAT) { @@ -292,8 +292,8 @@ vpV4l2Grabber::vpV4l2Grabber(unsigned input, unsigned scale) \endcode */ vpV4l2Grabber::vpV4l2Grabber(vpImage &I, unsigned input, unsigned scale) - : fd(-1), device(), cap(), streamparm(), inp(NULL), std(NULL), fmt(NULL), ctl(NULL), fmt_v4l2(), fmt_me(), reqbufs(), - buf_v4l2(NULL), buf_me(NULL), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), m_nbuffers(3), field(0), + : fd(-1), device(), cap(), streamparm(), inp(nullptr), std(nullptr), fmt(nullptr), ctl(nullptr), fmt_v4l2(), fmt_me(), reqbufs(), + buf_v4l2(nullptr), buf_me(nullptr), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), m_nbuffers(3), field(0), streaming(false), m_input(vpV4l2Grabber::DEFAULT_INPUT), m_framerate(vpV4l2Grabber::framerate_25fps), m_frameformat(vpV4l2Grabber::V4L2_FRAME_FORMAT), m_pixelformat(vpV4l2Grabber::V4L2_YUYV_FORMAT) { @@ -341,8 +341,8 @@ vpV4l2Grabber::vpV4l2Grabber(vpImage &I, unsigned input, unsigned */ vpV4l2Grabber::vpV4l2Grabber(vpImage &I, unsigned input, unsigned scale) - : fd(-1), device(), cap(), streamparm(), inp(NULL), std(NULL), fmt(NULL), ctl(NULL), fmt_v4l2(), fmt_me(), reqbufs(), - buf_v4l2(NULL), buf_me(NULL), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), m_nbuffers(3), field(0), + : fd(-1), device(), cap(), streamparm(), inp(nullptr), std(nullptr), fmt(nullptr), ctl(nullptr), fmt_v4l2(), fmt_me(), reqbufs(), + buf_v4l2(nullptr), buf_me(nullptr), queue(0), waiton_cpt(0), index_buffer(0), m_verbose(false), m_nbuffers(3), field(0), streaming(false), m_input(vpV4l2Grabber::DEFAULT_INPUT), m_framerate(vpV4l2Grabber::framerate_25fps), m_frameformat(vpV4l2Grabber::V4L2_FRAME_FORMAT), m_pixelformat(vpV4l2Grabber::V4L2_YUYV_FORMAT) { @@ -853,29 +853,29 @@ void vpV4l2Grabber::close() fd = -1; } - if (inp != NULL) { + if (inp != nullptr) { delete [] inp; - inp = NULL; + inp = nullptr; } - if (std != NULL) { + if (std != nullptr) { delete [] std; - std = NULL; + std = nullptr; } - if (fmt != NULL) { + if (fmt != nullptr) { delete [] fmt; - fmt = NULL; + fmt = nullptr; } - if (ctl != NULL) { + if (ctl != nullptr) { delete [] ctl; - ctl = NULL; + ctl = nullptr; } - if (buf_v4l2 != NULL) { + if (buf_v4l2 != nullptr) { delete [] buf_v4l2; - buf_v4l2 = NULL; + buf_v4l2 = nullptr; } - if (buf_me != NULL) { + if (buf_me != nullptr) { delete [] buf_me; - buf_me = NULL; + buf_me = nullptr; } } @@ -912,29 +912,29 @@ void vpV4l2Grabber::open() throw(vpFrameGrabberException(vpFrameGrabberException::initializationError, "Can't access to video device")); } - if (inp != NULL) { + if (inp != nullptr) { delete [] inp; - inp = NULL; + inp = nullptr; } - if (std != NULL) { + if (std != nullptr) { delete [] std; - std = NULL; + std = nullptr; } - if (fmt != NULL) { + if (fmt != nullptr) { delete [] fmt; - fmt = NULL; + fmt = nullptr; } - if (ctl != NULL) { + if (ctl != nullptr) { delete [] ctl; - ctl = NULL; + ctl = nullptr; } - if (buf_v4l2 != NULL) { + if (buf_v4l2 != nullptr) { delete [] buf_v4l2; - buf_v4l2 = NULL; + buf_v4l2 = nullptr; } - if (buf_me != NULL) { + if (buf_me != nullptr) { delete [] buf_me; - buf_me = NULL; + buf_me = nullptr; } inp = new struct v4l2_input[vpV4l2Grabber::MAX_INPUTS]; @@ -1194,7 +1194,7 @@ void vpV4l2Grabber::startStreaming() memcpy(&buf_me[i].fmt, &fmt_me, sizeof(ng_video_fmt)); buf_me[i].size = buf_me[i].fmt.bytesperline * buf_me[i].fmt.height; - buf_me[i].data = (unsigned char *)v4l2_mmap(NULL, buf_v4l2[i].length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, + buf_me[i].data = 
(unsigned char *)v4l2_mmap(nullptr, buf_v4l2[i].length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)buf_v4l2[i].m.offset); if (buf_me[i].data == MAP_FAILED) { @@ -1252,7 +1252,7 @@ void vpV4l2Grabber::stopStreaming() } /*! - Fill the next buffer. If all the buffers are filled return NULL. + Fill the next buffer. If all the buffers are filled return nullptr. Update the buffer index. If all the buffers are filled index is set to -1. @@ -1277,17 +1277,17 @@ unsigned char *vpV4l2Grabber::waiton(__u32 &index, struct timeval ×tamp) tv.tv_usec = 0; FD_ZERO(&rdset); FD_SET(static_cast(fd), &rdset); - switch (select(fd + 1, &rdset, NULL, NULL, &tv)) { + switch (select(fd + 1, &rdset, nullptr, nullptr, &tv)) { case -1: if (EINTR == errno) goto again; index = 0; throw(vpFrameGrabberException(vpFrameGrabberException::otherError, "Can't access to the frame")); - return NULL; + return nullptr; case 0: index = 0; throw(vpFrameGrabberException(vpFrameGrabberException::otherError, "Can't access to the frame: timeout")); - return NULL; + return nullptr; } /* get it */ @@ -1310,7 +1310,7 @@ unsigned char *vpV4l2Grabber::waiton(__u32 &index, struct timeval ×tamp) throw(vpFrameGrabberException(vpFrameGrabberException::otherError, "VIDIOC_DQBUF")); break; } - return NULL; + return nullptr; } waiton_cpt++; diff --git a/modules/sensor/src/laserscanner/sick/vpSickLDMRS.cpp b/modules/sensor/src/laserscanner/sick/vpSickLDMRS.cpp index 6037e9bd39..bd983c069a 100644 --- a/modules/sensor/src/laserscanner/sick/vpSickLDMRS.cpp +++ b/modules/sensor/src/laserscanner/sick/vpSickLDMRS.cpp @@ -66,7 +66,7 @@ body messages. */ vpSickLDMRS::vpSickLDMRS() - : socket_fd(-1), body(NULL), vAngle(), time_offset(0), isFirstMeasure(true), maxlen_body(104000) + : socket_fd(-1), body(nullptr), vAngle(), time_offset(0), isFirstMeasure(true), maxlen_body(104000) { ip = "131.254.12.119"; port = 12002; @@ -135,7 +135,7 @@ bool vpSickLDMRS::setup() tv.tv_usec = 0; FD_ZERO(&myset); FD_SET(static_cast(socket_fd), &myset); - res = select(socket_fd + 1, NULL, &myset, NULL, &tv); + res = select(socket_fd + 1, nullptr, &myset, nullptr, &tv); if (res < 0 && errno != EINTR) { fprintf(stderr, "Error connecting to server %d - %s\n", errno, strerror(errno)); return false; diff --git a/modules/sensor/src/mocap/vpMocapQualisys.cpp b/modules/sensor/src/mocap/vpMocapQualisys.cpp index 6e3d09cc19..6b88f638e8 100644 --- a/modules/sensor/src/mocap/vpMocapQualisys.cpp +++ b/modules/sensor/src/mocap/vpMocapQualisys.cpp @@ -117,7 +117,7 @@ class vpMocapQualisys::vpMocapQualisysImpl } else { for (auto i = 0; i < 6; i++) { if (!m_streamFrames) { - if (!m_rtProtocol.StreamFrames(CRTProtocol::RateAllFrames, 0, m_udpPort, NULL, CRTProtocol::cComponent6d)) { + if (!m_rtProtocol.StreamFrames(CRTProtocol::RateAllFrames, 0, m_udpPort, nullptr, CRTProtocol::cComponent6d)) { if (m_verbose) { std::cout << "Streaming frames error: " << m_rtProtocol.GetErrorString() << std::endl; } diff --git a/modules/sensor/src/rgb-depth/occipital_structure/vpOccipitalStructure.cpp b/modules/sensor/src/rgb-depth/occipital_structure/vpOccipitalStructure.cpp index b5e8ac3fc8..6e3c05d3d7 100644 --- a/modules/sensor/src/rgb-depth/occipital_structure/vpOccipitalStructure.cpp +++ b/modules/sensor/src/rgb-depth/occipital_structure/vpOccipitalStructure.cpp @@ -67,7 +67,7 @@ vpOccipitalStructure::~vpOccipitalStructure() { close(); } Acquire greyscale image from Structure Core device. \param gray : Greyscale image. \param undistorted : Set to true to get undistorted grayscale image. 
- \param ts : Image timestamp or NULL if not wanted. + \param ts : Image timestamp or nullptr if not wanted. */ void vpOccipitalStructure::acquire(vpImage &gray, bool undistorted, double *ts) { @@ -80,7 +80,7 @@ void vpOccipitalStructure::acquire(vpImage &gray, bool undistorte else memcpy(gray.bitmap, m_delegate.m_visibleFrame.undistorted().yData(), m_delegate.m_visibleFrame.ySize()); - if (ts != NULL) + if (ts != nullptr) *ts = m_delegate.m_visibleFrame.arrivalTimestamp(); } @@ -91,7 +91,7 @@ void vpOccipitalStructure::acquire(vpImage &gray, bool undistorte Acquire color image from Structure Core device. \param rgb : RGB image. \param undistorted : Set to true to get undistorted image. - \param ts : Image timestamp or NULL if not wanted. + \param ts : Image timestamp or nullptr if not wanted. */ void vpOccipitalStructure::acquire(vpImage &rgb, bool undistorted, double *ts) { @@ -117,7 +117,7 @@ void vpOccipitalStructure::acquire(vpImage &rgb, bool undistorted, doubl m_delegate.m_visibleFrame.width() * m_delegate.m_visibleFrame.height()); } - if (ts != NULL) + if (ts != nullptr) *ts = m_delegate.m_visibleFrame.arrivalTimestamp(); } @@ -126,12 +126,12 @@ void vpOccipitalStructure::acquire(vpImage &rgb, bool undistorted, doubl /*! Acquire rgb image and IMU data from Structure Core device. - \param rgb : RGB image or NULL if not wanted. - \param depth : Depth image or NULL if not wanted. - \param acceleration_data : Acceleration data or NULL if not wanted. - \param gyroscope_data : Gyroscope data or NULL if not wanted. + \param rgb : RGB image or nullptr if not wanted. + \param depth : Depth image or nullptr if not wanted. + \param acceleration_data : Acceleration data or nullptr if not wanted. + \param gyroscope_data : Gyroscope data or nullptr if not wanted. \param undistorted : Set to true to get undistorted image. - \param ts : Image timestamp or NULL if not wanted. + \param ts : Image timestamp or nullptr if not wanted. */ void vpOccipitalStructure::acquire(vpImage *rgb, vpImage *depth, vpColVector *acceleration_data, vpColVector *gyroscope_data, bool undistorted, double *ts) @@ -139,7 +139,7 @@ void vpOccipitalStructure::acquire(vpImage *rgb, vpImage *depth, std::unique_lock u(m_delegate.m_sampleLock); m_delegate.cv_sampleLock.wait(u); - if (rgb != NULL && m_delegate.m_visibleFrame.isValid()) { + if (rgb != nullptr && m_delegate.m_visibleFrame.isValid()) { // Detecting if it's a Color Structure Core device. 
if (m_delegate.m_cameraType == ST::StructureCoreCameraType::Color) vpImageConvert::RGBToRGBa(const_cast(m_delegate.m_visibleFrame.rgbData()), @@ -158,29 +158,29 @@ void vpOccipitalStructure::acquire(vpImage *rgb, vpImage *depth, m_delegate.m_visibleFrame.width() * m_delegate.m_visibleFrame.height()); } - if (ts != NULL) + if (ts != nullptr) *ts = m_delegate.m_visibleFrame.arrivalTimestamp(); } - if (depth != NULL && m_delegate.m_depthFrame.isValid()) + if (depth != nullptr && m_delegate.m_depthFrame.isValid()) memcpy((unsigned char *)depth->bitmap, m_delegate.m_depthFrame.convertDepthToRgba(), m_delegate.m_depthFrame.width() * m_delegate.m_depthFrame.height() * 4); - if (acceleration_data != NULL) { + if (acceleration_data != nullptr) { ST::Acceleration accel = m_delegate.m_accelerometerEvent.acceleration(); acceleration_data[0] = accel.x; acceleration_data[1] = accel.y; acceleration_data[2] = accel.z; } - if (gyroscope_data != NULL) { + if (gyroscope_data != nullptr) { ST::RotationRate gyro_data = m_delegate.m_gyroscopeEvent.rotationRate(); gyroscope_data[0] = gyro_data.x; gyroscope_data[1] = gyro_data.y; gyroscope_data[2] = gyro_data.z; } - if (ts != NULL) // By default, frames are synchronized. + if (ts != nullptr) // By default, frames are synchronized. *ts = m_delegate.m_visibleFrame.arrivalTimestamp(); u.unlock(); @@ -188,12 +188,12 @@ void vpOccipitalStructure::acquire(vpImage *rgb, vpImage *depth, /*! Acquire grayscale image, depth and IMU data from Structure Core device. - \param gray : Gray image or NULL if not wanted. - \param depth : Depth image or NULL if not wanted. - \param acceleration_data : Acceleration data or NULL if not wanted. - \param gyroscope_data : Gyroscope data or NULL if not wanted. + \param gray : Gray image or nullptr if not wanted. + \param depth : Depth image or nullptr if not wanted. + \param acceleration_data : Acceleration data or nullptr if not wanted. + \param gyroscope_data : Gyroscope data or nullptr if not wanted. \param undistorted : Set to true to get undistorted image. - \param ts : Image timestamp or NULL if not wanted. + \param ts : Image timestamp or nullptr if not wanted. 
*/ void vpOccipitalStructure::acquire(vpImage *gray, vpImage *depth, vpColVector *acceleration_data, vpColVector *gyroscope_data, bool undistorted, double *ts) @@ -201,35 +201,35 @@ void vpOccipitalStructure::acquire(vpImage *gray, vpImage std::unique_lock u(m_delegate.m_sampleLock); m_delegate.cv_sampleLock.wait(u); - if (gray != NULL && m_delegate.m_visibleFrame.isValid()) { + if (gray != nullptr && m_delegate.m_visibleFrame.isValid()) { if (!undistorted) memcpy(gray->bitmap, m_delegate.m_visibleFrame.yData(), m_delegate.m_visibleFrame.ySize()); else memcpy(gray->bitmap, m_delegate.m_visibleFrame.undistorted().yData(), m_delegate.m_visibleFrame.ySize()); - if (ts != NULL) + if (ts != nullptr) *ts = m_delegate.m_visibleFrame.arrivalTimestamp(); } - if (depth != NULL && m_delegate.m_depthFrame.isValid()) + if (depth != nullptr && m_delegate.m_depthFrame.isValid()) memcpy((unsigned char *)depth->bitmap, m_delegate.m_depthFrame.convertDepthToRgba(), m_delegate.m_depthFrame.width() * m_delegate.m_depthFrame.height() * 4); - if (acceleration_data != NULL) { + if (acceleration_data != nullptr) { ST::Acceleration accel = m_delegate.m_accelerometerEvent.acceleration(); acceleration_data[0] = accel.x; acceleration_data[1] = accel.y; acceleration_data[2] = accel.z; } - if (gyroscope_data != NULL) { + if (gyroscope_data != nullptr) { ST::RotationRate gyro_data = m_delegate.m_gyroscopeEvent.rotationRate(); gyroscope_data[0] = gyro_data.x; gyroscope_data[1] = gyro_data.y; gyroscope_data[2] = gyro_data.z; } - if (ts != NULL) // By default, frames are synchronized. + if (ts != nullptr) // By default, frames are synchronized. *ts = m_delegate.m_visibleFrame.arrivalTimestamp(); u.unlock(); @@ -237,14 +237,14 @@ void vpOccipitalStructure::acquire(vpImage *gray, vpImage /*! Acquire data from Structure Core device. - \param data_image : Visible image buffer or NULL if not wanted. - \param data_depth : Depth image buffer in millimeters or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. - \param data_infrared : Infrared image buffer or NULL if not wanted. - \param acceleration_data : Acceleration data or NULL if not wanted. - \param gyroscope_data : Gyroscope data or NULL if not wanted. + \param data_image : Visible image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer in millimeters or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. + \param data_infrared : Infrared image buffer or nullptr if not wanted. + \param acceleration_data : Acceleration data or nullptr if not wanted. + \param gyroscope_data : Gyroscope data or nullptr if not wanted. \param undistorted : Set to true if you want undistorted monochrome device image. - \param ts : Data timestamp or NULL if not wanted. + \param ts : Data timestamp or nullptr if not wanted. 
*/ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, unsigned char *const data_infrared, @@ -254,11 +254,11 @@ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned cha std::unique_lock u(m_delegate.m_sampleLock); m_delegate.cv_sampleLock.wait(u); - if (m_delegate.m_depthFrame.isValid() && data_depth != NULL) + if (m_delegate.m_depthFrame.isValid() && data_depth != nullptr) memcpy(data_depth, m_delegate.m_depthFrame.depthInMillimeters(), m_delegate.m_depthFrame.width() * m_delegate.m_depthFrame.height() * sizeof(float)); - if (m_delegate.m_visibleFrame.isValid() && data_image != NULL) { + if (m_delegate.m_visibleFrame.isValid() && data_image != nullptr) { if (m_delegate.m_cameraType == ST::StructureCoreCameraType::Color) vpImageConvert::RGBToRGBa(const_cast(m_delegate.m_visibleFrame.rgbData()), reinterpret_cast(data_image), @@ -277,28 +277,28 @@ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned cha } } - if (m_delegate.m_infraredFrame.isValid() && data_infrared != NULL) + if (m_delegate.m_infraredFrame.isValid() && data_infrared != nullptr) memcpy(data_infrared, m_delegate.m_infraredFrame.data(), m_delegate.m_infraredFrame.width() * m_delegate.m_infraredFrame.height() * sizeof(uint16_t)); - if (data_pointCloud != NULL) + if (data_pointCloud != nullptr) getPointcloud(*data_pointCloud); - if (acceleration_data != NULL) { + if (acceleration_data != nullptr) { ST::Acceleration accel = m_delegate.m_accelerometerEvent.acceleration(); acceleration_data[0] = accel.x; acceleration_data[1] = accel.y; acceleration_data[2] = accel.z; } - if (gyroscope_data != NULL) { + if (gyroscope_data != nullptr) { ST::RotationRate gyro_data = m_delegate.m_gyroscopeEvent.rotationRate(); gyroscope_data[0] = gyro_data.x; gyroscope_data[1] = gyro_data.y; gyroscope_data[2] = gyro_data.z; } - if (ts != NULL) // By default, frames are synchronized. + if (ts != nullptr) // By default, frames are synchronized. *ts = m_delegate.m_visibleFrame.arrivalTimestamp(); u.unlock(); @@ -307,15 +307,15 @@ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned cha #ifdef VISP_HAVE_PCL /*! Acquire data from Structure Core device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. - \param pointcloud : Point cloud (in PCL format and without texture information) pointer or NULL if not wanted. - \param data_infrared : Infrared image buffer or NULL if not wanted. - \param acceleration_data : Acceleration data or NULL if not wanted. - \param gyroscope_data : Gyroscope data or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. + \param pointcloud : Point cloud (in PCL format and without texture information) pointer or nullptr if not wanted. + \param data_infrared : Infrared image buffer or nullptr if not wanted. + \param acceleration_data : Acceleration data or nullptr if not wanted. + \param gyroscope_data : Gyroscope data or nullptr if not wanted. \param undistorted : Set to true if you want undistorted monochrome device image. - \param ts : Data timestamp or NULL if not wanted. + \param ts : Data timestamp or nullptr if not wanted. 
*/ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, @@ -345,35 +345,35 @@ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned cha } } - if (m_delegate.m_depthFrame.isValid() && data_depth != NULL) + if (m_delegate.m_depthFrame.isValid() && data_depth != nullptr) memcpy(data_depth, m_delegate.m_depthFrame.depthInMillimeters(), m_delegate.m_depthFrame.width() * m_delegate.m_depthFrame.height() * sizeof(float)); - if (m_delegate.m_infraredFrame.isValid() && data_infrared != NULL) + if (m_delegate.m_infraredFrame.isValid() && data_infrared != nullptr) memcpy(data_infrared, m_delegate.m_infraredFrame.data(), m_delegate.m_infraredFrame.width() * m_delegate.m_infraredFrame.height() * sizeof(uint16_t)); - if (data_pointCloud != NULL) + if (data_pointCloud != nullptr) getPointcloud(*data_pointCloud); - if (m_delegate.m_depthFrame.isValid() && pointcloud != NULL) + if (m_delegate.m_depthFrame.isValid() && pointcloud != nullptr) getPointcloud(pointcloud); - if (acceleration_data != NULL) { + if (acceleration_data != nullptr) { ST::Acceleration accel = m_delegate.m_accelerometerEvent.acceleration(); acceleration_data[0] = accel.x; acceleration_data[1] = accel.y; acceleration_data[2] = accel.z; } - if (gyroscope_data != NULL) { + if (gyroscope_data != nullptr) { ST::RotationRate gyro_data = m_delegate.m_gyroscopeEvent.rotationRate(); gyroscope_data[0] = gyro_data.x; gyroscope_data[1] = gyro_data.y; gyroscope_data[2] = gyro_data.z; } - if (ts != NULL) // By default, frames are synchronized. + if (ts != nullptr) // By default, frames are synchronized. *ts = m_delegate.m_visibleFrame.arrivalTimestamp(); u.unlock(); @@ -381,15 +381,15 @@ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned cha /*! Acquire data from Structure Core device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. - \param pointcloud : Point cloud (in PCL format) pointer or NULL if not wanted. - \param data_infrared : Infrared image buffer or NULL if not wanted. - \param acceleration_data : Acceleration data or NULL if not wanted. - \param gyroscope_data : Gyroscope data or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. + \param pointcloud : Point cloud (in PCL format) pointer or nullptr if not wanted. + \param data_infrared : Infrared image buffer or nullptr if not wanted. + \param acceleration_data : Acceleration data or nullptr if not wanted. + \param gyroscope_data : Gyroscope data or nullptr if not wanted. \param undistorted : Set to true if you want undistorted monochrome device image. - \param ts : Data timestamp or NULL if not wanted. + \param ts : Data timestamp or nullptr if not wanted. 
*/ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, @@ -400,11 +400,11 @@ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned cha std::unique_lock u(m_delegate.m_sampleLock); m_delegate.cv_sampleLock.wait(u); - if (m_delegate.m_depthFrame.isValid() && data_depth != NULL) + if (m_delegate.m_depthFrame.isValid() && data_depth != nullptr) memcpy(data_depth, m_delegate.m_depthFrame.depthInMillimeters(), m_delegate.m_depthFrame.width() * m_delegate.m_depthFrame.height() * sizeof(float)); - if (m_delegate.m_visibleFrame.isValid() && data_image != NULL) { + if (m_delegate.m_visibleFrame.isValid() && data_image != nullptr) { if (m_delegate.m_cameraType == ST::StructureCoreCameraType::Color) vpImageConvert::RGBToRGBa(const_cast(m_delegate.m_visibleFrame.rgbData()), reinterpret_cast(data_image), @@ -423,31 +423,31 @@ void vpOccipitalStructure::acquire(unsigned char *const data_image, unsigned cha } } - if (m_delegate.m_infraredFrame.isValid() && data_infrared != NULL) + if (m_delegate.m_infraredFrame.isValid() && data_infrared != nullptr) memcpy(data_infrared, m_delegate.m_infraredFrame.data(), m_delegate.m_infraredFrame.width() * m_delegate.m_infraredFrame.height() * sizeof(uint16_t)); - if (m_delegate.m_depthFrame.isValid() && data_pointCloud != NULL) + if (m_delegate.m_depthFrame.isValid() && data_pointCloud != nullptr) getPointcloud(*data_pointCloud); - if (m_delegate.m_depthFrame.isValid() && m_delegate.m_visibleFrame.isValid() && pointcloud != NULL) + if (m_delegate.m_depthFrame.isValid() && m_delegate.m_visibleFrame.isValid() && pointcloud != nullptr) getColoredPointcloud(pointcloud); - if (acceleration_data != NULL) { + if (acceleration_data != nullptr) { ST::Acceleration accel = m_delegate.m_accelerometerEvent.acceleration(); acceleration_data[0] = accel.x; acceleration_data[1] = accel.y; acceleration_data[2] = accel.z; } - if (gyroscope_data != NULL) { + if (gyroscope_data != nullptr) { ST::RotationRate gyro_data = m_delegate.m_gyroscopeEvent.rotationRate(); gyroscope_data[0] = gyro_data.x; gyroscope_data[1] = gyro_data.y; gyroscope_data[2] = gyro_data.z; } - if (ts != NULL) // By default, frames are synchronized. + if (ts != nullptr) // By default, frames are synchronized. 
*ts = m_delegate.m_visibleFrame.arrivalTimestamp(); u.unlock(); @@ -481,7 +481,7 @@ void vpOccipitalStructure::getIMUVelocity(vpColVector *imu_vel, double *ts) std::unique_lock u(m_delegate.m_sampleLock); m_delegate.cv_sampleLock.wait(u); - if (imu_vel != NULL) { + if (imu_vel != nullptr) { imu_vel->resize(3, false); ST::RotationRate imu_rotationRate = m_delegate.m_gyroscopeEvent.rotationRate(); (*imu_vel)[0] = imu_rotationRate.x; @@ -489,7 +489,7 @@ void vpOccipitalStructure::getIMUVelocity(vpColVector *imu_vel, double *ts) (*imu_vel)[2] = imu_rotationRate.z; } - if (ts != NULL) + if (ts != nullptr) *ts = m_delegate.m_gyroscopeEvent.arrivalTimestamp(); u.unlock(); @@ -521,7 +521,7 @@ void vpOccipitalStructure::getIMUAcceleration(vpColVector *imu_acc, double *ts) std::unique_lock u(m_delegate.m_sampleLock); m_delegate.cv_sampleLock.wait(u); - if (imu_acc != NULL) { + if (imu_acc != nullptr) { imu_acc->resize(3, false); ST::Acceleration imu_acceleration = m_delegate.m_accelerometerEvent.acceleration(); (*imu_acc)[0] = imu_acceleration.x; @@ -529,7 +529,7 @@ void vpOccipitalStructure::getIMUAcceleration(vpColVector *imu_acc, double *ts) (*imu_acc)[2] = imu_acceleration.z; } - if (ts != NULL) + if (ts != nullptr) *ts = m_delegate.m_accelerometerEvent.arrivalTimestamp(); u.unlock(); @@ -564,7 +564,7 @@ void vpOccipitalStructure::getIMUData(vpColVector *imu_vel, vpColVector *imu_acc m_delegate.cv_sampleLock.wait(u); double imu_vel_timestamp, imu_acc_timestamp; - if (imu_vel != NULL) { + if (imu_vel != nullptr) { imu_vel->resize(3, false); ST::RotationRate imu_rotationRate = m_delegate.m_gyroscopeEvent.rotationRate(); (*imu_vel)[0] = imu_rotationRate.x; @@ -574,7 +574,7 @@ void vpOccipitalStructure::getIMUData(vpColVector *imu_vel, vpColVector *imu_acc m_delegate.m_gyroscopeEvent.arrivalTimestamp(); // Relative to an unspecified epoch. (see documentation). } - if (imu_acc != NULL) { + if (imu_acc != nullptr) { imu_acc->resize(3, false); ST::Acceleration imu_acceleration = m_delegate.m_accelerometerEvent.acceleration(); (*imu_acc)[0] = imu_acceleration.x; @@ -583,7 +583,7 @@ void vpOccipitalStructure::getIMUData(vpColVector *imu_vel, vpColVector *imu_acc imu_acc_timestamp = m_delegate.m_accelerometerEvent.arrivalTimestamp(); } - if (ts != NULL) + if (ts != nullptr) *ts = std::max(imu_vel_timestamp, imu_acc_timestamp); u.unlock(); diff --git a/modules/sensor/src/rgb-depth/realsense/vpRealSense.cpp b/modules/sensor/src/rgb-depth/realsense/vpRealSense.cpp index b1b084f5af..f0605385d8 100644 --- a/modules/sensor/src/rgb-depth/realsense/vpRealSense.cpp +++ b/modules/sensor/src/rgb-depth/realsense/vpRealSense.cpp @@ -47,7 +47,7 @@ * Default constructor. */ vpRealSense::vpRealSense() - : m_context(), m_device(NULL), m_num_devices(0), m_serial_no(), m_intrinsics(), m_max_Z(8), m_enableStreams(), + : m_context(), m_device(nullptr), m_num_devices(0), m_serial_no(), m_intrinsics(), m_max_Z(8), m_enableStreams(), m_useStreamPresets(), m_streamPresets(), m_streamParams(), m_invalidDepthValue(0.0f) { initStream(); @@ -120,7 +120,7 @@ void vpRealSense::open() } // Exit with error if no camera is detected that matches the input serial // number. 
- if ((!m_serial_no.empty()) && (m_device == NULL)) { + if ((!m_serial_no.empty()) && (m_device == nullptr)) { throw vpException(vpException::fatalError, "RealSense Camera - No camera detected with input " "serial_no \"%s\" Exiting!", @@ -245,7 +245,7 @@ void vpRealSense::close() if (m_device->is_streaming()) { m_device->stop(); } - m_device = NULL; + m_device = nullptr; } } @@ -376,7 +376,7 @@ vpHomogeneousMatrix vpRealSense::getTransformation(const rs::stream &from, const */ void vpRealSense::acquire(vpImage &grey) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -398,7 +398,7 @@ void vpRealSense::acquire(vpImage &grey) */ void vpRealSense::acquire(vpImage &grey, std::vector &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -422,7 +422,7 @@ void vpRealSense::acquire(vpImage &grey, std::vector */ void vpRealSense::acquire(std::vector &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -441,7 +441,7 @@ void vpRealSense::acquire(std::vector &pointcloud) */ void vpRealSense::acquire(vpImage &color) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -466,7 +466,7 @@ void vpRealSense::acquire(vpImage &color) void vpRealSense::acquire(vpImage &color, vpImage &infrared, vpImage &depth, std::vector &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -500,7 +500,7 @@ void vpRealSense::acquire(vpImage &color, vpImage &infrared, v void vpRealSense::acquire(vpImage &grey, vpImage &infrared, vpImage &depth, std::vector &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -531,7 +531,7 @@ void vpRealSense::acquire(vpImage &grey, vpImage &infra */ void vpRealSense::acquire(vpImage &color, std::vector &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -549,12 +549,12 @@ void vpRealSense::acquire(vpImage &color, std::vector &poin /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. - \param data_infrared : Infrared image buffer or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. + \param data_infrared : Infrared image buffer or nullptr if not wanted. \param data_infrared2 : Infrared (for the second IR camera if available) - image buffer or NULL if not wanted. \param stream_color : Type of color + image buffer or nullptr if not wanted. \param stream_color : Type of color stream (e.g. 
rs::stream::color, rs::stream::rectified_color, rs::stream::color_aligned_to_depth). \param stream_depth : Type of depth stream (e.g. rs::stream::depth, rs::stream::depth_aligned_to_color, @@ -570,7 +570,7 @@ void vpRealSense::acquire(unsigned char *const data_image, unsigned char *const const rs::stream &stream_depth, const rs::stream &stream_infrared, const rs::stream &stream_infrared2) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } @@ -580,23 +580,23 @@ void vpRealSense::acquire(unsigned char *const data_image, unsigned char *const m_device->wait_for_frames(); - if (data_image != NULL) { + if (data_image != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::color, data_image, stream_color); } - if (data_depth != NULL) { + if (data_depth != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::depth, data_depth, stream_depth); - if (data_pointCloud != NULL) { + if (data_pointCloud != nullptr) { vp_rs_get_pointcloud_impl(m_device, m_intrinsics, m_max_Z, *data_pointCloud, m_invalidDepthValue, stream_depth); } } - if (data_infrared != NULL) { + if (data_infrared != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::infrared, data_infrared, stream_infrared); } - if (data_infrared2 != NULL) { + if (data_infrared2 != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::infrared2, data_infrared2, stream_infrared2); } } @@ -677,7 +677,7 @@ void vpRealSense::setEnableStream(const rs::stream &stream, bool status) { m_ena */ void vpRealSense::acquire(pcl::PointCloud::Ptr &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -696,7 +696,7 @@ void vpRealSense::acquire(pcl::PointCloud::Ptr &pointcloud) */ void vpRealSense::acquire(pcl::PointCloud::Ptr &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -716,7 +716,7 @@ void vpRealSense::acquire(pcl::PointCloud::Ptr &pointcloud) */ void vpRealSense::acquire(vpImage &grey, pcl::PointCloud::Ptr &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -739,7 +739,7 @@ void vpRealSense::acquire(vpImage &grey, pcl::PointCloud &color, pcl::PointCloud::Ptr &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -765,7 +765,7 @@ void vpRealSense::acquire(vpImage &color, pcl::PointCloud void vpRealSense::acquire(vpImage &color, vpImage &infrared, vpImage &depth, pcl::PointCloud::Ptr &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -797,7 +797,7 @@ void vpRealSense::acquire(vpImage &color, vpImage &infrared, v void vpRealSense::acquire(vpImage &grey, vpImage &infrared, vpImage &depth, pcl::PointCloud::Ptr &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if 
(!m_device->is_streaming()) { @@ -829,7 +829,7 @@ void vpRealSense::acquire(vpImage &grey, vpImage &infra void vpRealSense::acquire(vpImage &color, vpImage &infrared, vpImage &depth, pcl::PointCloud::Ptr &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -861,7 +861,7 @@ void vpRealSense::acquire(vpImage &color, vpImage &infrared, v void vpRealSense::acquire(vpImage &grey, vpImage &infrared, vpImage &depth, pcl::PointCloud::Ptr &pointcloud) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } if (!m_device->is_streaming()) { @@ -885,13 +885,13 @@ void vpRealSense::acquire(vpImage &grey, vpImage &infra /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. \param pointcloud : Point cloud (in PCL format and without texture - information) pointer or NULL if not wanted. \param data_infrared : Infrared - image buffer or NULL if not wanted. \param data_infrared2 : Infrared (for - the second IR camera if available) image buffer or NULL if not wanted. + information) pointer or nullptr if not wanted. \param data_infrared : Infrared + image buffer or nullptr if not wanted. \param data_infrared2 : Infrared (for + the second IR camera if available) image buffer or nullptr if not wanted. \param stream_color : Type of color stream (e.g. rs::stream::color, rs::stream::rectified_color, rs::stream::color_aligned_to_depth). \param stream_depth : Type of depth stream (e.g. 
rs::stream::depth, @@ -909,7 +909,7 @@ void vpRealSense::acquire(unsigned char *const data_image, unsigned char *const const rs::stream &stream_depth, const rs::stream &stream_infrared, const rs::stream &stream_infrared2) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } @@ -919,40 +919,40 @@ void vpRealSense::acquire(unsigned char *const data_image, unsigned char *const m_device->wait_for_frames(); - if (data_image != NULL) { + if (data_image != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::color, data_image, stream_color); } - if (data_depth != NULL) { + if (data_depth != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::depth, data_depth, stream_depth); } - if (data_pointCloud != NULL) { + if (data_pointCloud != nullptr) { vp_rs_get_pointcloud_impl(m_device, m_intrinsics, m_max_Z, *data_pointCloud, m_invalidDepthValue, stream_depth); } - if (pointcloud != NULL) { + if (pointcloud != nullptr) { vp_rs_get_pointcloud_impl(m_device, m_intrinsics, m_max_Z, pointcloud, m_invalidDepthValue, stream_depth); } - if (data_infrared != NULL) { + if (data_infrared != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::infrared, data_infrared, stream_infrared); } - if (data_infrared2 != NULL) { + if (data_infrared2 != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::infrared2, data_infrared2, stream_infrared2); } } /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. \param pointcloud : Point cloud (in PCL format and with texture information) - pointer or NULL if not wanted. \param data_infrared : Infrared image buffer - or NULL if not wanted. \param data_infrared2 : Infrared (for the second IR - camera if available) image buffer or NULL if not wanted. \param stream_color + pointer or nullptr if not wanted. \param data_infrared : Infrared image buffer + or nullptr if not wanted. \param data_infrared2 : Infrared (for the second IR + camera if available) image buffer or nullptr if not wanted. \param stream_color : Type of color stream (e.g. rs::stream::color, rs::stream::rectified_color, rs::stream::color_aligned_to_depth). \param stream_depth : Type of depth stream (e.g. 
rs::stream::depth, rs::stream::depth_aligned_to_color, @@ -969,7 +969,7 @@ void vpRealSense::acquire(unsigned char *const data_image, unsigned char *const const rs::stream &stream_depth, const rs::stream &stream_infrared, const rs::stream &stream_infrared2) { - if (m_device == NULL) { + if (m_device == nullptr) { throw vpException(vpException::fatalError, "RealSense Camera - Device not opened!"); } @@ -979,28 +979,28 @@ void vpRealSense::acquire(unsigned char *const data_image, unsigned char *const m_device->wait_for_frames(); - if (data_image != NULL) { + if (data_image != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::color, data_image, stream_color); } - if (data_depth != NULL) { + if (data_depth != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::depth, data_depth, stream_depth); } - if (data_pointCloud != NULL) { + if (data_pointCloud != nullptr) { vp_rs_get_pointcloud_impl(m_device, m_intrinsics, m_max_Z, *data_pointCloud, m_invalidDepthValue, stream_depth); } - if (pointcloud != NULL) { + if (pointcloud != nullptr) { vp_rs_get_pointcloud_impl(m_device, m_intrinsics, m_max_Z, pointcloud, m_invalidDepthValue, stream_color, stream_depth); } - if (data_infrared != NULL) { + if (data_infrared != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::infrared, data_infrared, stream_infrared); } - if (data_infrared2 != NULL) { + if (data_infrared2 != nullptr) { vp_rs_get_native_frame_data_impl(m_device, m_intrinsics, rs::stream::infrared2, data_infrared2, stream_infrared2); } } diff --git a/modules/sensor/src/rgb-depth/realsense/vpRealSense2.cpp b/modules/sensor/src/rgb-depth/realsense/vpRealSense2.cpp index 4ed0468df4..93a918cf3a 100644 --- a/modules/sensor/src/rgb-depth/realsense/vpRealSense2.cpp +++ b/modules/sensor/src/rgb-depth/realsense/vpRealSense2.cpp @@ -82,14 +82,14 @@ vpRealSense2::~vpRealSense2() { close(); } /*! Acquire greyscale image from RealSense device. \param grey : Greyscale image. - \param ts : Image timestamp or NULL if not wanted. + \param ts : Image timestamp or nullptr if not wanted. */ void vpRealSense2::acquire(vpImage &grey, double *ts) { auto data = m_pipe.wait_for_frames(); auto color_frame = data.get_color_frame(); getGreyFrame(color_frame, grey); - if (ts != NULL) { + if (ts != nullptr) { *ts = data.get_timestamp(); } } @@ -97,46 +97,46 @@ void vpRealSense2::acquire(vpImage &grey, double *ts) /*! Acquire color image from RealSense device. \param color : Color image. - \param ts : Image timestamp or NULL if not wanted. + \param ts : Image timestamp or nullptr if not wanted. */ void vpRealSense2::acquire(vpImage &color, double *ts) { auto data = m_pipe.wait_for_frames(); auto color_frame = data.get_color_frame(); getColorFrame(color_frame, color); - if (ts != NULL) { + if (ts != nullptr) { *ts = data.get_timestamp(); } } /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. - \param data_infrared : Infrared image buffer or NULL if not wanted. - \param align_to : Align to a reference stream or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. 
+ \param data_infrared : Infrared image buffer or nullptr if not wanted. + \param align_to : Align to a reference stream or nullptr if not wanted. Only depth and color streams can be aligned. - \param ts : Image timestamp or NULL if not wanted. + \param ts : Image timestamp or nullptr if not wanted. */ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, unsigned char *const data_infrared, rs2::align *const align_to, double *ts) { - acquire(data_image, data_depth, data_pointCloud, data_infrared, NULL, align_to, ts); + acquire(data_image, data_depth, data_pointCloud, data_infrared, nullptr, align_to, ts); } /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. - \param data_infrared1 : First infrared image buffer or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. + \param data_infrared1 : First infrared image buffer or nullptr if not wanted. \param data_infrared2 : Second infrared image buffer (if supported by the device) - or NULL if not wanted. - \param align_to : Align to a reference stream or NULL if not wanted. + or nullptr if not wanted. + \param align_to : Align to a reference stream or nullptr if not wanted. Only depth and color streams can be aligned. - \param ts : Data timestamp or NULL if not wanted. + \param ts : Data timestamp or nullptr if not wanted. The following code shows how to use this function to get color, infrared 1 and infrared 2 frames acquired by a D435 device: @@ -169,7 +169,7 @@ int main() { #endif while (true) { - rs.acquire((unsigned char *) Ic.bitmap, NULL, NULL, Ii1.bitmap, Ii2.bitmap, NULL); + rs.acquire((unsigned char *) Ic.bitmap, nullptr, nullptr, Ii1.bitmap, Ii2.bitmap, nullptr); vpDisplay::display(Ic); vpDisplay::display(Ii1); vpDisplay::display(Ii2); @@ -188,7 +188,7 @@ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const unsigned char *const data_infrared2, rs2::align *const align_to, double *ts) { auto data = m_pipe.wait_for_frames(); - if (align_to != NULL) { + if (align_to != nullptr) { // Infrared stream is not aligned // see https://github.com/IntelRealSense/librealsense/issues/1556#issuecomment-384919994 #if (RS2_API_VERSION > ((2 * 10000) + (9 * 100) + 0)) @@ -198,33 +198,33 @@ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const #endif } - if (data_image != NULL) { + if (data_image != nullptr) { auto color_frame = data.get_color_frame(); getNativeFrameData(color_frame, data_image); } - if (data_depth != NULL || data_pointCloud != NULL) { + if (data_depth != nullptr || data_pointCloud != nullptr) { auto depth_frame = data.get_depth_frame(); - if (data_depth != NULL) { + if (data_depth != nullptr) { getNativeFrameData(depth_frame, data_depth); } - if (data_pointCloud != NULL) { + if (data_pointCloud != nullptr) { getPointcloud(depth_frame, *data_pointCloud); } } - if (data_infrared1 != NULL) { + if (data_infrared1 != nullptr) { auto infrared_frame = data.first(RS2_STREAM_INFRARED); getNativeFrameData(infrared_frame, data_infrared1); } - if (data_infrared2 != NULL) { + if (data_infrared2 != nullptr) { auto infrared_frame = 
data.get_infrared_frame(2); getNativeFrameData(infrared_frame, data_infrared2); } - if (ts != NULL) { + if (ts != nullptr) { *ts = data.get_timestamp(); } } @@ -236,7 +236,7 @@ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const \param right : Right image. \param ts : Data timestamp. - Pass NULL to one of these parameters if you don't want the corresponding data. + Pass nullptr to one of these parameters if you don't want the corresponding data. For example if you are only interested in the right fisheye image, use: \code @@ -247,14 +247,14 @@ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const rs.open(config); vpImage I_right; - rs.acquire(NULL, &I_right, NULL, NULL); + rs.acquire(nullptr, &I_right, nullptr, nullptr); \endcode */ void vpRealSense2::acquire(vpImage *left, vpImage *right, double *ts) { auto data = m_pipe.wait_for_frames(); - if (left != NULL) { + if (left != nullptr) { auto left_fisheye_frame = data.get_fisheye_frame(1); unsigned int width = static_cast(left_fisheye_frame.get_width()); unsigned int height = static_cast(left_fisheye_frame.get_height()); @@ -262,7 +262,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage getNativeFrameData(left_fisheye_frame, (*left).bitmap); } - if (right != NULL) { + if (right != nullptr) { auto right_fisheye_frame = data.get_fisheye_frame(2); unsigned int width = static_cast(right_fisheye_frame.get_width()); unsigned int height = static_cast(right_fisheye_frame.get_height()); @@ -270,7 +270,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage getNativeFrameData(right_fisheye_frame, (*right).bitmap); } - if (ts != NULL) { + if (ts != nullptr) { *ts = data.get_timestamp(); } } @@ -286,7 +286,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage \param confidence : Pose estimation confidence (1: Low, 2: Medium, 3: High). \param ts : Data timestamp. - Pass NULL to one of these parameters if you don't want the corresponding data. + Pass nullptr to one of these parameters if you don't want the corresponding data. 
For example if you are only interested in the pose, use: \code @@ -300,7 +300,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage rs.open(config); vpHomogeneousMatrix cMw; - rs.acquire(NULL, NULL, &cMw, NULL, NULL, NULL, NULL); + rs.acquire(nullptr, nullptr, &cMw, nullptr, nullptr, nullptr, nullptr); \endcode */ void vpRealSense2::acquire(vpImage *left, vpImage *right, vpHomogeneousMatrix *cMw, @@ -308,7 +308,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage { auto data = m_pipe.wait_for_frames(); - if (left != NULL) { + if (left != nullptr) { auto left_fisheye_frame = data.get_fisheye_frame(1); unsigned int width = static_cast(left_fisheye_frame.get_width()); unsigned int height = static_cast(left_fisheye_frame.get_height()); @@ -316,7 +316,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage getNativeFrameData(left_fisheye_frame, (*left).bitmap); } - if (right != NULL) { + if (right != nullptr) { auto right_fisheye_frame = data.get_fisheye_frame(2); unsigned int width = static_cast(right_fisheye_frame.get_width()); unsigned int height = static_cast(right_fisheye_frame.get_height()); @@ -327,11 +327,11 @@ void vpRealSense2::acquire(vpImage *left, vpImage auto pose_frame = data.first_or_default(RS2_STREAM_POSE); auto pose_data = pose_frame.as().get_pose_data(); - if (ts != NULL) { + if (ts != nullptr) { *ts = data.get_timestamp(); } - if (cMw != NULL) { + if (cMw != nullptr) { m_pos[0] = static_cast(pose_data.translation.x); m_pos[1] = static_cast(pose_data.translation.y); m_pos[2] = static_cast(pose_data.translation.z); @@ -344,7 +344,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage *cMw = vpHomogeneousMatrix(m_pos, m_quat); } - if (odo_vel != NULL) { + if (odo_vel != nullptr) { odo_vel->resize(6, false); (*odo_vel)[0] = static_cast(pose_data.velocity.x); (*odo_vel)[1] = static_cast(pose_data.velocity.y); @@ -354,7 +354,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage (*odo_vel)[5] = static_cast(pose_data.angular_velocity.z); } - if (odo_acc != NULL) { + if (odo_acc != nullptr) { odo_acc->resize(6, false); (*odo_acc)[0] = static_cast(pose_data.acceleration.x); (*odo_acc)[1] = static_cast(pose_data.acceleration.y); @@ -364,7 +364,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage (*odo_acc)[5] = static_cast(pose_data.angular_acceleration.z); } - if (confidence != NULL) { + if (confidence != nullptr) { *confidence = pose_data.tracker_confidence; } } @@ -388,7 +388,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage { auto data = m_pipe.wait_for_frames(); - if (left != NULL) { + if (left != nullptr) { auto left_fisheye_frame = data.get_fisheye_frame(1); unsigned int width = static_cast(left_fisheye_frame.get_width()); unsigned int height = static_cast(left_fisheye_frame.get_height()); @@ -396,7 +396,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage getNativeFrameData(left_fisheye_frame, (*left).bitmap); } - if (right != NULL) { + if (right != nullptr) { auto right_fisheye_frame = data.get_fisheye_frame(2); unsigned int width = static_cast(right_fisheye_frame.get_width()); unsigned int height = static_cast(right_fisheye_frame.get_height()); @@ -407,11 +407,11 @@ void vpRealSense2::acquire(vpImage *left, vpImage auto pose_frame = data.first_or_default(RS2_STREAM_POSE); auto pose_data = pose_frame.as().get_pose_data(); - if (ts != NULL) { + if (ts != nullptr) { *ts = data.get_timestamp(); } - if (cMw != NULL) { + if (cMw != nullptr) { m_pos[0] = static_cast(pose_data.translation.x); m_pos[1] = static_cast(pose_data.translation.y); m_pos[2] = 
static_cast(pose_data.translation.z); @@ -424,7 +424,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage *cMw = vpHomogeneousMatrix(m_pos, m_quat); } - if (odo_vel != NULL) { + if (odo_vel != nullptr) { odo_vel->resize(6, false); (*odo_vel)[0] = static_cast(pose_data.velocity.x); (*odo_vel)[1] = static_cast(pose_data.velocity.y); @@ -434,7 +434,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage (*odo_vel)[5] = static_cast(pose_data.angular_velocity.z); } - if (odo_acc != NULL) { + if (odo_acc != nullptr) { odo_acc->resize(6, false); (*odo_acc)[0] = static_cast(pose_data.acceleration.x); (*odo_acc)[1] = static_cast(pose_data.acceleration.y); @@ -447,7 +447,7 @@ void vpRealSense2::acquire(vpImage *left, vpImage auto accel_frame = data.first_or_default(RS2_STREAM_ACCEL); auto accel_data = accel_frame.as().get_motion_data(); - if (imu_acc != NULL) { + if (imu_acc != nullptr) { imu_acc->resize(3, false); (*imu_acc)[0] = static_cast(accel_data.x); (*imu_acc)[1] = static_cast(accel_data.y); @@ -457,14 +457,14 @@ void vpRealSense2::acquire(vpImage *left, vpImage auto gyro_frame = data.first_or_default(RS2_STREAM_GYRO); auto gyro_data = gyro_frame.as().get_motion_data(); - if (imu_vel != NULL) { + if (imu_vel != nullptr) { imu_vel->resize(3, false); (*imu_vel)[0] = static_cast(gyro_data.x); (*imu_vel)[1] = static_cast(gyro_data.y); (*imu_vel)[2] = static_cast(gyro_data.z); } - if (confidence != NULL) { + if (confidence != nullptr) { *confidence = pose_data.tracker_confidence; } } @@ -473,37 +473,37 @@ void vpRealSense2::acquire(vpImage *left, vpImage #ifdef VISP_HAVE_PCL /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. \param pointcloud : Point cloud (in PCL format and without texture - information) pointer or NULL if not wanted. - \param data_infrared : Infrared image buffer or NULL if not wanted. - \param align_to : Align to a reference stream or NULL if not wanted. + information) pointer or nullptr if not wanted. + \param data_infrared : Infrared image buffer or nullptr if not wanted. + \param align_to : Align to a reference stream or nullptr if not wanted. Only depth and color streams can be aligned. - \param ts : Data timestamp or NULL if not wanted. + \param ts : Data timestamp or nullptr if not wanted. */ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, unsigned char *const data_infrared, rs2::align *const align_to, double *ts) { - acquire(data_image, data_depth, data_pointCloud, pointcloud, data_infrared, NULL, align_to, ts); + acquire(data_image, data_depth, data_pointCloud, pointcloud, data_infrared, nullptr, align_to, ts); } /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. 
+ \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. \param pointcloud : Point cloud (in PCL format and without texture - information) pointer or NULL if not wanted. - \param data_infrared1 : First infrared image buffer or NULL if not wanted. + information) pointer or nullptr if not wanted. + \param data_infrared1 : First infrared image buffer or nullptr if not wanted. \param data_infrared2 : Second infrared image (if supported by the device) - buffer or NULL if not wanted. - \param align_to : Align to a reference stream or NULL if not wanted. + buffer or nullptr if not wanted. + \param align_to : Align to a reference stream or nullptr if not wanted. Only depth and color streams can be aligned. - \param ts : Data timestamp or NULL if not wanted. + \param ts : Data timestamp or nullptr if not wanted. */ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, @@ -511,7 +511,7 @@ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const unsigned char *const data_infrared2, rs2::align *const align_to, double *ts) { auto data = m_pipe.wait_for_frames(); - if (align_to != NULL) { + if (align_to != nullptr) { // Infrared stream is not aligned // see https://github.com/IntelRealSense/librealsense/issues/1556#issuecomment-384919994 #if (RS2_API_VERSION > ((2 * 10000) + (9 * 100) + 0)) @@ -521,74 +521,74 @@ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const #endif } - if (data_image != NULL) { + if (data_image != nullptr) { auto color_frame = data.get_color_frame(); getNativeFrameData(color_frame, data_image); } - if (data_depth != NULL || data_pointCloud != NULL || pointcloud != NULL) { + if (data_depth != nullptr || data_pointCloud != nullptr || pointcloud != nullptr) { auto depth_frame = data.get_depth_frame(); - if (data_depth != NULL) { + if (data_depth != nullptr) { getNativeFrameData(depth_frame, data_depth); } - if (data_pointCloud != NULL) { + if (data_pointCloud != nullptr) { getPointcloud(depth_frame, *data_pointCloud); } - if (pointcloud != NULL) { + if (pointcloud != nullptr) { getPointcloud(depth_frame, pointcloud); } } - if (data_infrared1 != NULL) { + if (data_infrared1 != nullptr) { auto infrared_frame = data.first(RS2_STREAM_INFRARED); getNativeFrameData(infrared_frame, data_infrared1); } - if (data_infrared2 != NULL) { + if (data_infrared2 != nullptr) { auto infrared_frame = data.get_infrared_frame(2); getNativeFrameData(infrared_frame, data_infrared2); } - if (ts != NULL) { + if (ts != nullptr) { *ts = data.get_timestamp(); } } /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. \param pointcloud : Point cloud (in PCL format and with texture information) - pointer or NULL if not wanted. - \param data_infrared : Infrared image buffer or NULL if not wanted. - \param align_to : Align to a reference stream or NULL if not wanted. + pointer or nullptr if not wanted. + \param data_infrared : Infrared image buffer or nullptr if not wanted. + \param align_to : Align to a reference stream or nullptr if not wanted. 
Only depth and color streams can be aligned. - \param ts : Data timestamp or NULL if not wanted. + \param ts : Data timestamp or nullptr if not wanted. */ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, pcl::PointCloud::Ptr &pointcloud, unsigned char *const data_infrared, rs2::align *const align_to, double *ts) { - acquire(data_image, data_depth, data_pointCloud, pointcloud, data_infrared, NULL, align_to, ts); + acquire(data_image, data_depth, data_pointCloud, pointcloud, data_infrared, nullptr, align_to, ts); } /*! Acquire data from RealSense device. - \param data_image : Color image buffer or NULL if not wanted. - \param data_depth : Depth image buffer or NULL if not wanted. - \param data_pointCloud : Point cloud vector pointer or NULL if not wanted. + \param data_image : Color image buffer or nullptr if not wanted. + \param data_depth : Depth image buffer or nullptr if not wanted. + \param data_pointCloud : Point cloud vector pointer or nullptr if not wanted. \param pointcloud : Point cloud (in PCL format and with texture information) - pointer or NULL if not wanted. - \param data_infrared1 : First infrared image buffer or NULL if not wanted. + pointer or nullptr if not wanted. + \param data_infrared1 : First infrared image buffer or nullptr if not wanted. \param data_infrared2 : Second infrared image (if supported by the device) - buffer or NULL if not wanted. - \param align_to : Align to a reference stream or NULL if not wanted. + buffer or nullptr if not wanted. + \param align_to : Align to a reference stream or nullptr if not wanted. Only depth and color streams can be aligned. - \param ts : Data timestamp or NULL if not wanted. + \param ts : Data timestamp or nullptr if not wanted. 
*/ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth, std::vector *const data_pointCloud, @@ -596,7 +596,7 @@ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const unsigned char *const data_infrared2, rs2::align *const align_to, double *ts) { auto data = m_pipe.wait_for_frames(); - if (align_to != NULL) { + if (align_to != nullptr) { // Infrared stream is not aligned // see https://github.com/IntelRealSense/librealsense/issues/1556#issuecomment-384919994 #if (RS2_API_VERSION > ((2 * 10000) + (9 * 100) + 0)) @@ -607,36 +607,36 @@ void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const } auto color_frame = data.get_color_frame(); - if (data_image != NULL) { + if (data_image != nullptr) { getNativeFrameData(color_frame, data_image); } - if (data_depth != NULL || data_pointCloud != NULL || pointcloud != NULL) { + if (data_depth != nullptr || data_pointCloud != nullptr || pointcloud != nullptr) { auto depth_frame = data.get_depth_frame(); - if (data_depth != NULL) { + if (data_depth != nullptr) { getNativeFrameData(depth_frame, data_depth); } - if (data_pointCloud != NULL) { + if (data_pointCloud != nullptr) { getPointcloud(depth_frame, *data_pointCloud); } - if (pointcloud != NULL) { + if (pointcloud != nullptr) { getPointcloud(depth_frame, color_frame, pointcloud); } } - if (data_infrared1 != NULL) { + if (data_infrared1 != nullptr) { auto infrared_frame = data.first(RS2_STREAM_INFRARED); getNativeFrameData(infrared_frame, data_infrared1); } - if (data_infrared2 != NULL) { + if (data_infrared2 != nullptr) { auto infrared_frame = data.get_infrared_frame(2); getNativeFrameData(infrared_frame, data_infrared2); } - if (ts != NULL) { + if (ts != nullptr) { *ts = data.get_timestamp(); } } @@ -1187,10 +1187,10 @@ unsigned int vpRealSense2::getOdometryData(vpHomogeneousMatrix *cMw, vpColVector auto f = frame.first_or_default(RS2_STREAM_POSE); auto pose_data = f.as().get_pose_data(); - if (ts != NULL) + if (ts != nullptr) *ts = frame.get_timestamp(); - if (cMw != NULL) { + if (cMw != nullptr) { m_pos[0] = static_cast(pose_data.translation.x); m_pos[1] = static_cast(pose_data.translation.y); m_pos[2] = static_cast(pose_data.translation.z); @@ -1203,7 +1203,7 @@ unsigned int vpRealSense2::getOdometryData(vpHomogeneousMatrix *cMw, vpColVector *cMw = vpHomogeneousMatrix(m_pos, m_quat); } - if (odo_vel != NULL) { + if (odo_vel != nullptr) { odo_vel->resize(6, false); (*odo_vel)[0] = static_cast(pose_data.velocity.x); (*odo_vel)[1] = static_cast(pose_data.velocity.y); @@ -1213,7 +1213,7 @@ unsigned int vpRealSense2::getOdometryData(vpHomogeneousMatrix *cMw, vpColVector (*odo_vel)[5] = static_cast(pose_data.angular_velocity.z); } - if (odo_acc != NULL) { + if (odo_acc != nullptr) { odo_acc->resize(6, false); (*odo_acc)[0] = static_cast(pose_data.acceleration.x); (*odo_acc)[1] = static_cast(pose_data.acceleration.y); @@ -1252,10 +1252,10 @@ void vpRealSense2::getIMUAcceleration(vpColVector *imu_acc, double *ts) auto f = frame.first_or_default(RS2_STREAM_ACCEL); auto imu_acc_data = f.as().get_motion_data(); - if (ts != NULL) + if (ts != nullptr) *ts = f.get_timestamp(); - if (imu_acc != NULL) { + if (imu_acc != nullptr) { imu_acc->resize(3, false); (*imu_acc)[0] = static_cast(imu_acc_data.x); (*imu_acc)[1] = static_cast(imu_acc_data.y); @@ -1289,10 +1289,10 @@ void vpRealSense2::getIMUVelocity(vpColVector *imu_vel, double *ts) auto f = frame.first_or_default(RS2_STREAM_GYRO); auto imu_vel_data = 
f.as().get_motion_data(); - if (ts != NULL) + if (ts != nullptr) *ts = f.get_timestamp(); - if (imu_vel != NULL) { + if (imu_vel != nullptr) { imu_vel->resize(3, false); (*imu_vel)[0] = static_cast(imu_vel_data.x); (*imu_vel)[1] = static_cast(imu_vel_data.x); @@ -1324,10 +1324,10 @@ void vpRealSense2::getIMUData(vpColVector *imu_acc, vpColVector *imu_vel, double { auto data = m_pipe.wait_for_frames(); - if (ts != NULL) + if (ts != nullptr) *ts = data.get_timestamp(); - if (imu_acc != NULL) { + if (imu_acc != nullptr) { auto acc_data = data.first_or_default(RS2_STREAM_ACCEL); auto imu_acc_data = acc_data.as().get_motion_data(); @@ -1337,7 +1337,7 @@ void vpRealSense2::getIMUData(vpColVector *imu_acc, vpColVector *imu_vel, double (*imu_acc)[2] = static_cast(imu_acc_data.z); } - if (imu_vel != NULL) { + if (imu_vel != nullptr) { auto vel_data = data.first_or_default(RS2_STREAM_GYRO); auto imu_vel_data = vel_data.as().get_motion_data(); diff --git a/modules/sensor/test/force-torque/testForceTorqueAtiNetFTSensor.cpp b/modules/sensor/test/force-torque/testForceTorqueAtiNetFTSensor.cpp index f7a7cbd995..f3f93f1d66 100755 --- a/modules/sensor/test/force-torque/testForceTorqueAtiNetFTSensor.cpp +++ b/modules/sensor/test/force-torque/testForceTorqueAtiNetFTSensor.cpp @@ -82,7 +82,7 @@ int main(int argc, char **argv) } #if defined(VISP_HAVE_DISPLAY) - vpPlot *plotter = NULL; + vpPlot *plotter = nullptr; if (!opt_no_display) { plotter = new vpPlot(2, 700, 700, 100, 200, "Curves..."); plotter->initGraph(0, 3); diff --git a/modules/sensor/test/force-torque/testForceTorqueIitSensor.cpp b/modules/sensor/test/force-torque/testForceTorqueIitSensor.cpp index a8a626e1a7..ce6eea2e85 100644 --- a/modules/sensor/test/force-torque/testForceTorqueIitSensor.cpp +++ b/modules/sensor/test/force-torque/testForceTorqueIitSensor.cpp @@ -66,7 +66,7 @@ int main(int argc, char **argv) vpForceTorqueIitSensor iit_ft; #if defined(VISP_HAVE_DISPLAY) - vpPlot *plotter = NULL; + vpPlot *plotter = nullptr; if (!opt_no_display) { plotter = new vpPlot(2, 700, 700, 100, 200, "Curves..."); plotter->initGraph(0, 3); diff --git a/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_pcl.cpp b/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_pcl.cpp index d585e7d3bc..c591caed22 100644 --- a/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_pcl.cpp +++ b/modules/sensor/test/rgb-depth/testOccipitalStructure_Core_pcl.cpp @@ -85,7 +85,7 @@ int main() pcl::PointCloud::Ptr pointcloud(new pcl::PointCloud); - sc.acquire((unsigned char *)I_visible.bitmap, NULL, NULL, pointcloud); + sc.acquire((unsigned char *)I_visible.bitmap, nullptr, nullptr, pointcloud); pcl::visualization::PCLVisualizer::Ptr viewer(new pcl::visualization::PCLVisualizer("3D Viewer")); pcl::visualization::PointCloudColorHandlerRGBField rgb(pointcloud); @@ -98,7 +98,7 @@ int main() double t = vpTime::measureTimeMs(); // Acquire depth as point cloud. 
- sc.acquire((unsigned char *)I_visible.bitmap, NULL, NULL, pointcloud); + sc.acquire((unsigned char *)I_visible.bitmap, nullptr, nullptr, pointcloud); vpDisplay::display(I_visible); vpDisplay::displayText(I_visible, 15 * display_scale, 15 * display_scale, "Click to quit", vpColor::red); vpDisplay::flush(I_visible); diff --git a/modules/sensor/test/rgb-depth/testRealSense2_D435.cpp b/modules/sensor/test/rgb-depth/testRealSense2_D435.cpp index 85b2c97c06..39073eb05f 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_D435.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_D435.cpp @@ -100,7 +100,7 @@ int main(int argc, char *argv[]) rs.acquire(reinterpret_cast(color.bitmap), reinterpret_cast(depth_raw.bitmap), &pointcloud_colvector, reinterpret_cast(infrared1.bitmap), - reinterpret_cast(infrared2.bitmap), NULL); + reinterpret_cast(infrared2.bitmap), nullptr); vpImageConvert::createDepthHistogram(depth_raw, depth_color); @@ -160,7 +160,7 @@ int main(int argc, char *argv[]) double t = vpTime::measureTimeMs(); rs.acquire(reinterpret_cast(color.bitmap), reinterpret_cast(depth_raw.bitmap), - NULL, reinterpret_cast(infrared1.bitmap)); + nullptr, reinterpret_cast(infrared1.bitmap)); vpImageConvert::createDepthHistogram(depth_raw, depth_color); diff --git a/modules/sensor/test/rgb-depth/testRealSense2_D435_align.cpp b/modules/sensor/test/rgb-depth/testRealSense2_D435_align.cpp index be5ea0b7ab..ac4a01048e 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_D435_align.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_D435_align.cpp @@ -149,13 +149,13 @@ int main(int argc, char *argv[]) while (true) { if (color_pointcloud) { rs.acquire(reinterpret_cast(I_color.bitmap), - reinterpret_cast(I_depth_raw.bitmap), &vp_pointcloud, pointcloud_color, NULL, - no_align ? NULL : &align_to); + reinterpret_cast(I_depth_raw.bitmap), &vp_pointcloud, pointcloud_color, nullptr, + no_align ? nullptr : &align_to); } else { rs.acquire(reinterpret_cast(I_color.bitmap), - reinterpret_cast(I_depth_raw.bitmap), &vp_pointcloud, pointcloud, NULL, - no_align ? NULL : &align_to); + reinterpret_cast(I_depth_raw.bitmap), &vp_pointcloud, pointcloud, nullptr, + no_align ? nullptr : &align_to); } vpImageConvert::createDepthHistogram(I_depth_raw, I_depth); diff --git a/modules/sensor/test/rgb-depth/testRealSense2_D435_pcl.cpp b/modules/sensor/test/rgb-depth/testRealSense2_D435_pcl.cpp index 50b6005887..f4caef1ea4 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_D435_pcl.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_D435_pcl.cpp @@ -193,13 +193,13 @@ int main(int argc, char *argv[]) if (pcl_color) { rs.acquire(reinterpret_cast(color.bitmap), reinterpret_cast(depth_raw.bitmap), - NULL, pointcloud_color, reinterpret_cast(infrared1.bitmap), - show_infrared2 ? reinterpret_cast(infrared2.bitmap) : NULL, NULL); + nullptr, pointcloud_color, reinterpret_cast(infrared1.bitmap), + show_infrared2 ? reinterpret_cast(infrared2.bitmap) : nullptr, nullptr); } else { rs.acquire(reinterpret_cast(color.bitmap), reinterpret_cast(depth_raw.bitmap), - NULL, pointcloud, reinterpret_cast(infrared1.bitmap), - show_infrared2 ? reinterpret_cast(infrared2.bitmap) : NULL, NULL); + nullptr, pointcloud, reinterpret_cast(infrared1.bitmap), + show_infrared2 ? 
reinterpret_cast(infrared2.bitmap) : nullptr, nullptr); } update_pointcloud = true; diff --git a/modules/sensor/test/rgb-depth/testRealSense2_SR300.cpp b/modules/sensor/test/rgb-depth/testRealSense2_SR300.cpp index 12cb8b1725..d4fb566e39 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_SR300.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_SR300.cpp @@ -449,18 +449,18 @@ int main(int argc, char *argv[]) std::lock_guard lock(mutex); if (pcl_color) { - rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, NULL, pointcloud_color, + rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, nullptr, pointcloud_color, (unsigned char *)infrared.bitmap); } else { - rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, NULL, pointcloud, + rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, nullptr, pointcloud, (unsigned char *)infrared.bitmap); } update_pointcloud = true; } #else - rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, NULL, + rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, nullptr, (unsigned char *)infrared.bitmap); #endif @@ -559,7 +559,7 @@ int main(int argc, char *argv[]) t_begin = vpTime::measureTimeMs(); while (vpTime::measureTimeMs() - t_begin < 10000) { double t = vpTime::measureTimeMs(); - rs.acquire(NULL, NULL, &pointcloud_colvector, NULL, NULL); + rs.acquire(nullptr, nullptr, &pointcloud_colvector, nullptr, nullptr); { std::lock_guard lock(mutex); diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry.cpp index 666d03f38c..5f84a4d420 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_images_odometry.cpp @@ -86,7 +86,7 @@ int main() vpCameraParameters cam(300., 300., I_pose.getWidth() / 2, I_pose.getHeight() / 2); // For pose visualization - rs.acquire(&I_left, &I_right, &cMw_0, NULL, NULL, &confidence, &ts); + rs.acquire(&I_left, &I_right, &cMw_0, nullptr, nullptr, &confidence, &ts); #if defined(VISP_HAVE_X11) vpDisplayX display_left; // Left image diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_imu.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_imu.cpp index c55622c7da..17f939e52c 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_imu.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_imu.cpp @@ -69,7 +69,7 @@ int main() while (true) { - rs.getIMUData(&imu_acc, &imu_vel, NULL); + rs.getIMUData(&imu_acc, &imu_vel, nullptr); std::cout << "Gyro vel: x = " << std::setw(12) << imu_vel[0] << " y = " << std::setw(12) << imu_vel[1] << " z = " << std::setw(12) << imu_vel[2]; diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_odometry.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_odometry.cpp index cf38353a98..63053891bd 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_odometry.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_odometry.cpp @@ -88,7 +88,7 @@ int main() display_pose.init(I_pose, 10, 10, "Pose visualizer"); #endif - confidence = rs.getOdometryData(&cMw_0, NULL, NULL, &ts); // Acquire first frame (pose only) + confidence = rs.getOdometryData(&cMw_0, nullptr, nullptr, &ts); // Acquire first frame (pose only) vpHomogeneousMatrix cextMc_0 = cextMw * cMw_0.inverse(); vpMeterPixelConversion::convertPoint(cam, cextMc_0[0][3] / cextMc_0[2][3], 
cextMc_0[1][3] / cextMc_0[2][3], @@ -97,7 +97,7 @@ int main() while (true) { double t = vpTime::measureTimeMs(); - confidence = rs.getOdometryData(&cMw, NULL, NULL, &ts); // Acquire timestamped pose only + confidence = rs.getOdometryData(&cMw, nullptr, nullptr, &ts); // Acquire timestamped pose only vpDisplay::display(I_pose); diff --git a/modules/sensor/test/rgb-depth/testRealSense2_T265_undistort.cpp b/modules/sensor/test/rgb-depth/testRealSense2_T265_undistort.cpp index 516a700a2f..12d2d0b88b 100644 --- a/modules/sensor/test/rgb-depth/testRealSense2_T265_undistort.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense2_T265_undistort.cpp @@ -105,7 +105,7 @@ int main() while (true) { double t = vpTime::measureTimeMs(); - rs.acquire(&I, NULL, NULL); // Acquire only left image + rs.acquire(&I, nullptr, nullptr); // Acquire only left image vpDisplay::display(I); diff --git a/modules/sensor/test/rgb-depth/testRealSense_R200.cpp b/modules/sensor/test/rgb-depth/testRealSense_R200.cpp index 571de313f4..7388e378a4 100644 --- a/modules/sensor/test/rgb-depth/testRealSense_R200.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense_R200.cpp @@ -290,24 +290,24 @@ void test_R200(vpRealSense &rs, const std::map &enables, if (direct_infrared_conversion) { if (pcl_color) { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud_color, (unsigned char *)I_infrared.bitmap, (unsigned char *)I_infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); } else { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud, (unsigned char *)I_infrared.bitmap, (unsigned char *)I_infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); } } else { if (pcl_color) { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud_color, (unsigned char *)infrared.bitmap, (unsigned char *)infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); } else { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud, (unsigned char *)infrared.bitmap, (unsigned char *)infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); } @@ -321,12 +321,12 @@ void test_R200(vpRealSense &rs, const std::map &enables, } else { if (direct_infrared_conversion) { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, (unsigned char *)I_infrared.bitmap, (unsigned char *)I_infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); } else { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, (unsigned char *)infrared.bitmap, (unsigned char *)infrared2.bitmap, color_stream, depth_stream, rs::stream::infrared, infrared2_stream); vpImageConvert::convert(infrared, I_infrared); diff --git a/modules/sensor/test/rgb-depth/testRealSense_SR300.cpp 
b/modules/sensor/test/rgb-depth/testRealSense_SR300.cpp index 3a91cc5ed9..42bfb80d9b 100644 --- a/modules/sensor/test/rgb-depth/testRealSense_SR300.cpp +++ b/modules/sensor/test/rgb-depth/testRealSense_SR300.cpp @@ -267,22 +267,22 @@ void test_SR300(vpRealSense &rs, const std::map &enables, if (direct_infrared_conversion) { if (pcl_color) { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, - (unsigned char *)I_infrared.bitmap, NULL, color_stream, depth_stream); + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud_color, + (unsigned char *)I_infrared.bitmap, nullptr, color_stream, depth_stream); } else { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, - (unsigned char *)I_infrared.bitmap, NULL, color_stream, depth_stream); + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud, + (unsigned char *)I_infrared.bitmap, nullptr, color_stream, depth_stream); } } else { if (pcl_color) { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud_color, - (unsigned char *)infrared.bitmap, NULL, color_stream, depth_stream); + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud_color, + (unsigned char *)infrared.bitmap, nullptr, color_stream, depth_stream); } else { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, pointcloud, - (unsigned char *)infrared.bitmap, NULL, color_stream, depth_stream); + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, pointcloud, + (unsigned char *)infrared.bitmap, nullptr, color_stream, depth_stream); } vpImageConvert::convert(infrared, I_infrared); @@ -293,12 +293,12 @@ void test_SR300(vpRealSense &rs, const std::map &enables, } else { if (direct_infrared_conversion) { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, - (unsigned char *)I_infrared.bitmap, NULL, color_stream, depth_stream); + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, + (unsigned char *)I_infrared.bitmap, nullptr, color_stream, depth_stream); } else { - rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, NULL, - (unsigned char *)infrared.bitmap, NULL, color_stream, depth_stream); + rs.acquire((unsigned char *)I_color.bitmap, (unsigned char *)depth.bitmap, nullptr, + (unsigned char *)infrared.bitmap, nullptr, color_stream, depth_stream); vpImageConvert::convert(infrared, I_infrared); } } @@ -480,7 +480,7 @@ int main(int argc, char *argv[]) double t_begin = vpTime::measureTimeMs(); while (true) { - rs.acquire(color_mat.data, NULL, NULL, infrared_mat.data); + rs.acquire(color_mat.data, nullptr, nullptr, infrared_mat.data); cv::imshow("Color mat", color_mat); cv::imshow("Infrared mat", infrared_mat); diff --git a/modules/tracker/blob/include/visp3/blob/vpDot2.h b/modules/tracker/blob/include/visp3/blob/vpDot2.h index a89dceba84..ec8171ebda 100644 --- a/modules/tracker/blob/include/visp3/blob/vpDot2.h +++ b/modules/tracker/blob/include/visp3/blob/vpDot2.h @@ -361,7 +361,7 @@ class VISP_EXPORT vpDot2 : public vpTracker void track(const vpImage &I, vpImagePoint &cog, bool canMakeTheWindowGrow = true); static void trackAndDisplay(vpDot2 dot[], const unsigned int &n, vpImage &I, - std::vector &cogs, vpImagePoint *cogStar = NULL); + std::vector &cogs, vpImagePoint *cogStar = nullptr); #ifdef 
VISP_BUILD_DEPRECATED_FUNCTIONS public: diff --git a/modules/tracker/blob/src/dots/vpDot2.cpp b/modules/tracker/blob/src/dots/vpDot2.cpp index f3453957db..4de44dbb60 100644 --- a/modules/tracker/blob/src/dots/vpDot2.cpp +++ b/modules/tracker/blob/src/dots/vpDot2.cpp @@ -1000,7 +1000,7 @@ void vpDot2::searchDotsInArea(const vpImage &I, int area_u, int a std::list::iterator itnice; std::list::iterator itbad; - vpDot2 *dotToTest = NULL; + vpDot2 *dotToTest = nullptr; vpDot2 tmpDot; unsigned int area_u_min = (unsigned int)area.getLeft(); @@ -1100,7 +1100,7 @@ void vpDot2::searchDotsInArea(const vpImage &I, int area_u, int a // otherwise estimate the width, height and surface of the dot we // created, and test it. - if (dotToTest != NULL) + if (dotToTest != nullptr) delete dotToTest; dotToTest = getInstance(); dotToTest->setCog(germ); @@ -1195,7 +1195,7 @@ void vpDot2::searchDotsInArea(const vpImage &I, int area_u, int a } } } - if (dotToTest != NULL) + if (dotToTest != nullptr) delete dotToTest; } @@ -2395,7 +2395,7 @@ vpMatrix vpDot2::defineDots(vpDot2 dot[], const unsigned int &n, const std::stri dots, will be displayed in green \param cogStar (optional) : array of - vpImagePoint indicating the desired position (default NULL), will be + vpImagePoint indicating the desired position (default nullptr), will be displayed in red */ void vpDot2::trackAndDisplay(vpDot2 dot[], const unsigned int &n, vpImage &I, @@ -2414,7 +2414,7 @@ void vpDot2::trackAndDisplay(vpDot2 dot[], const unsigned int &n, vpImage option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/tracker/klt/include/visp3/klt/vpKltOpencv.h b/modules/tracker/klt/include/visp3/klt/vpKltOpencv.h index edce98a3a0..45e4dcc6a1 100644 --- a/modules/tracker/klt/include/visp3/klt/vpKltOpencv.h +++ b/modules/tracker/klt/include/visp3/klt/vpKltOpencv.h @@ -216,7 +216,7 @@ class VISP_EXPORT vpKltOpencv * * \param I : Grey level image used as input. This image should have only 1 channel. * \param mask : Image mask used to restrict the keypoint detection - * area. If mask is NULL, all the image will be considered. + * area. If mask is nullptr, all the image will be considered. * * \exception vpTrackingException::initializationError : If the image I is not * initialized, or if the image or the mask have bad coding format. 
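Editor's note on the pattern these hunks modernize: in the sensor wrappers above, every optional output (timestamp, IMU velocity/acceleration, point cloud, infrared buffer) is a raw pointer that defaults to nullptr and is checked before being written. A minimal standalone sketch of that convention follows; getSample() is a hypothetical function used only for illustration and is not part of the ViSP API.

  #include <iostream>

  // Hypothetical illustration of the "pass nullptr for unwanted outputs"
  // convention used by the acquire()/getIMUData() signatures above.
  void getSample(double *value, double *ts = nullptr)
  {
    if (value != nullptr) { // caller wants the value
      *value = 42.0;
    }
    if (ts != nullptr) {    // caller wants the timestamp
      *ts = 0.001;
    }
  }

  int main()
  {
    double v = 0.0;
    getSample(&v);          // timestamp not wanted: rely on the nullptr default
    getSample(&v, nullptr); // equivalent, explicit
    std::cout << v << std::endl;
    return 0;
  }

Callers that only need some of the outputs pass nullptr (or omit the defaulted argument), which is exactly what the updated test programs in this patch do.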
diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h index 6d6a6be8fd..4ab137f519 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h @@ -328,7 +328,7 @@ class VISP_EXPORT vpMbEdgeKltTracker : protected: virtual void computeVVS(const vpImage &I, const unsigned int &nbInfos, unsigned int &nbrow, - unsigned int lvl = 0, double *edge_residual = NULL, double *klt_residual = NULL); + unsigned int lvl = 0, double *edge_residual = nullptr, double *klt_residual = nullptr); virtual void computeVVSInit() override; virtual void computeVVSInteractionMatrixAndResidu() override; using vpMbTracker::computeCovarianceMatrixVVS; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index bdd129c628..3175dcd0a1 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -764,10 +764,10 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void preTracking(const vpImage *const ptr_I, const pcl::PointCloud::ConstPtr &point_cloud); #endif - virtual void postTracking(const vpImage *const ptr_I = NULL, const unsigned int pointcloud_width = 0, + virtual void postTracking(const vpImage *const ptr_I = nullptr, const unsigned int pointcloud_width = 0, const unsigned int pointcloud_height = 0); - virtual void preTracking(const vpImage *const ptr_I = NULL, - const std::vector *const point_cloud = NULL, + virtual void preTracking(const vpImage *const ptr_I = nullptr, + const std::vector *const point_cloud = nullptr, const unsigned int pointcloud_width = 0, const unsigned int pointcloud_height = 0); virtual void reInitModel(const vpImage *const I, const vpImage *const I_color, diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h index 8edb9db333..38c233e647 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h @@ -293,25 +293,25 @@ vpMbHiddenFaces::vpMbHiddenFaces() : Lpol(), nbVisiblePolygon(0), s template vpMbHiddenFaces::~vpMbHiddenFaces() { for (unsigned int i = 0; i < Lpol.size(); i++) { - if (Lpol[i] != NULL) { + if (Lpol[i] != nullptr) { delete Lpol[i]; } - Lpol[i] = NULL; + Lpol[i] = nullptr; } Lpol.resize(0); #ifdef VISP_HAVE_OGRE - if (ogre != NULL) { + if (ogre != nullptr) { delete ogre; - ogre = NULL; + ogre = nullptr; } // This is already done by calling "delete ogre" // for(unsigned int i = 0 ; i < lOgrePolygons.size() ; i++){ - // if (lOgrePolygons[i]!=NULL){ + // if (lOgrePolygons[i]!=nullptr){ // delete lOgrePolygons[i]; // } - // lOgrePolygons[i] = NULL; + // lOgrePolygons[i] = nullptr; // } lOgrePolygons.resize(0); @@ -327,7 +327,7 @@ vpMbHiddenFaces::vpMbHiddenFaces(const vpMbHiddenFaces #ifdef VISP_HAVE_OGRE , ogreBackground(copy.ogreBackground), ogreInitialised(copy.ogreInitialised), nbRayAttempts(copy.nbRayAttempts), - ratioVisibleRay(copy.ratioVisibleRay), ogre(NULL), lOgrePolygons(), ogreShowConfigDialog(copy.ogreShowConfigDialog) + ratioVisibleRay(copy.ratioVisibleRay), ogre(nullptr), lOgrePolygons(), ogreShowConfigDialog(copy.ogreShowConfigDialog) #endif { // Copy the list of polygons @@ -393,25 +393,25 @@ template void vpMbHiddenFaces::reset() { nbVisiblePolygon = 0; for (unsigned int i = 0; i < Lpol.size(); i++) 
{ - if (Lpol[i] != NULL) { + if (Lpol[i] != nullptr) { delete Lpol[i]; } - Lpol[i] = NULL; + Lpol[i] = nullptr; } Lpol.resize(0); #ifdef VISP_HAVE_OGRE - if (ogre != NULL) { + if (ogre != nullptr) { delete ogre; - ogre = NULL; + ogre = nullptr; } // This is already done by calling "delete ogre" // for(unsigned int i = 0 ; i < lOgrePolygons.size() ; i++){ - // if (lOgrePolygons[i]!=NULL){ + // if (lOgrePolygons[i]!=nullptr){ // delete lOgrePolygons[i]; // } - // lOgrePolygons[i] = NULL; + // lOgrePolygons[i] = nullptr; // } lOgrePolygons.resize(0); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h index 32949e7c1b..7d21b1216f 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h @@ -808,14 +808,14 @@ class VISP_EXPORT vpMbTracker virtual void computeVVSCheckLevenbergMarquardt(unsigned int iter, vpColVector &error, const vpColVector &m_error_prev, const vpHomogeneousMatrix &cMoPrev, double &mu, - bool &reStartFromLastIncrement, vpColVector *const w = NULL, - const vpColVector *const m_w_prev = NULL); + bool &reStartFromLastIncrement, vpColVector *const w = nullptr, + const vpColVector *const m_w_prev = nullptr); virtual void computeVVSInit() = 0; virtual void computeVVSInteractionMatrixAndResidu() = 0; virtual void computeVVSPoseEstimation(const bool isoJoIdentity, unsigned int iter, vpMatrix &L, vpMatrix <L, vpColVector &R, const vpColVector &error, vpColVector &error_prev, - vpColVector <R, double &mu, vpColVector &v, const vpColVector *const w = NULL, - vpColVector *const m_w_prev = NULL); + vpColVector <R, double &mu, vpColVector &v, const vpColVector *const w = nullptr, + vpColVector *const m_w_prev = nullptr); virtual void computeVVSWeights(vpRobust &robust, const vpColVector &error, vpColVector &w); #ifdef VISP_HAVE_COIN3D diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceCircle.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceCircle.h index c462d2c406..1a2ed07bfe 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceCircle.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceCircle.h @@ -104,11 +104,11 @@ class VISP_EXPORT vpMbtDistanceCircle // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpMbtDistanceCircle(const vpMbtDistanceCircle &) - // : name(), index(0), cam(), me(NULL), wmean(1), - // featureEllipse(), isTrackedCircle(true), meEllipse(NULL), - // circle(NULL), radius(0.), p1(NULL), p2(NULL), p3(NULL), + // : name(), index(0), cam(), me(nullptr), wmean(1), + // featureEllipse(), isTrackedCircle(true), meEllipse(nullptr), + // circle(nullptr), radius(0.), p1(nullptr), p2(nullptr), p3(nullptr), // L(), error(), nbFeature(0), Reinit(false), - // hiddenface(NULL), index_polygon(-1), isvisible(false) + // hiddenface(nullptr), index_polygon(-1), isvisible(false) // { // throw vpException(vpException::functionNotImplementedError, "Not // implemented!"); @@ -172,7 +172,7 @@ class VISP_EXPORT vpMbtDistanceCircle void initInteractionMatrixError(); bool initMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, bool doNotTrack, - const vpImage *mask = NULL); + const vpImage *mask = nullptr); /*! Return if the circle is used for tracking. @@ -189,7 +189,7 @@ class VISP_EXPORT vpMbtDistanceCircle inline bool isVisible() const { return isvisible; } void reinitMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, - const vpImage *mask = NULL); + const vpImage *mask = nullptr); /*! 
Set the camera parameters. diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceCylinder.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceCylinder.h index bb41a192ca..0e8787b932 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceCylinder.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceCylinder.h @@ -118,12 +118,12 @@ class VISP_EXPORT vpMbtDistanceCylinder // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpMbtDistanceCylinder(const vpMbtDistanceCylinder &) - // : name(), index(0), cam(), me(NULL), wmean1(1), wmean2(1), + // : name(), index(0), cam(), me(nullptr), wmean1(1), wmean2(1), // featureline1(), featureline2(), isTrackedCylinder(true), - // meline1(NULL), meline2(NULL), cercle1(NULL), cercle2(NULL), - // radius(0), p1(NULL), p2(NULL), L(), error(), nbFeature(0), - // nbFeaturel1(0), nbFeaturel2(0), Reinit(false), c(NULL), - // hiddenface(NULL), index_polygon(-1), isvisible(false) + // meline1(nullptr), meline2(nullptr), cercle1(nullptr), cercle2(nullptr), + // radius(0), p1(nullptr), p2(nullptr), L(), error(), nbFeature(0), + // nbFeaturel1(0), nbFeaturel2(0), Reinit(false), c(nullptr), + // hiddenface(nullptr), index_polygon(-1), isvisible(false) // { // throw vpException(vpException::functionNotImplementedError, "Not // implemented!"); @@ -197,7 +197,7 @@ class VISP_EXPORT vpMbtDistanceCylinder void initInteractionMatrixError(); bool initMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, bool doNotTrack, - const vpImage *mask = NULL); + const vpImage *mask = nullptr); /*! Return if the cylinder is used for tracking. @@ -214,7 +214,7 @@ class VISP_EXPORT vpMbtDistanceCylinder inline bool isVisible() const { return isvisible; } void reinitMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, - const vpImage *mask = NULL); + const vpImage *mask = nullptr); /*! Set the camera parameters. 
diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceKltCylinder.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceKltCylinder.h index 5530f493df..f8ac56233c 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceKltCylinder.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceKltCylinder.h @@ -117,7 +117,7 @@ class VISP_EXPORT vpMbtDistanceKltCylinder // initPoints(), initPoints3D(), curPoints(), curPointsInd(), // nbPointsCur(0), nbPointsInit(0), minNbPoint(4), // enoughPoints(false), cam(), isTrackedKltCylinder(true), - // listIndicesCylinderBBox(), hiddenface(NULL), useScanLine(false) + // listIndicesCylinderBBox(), hiddenface(nullptr), useScanLine(false) // { // throw vpException(vpException::functionNotImplementedError, "Not // implemented!"); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceKltPoints.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceKltPoints.h index bc20dfb94a..16c69ae071 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceKltPoints.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceKltPoints.h @@ -117,7 +117,7 @@ class VISP_EXPORT vpMbtDistanceKltPoints // curPoints(), curPointsInd(), // nbPointsCur(0), nbPointsInit(0), minNbPoint(4), // enoughPoints(false), dt(1.), d0(1.), cam(), - // isTrackedKltPoints(true), polygon(NULL), hiddenface(NULL), + // isTrackedKltPoints(true), polygon(nullptr), hiddenface(nullptr), // useScanLine(false) // { // throw vpException(vpException::functionNotImplementedError, "Not @@ -133,7 +133,7 @@ class VISP_EXPORT vpMbtDistanceKltPoints vpMbtDistanceKltPoints(); virtual ~vpMbtDistanceKltPoints(); - unsigned int computeNbDetectedCurrent(const vpKltOpencv &_tracker, const vpImage *mask = NULL); + unsigned int computeNbDetectedCurrent(const vpKltOpencv &_tracker, const vpImage *mask = nullptr); void computeHomography(const vpHomogeneousMatrix &_cTc0, vpHomography &cHc0); void computeInteractionMatrixAndResidu(vpColVector &_R, vpMatrix &_J); @@ -185,7 +185,7 @@ class VISP_EXPORT vpMbtDistanceKltPoints inline bool hasEnoughPoints() const { return enoughPoints; } - void init(const vpKltOpencv &_tracker, const vpImage *mask = NULL); + void init(const vpKltOpencv &_tracker, const vpImage *mask = nullptr); /*! Return if the klt points are used for tracking. 
diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceLine.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceLine.h index 831078713d..ac64ea07dc 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceLine.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtDistanceLine.h @@ -112,11 +112,11 @@ class VISP_EXPORT vpMbtDistanceLine // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpMbtDistanceLine(const vpMbtDistanceLine &) - // : name(), index(0), cam(), me(NULL), isTrackedLine(true), + // : name(), index(0), cam(), me(nullptr), isTrackedLine(true), // isTrackedLineWithVisibility(true), // wmean(1), featureline(), poly(), useScanLine(false), meline(), - // line(NULL), p1(NULL), p2(NULL), L(), error(), nbFeature(), - // nbFeatureTotal(0), Reinit(false), hiddenface(NULL), + // line(nullptr), p1(nullptr), p2(nullptr), L(), error(), nbFeature(), + // nbFeatureTotal(0), Reinit(false), hiddenface(nullptr), // Lindex_polygon(), Lindex_polygon_tracked(), isvisible(false) // { // throw vpException(vpException::functionNotImplementedError, "Not @@ -192,7 +192,7 @@ class VISP_EXPORT vpMbtDistanceLine void initInteractionMatrixError(); bool initMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, bool doNotTrack, - const vpImage *mask = NULL); + const vpImage *mask = nullptr); /*! Return if the line is used for tracking. @@ -209,7 +209,7 @@ class VISP_EXPORT vpMbtDistanceLine inline bool isVisible() const { return isvisible; } void reinitMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, - const vpImage *mask = NULL); + const vpImage *mask = nullptr); /*! Set the camera parameters. diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtFaceDepthDense.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtFaceDepthDense.h index 5d0fc4d69f..0ae0eea302 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtFaceDepthDense.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtFaceDepthDense.h @@ -96,7 +96,7 @@ class VISP_EXPORT vpMbtFaceDepthDense vpImage &debugImage, std::vector > &roiPts_vec #endif , - const vpImage *mask = NULL); + const vpImage *mask = nullptr); #endif bool computeDesiredFeatures(const vpHomogeneousMatrix &cMo, unsigned int width, unsigned int height, const std::vector &point_cloud, unsigned int stepX, unsigned int stepY @@ -105,7 +105,7 @@ class VISP_EXPORT vpMbtFaceDepthDense vpImage &debugImage, std::vector > &roiPts_vec #endif , - const vpImage *mask = NULL); + const vpImage *mask = nullptr); void computeInteractionMatrixAndResidu(const vpHomogeneousMatrix &cMo, vpMatrix &L, vpColVector &error); @@ -168,10 +168,10 @@ class VISP_EXPORT vpMbtFaceDepthDense //! 
The second extremity clipped in the image frame vpImagePoint m_imPt2; - PolygonLine() : m_p1(NULL), m_p2(NULL), m_poly(), m_imPt1(), m_imPt2() {} + PolygonLine() : m_p1(nullptr), m_p2(nullptr), m_poly(), m_imPt1(), m_imPt2() {} PolygonLine(const PolygonLine &polyLine) - : m_p1(NULL), m_p2(NULL), m_poly(polyLine.m_poly), m_imPt1(polyLine.m_imPt1), m_imPt2(polyLine.m_imPt2) + : m_p1(nullptr), m_p2(nullptr), m_poly(polyLine.m_poly), m_imPt1(polyLine.m_imPt1), m_imPt2(polyLine.m_imPt2) { m_p1 = &m_poly.p[0]; m_p2 = &m_poly.p[1]; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtFaceDepthNormal.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtFaceDepthNormal.h index 61cd959aea..db5a673d70 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtFaceDepthNormal.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtFaceDepthNormal.h @@ -98,7 +98,7 @@ class VISP_EXPORT vpMbtFaceDepthNormal vpImage &debugImage, std::vector > &roiPts_vec #endif , - const vpImage *mask = NULL); + const vpImage *mask = nullptr); #endif bool computeDesiredFeatures(const vpHomogeneousMatrix &cMo, unsigned int width, unsigned int height, const std::vector &point_cloud, vpColVector &desired_features, @@ -108,7 +108,7 @@ class VISP_EXPORT vpMbtFaceDepthNormal vpImage &debugImage, std::vector > &roiPts_vec #endif , - const vpImage *mask = NULL); + const vpImage *mask = nullptr); void computeInteractionMatrix(const vpHomogeneousMatrix &cMo, vpMatrix &L, vpColVector &features); @@ -179,10 +179,10 @@ class VISP_EXPORT vpMbtFaceDepthNormal //! The second extremity clipped in the image frame vpImagePoint m_imPt2; - PolygonLine() : m_p1(NULL), m_p2(NULL), m_poly(), m_imPt1(), m_imPt2() {} + PolygonLine() : m_p1(nullptr), m_p2(nullptr), m_poly(), m_imPt1(), m_imPt2() {} PolygonLine(const PolygonLine &polyLine) - : m_p1(NULL), m_p2(NULL), m_poly(polyLine.m_poly), m_imPt1(polyLine.m_imPt1), m_imPt2(polyLine.m_imPt2) + : m_p1(nullptr), m_p2(nullptr), m_poly(polyLine.m_poly), m_imPt1(polyLine.m_imPt1), m_imPt2(polyLine.m_imPt2) { m_p1 = &m_poly.p[0]; m_p2 = &m_poly.p[1]; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h index 2a712dc9d4..f5fd18fa99 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h @@ -66,7 +66,7 @@ class VISP_EXPORT vpMbtMeEllipse : public vpMeEllipse unsigned int thickness); void initTracking(const vpImage &I, const vpImagePoint &ic, double n20_p, double n11_p, double n02_p, - bool doNotTrack, vpImagePoint *pt1 = NULL, const vpImagePoint *pt2 = NULL); + bool doNotTrack, vpImagePoint *pt1 = nullptr, const vpImagePoint *pt2 = nullptr); void track(const vpImage &I); void updateParameters(const vpImage &I, const vpImagePoint ¢er_p, double n20_p, double n11_p, diff --git a/modules/tracker/mbt/src/depth/vpMbDepthDenseTracker.cpp b/modules/tracker/mbt/src/depth/vpMbDepthDenseTracker.cpp index 63693eaa1d..ca74cdc202 100644 --- a/modules/tracker/mbt/src/depth/vpMbDepthDenseTracker.cpp +++ b/modules/tracker/mbt/src/depth/vpMbDepthDenseTracker.cpp @@ -58,7 +58,7 @@ vpMbDepthDenseTracker::vpMbDepthDenseTracker() m_robust_depthDense(), m_w_depthDense(), m_weightedError_depthDense() #if DEBUG_DISPLAY_DEPTH_DENSE , - m_debugDisp_depthDense(NULL), m_debugImage_depthDense() + m_debugDisp_depthDense(nullptr), m_debugImage_depthDense() #endif { #ifdef VISP_HAVE_OGRE @@ -439,7 +439,7 @@ void vpMbDepthDenseTracker::reInitModel(const vpImage &I, const s for (size_t i = 0; i < 
m_depthDenseFaces.size(); i++) { delete m_depthDenseFaces[i]; - m_depthDenseFaces[i] = NULL; + m_depthDenseFaces[i] = nullptr; } m_depthDenseFaces.clear(); @@ -466,7 +466,7 @@ void vpMbDepthDenseTracker::resetTracker() ++it) { vpMbtFaceDepthDense *normal_face = *it; delete normal_face; - normal_face = NULL; + normal_face = nullptr; } m_depthDenseFaces.clear(); diff --git a/modules/tracker/mbt/src/depth/vpMbDepthNormalTracker.cpp b/modules/tracker/mbt/src/depth/vpMbDepthNormalTracker.cpp index 651ce4b7cf..0901245fca 100644 --- a/modules/tracker/mbt/src/depth/vpMbDepthNormalTracker.cpp +++ b/modules/tracker/mbt/src/depth/vpMbDepthNormalTracker.cpp @@ -61,7 +61,7 @@ vpMbDepthNormalTracker::vpMbDepthNormalTracker() m_robust_depthNormal(), m_w_depthNormal(), m_weightedError_depthNormal() #if DEBUG_DISPLAY_DEPTH_NORMAL , - m_debugDisp_depthNormal(NULL), m_debugImage_depthNormal() + m_debugDisp_depthNormal(nullptr), m_debugImage_depthNormal() #endif { #ifdef VISP_HAVE_OGRE @@ -476,7 +476,7 @@ void vpMbDepthNormalTracker::reInitModel(const vpImage &I, const for (size_t i = 0; i < m_depthNormalFaces.size(); i++) { delete m_depthNormalFaces[i]; - m_depthNormalFaces[i] = NULL; + m_depthNormalFaces[i] = nullptr; } m_depthNormalFaces.clear(); @@ -503,7 +503,7 @@ void vpMbDepthNormalTracker::resetTracker() ++it) { vpMbtFaceDepthNormal *normal_face = *it; delete normal_face; - normal_face = NULL; + normal_face = nullptr; } m_depthNormalFaces.clear(); diff --git a/modules/tracker/mbt/src/depth/vpMbtFaceDepthDense.cpp b/modules/tracker/mbt/src/depth/vpMbtFaceDepthDense.cpp index 07f25e06cd..0dacb833f1 100644 --- a/modules/tracker/mbt/src/depth/vpMbtFaceDepthDense.cpp +++ b/modules/tracker/mbt/src/depth/vpMbtFaceDepthDense.cpp @@ -156,8 +156,8 @@ inline float64x2_t v_fma(const float64x2_t &a, const float64x2_t &b, const float #endif // !USE_OPENCV_HAL && (USE_SSE || USE_NEON) vpMbtFaceDepthDense::vpMbtFaceDepthDense() - : m_cam(), m_clippingFlag(vpPolygon3D::NO_CLIPPING), m_distFarClip(100), m_distNearClip(0.001), m_hiddenFace(NULL), - m_planeObject(), m_polygon(NULL), m_useScanLine(false), + : m_cam(), m_clippingFlag(vpPolygon3D::NO_CLIPPING), m_distFarClip(100), m_distNearClip(0.001), m_hiddenFace(nullptr), + m_planeObject(), m_polygon(nullptr), m_useScanLine(false), m_depthDenseFilteringMethod(DEPTH_OCCUPANCY_RATIO_FILTERING), m_depthDenseFilteringMaxDist(3.0), m_depthDenseFilteringMinDist(0.8), m_depthDenseFilteringOccupancyRatio(0.3), m_isTrackedDepthDenseFace(true), m_isVisible(false), m_listOfFaceLines(), m_planeCamera(), m_pointCloudFace(), m_polygonLines() diff --git a/modules/tracker/mbt/src/depth/vpMbtFaceDepthNormal.cpp b/modules/tracker/mbt/src/depth/vpMbtFaceDepthNormal.cpp index d2c7390ac7..faee1d75d3 100644 --- a/modules/tracker/mbt/src/depth/vpMbtFaceDepthNormal.cpp +++ b/modules/tracker/mbt/src/depth/vpMbtFaceDepthNormal.cpp @@ -56,8 +56,8 @@ #endif vpMbtFaceDepthNormal::vpMbtFaceDepthNormal() - : m_cam(), m_clippingFlag(vpPolygon3D::NO_CLIPPING), m_distFarClip(100), m_distNearClip(0.001), m_hiddenFace(NULL), - m_planeObject(), m_polygon(NULL), m_useScanLine(false), m_faceActivated(false), + : m_cam(), m_clippingFlag(vpPolygon3D::NO_CLIPPING), m_distFarClip(100), m_distNearClip(0.001), m_hiddenFace(nullptr), + m_planeObject(), m_polygon(nullptr), m_useScanLine(false), m_faceActivated(false), m_faceCentroidMethod(GEOMETRIC_CENTROID), m_faceDesiredCentroid(), m_faceDesiredNormal(), m_featureEstimationMethod(ROBUST_FEATURE_ESTIMATION), m_isTrackedDepthNormalFace(true), m_isVisible(false), 
m_listOfFaceLines(), m_planeCamera(), diff --git a/modules/tracker/mbt/src/edge/vpMbEdgeTracker.cpp b/modules/tracker/mbt/src/edge/vpMbEdgeTracker.cpp index d4d7ae9f47..663fa223f0 100644 --- a/modules/tracker/mbt/src/edge/vpMbEdgeTracker.cpp +++ b/modules/tracker/mbt/src/edge/vpMbEdgeTracker.cpp @@ -88,27 +88,27 @@ vpMbEdgeTracker::~vpMbEdgeTracker() if (scales[i]) { for (std::list::const_iterator it = lines[i].begin(); it != lines[i].end(); ++it) { l = *it; - if (l != NULL) { + if (l != nullptr) { delete l; } - l = NULL; + l = nullptr; } for (std::list::const_iterator it = cylinders[i].begin(); it != cylinders[i].end(); ++it) { cy = *it; - if (cy != NULL) { + if (cy != nullptr) { delete cy; } - cy = NULL; + cy = nullptr; } for (std::list::const_iterator it = circles[i].begin(); it != circles[i].end(); ++it) { ci = *it; - if (ci != NULL) { + if (ci != nullptr) { delete ci; } - ci = NULL; + ci = nullptr; } lines[i].clear(); @@ -323,7 +323,7 @@ void vpMbEdgeTracker::computeVVSFirstPhase(const vpImage &_I, uns unsigned int indexFeature = 0; for (size_t a = 0; a < l->meline.size(); a++) { - if (iter == 0 && l->meline[a] != NULL) + if (iter == 0 && l->meline[a] != nullptr) itListLine = l->meline[a]->getMeList().begin(); for (unsigned int i = 0; i < l->nbFeature[a]; i++) { @@ -393,7 +393,7 @@ void vpMbEdgeTracker::computeVVSFirstPhase(const vpImage &_I, uns std::list::const_iterator itCyl1; std::list::const_iterator itCyl2; - if (iter == 0 && (cy->meline1 != NULL || cy->meline2 != NULL)) { + if (iter == 0 && (cy->meline1 != nullptr || cy->meline2 != nullptr)) { itCyl1 = cy->meline1->getMeList().begin(); itCyl2 = cy->meline2->getMeList().begin(); } @@ -486,7 +486,7 @@ void vpMbEdgeTracker::computeVVSFirstPhase(const vpImage &_I, uns double fac = 1.0; std::list::const_iterator itCir; - if (iter == 0 && (ci->meEllipse != NULL)) { + if (iter == 0 && (ci->meEllipse != nullptr)) { itCir = ci->meEllipse->getMeList().begin(); } @@ -576,7 +576,7 @@ void vpMbEdgeTracker::computeVVSFirstPhaseFactor(const vpImage &I for (size_t a = 0; a < l->meline.size(); a++) { std::list::const_iterator itListLine; - if (l->meline[a] != NULL) { + if (l->meline[a] != nullptr) { itListLine = l->meline[a]->getMeList().begin(); for (unsigned int i = 0; i < l->nbFeature[a]; i++) { @@ -600,7 +600,7 @@ void vpMbEdgeTracker::computeVVSFirstPhaseFactor(const vpImage &I std::list::const_iterator itCyl1; std::list::const_iterator itCyl2; - if ((cy->meline1 != NULL || cy->meline2 != NULL)) { + if ((cy->meline1 != nullptr || cy->meline2 != nullptr)) { itCyl1 = cy->meline1->getMeList().begin(); itCyl2 = cy->meline2->getMeList().begin(); @@ -629,7 +629,7 @@ void vpMbEdgeTracker::computeVVSFirstPhaseFactor(const vpImage &I ci->computeInteractionMatrixError(m_cMo); std::list::const_iterator itCir; - if (ci->meEllipse != NULL) { + if (ci->meEllipse != nullptr) { itCir = ci->meEllipse->getMeList().begin(); double fac = 1.0; @@ -861,7 +861,7 @@ void vpMbEdgeTracker::computeProjectionError(const vpImage &_I) vpMbtDistanceLine *l = *it; if (l->isVisible() && l->isTracked()) { for (size_t a = 0; a < l->meline.size(); a++) { - if (l->meline[a] != NULL) { + if (l->meline[a] != nullptr) { double lineNormGradient; unsigned int lineNbFeatures; l->meline[a]->computeProjectionError(_I, lineNormGradient, lineNbFeatures, m_SobelX, m_SobelY, @@ -878,7 +878,7 @@ void vpMbEdgeTracker::computeProjectionError(const vpImage &_I) it != cylinders[scaleLevel].end(); ++it) { vpMbtDistanceCylinder *cy = *it; if (cy->isVisible() && cy->isTracked()) { - if 
(cy->meline1 != NULL) { + if (cy->meline1 != nullptr) { double cylinderNormGradient = 0; unsigned int cylinderNbFeatures = 0; cy->meline1->computeProjectionError(_I, cylinderNormGradient, cylinderNbFeatures, m_SobelX, m_SobelY, @@ -888,7 +888,7 @@ void vpMbEdgeTracker::computeProjectionError(const vpImage &_I) nbFeatures += cylinderNbFeatures; } - if (cy->meline2 != NULL) { + if (cy->meline2 != nullptr) { double cylinderNormGradient = 0; unsigned int cylinderNbFeatures = 0; cy->meline2->computeProjectionError(_I, cylinderNormGradient, cylinderNbFeatures, m_SobelX, m_SobelY, @@ -903,7 +903,7 @@ void vpMbEdgeTracker::computeProjectionError(const vpImage &_I) for (std::list::const_iterator it = circles[scaleLevel].begin(); it != circles[scaleLevel].end(); ++it) { vpMbtDistanceCircle *c = *it; - if (c->isVisible() && c->isTracked() && c->meEllipse != NULL) { + if (c->isVisible() && c->isTracked() && c->meEllipse != nullptr) { double circleNormGradient = 0; unsigned int circleNbFeatures = 0; c->meEllipse->computeProjectionError(_I, circleNormGradient, circleNbFeatures, m_SobelX, m_SobelY, @@ -940,7 +940,7 @@ void vpMbEdgeTracker::testTracking() vpMbtDistanceLine *l = *it; if (l->isVisible() && l->isTracked()) { for (size_t a = 0; a < l->meline.size(); a++) { - if (l->meline[a] != NULL) { + if (l->meline[a] != nullptr) { nbExpectedPoint += (int)l->meline[a]->expecteddensity; for (std::list::const_iterator itme = l->meline[a]->getMeList().begin(); itme != l->meline[a]->getMeList().end(); ++itme) { @@ -958,7 +958,7 @@ void vpMbEdgeTracker::testTracking() for (std::list::const_iterator it = cylinders[scaleLevel].begin(); it != cylinders[scaleLevel].end(); ++it) { vpMbtDistanceCylinder *cy = *it; - if ((cy->meline1 != NULL && cy->meline2 != NULL) && cy->isVisible() && cy->isTracked()) { + if ((cy->meline1 != nullptr && cy->meline2 != nullptr) && cy->isVisible() && cy->isTracked()) { nbExpectedPoint += (int)cy->meline1->expecteddensity; for (std::list::const_iterator itme1 = cy->meline1->getMeList().begin(); itme1 != cy->meline1->getMeList().end(); ++itme1) { @@ -983,7 +983,7 @@ void vpMbEdgeTracker::testTracking() for (std::list::const_iterator it = circles[scaleLevel].begin(); it != circles[scaleLevel].end(); ++it) { vpMbtDistanceCircle *ci = *it; - if (ci->isVisible() && ci->isTracked() && ci->meEllipse != NULL) { + if (ci->isVisible() && ci->isTracked() && ci->meEllipse != nullptr) { nbExpectedPoint += ci->meEllipse->getExpectedDensity(); for (std::list::const_iterator itme = ci->meEllipse->getMeList().begin(); itme != ci->meEllipse->getMeList().end(); ++itme) { @@ -1545,7 +1545,7 @@ void vpMbEdgeTracker::initMovingEdge(const vpImage &I, const vpHo } else { l->setVisible(false); for (size_t a = 0; a < l->meline.size(); a++) { - if (l->meline[a] != NULL) + if (l->meline[a] != nullptr) delete l->meline[a]; if (a < l->nbFeature.size()) l->nbFeature[a] = 0; @@ -1574,18 +1574,18 @@ void vpMbEdgeTracker::initMovingEdge(const vpImage &I, const vpHo if (isvisible) { cy->setVisible(true); - if (cy->meline1 == NULL || cy->meline2 == NULL) { + if (cy->meline1 == nullptr || cy->meline2 == nullptr) { if (cy->isTracked()) cy->initMovingEdge(I, _cMo, doNotTrack, m_mask); } } else { cy->setVisible(false); - if (cy->meline1 != NULL) + if (cy->meline1 != nullptr) delete cy->meline1; - if (cy->meline2 != NULL) + if (cy->meline2 != nullptr) delete cy->meline2; - cy->meline1 = NULL; - cy->meline2 = NULL; + cy->meline1 = nullptr; + cy->meline2 = nullptr; cy->nbFeature = 0; cy->nbFeaturel1 = 0; cy->nbFeaturel2 = 0; @@ 
-1607,15 +1607,15 @@ void vpMbEdgeTracker::initMovingEdge(const vpImage &I, const vpHo if (isvisible) { ci->setVisible(true); - if (ci->meEllipse == NULL) { + if (ci->meEllipse == nullptr) { if (ci->isTracked()) ci->initMovingEdge(I, _cMo, doNotTrack, m_mask); } } else { ci->setVisible(false); - if (ci->meEllipse != NULL) + if (ci->meEllipse != nullptr) delete ci->meEllipse; - ci->meEllipse = NULL; + ci->meEllipse = nullptr; ci->nbFeature = 0; } } @@ -1645,7 +1645,7 @@ void vpMbEdgeTracker::trackMovingEdge(const vpImage &I) it != cylinders[scaleLevel].end(); ++it) { vpMbtDistanceCylinder *cy = *it; if (cy->isVisible() && cy->isTracked()) { - if (cy->meline1 == NULL || cy->meline2 == NULL) { + if (cy->meline1 == nullptr || cy->meline2 == nullptr) { cy->initMovingEdge(I, m_cMo, doNotTrack, m_mask); } cy->trackMovingEdge(I, m_cMo); @@ -1656,7 +1656,7 @@ void vpMbEdgeTracker::trackMovingEdge(const vpImage &I) it != circles[scaleLevel].end(); ++it) { vpMbtDistanceCircle *ci = *it; if (ci->isVisible() && ci->isTracked()) { - if (ci->meEllipse == NULL) { + if (ci->meEllipse == nullptr) { ci->initMovingEdge(I, m_cMo, doNotTrack, m_mask); } ci->trackMovingEdge(I, m_cMo); @@ -1911,9 +1911,9 @@ void vpMbEdgeTracker::resetMovingEdge() if (scales[i]) { for (std::list::const_iterator it = lines[i].begin(); it != lines[i].end(); ++it) { for (size_t a = 0; a < (*it)->meline.size(); a++) { - if ((*it)->meline[a] != NULL) { + if ((*it)->meline[a] != nullptr) { delete (*it)->meline[a]; - (*it)->meline[a] = NULL; + (*it)->meline[a] = nullptr; } } @@ -1924,13 +1924,13 @@ void vpMbEdgeTracker::resetMovingEdge() for (std::list::const_iterator it = cylinders[i].begin(); it != cylinders[i].end(); ++it) { - if ((*it)->meline1 != NULL) { + if ((*it)->meline1 != nullptr) { delete (*it)->meline1; - (*it)->meline1 = NULL; + (*it)->meline1 = nullptr; } - if ((*it)->meline2 != NULL) { + if ((*it)->meline2 != nullptr) { delete (*it)->meline2; - (*it)->meline2 = NULL; + (*it)->meline2 = nullptr; } (*it)->nbFeature = 0; @@ -1939,9 +1939,9 @@ void vpMbEdgeTracker::resetMovingEdge() } for (std::list::const_iterator it = circles[i].begin(); it != circles[i].end(); ++it) { - if ((*it)->meEllipse != NULL) { + if ((*it)->meEllipse != nullptr) { delete (*it)->meEllipse; - (*it)->meEllipse = NULL; + (*it)->meEllipse = nullptr; } (*it)->nbFeature = 0; } @@ -2379,24 +2379,24 @@ void vpMbEdgeTracker::resetTracker() if (scales[i]) { for (std::list::const_iterator it = lines[i].begin(); it != lines[i].end(); ++it) { l = *it; - if (l != NULL) + if (l != nullptr) delete l; - l = NULL; + l = nullptr; } for (std::list::const_iterator it = cylinders[i].begin(); it != cylinders[i].end(); ++it) { cy = *it; - if (cy != NULL) + if (cy != nullptr) delete cy; - cy = NULL; + cy = nullptr; } for (std::list::const_iterator it = circles[i].begin(); it != circles[i].end(); ++it) { ci = *it; - if (ci != NULL) + if (ci != nullptr) delete ci; - ci = NULL; + ci = nullptr; } lines[i].clear(); cylinders[i].clear(); @@ -2453,24 +2453,24 @@ void vpMbEdgeTracker::reInitModel(const vpImage &I, const std::st if (scales[i]) { for (std::list::const_iterator it = lines[i].begin(); it != lines[i].end(); ++it) { l = *it; - if (l != NULL) + if (l != nullptr) delete l; - l = NULL; + l = nullptr; } for (std::list::const_iterator it = cylinders[i].begin(); it != cylinders[i].end(); ++it) { cy = *it; - if (cy != NULL) + if (cy != nullptr) delete cy; - cy = NULL; + cy = nullptr; } for (std::list::const_iterator it = circles[i].begin(); it != circles[i].end(); ++it) { ci = 
*it; - if (ci != NULL) + if (ci != nullptr) delete ci; - ci = NULL; + ci = nullptr; } lines[i].clear(); @@ -2529,7 +2529,7 @@ unsigned int vpMbEdgeTracker::getNbPoints(unsigned int level) const for (std::list::const_iterator it = cylinders[level].begin(); it != cylinders[level].end(); ++it) { cy = *it; - if (cy->isVisible() && cy->isTracked() && (cy->meline1 != NULL || cy->meline2 != NULL)) { + if (cy->isVisible() && cy->isTracked() && (cy->meline1 != nullptr || cy->meline2 != nullptr)) { for (std::list::const_iterator itme1 = cy->meline1->getMeList().begin(); itme1 != cy->meline1->getMeList().end(); ++itme1) { if (itme1->getState() == vpMeSite::NO_SUPPRESSION) @@ -2546,7 +2546,7 @@ unsigned int vpMbEdgeTracker::getNbPoints(unsigned int level) const vpMbtDistanceCircle *ci; for (std::list::const_iterator it = circles[level].begin(); it != circles[level].end(); ++it) { ci = *it; - if (ci->isVisible() && ci->isTracked() && ci->meEllipse != NULL) { + if (ci->isVisible() && ci->isTracked() && ci->meEllipse != nullptr) { for (std::list::const_iterator itme = ci->meEllipse->getMeList().begin(); itme != ci->meEllipse->getMeList().end(); ++itme) { if (itme->getState() == vpMeSite::NO_SUPPRESSION) @@ -2723,7 +2723,7 @@ void vpMbEdgeTracker::initPyramid(const vpImage &_I, if (scales[0]) { _pyramid[0] = &_I; } else { - _pyramid[0] = NULL; + _pyramid[0] = nullptr; } for (unsigned int i = 1; i < _pyramid.size(); i += 1) { @@ -2737,7 +2737,7 @@ void vpMbEdgeTracker::initPyramid(const vpImage &_I, } _pyramid[i] = I; } else { - _pyramid[i] = NULL; + _pyramid[i] = nullptr; } } } @@ -2751,11 +2751,11 @@ void vpMbEdgeTracker::initPyramid(const vpImage &_I, void vpMbEdgeTracker::cleanPyramid(std::vector *> &_pyramid) { if (_pyramid.size() > 0) { - _pyramid[0] = NULL; + _pyramid[0] = nullptr; for (unsigned int i = 1; i < _pyramid.size(); i += 1) { - if (_pyramid[i] != NULL) { + if (_pyramid[i] != nullptr) { delete _pyramid[i]; - _pyramid[i] = NULL; + _pyramid[i] = nullptr; } } _pyramid.resize(0); diff --git a/modules/tracker/mbt/src/edge/vpMbtDistanceCircle.cpp b/modules/tracker/mbt/src/edge/vpMbtDistanceCircle.cpp index e85a72d168..5bfffe2fb7 100644 --- a/modules/tracker/mbt/src/edge/vpMbtDistanceCircle.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtDistanceCircle.cpp @@ -56,8 +56,8 @@ Basic constructor */ vpMbtDistanceCircle::vpMbtDistanceCircle() - : name(), index(0), cam(), me(NULL), wmean(1), featureEllipse(), isTrackedCircle(true), meEllipse(NULL), circle(NULL), - radius(0.), p1(NULL), p2(NULL), p3(NULL), L(), error(), nbFeature(0), Reinit(false), hiddenface(NULL), + : name(), index(0), cam(), me(nullptr), wmean(1), featureEllipse(), isTrackedCircle(true), meEllipse(nullptr), circle(nullptr), + radius(0.), p1(nullptr), p2(nullptr), p3(nullptr), L(), error(), nbFeature(0), Reinit(false), hiddenface(nullptr), index_polygon(-1), isvisible(false) { } @@ -66,15 +66,15 @@ vpMbtDistanceCircle::vpMbtDistanceCircle() */ vpMbtDistanceCircle::~vpMbtDistanceCircle() { - if (meEllipse != NULL) + if (meEllipse != nullptr) delete meEllipse; - if (circle != NULL) + if (circle != nullptr) delete circle; - if (p1 != NULL) + if (p1 != nullptr) delete p1; - if (p2 != NULL) + if (p2 != nullptr) delete p2; - if (p3 != NULL) + if (p3 != nullptr) delete p3; } @@ -125,7 +125,7 @@ void vpMbtDistanceCircle::buildFrom(const vpPoint &_p1, const vpPoint &_p2, cons void vpMbtDistanceCircle::setMovingEdge(vpMe *_me) { me = _me; - if (meEllipse != NULL) { + if (meEllipse != nullptr) { meEllipse->setMe(me); } } @@ -138,7 +138,7 @@ void 
vpMbtDistanceCircle::setMovingEdge(vpMe *_me) \param I : The image. \param cMo : The pose of the camera used to initialize the moving edges. \param doNotTrack : If true, ME are not tracked. - \param mask: Mask image or NULL if not wanted. Mask values that are set to true are considered in the tracking. To + \param mask: Mask image or nullptr if not wanted. Mask values that are set to true are considered in the tracking. To disable a pixel, set false. \return false if an error occur, true otherwise. */ bool vpMbtDistanceCircle::initMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, @@ -244,16 +244,16 @@ void vpMbtDistanceCircle::updateMovingEdge(const vpImage &I, cons \param I : the image. \param cMo : The pose of the camera. - \param mask: Mask image or NULL if not wanted. Mask values that are set to true are considered in the tracking. To + \param mask: Mask image or nullptr if not wanted. Mask values that are set to true are considered in the tracking. To disable a pixel, set false. */ void vpMbtDistanceCircle::reinitMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpImage *mask) { - if (meEllipse != NULL) + if (meEllipse != nullptr) delete meEllipse; - meEllipse = NULL; + meEllipse = nullptr; if (!initMovingEdge(I, cMo, false, mask)) Reinit = true; @@ -317,7 +317,7 @@ std::vector > vpMbtDistanceCircle::getFeaturesForDisplay() { std::vector > features; - if (meEllipse != NULL) { + if (meEllipse != nullptr) { for (std::list::const_iterator it = meEllipse->getMeList().begin(); it != meEllipse->getMeList().end(); ++it) { vpMeSite p_me = *it; @@ -385,7 +385,7 @@ std::vector vpMbtDistanceCircle::getModelForDisplay(const vpHomogeneousM */ void vpMbtDistanceCircle::displayMovingEdges(const vpImage &I) { - if (meEllipse != NULL) { + if (meEllipse != nullptr) { meEllipse->display(I); // display the me if (vpDEBUG_ENABLE(3)) vpDisplay::flush(I); @@ -394,7 +394,7 @@ void vpMbtDistanceCircle::displayMovingEdges(const vpImage &I) void vpMbtDistanceCircle::displayMovingEdges(const vpImage &I) { - if (meEllipse != NULL) { + if (meEllipse != nullptr) { meEllipse->display(I); // display the me if (vpDEBUG_ENABLE(3)) vpDisplay::flush(I); diff --git a/modules/tracker/mbt/src/edge/vpMbtDistanceCylinder.cpp b/modules/tracker/mbt/src/edge/vpMbtDistanceCylinder.cpp index 9ba91189f4..ba13ab1220 100644 --- a/modules/tracker/mbt/src/edge/vpMbtDistanceCylinder.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtDistanceCylinder.cpp @@ -60,9 +60,9 @@ Basic constructor */ vpMbtDistanceCylinder::vpMbtDistanceCylinder() - : name(), index(0), cam(), me(NULL), wmean1(1), wmean2(1), featureline1(), featureline2(), isTrackedCylinder(true), - meline1(NULL), meline2(NULL), cercle1(NULL), cercle2(NULL), radius(0), p1(NULL), p2(NULL), L(), error(), - nbFeature(0), nbFeaturel1(0), nbFeaturel2(0), Reinit(false), c(NULL), hiddenface(NULL), index_polygon(-1), + : name(), index(0), cam(), me(nullptr), wmean1(1), wmean2(1), featureline1(), featureline2(), isTrackedCylinder(true), + meline1(nullptr), meline2(nullptr), cercle1(nullptr), cercle2(nullptr), radius(0), p1(nullptr), p2(nullptr), L(), error(), + nbFeature(0), nbFeaturel1(0), nbFeaturel2(0), Reinit(false), c(nullptr), hiddenface(nullptr), index_polygon(-1), isvisible(false) { } @@ -71,19 +71,19 @@ vpMbtDistanceCylinder::vpMbtDistanceCylinder() */ vpMbtDistanceCylinder::~vpMbtDistanceCylinder() { - if (p1 != NULL) + if (p1 != nullptr) delete p1; - if (p2 != NULL) + if (p2 != nullptr) delete p2; - if (c != NULL) + if (c != nullptr) delete c; - if (meline1 
!= NULL) + if (meline1 != nullptr) delete meline1; - if (meline2 != NULL) + if (meline2 != nullptr) delete meline2; - if (cercle1 != NULL) + if (cercle1 != nullptr) delete cercle1; - if (cercle2 != NULL) + if (cercle2 != nullptr) delete cercle2; } @@ -157,10 +157,10 @@ void vpMbtDistanceCylinder::buildFrom(const vpPoint &_p1, const vpPoint &_p2, do void vpMbtDistanceCylinder::setMovingEdge(vpMe *_me) { me = _me; - if (meline1 != NULL) { + if (meline1 != nullptr) { meline1->setMe(me); } - if (meline2 != NULL) { + if (meline2 != nullptr) { meline2->setMe(me); } } @@ -173,7 +173,7 @@ void vpMbtDistanceCylinder::setMovingEdge(vpMe *_me) \param I : The image. \param cMo : The pose of the camera used to initialize the moving edges. \param doNotTrack : If true, ME are not tracked. - \param mask: Mask image or NULL if not wanted. Mask values that are set to true are considered in the tracking. To + \param mask: Mask image or nullptr if not wanted. Mask values that are set to true are considered in the tracking. To disable a pixel, set false. \return false if an error occur, true otherwise. */ bool vpMbtDistanceCylinder::initMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, @@ -497,19 +497,19 @@ void vpMbtDistanceCylinder::updateMovingEdge(const vpImage &I, co \param I : the image. \param cMo : The pose of the camera. - \param mask: Mask image or NULL if not wanted. Mask values that are set to true are considered in the tracking. To + \param mask: Mask image or nullptr if not wanted. Mask values that are set to true are considered in the tracking. To disable a pixel, set false. */ void vpMbtDistanceCylinder::reinitMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpImage *mask) { - if (meline1 != NULL) + if (meline1 != nullptr) delete meline1; - if (meline2 != NULL) + if (meline2 != nullptr) delete meline2; - meline1 = NULL; - meline2 = NULL; + meline1 = nullptr; + meline2 = nullptr; if (!initMovingEdge(I, cMo, false, mask)) Reinit = true; @@ -575,7 +575,7 @@ std::vector > vpMbtDistanceCylinder::getFeaturesForDisplay() { std::vector > features; - if (meline1 != NULL) { + if (meline1 != nullptr) { for (std::list::const_iterator it = meline1->getMeList().begin(); it != meline1->getMeList().end(); ++it) { vpMeSite p_me = *it; @@ -586,7 +586,7 @@ std::vector > vpMbtDistanceCylinder::getFeaturesForDisplay() } } - if (meline2 != NULL) { + if (meline2 != nullptr) { for (std::list::const_iterator it = meline2->getMeList().begin(); it != meline2->getMeList().end(); ++it) { vpMeSite p_me = *it; @@ -691,20 +691,20 @@ std::vector > vpMbtDistanceCylinder::getModelForDisplay(unsi */ void vpMbtDistanceCylinder::displayMovingEdges(const vpImage &I) { - if (meline1 != NULL) { + if (meline1 != nullptr) { meline1->display(I); } - if (meline2 != NULL) { + if (meline2 != nullptr) { meline2->display(I); } } void vpMbtDistanceCylinder::displayMovingEdges(const vpImage &I) { - if (meline1 != NULL) { + if (meline1 != nullptr) { meline1->display(I); } - if (meline2 != NULL) { + if (meline2 != nullptr) { meline2->display(I); } } diff --git a/modules/tracker/mbt/src/edge/vpMbtDistanceLine.cpp b/modules/tracker/mbt/src/edge/vpMbtDistanceLine.cpp index 92651b06e4..7fdcba420c 100644 --- a/modules/tracker/mbt/src/edge/vpMbtDistanceLine.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtDistanceLine.cpp @@ -55,9 +55,9 @@ void buildLine(vpPoint &P1, vpPoint &P2, vpPoint &P3, vpPoint &P4, vpLine &L); Basic constructor */ vpMbtDistanceLine::vpMbtDistanceLine() - : name(), index(0), cam(), me(NULL), 
isTrackedLine(true), isTrackedLineWithVisibility(true), wmean(1), featureline(), - poly(), useScanLine(false), meline(), line(NULL), p1(NULL), p2(NULL), L(), error(), nbFeature(), nbFeatureTotal(0), - Reinit(false), hiddenface(NULL), Lindex_polygon(), Lindex_polygon_tracked(), isvisible(false) + : name(), index(0), cam(), me(nullptr), isTrackedLine(true), isTrackedLineWithVisibility(true), wmean(1), featureline(), + poly(), useScanLine(false), meline(), line(nullptr), p1(nullptr), p2(nullptr), L(), error(), nbFeature(), nbFeatureTotal(0), + Reinit(false), hiddenface(nullptr), Lindex_polygon(), Lindex_polygon_tracked(), isvisible(false) { } /*! @@ -65,11 +65,11 @@ vpMbtDistanceLine::vpMbtDistanceLine() */ vpMbtDistanceLine::~vpMbtDistanceLine() { - if (line != NULL) + if (line != nullptr) delete line; for (unsigned int i = 0; i < meline.size(); i++) - if (meline[i] != NULL) + if (meline[i] != nullptr) delete meline[i]; meline.clear(); @@ -162,7 +162,7 @@ void buildLine(vpPoint &P1, vpPoint &P2, vpPoint &P3, vpPoint &P4, vpLine &L) */ void vpMbtDistanceLine::buildFrom(vpPoint &_p1, vpPoint &_p2, vpUniRand &rand_gen) { - if (line == NULL) { + if (line == nullptr) { line = new vpLine; } @@ -281,7 +281,7 @@ void vpMbtDistanceLine::setMovingEdge(vpMe *_me) me = _me; for (unsigned int i = 0; i < meline.size(); i++) - if (meline[i] != NULL) { + if (meline[i] != nullptr) { // nbFeature[i] = 0; meline[i]->reset(); meline[i]->setMe(me); @@ -298,14 +298,14 @@ void vpMbtDistanceLine::setMovingEdge(vpMe *_me) \param I : The image. \param cMo : The pose of the camera used to initialize the moving edges. \param doNotTrack : If true, ME are not tracked. - \param mask: Mask image or NULL if not wanted. Mask values that are set to true are considered in the tracking. To + \param mask: Mask image or nullptr if not wanted. Mask values that are set to true are considered in the tracking. To disable a pixel, set false. \return false if an error occur, true otherwise. */ bool vpMbtDistanceLine::initMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, bool doNotTrack, const vpImage *mask) { for (unsigned int i = 0; i < meline.size(); i++) { - if (meline[i] != NULL) + if (meline[i] != nullptr) delete meline[i]; } @@ -434,7 +434,7 @@ void vpMbtDistanceLine::trackMovingEdge(const vpImage &I) } catch (...) { for (size_t i = 0; i < meline.size(); i++) { - if (meline[i] != NULL) + if (meline[i] != nullptr) delete meline[i]; } @@ -477,7 +477,7 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const if (linesLst.size() != meline.size() || linesLst.size() == 0) { for (size_t i = 0; i < meline.size(); i++) { - if (meline[i] != NULL) + if (meline[i] != nullptr) delete meline[i]; } @@ -494,7 +494,7 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const } catch (...) { for (size_t j = 0; j < meline.size(); j++) { - if (meline[j] != NULL) + if (meline[j] != nullptr) delete meline[j]; } @@ -556,7 +556,7 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const } catch (...) { for (size_t j = 0; j < meline.size(); j++) { - if (meline[j] != NULL) + if (meline[j] != nullptr) delete meline[j]; } @@ -570,7 +570,7 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const } else { for (size_t i = 0; i < meline.size(); i++) { - if (meline[i] != NULL) + if (meline[i] != nullptr) delete meline[i]; } nbFeature.clear(); @@ -589,14 +589,14 @@ void vpMbtDistanceLine::updateMovingEdge(const vpImage &I, const \param I : the image. \param cMo : The pose of the camera. 
- \param mask: Mask image or NULL if not wanted. Mask values that are set to true are considered in the tracking. To + \param mask: Mask image or nullptr if not wanted. Mask values that are set to true are considered in the tracking. To disable a pixel, set false. */ void vpMbtDistanceLine::reinitMovingEdge(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpImage *mask) { for (size_t i = 0; i < meline.size(); i++) { - if (meline[i] != NULL) + if (meline[i] != nullptr) delete meline[i]; } @@ -677,7 +677,7 @@ void vpMbtDistanceLine::display(const vpImage &I, const vpHomogeneousMat void vpMbtDistanceLine::displayMovingEdges(const vpImage &I) { for (size_t i = 0; i < meline.size(); i++) { - if (meline[i] != NULL) { + if (meline[i] != nullptr) { meline[i]->display(I); } } @@ -686,7 +686,7 @@ void vpMbtDistanceLine::displayMovingEdges(const vpImage &I) void vpMbtDistanceLine::displayMovingEdges(const vpImage &I) { for (size_t i = 0; i < meline.size(); i++) { - if (meline[i] != NULL) { + if (meline[i] != nullptr) { meline[i]->display(I); } } @@ -702,7 +702,7 @@ std::vector > vpMbtDistanceLine::getFeaturesForDisplay() for (size_t i = 0; i < meline.size(); i++) { vpMbtMeLine *me_l = meline[i]; - if (me_l != NULL) { + if (me_l != nullptr) { for (std::list::const_iterator it = me_l->getMeList().begin(); it != me_l->getMeList().end(); ++it) { vpMeSite p_me_l = *it; std::vector params = { 0, // ME diff --git a/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp b/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp index e7e81ad3f6..e670dc2744 100644 --- a/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp @@ -183,7 +183,7 @@ void vpMbtMeEllipse::initTracking(const vpImage &I, const vpImage double n11_p, double n02_p, bool doNotTrack, vpImagePoint *pt1, const vpImagePoint *pt2) { - if (pt1 != NULL && pt2 != NULL) { + if (pt1 != nullptr && pt2 != nullptr) { m_trackArc = true; } @@ -238,7 +238,7 @@ void vpMbtMeEllipse::track(const vpImage &I) { try { vpMeTracker::track(I); - if (m_mask != NULL) { + if (m_mask != nullptr) { // Expected density could be modified if some vpMeSite are no more tracked because they are outside the mask. m_expectedDensity = static_cast(list.size()); } diff --git a/modules/tracker/mbt/src/edge/vpMbtMeLine.cpp b/modules/tracker/mbt/src/edge/vpMbtMeLine.cpp index d0976d78dd..86940c13e8 100644 --- a/modules/tracker/mbt/src/edge/vpMbtMeLine.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtMeLine.cpp @@ -561,7 +561,7 @@ void vpMbtMeLine::track(const vpImage &I) { try { vpMeTracker::track(I); - if (m_mask != NULL) { + if (m_mask != nullptr) { // Expected density could be modified if some vpMeSite are no more tracked because they are outside the mask. 
expecteddensity = (double)list.size(); } diff --git a/modules/tracker/mbt/src/hybrid/vpMbEdgeKltTracker.cpp b/modules/tracker/mbt/src/hybrid/vpMbEdgeKltTracker.cpp index 0aaf3e5250..911fc9692c 100644 --- a/modules/tracker/mbt/src/hybrid/vpMbEdgeKltTracker.cpp +++ b/modules/tracker/mbt/src/hybrid/vpMbEdgeKltTracker.cpp @@ -690,9 +690,9 @@ void vpMbEdgeKltTracker::computeVVS(const vpImage &I, const unsig if (nbInfos < 4) factorMBT = 1.; - if (edge_residual != NULL) + if (edge_residual != nullptr) *edge_residual = 0; - if (klt_residual != NULL) + if (klt_residual != nullptr) *klt_residual = 0; vpHomogeneousMatrix cMoPrev; @@ -788,7 +788,7 @@ void vpMbEdgeKltTracker::computeVVS(const vpImage &I, const unsig if (!reStartFromLastIncrement) { /* robust */ if (nbrow > 3) { - if (edge_residual != NULL) { + if (edge_residual != nullptr) { *edge_residual = 0; for (unsigned int i = 0; i < R_mbt.getRows(); i++) *edge_residual += fabs(R_mbt[i]); @@ -802,7 +802,7 @@ void vpMbEdgeKltTracker::computeVVS(const vpImage &I, const unsig } if (nbInfos > 3) { - if (klt_residual != NULL) { + if (klt_residual != nullptr) { *klt_residual = 0; for (unsigned int i = 0; i < R_klt.getRows(); i++) *klt_residual += fabs(R_klt[i]); @@ -1039,7 +1039,7 @@ unsigned int vpMbEdgeKltTracker::trackFirstLoop(const vpImage &I, for (size_t a = 0; a < l->meline.size(); a++) { std::list::const_iterator itListLine; - if (l->meline[a] != NULL) { + if (l->meline[a] != nullptr) { itListLine = l->meline[a]->getMeList().begin(); for (unsigned int i = 0; i < l->nbFeature[a]; i++) { @@ -1064,7 +1064,7 @@ unsigned int vpMbEdgeKltTracker::trackFirstLoop(const vpImage &I, std::list::const_iterator itCyl1; std::list::const_iterator itCyl2; - if ((cy->meline1 != NULL || cy->meline2 != NULL)) { + if ((cy->meline1 != nullptr || cy->meline2 != nullptr)) { itCyl1 = cy->meline1->getMeList().begin(); itCyl2 = cy->meline2->getMeList().begin(); } @@ -1095,7 +1095,7 @@ unsigned int vpMbEdgeKltTracker::trackFirstLoop(const vpImage &I, double fac = 1.0; std::list::const_iterator itCir; - if (ci->meEllipse != NULL) { + if (ci->meEllipse != nullptr) { itCir = ci->meEllipse->getMeList().begin(); } @@ -1404,20 +1404,20 @@ void vpMbEdgeKltTracker::reInitModel(const vpImage &I, const std: // delete the Klt Polygon features for (std::list::const_iterator it = kltPolygons.begin(); it != kltPolygons.end(); ++it) { vpMbtDistanceKltPoints *kltpoly = *it; - if (kltpoly != NULL) { + if (kltpoly != nullptr) { delete kltpoly; } - kltpoly = NULL; + kltpoly = nullptr; } kltPolygons.clear(); for (std::list::const_iterator it = kltCylinders.begin(); it != kltCylinders.end(); ++it) { vpMbtDistanceKltCylinder *kltPolyCylinder = *it; - if (kltPolyCylinder != NULL) { + if (kltPolyCylinder != nullptr) { delete kltPolyCylinder; } - kltPolyCylinder = NULL; + kltPolyCylinder = nullptr; } kltCylinders.clear(); @@ -1425,10 +1425,10 @@ void vpMbEdgeKltTracker::reInitModel(const vpImage &I, const std: vpMbtDistanceCircle *ci; for (std::list::const_iterator it = circles_disp.begin(); it != circles_disp.end(); ++it) { ci = *it; - if (ci != NULL) { + if (ci != nullptr) { delete ci; } - ci = NULL; + ci = nullptr; } circles_disp.clear(); @@ -1443,24 +1443,24 @@ void vpMbEdgeKltTracker::reInitModel(const vpImage &I, const std: if (scales[i]) { for (std::list::const_iterator it = lines[i].begin(); it != lines[i].end(); ++it) { l = *it; - if (l != NULL) + if (l != nullptr) delete l; - l = NULL; + l = nullptr; } for (std::list::const_iterator it = cylinders[i].begin(); it != cylinders[i].end(); 
++it) { cy = *it; - if (cy != NULL) + if (cy != nullptr) delete cy; - cy = NULL; + cy = nullptr; } for (std::list::const_iterator it = circles[i].begin(); it != circles[i].end(); ++it) { ci = *it; - if (ci != NULL) + if (ci != nullptr) delete ci; - ci = NULL; + ci = nullptr; } lines[i].clear(); diff --git a/modules/tracker/mbt/src/klt/vpMbKltTracker.cpp b/modules/tracker/mbt/src/klt/vpMbKltTracker.cpp index 05f0bbce43..944a4c1ee8 100644 --- a/modules/tracker/mbt/src/klt/vpMbKltTracker.cpp +++ b/modules/tracker/mbt/src/klt/vpMbKltTracker.cpp @@ -138,30 +138,30 @@ vpMbKltTracker::~vpMbKltTracker() // delete the Klt Polygon features for (std::list::const_iterator it = kltPolygons.begin(); it != kltPolygons.end(); ++it) { vpMbtDistanceKltPoints *kltpoly = *it; - if (kltpoly != NULL) { + if (kltpoly != nullptr) { delete kltpoly; } - kltpoly = NULL; + kltpoly = nullptr; } kltPolygons.clear(); for (std::list::const_iterator it = kltCylinders.begin(); it != kltCylinders.end(); ++it) { vpMbtDistanceKltCylinder *kltPolyCylinder = *it; - if (kltPolyCylinder != NULL) { + if (kltPolyCylinder != nullptr) { delete kltPolyCylinder; } - kltPolyCylinder = NULL; + kltPolyCylinder = nullptr; } kltCylinders.clear(); // delete the structures used to display circles for (std::list::const_iterator it = circles_disp.begin(); it != circles_disp.end(); ++it) { vpMbtDistanceCircle *ci = *it; - if (ci != NULL) { + if (ci != nullptr) { delete ci; } - ci = NULL; + ci = nullptr; } circles_disp.clear(); @@ -281,30 +281,30 @@ void vpMbKltTracker::resetTracker() // delete the Klt Polygon features for (std::list::const_iterator it = kltPolygons.begin(); it != kltPolygons.end(); ++it) { vpMbtDistanceKltPoints *kltpoly = *it; - if (kltpoly != NULL) { + if (kltpoly != nullptr) { delete kltpoly; } - kltpoly = NULL; + kltpoly = nullptr; } kltPolygons.clear(); for (std::list::const_iterator it = kltCylinders.begin(); it != kltCylinders.end(); ++it) { vpMbtDistanceKltCylinder *kltPolyCylinder = *it; - if (kltPolyCylinder != NULL) { + if (kltPolyCylinder != nullptr) { delete kltPolyCylinder; } - kltPolyCylinder = NULL; + kltPolyCylinder = nullptr; } kltCylinders.clear(); // delete the structures used to display circles for (std::list::const_iterator it = circles_disp.begin(); it != circles_disp.end(); ++it) { vpMbtDistanceCircle *ci = *it; - if (ci != NULL) { + if (ci != nullptr) { delete ci; } - ci = NULL; + ci = nullptr; } circles_disp.clear(); @@ -606,7 +606,7 @@ void vpMbKltTracker::setPose(const vpImage *const I, const vpImag */ void vpMbKltTracker::setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) { - vpMbKltTracker::setPose(&I, NULL, cdMo); + vpMbKltTracker::setPose(&I, nullptr, cdMo); } /*! @@ -620,7 +620,7 @@ void vpMbKltTracker::setPose(const vpImage &I, const vpHomogeneou */ void vpMbKltTracker::setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) { - vpMbKltTracker::setPose(NULL, &I_color, cdMo); + vpMbKltTracker::setPose(nullptr, &I_color, cdMo); } /*! 
@@ -1391,30 +1391,30 @@ void vpMbKltTracker::reInitModel(const vpImage &I, const std::str // delete the Klt Polygon features for (std::list::const_iterator it = kltPolygons.begin(); it != kltPolygons.end(); ++it) { vpMbtDistanceKltPoints *kltpoly = *it; - if (kltpoly != NULL) { + if (kltpoly != nullptr) { delete kltpoly; } - kltpoly = NULL; + kltpoly = nullptr; } kltPolygons.clear(); for (std::list::const_iterator it = kltCylinders.begin(); it != kltCylinders.end(); ++it) { vpMbtDistanceKltCylinder *kltPolyCylinder = *it; - if (kltPolyCylinder != NULL) { + if (kltPolyCylinder != nullptr) { delete kltPolyCylinder; } - kltPolyCylinder = NULL; + kltPolyCylinder = nullptr; } kltCylinders.clear(); // delete the structures used to display circles for (std::list::const_iterator it = circles_disp.begin(); it != circles_disp.end(); ++it) { vpMbtDistanceCircle *ci = *it; - if (ci != NULL) { + if (ci != nullptr) { delete ci; } - ci = NULL; + ci = nullptr; } faces.reset(); diff --git a/modules/tracker/mbt/src/klt/vpMbtDistanceKltCylinder.cpp b/modules/tracker/mbt/src/klt/vpMbtDistanceKltCylinder.cpp index 68ae531fc1..a90ba01884 100644 --- a/modules/tracker/mbt/src/klt/vpMbtDistanceKltCylinder.cpp +++ b/modules/tracker/mbt/src/klt/vpMbtDistanceKltCylinder.cpp @@ -54,7 +54,7 @@ vpMbtDistanceKltCylinder::vpMbtDistanceKltCylinder() : c0Mo(), p1Ext(), p2Ext(), cylinder(), circle1(), circle2(), initPoints(), initPoints3D(), curPoints(), curPointsInd(), nbPointsCur(0), nbPointsInit(0), minNbPoint(4), enoughPoints(false), cam(), - isTrackedKltCylinder(true), listIndicesCylinderBBox(), hiddenface(NULL), useScanLine(false) + isTrackedKltCylinder(true), listIndicesCylinderBBox(), hiddenface(nullptr), useScanLine(false) { } /*! diff --git a/modules/tracker/mbt/src/klt/vpMbtDistanceKltPoints.cpp b/modules/tracker/mbt/src/klt/vpMbtDistanceKltPoints.cpp index 9c67083b1a..2671cffd87 100644 --- a/modules/tracker/mbt/src/klt/vpMbtDistanceKltPoints.cpp +++ b/modules/tracker/mbt/src/klt/vpMbtDistanceKltPoints.cpp @@ -54,8 +54,8 @@ vpMbtDistanceKltPoints::vpMbtDistanceKltPoints() : H(), N(), N_cur(), invd0(1.), cRc0_0n(), initPoints(std::map()), curPoints(std::map()), curPointsInd(std::map()), nbPointsCur(0), nbPointsInit(0), - minNbPoint(4), enoughPoints(false), dt(1.), d0(1.), cam(), isTrackedKltPoints(true), polygon(NULL), - hiddenface(NULL), useScanLine(false) + minNbPoint(4), enoughPoints(false), dt(1.), d0(1.), cam(), isTrackedKltPoints(true), polygon(nullptr), + hiddenface(nullptr), useScanLine(false) { } /*! @@ -70,7 +70,7 @@ vpMbtDistanceKltPoints::~vpMbtDistanceKltPoints() { } points that are indeed in the face. \param _tracker : ViSP OpenCV KLT Tracker. - \param mask: Mask image or NULL if not wanted. Mask values that are set to true are considered in the tracking. To + \param mask: Mask image or nullptr if not wanted. Mask values that are set to true are considered in the tracking. To disable a pixel, set false. 
*/ void vpMbtDistanceKltPoints::init(const vpKltOpencv &_tracker, const vpImage *mask) @@ -144,7 +144,7 @@ void vpMbtDistanceKltPoints::init(const vpKltOpencv &_tracker, const vpImage *mask) diff --git a/modules/tracker/mbt/src/vpMbGenericTracker.cpp b/modules/tracker/mbt/src/vpMbGenericTracker.cpp index d4b851ccef..830ff1577b 100644 --- a/modules/tracker/mbt/src/vpMbGenericTracker.cpp +++ b/modules/tracker/mbt/src/vpMbGenericTracker.cpp @@ -183,7 +183,7 @@ vpMbGenericTracker::~vpMbGenericTracker() for (std::map::iterator it = m_mapOfTrackers.begin(); it != m_mapOfTrackers.end(); ++it) { delete it->second; - it->second = NULL; + it->second = nullptr; } } @@ -1655,7 +1655,7 @@ void vpMbGenericTracker::getNbPolygon(std::map &mapOf polygon. \param index : Index of the polygon to return. - \return Pointer to the polygon index for the reference camera or NULL in + \return Pointer to the polygon index for the reference camera or nullptr in case of problem. */ vpMbtPolygon *vpMbGenericTracker::getPolygon(unsigned int index) @@ -1666,7 +1666,7 @@ vpMbtPolygon *vpMbGenericTracker::getPolygon(unsigned int index) } std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl; - return NULL; + return nullptr; } /*! @@ -1677,7 +1677,7 @@ vpMbtPolygon *vpMbGenericTracker::getPolygon(unsigned int index) \param cameraName : Name of the camera to return the polygon. \param index : Index of the polygon to return. - \return Pointer to the polygon index for the specified camera or NULL in + \return Pointer to the polygon index for the specified camera or nullptr in case of problem. */ vpMbtPolygon *vpMbGenericTracker::getPolygon(const std::string &cameraName, unsigned int index) @@ -1688,7 +1688,7 @@ vpMbtPolygon *vpMbGenericTracker::getPolygon(const std::string &cameraName, unsi } std::cerr << "The camera: " << cameraName << " does not exist!" << std::endl; - return NULL; + return nullptr; } /*! 
@@ -5449,13 +5449,13 @@ void vpMbGenericTracker::track(std::mapfirst] == NULL) { - throw vpException(vpException::fatalError, "Image pointer is NULL!"); + mapOfImages[it->first] == nullptr) { + throw vpException(vpException::fatalError, "Image pointer is nullptr!"); } if (tracker->m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) && !mapOfPointClouds[it->first]) { // mapOfPointClouds[it->first] == nullptr - throw vpException(vpException::fatalError, "Pointcloud smart pointer is NULL!"); + throw vpException(vpException::fatalError, "Pointcloud smart pointer is nullptr!"); } } @@ -5526,22 +5526,22 @@ void vpMbGenericTracker::track(std::map *> &m | KLT_TRACKER #endif ) && - mapOfImages[it->first] == NULL) { - throw vpException(vpException::fatalError, "Image pointer is NULL!"); + mapOfImages[it->first] == nullptr) { + throw vpException(vpException::fatalError, "Image pointer is nullptr!"); } else if (tracker->m_trackerType & (EDGE_TRACKER #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) | KLT_TRACKER #endif ) && - mapOfImages[it->first] != NULL) { + mapOfImages[it->first] != nullptr) { vpImageConvert::convert(*mapOfColorImages[it->first], tracker->m_I); mapOfImages[it->first] = &tracker->m_I; // update grayscale image buffer } if (tracker->m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) && !mapOfPointClouds[it->first]) { // mapOfPointClouds[it->first] == nullptr - throw vpException(vpException::fatalError, "Pointcloud smart pointer is NULL!"); + throw vpException(vpException::fatalError, "Pointcloud smart pointer is nullptr!"); } } @@ -5616,13 +5616,13 @@ void vpMbGenericTracker::track(std::mapfirst] == NULL) { - throw vpException(vpException::fatalError, "Image pointer is NULL!"); + mapOfImages[it->first] == nullptr) { + throw vpException(vpException::fatalError, "Image pointer is nullptr!"); } if (tracker->m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) && - (mapOfPointClouds[it->first] == NULL)) { - throw vpException(vpException::fatalError, "Pointcloud is NULL!"); + (mapOfPointClouds[it->first] == nullptr)) { + throw vpException(vpException::fatalError, "Pointcloud is nullptr!"); } } @@ -5697,22 +5697,22 @@ void vpMbGenericTracker::track(std::map *> &m | KLT_TRACKER #endif ) && - mapOfColorImages[it->first] == NULL) { - throw vpException(vpException::fatalError, "Image pointer is NULL!"); + mapOfColorImages[it->first] == nullptr) { + throw vpException(vpException::fatalError, "Image pointer is nullptr!"); } else if (tracker->m_trackerType & (EDGE_TRACKER #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) | KLT_TRACKER #endif ) && - mapOfColorImages[it->first] != NULL) { + mapOfColorImages[it->first] != nullptr) { vpImageConvert::convert(*mapOfColorImages[it->first], tracker->m_I); mapOfImages[it->first] = &tracker->m_I; // update grayscale image buffer } if (tracker->m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) && - (mapOfPointClouds[it->first] == NULL)) { - throw vpException(vpException::fatalError, "Pointcloud is NULL!"); + (mapOfPointClouds[it->first] == nullptr)) { + throw vpException(vpException::fatalError, "Pointcloud is nullptr!"); } } @@ -6847,24 +6847,24 @@ void vpMbGenericTracker::TrackerWrapper::reInitModel(const vpImage::const_iterator it = lines[i].begin(); it != lines[i].end(); ++it) { l = *it; - if (l != NULL) + if (l != nullptr) delete l; - l = NULL; + l = nullptr; } for 
(std::list::const_iterator it = cylinders[i].begin(); it != cylinders[i].end(); ++it) { cy = *it; - if (cy != NULL) + if (cy != nullptr) delete cy; - cy = NULL; + cy = nullptr; } for (std::list::const_iterator it = circles[i].begin(); it != circles[i].end(); ++it) { ci = *it; - if (ci != NULL) + if (ci != nullptr) delete ci; - ci = NULL; + ci = nullptr; } lines[i].clear(); @@ -6883,30 +6883,30 @@ void vpMbGenericTracker::TrackerWrapper::reInitModel(const vpImage::const_iterator it = kltPolygons.begin(); it != kltPolygons.end(); ++it) { vpMbtDistanceKltPoints *kltpoly = *it; - if (kltpoly != NULL) { + if (kltpoly != nullptr) { delete kltpoly; } - kltpoly = NULL; + kltpoly = nullptr; } kltPolygons.clear(); for (std::list::const_iterator it = kltCylinders.begin(); it != kltCylinders.end(); ++it) { vpMbtDistanceKltCylinder *kltPolyCylinder = *it; - if (kltPolyCylinder != NULL) { + if (kltPolyCylinder != nullptr) { delete kltPolyCylinder; } - kltPolyCylinder = NULL; + kltPolyCylinder = nullptr; } kltCylinders.clear(); // delete the structures used to display circles for (std::list::const_iterator it = circles_disp.begin(); it != circles_disp.end(); ++it) { ci = *it; - if (ci != NULL) { + if (ci != nullptr) { delete ci; } - ci = NULL; + ci = nullptr; } circles_disp.clear(); @@ -6917,14 +6917,14 @@ void vpMbGenericTracker::TrackerWrapper::reInitModel(const vpImage &I_color, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose, const vpHomogeneousMatrix &T) { - reInitModel(NULL, &I_color, cad_name, cMo, verbose, T); + reInitModel(nullptr, &I_color, cad_name, cMo, verbose, T); } void vpMbGenericTracker::TrackerWrapper::resetTracker() @@ -7061,12 +7061,12 @@ void vpMbGenericTracker::TrackerWrapper::setPose(const vpImage *c void vpMbGenericTracker::TrackerWrapper::setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) { - setPose(&I, NULL, cdMo); + setPose(&I, nullptr, cdMo); } void vpMbGenericTracker::TrackerWrapper::setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) { - setPose(NULL, &I_color, cdMo); + setPose(nullptr, &I_color, cdMo); } void vpMbGenericTracker::TrackerWrapper::setProjectionErrorComputation(const bool &flag) @@ -7147,12 +7147,12 @@ void vpMbGenericTracker::TrackerWrapper::track(const vpImage *con | KLT_TRACKER #endif ) && - ptr_I == NULL) { - throw vpException(vpException::fatalError, "Image pointer is NULL!"); + ptr_I == nullptr) { + throw vpException(vpException::fatalError, "Image pointer is nullptr!"); } if (m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) && !point_cloud) { // point_cloud == nullptr - throw vpException(vpException::fatalError, "Pointcloud smart pointer is NULL!"); + throw vpException(vpException::fatalError, "Pointcloud smart pointer is nullptr!"); } // Back-up cMo in case of exception diff --git a/modules/tracker/mbt/src/vpMbScanLine.cpp b/modules/tracker/mbt/src/vpMbScanLine.cpp index eda449b580..3ace72ea3f 100644 --- a/modules/tracker/mbt/src/vpMbScanLine.cpp +++ b/modules/tracker/mbt/src/vpMbScanLine.cpp @@ -62,7 +62,7 @@ vpMbScanLine::vpMbScanLine() : w(0), h(0), K(), maskBorder(0), mask(), primitive_ids(), visibility_samples(), depthTreshold(1e-06) #if defined(DEBUG_DISP) , - dispMaskDebug(NULL), dispLineDebug(NULL), linedebugImg() + dispMaskDebug(nullptr), dispLineDebug(nullptr), linedebugImg() #endif { #if defined(VISP_HAVE_X11) && defined(DEBUG_DISP) @@ -77,9 +77,9 @@ vpMbScanLine::vpMbScanLine() vpMbScanLine::~vpMbScanLine() { #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && 
defined(DEBUG_DISP) - if (dispLineDebug != NULL) + if (dispLineDebug != nullptr) delete dispLineDebug; - if (dispMaskDebug != NULL) + if (dispMaskDebug != nullptr) delete dispMaskDebug; #endif } diff --git a/modules/tracker/mbt/src/vpMbTracker.cpp b/modules/tracker/mbt/src/vpMbTracker.cpp index b4beb1e3a6..45a27cd8f4 100644 --- a/modules/tracker/mbt/src/vpMbTracker.cpp +++ b/modules/tracker/mbt/src/vpMbTracker.cpp @@ -188,7 +188,7 @@ vpMbTracker::vpMbTracker() m_projectionErrorLines(), m_projectionErrorCylinders(), m_projectionErrorCircles(), m_projectionErrorFaces(), m_projectionErrorOgreShowConfigDialog(false), m_projectionErrorMe(), m_projectionErrorKernelSize(2), m_SobelX(5, 5), m_SobelY(5, 5), m_projectionErrorDisplay(false), m_projectionErrorDisplayLength(20), - m_projectionErrorDisplayThickness(1), m_projectionErrorCam(), m_mask(NULL), m_I(), m_sodb_init_called(false), + m_projectionErrorDisplayThickness(1), m_projectionErrorCam(), m_mask(nullptr), m_I(), m_sodb_init_called(false), m_rand() { oJo.eye(); @@ -208,25 +208,25 @@ vpMbTracker::~vpMbTracker() for (std::vector::const_iterator it = m_projectionErrorLines.begin(); it != m_projectionErrorLines.end(); ++it) { vpMbtDistanceLine *l = *it; - if (l != NULL) + if (l != nullptr) delete l; - l = NULL; + l = nullptr; } for (std::vector::const_iterator it = m_projectionErrorCylinders.begin(); it != m_projectionErrorCylinders.end(); ++it) { vpMbtDistanceCylinder *cy = *it; - if (cy != NULL) + if (cy != nullptr) delete cy; - cy = NULL; + cy = nullptr; } for (std::vector::const_iterator it = m_projectionErrorCircles.begin(); it != m_projectionErrorCircles.end(); ++it) { vpMbtDistanceCircle *ci = *it; - if (ci != NULL) + if (ci != nullptr) delete ci; - ci = NULL; + ci = nullptr; } #if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION >= 2) if (m_sodb_init_called) { @@ -319,7 +319,7 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage m_cMo = last_cMo; } else { - vpDisplay *d_help = NULL; + vpDisplay *d_help = nullptr; if (I) { vpDisplay::display(*I); @@ -389,9 +389,9 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage vpImage Iref; vpImageIo::read(Iref, dispF); #if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV) - const int winXPos = I != NULL ? I->display->getWindowXPosition() : I_color->display->getWindowXPosition(); - const int winYPos = I != NULL ? I->display->getWindowYPosition() : I_color->display->getWindowYPosition(); - unsigned int width = I != NULL ? I->getWidth() : I_color->getWidth(); + const int winXPos = I != nullptr ? I->display->getWindowXPosition() : I_color->display->getWindowXPosition(); + const int winYPos = I != nullptr ? I->display->getWindowYPosition() : I_color->display->getWindowYPosition(); + unsigned int width = I != nullptr ? I->getWidth() : I_color->getWidth(); d_help->init(Iref, winXPos + (int)width + 80, winYPos, "Where to initialize..."); vpDisplay::display(Iref); vpDisplay::flush(Iref); @@ -400,9 +400,9 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage } } catch (...) 
{ - if (d_help != NULL) { + if (d_help != nullptr) { delete d_help; - d_help = NULL; + d_help = nullptr; } } #else //#ifdef VISP_HAVE_MODULE_IO @@ -545,9 +545,9 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage else savePose(poseSavingFilename); - if (d_help != NULL) { + if (d_help != nullptr) { delete d_help; - d_help = NULL; + d_help = nullptr; } } @@ -595,7 +595,7 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage void vpMbTracker::initClick(const vpImage &I, const std::string &initFile, bool displayHelp, const vpHomogeneousMatrix &T) { - initClick(&I, NULL, initFile, displayHelp, T); + initClick(&I, nullptr, initFile, displayHelp, T); } /*! @@ -632,7 +632,7 @@ void vpMbTracker::initClick(const vpImage &I, const std::string & void vpMbTracker::initClick(const vpImage &I_color, const std::string &initFile, bool displayHelp, const vpHomogeneousMatrix &T) { - initClick(NULL, &I_color, initFile, displayHelp, T); + initClick(nullptr, &I_color, initFile, displayHelp, T); } void vpMbTracker::initClick(const vpImage *const I, const vpImage *const I_color, @@ -647,7 +647,7 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage vpDisplay::flush(*I_color); } - vpDisplay *d_help = NULL; + vpDisplay *d_help = nullptr; vpPose pose; std::vector P; @@ -683,9 +683,9 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage #endif } catch (...) { - if (d_help != NULL) { + if (d_help != nullptr) { delete d_help; - d_help = NULL; + d_help = nullptr; } } } @@ -780,9 +780,9 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage vpDisplay::displayFrame(*I_color, m_cMo, m_cam, 0.05, vpColor::red); } - if (d_help != NULL) { + if (d_help != nullptr) { delete d_help; - d_help = NULL; + d_help = nullptr; } if (I) @@ -807,7 +807,7 @@ void vpMbTracker::initClick(const vpImage *const I, const vpImage void vpMbTracker::initClick(const vpImage &I, const std::vector &points3D_list, const std::string &displayFile) { - initClick(&I, NULL, points3D_list, displayFile); + initClick(&I, nullptr, points3D_list, displayFile); } /*! @@ -824,7 +824,7 @@ void vpMbTracker::initClick(const vpImage &I, const std::vector &I_color, const std::vector &points3D_list, const std::string &displayFile) { - initClick(NULL, &I_color, points3D_list, displayFile); + initClick(nullptr, &I_color, points3D_list, displayFile); } #endif //#ifdef VISP_HAVE_MODULE_GUI @@ -984,7 +984,7 @@ void vpMbTracker::initFromPoints(const vpImage *const I, const vp */ void vpMbTracker::initFromPoints(const vpImage &I, const std::string &initFile) { - initFromPoints(&I, NULL, initFile); + initFromPoints(&I, nullptr, initFile); } /*! @@ -1013,7 +1013,7 @@ void vpMbTracker::initFromPoints(const vpImage &I, const std::str */ void vpMbTracker::initFromPoints(const vpImage &I_color, const std::string &initFile) { - initFromPoints(NULL, &I_color, initFile); + initFromPoints(nullptr, &I_color, initFile); } void vpMbTracker::initFromPoints(const vpImage *const I, const vpImage *const I_color, @@ -1059,7 +1059,7 @@ void vpMbTracker::initFromPoints(const vpImage *const I, const vp void vpMbTracker::initFromPoints(const vpImage &I, const std::vector &points2D_list, const std::vector &points3D_list) { - initFromPoints(&I, NULL, points2D_list, points3D_list); + initFromPoints(&I, nullptr, points2D_list, points3D_list); } /*! 
@@ -1073,7 +1073,7 @@ void vpMbTracker::initFromPoints(const vpImage &I, const std::vec void vpMbTracker::initFromPoints(const vpImage &I_color, const std::vector &points2D_list, const std::vector &points3D_list) { - initFromPoints(NULL, &I_color, points2D_list, points3D_list); + initFromPoints(nullptr, &I_color, points2D_list, points3D_list); } void vpMbTracker::initFromPose(const vpImage *const I, const vpImage *const I_color, @@ -1135,7 +1135,7 @@ void vpMbTracker::initFromPose(const vpImage *const I, const vpIm */ void vpMbTracker::initFromPose(const vpImage &I, const std::string &initFile) { - initFromPose(&I, NULL, initFile); + initFromPose(&I, nullptr, initFile); } /*! @@ -1158,7 +1158,7 @@ void vpMbTracker::initFromPose(const vpImage &I, const std::strin */ void vpMbTracker::initFromPose(const vpImage &I_color, const std::string &initFile) { - initFromPose(NULL, &I_color, initFile); + initFromPose(nullptr, &I_color, initFile); } /*! @@ -1516,7 +1516,7 @@ void vpMbTracker::loadVRMLModel(const std::string &modelFile) if (!in.isFileVRML2()) { SoSeparator *sceneGraph = SoDB::readAll(&in); - if (sceneGraph == NULL) { /*return -1;*/ + if (sceneGraph == nullptr) { /*return -1;*/ } sceneGraph->ref(); @@ -1529,7 +1529,7 @@ void vpMbTracker::loadVRMLModel(const std::string &modelFile) } else { sceneGraphVRML2 = SoDB::readAllVRML(&in); - if (sceneGraphVRML2 == NULL) { /*return -1;*/ + if (sceneGraphVRML2 == nullptr) { /*return -1;*/ } sceneGraphVRML2->ref(); } @@ -1684,7 +1684,7 @@ void vpMbTracker::loadCAOModel(const std::string &modelFile, std::vector::max(), fileId.widen('\n')); // skip the rest of the line nbLines += caoNbrLine; - unsigned int *caoLinePoints = NULL; + unsigned int *caoLinePoints = nullptr; if (verbose || (parent && !header)) { std::lock_guard lock(g_mutex_cout); std::cout << "> " << caoNbrLine << " lines" << std::endl; @@ -2883,7 +2883,7 @@ void vpMbTracker::computeVVSCheckLevenbergMarquardt(unsigned int iter, vpColVect m_cMo = cMoPrev; error = m_error_prev; - if (w != NULL && m_w_prev != NULL) { + if (w != nullptr && m_w_prev != nullptr) { *w = *m_w_prev; } reStartFromLastIncrement = true; @@ -2911,7 +2911,7 @@ void vpMbTracker::computeVVSPoseEstimation(const bool isoJoIdentity, unsigned in mu /= 10.0; error_prev = error; - if (w != NULL && m_w_prev != NULL) + if (w != nullptr && m_w_prev != nullptr) *m_w_prev = *w; break; } @@ -2942,7 +2942,7 @@ void vpMbTracker::computeVVSPoseEstimation(const bool isoJoIdentity, unsigned in mu /= 10.0; error_prev = error; - if (w != NULL && m_w_prev != NULL) + if (w != nullptr && m_w_prev != nullptr) *m_w_prev = *w; break; } @@ -3536,7 +3536,7 @@ double vpMbTracker::computeProjectionErrorImpl(const vpImage &I, vpMbtDistanceLine *l = *it; if (l->isVisible() && l->isTracked()) { for (size_t a = 0; a < l->meline.size(); a++) { - if (l->meline[a] != NULL) { + if (l->meline[a] != nullptr) { double lineNormGradient; unsigned int lineNbFeatures; l->meline[a]->computeProjectionError(I, lineNormGradient, lineNbFeatures, m_SobelX, m_SobelY, @@ -3553,7 +3553,7 @@ double vpMbTracker::computeProjectionErrorImpl(const vpImage &I, it != m_projectionErrorCylinders.end(); ++it) { vpMbtDistanceCylinder *cy = *it; if (cy->isVisible() && cy->isTracked()) { - if (cy->meline1 != NULL) { + if (cy->meline1 != nullptr) { double cylinderNormGradient = 0; unsigned int cylinderNbFeatures = 0; cy->meline1->computeProjectionError(I, cylinderNormGradient, cylinderNbFeatures, m_SobelX, m_SobelY, @@ -3563,7 +3563,7 @@ double 
vpMbTracker::computeProjectionErrorImpl(const vpImage &I, nbFeatures += cylinderNbFeatures; } - if (cy->meline2 != NULL) { + if (cy->meline2 != nullptr) { double cylinderNormGradient = 0; unsigned int cylinderNbFeatures = 0; cy->meline2->computeProjectionError(I, cylinderNormGradient, cylinderNbFeatures, m_SobelX, m_SobelY, @@ -3578,7 +3578,7 @@ double vpMbTracker::computeProjectionErrorImpl(const vpImage &I, for (std::vector::const_iterator it = m_projectionErrorCircles.begin(); it != m_projectionErrorCircles.end(); ++it) { vpMbtDistanceCircle *c = *it; - if (c->isVisible() && c->isTracked() && c->meEllipse != NULL) { + if (c->isVisible() && c->isTracked() && c->meEllipse != nullptr) { double circleNormGradient = 0; unsigned int circleNbFeatures = 0; c->meEllipse->computeProjectionError(I, circleNormGradient, circleNbFeatures, m_SobelX, m_SobelY, @@ -3616,9 +3616,9 @@ void vpMbTracker::projectionErrorResetMovingEdges() for (std::vector::const_iterator it = m_projectionErrorLines.begin(); it != m_projectionErrorLines.end(); ++it) { for (size_t a = 0; a < (*it)->meline.size(); a++) { - if ((*it)->meline[a] != NULL) { + if ((*it)->meline[a] != nullptr) { delete (*it)->meline[a]; - (*it)->meline[a] = NULL; + (*it)->meline[a] = nullptr; } } @@ -3629,13 +3629,13 @@ void vpMbTracker::projectionErrorResetMovingEdges() for (std::vector::const_iterator it = m_projectionErrorCylinders.begin(); it != m_projectionErrorCylinders.end(); ++it) { - if ((*it)->meline1 != NULL) { + if ((*it)->meline1 != nullptr) { delete (*it)->meline1; - (*it)->meline1 = NULL; + (*it)->meline1 = nullptr; } - if ((*it)->meline2 != NULL) { + if ((*it)->meline2 != nullptr) { delete (*it)->meline2; - (*it)->meline2 = NULL; + (*it)->meline2 = nullptr; } (*it)->nbFeature = 0; @@ -3645,9 +3645,9 @@ void vpMbTracker::projectionErrorResetMovingEdges() for (std::vector::const_iterator it = m_projectionErrorCircles.begin(); it != m_projectionErrorCircles.end(); ++it) { - if ((*it)->meEllipse != NULL) { + if ((*it)->meEllipse != nullptr) { delete (*it)->meEllipse; - (*it)->meEllipse = NULL; + (*it)->meEllipse = nullptr; } (*it)->nbFeature = 0; } @@ -3686,7 +3686,7 @@ void vpMbTracker::projectionErrorInitMovingEdge(const vpImage &I, else { l->setVisible(false); for (size_t a = 0; a < l->meline.size(); a++) { - if (l->meline[a] != NULL) + if (l->meline[a] != nullptr) delete l->meline[a]; if (a < l->nbFeature.size()) l->nbFeature[a] = 0; @@ -3714,19 +3714,19 @@ void vpMbTracker::projectionErrorInitMovingEdge(const vpImage &I, if (isvisible) { cy->setVisible(true); - if (cy->meline1 == NULL || cy->meline2 == NULL) { + if (cy->meline1 == nullptr || cy->meline2 == nullptr) { if (cy->isTracked()) cy->initMovingEdge(I, _cMo, doNotTrack, m_mask); } } else { cy->setVisible(false); - if (cy->meline1 != NULL) + if (cy->meline1 != nullptr) delete cy->meline1; - if (cy->meline2 != NULL) + if (cy->meline2 != nullptr) delete cy->meline2; - cy->meline1 = NULL; - cy->meline2 = NULL; + cy->meline1 = nullptr; + cy->meline2 = nullptr; cy->nbFeature = 0; cy->nbFeaturel1 = 0; cy->nbFeaturel2 = 0; @@ -3748,16 +3748,16 @@ void vpMbTracker::projectionErrorInitMovingEdge(const vpImage &I, if (isvisible) { ci->setVisible(true); - if (ci->meEllipse == NULL) { + if (ci->meEllipse == nullptr) { if (ci->isTracked()) ci->initMovingEdge(I, _cMo, doNotTrack, m_mask); } } else { ci->setVisible(false); - if (ci->meEllipse != NULL) + if (ci->meEllipse != nullptr) delete ci->meEllipse; - ci->meEllipse = NULL; + ci->meEllipse = nullptr; ci->nbFeature = 0; } } diff --git 
a/modules/tracker/mbt/src/vpMbtXmlGenericParser.cpp b/modules/tracker/mbt/src/vpMbtXmlGenericParser.cpp index bfdc7900e5..c3707d7735 100644 --- a/modules/tracker/mbt/src/vpMbtXmlGenericParser.cpp +++ b/modules/tracker/mbt/src/vpMbtXmlGenericParser.cpp @@ -79,7 +79,7 @@ class vpMbtXmlGenericParser::Impl // https://en.cppreference.com/w/cpp/locale/setlocale // When called from Java binding, the locale seems to be changed to the default system locale // It thus mess with the parsing of numbers with pugixml and comma decimal separator environment - if (std::setlocale(LC_ALL, "C") == NULL) { + if (std::setlocale(LC_ALL, "C") == nullptr) { std::cerr << "Cannot set locale to C" << std::endl; } m_call_setlocale = false; diff --git a/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp b/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp index 2167f43c26..03273fbce1 100644 --- a/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp +++ b/modules/tracker/mbt/test/generic-with-dataset/testGenericTracker.cpp @@ -159,7 +159,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all use_color_image = true; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -172,7 +172,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -706,7 +706,7 @@ int main(int argc, const char *argv[]) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/tracker/mbt/test/generic-with-dataset/testGenericTrackerDepth.cpp b/modules/tracker/mbt/test/generic-with-dataset/testGenericTrackerDepth.cpp index bcdbe470f0..23af518084 100644 --- a/modules/tracker/mbt/test/generic-with-dataset/testGenericTrackerDepth.cpp +++ b/modules/tracker/mbt/test/generic-with-dataset/testGenericTrackerDepth.cpp @@ -137,7 +137,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all use_color_image = true; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -150,7 +150,7 @@ bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_all if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -469,7 +469,7 @@ int main(int argc, const char *argv[]) // Test if an input path is set if (opt_ipath.empty() && env_ipath.empty()) { - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << std::endl << "ERROR:" << std::endl; std::cerr << " Use -i option or set VISP_INPUT_IMAGE_PATH " << std::endl << " environment variable to specify the location of the " << std::endl diff --git a/modules/tracker/mbt/test/testTukeyEstimator.cpp b/modules/tracker/mbt/test/testTukeyEstimator.cpp index 95810fe5fd..6b9aa248fa 100644 --- a/modules/tracker/mbt/test/testTukeyEstimator.cpp +++ b/modules/tracker/mbt/test/testTukeyEstimator.cpp @@ -54,7 +54,7 @@ int main() 
double stdev = 0.5, mean = 0.0, noise_threshold = 1e-3; vpGaussRand noise(stdev, mean); - noise.seed((unsigned int)time(NULL)); + noise.seed((unsigned int)time(nullptr)); vpColVector residues_col((unsigned int)nb_elements); vpColVector weights_col, weights_col_save; diff --git a/modules/tracker/me/include/visp3/me/vpMeEllipse.h b/modules/tracker/me/include/visp3/me/vpMeEllipse.h index e71ffad56b..41b24437dc 100644 --- a/modules/tracker/me/include/visp3/me/vpMeEllipse.h +++ b/modules/tracker/me/include/visp3/me/vpMeEllipse.h @@ -257,12 +257,12 @@ class VISP_EXPORT vpMeEllipse : public vpMeTracker * \param I : Image in which the ellipse appears. * \param param : Vector with the five parameters \f$(u_c, v_c, n_{20}, n_{11}, n_{02})\f$ defining the ellipse * (expressed in pixels). - * \param pt1 : Image point defining the first extremity of the arc or NULL to track a complete ellipse. - * \param pt2 : Image point defining the second extremity of the arc or NULL to track a complete ellipse. + * \param pt1 : Image point defining the first extremity of the arc or nullptr to track a complete ellipse. + * \param pt2 : Image point defining the second extremity of the arc or nullptr to track a complete ellipse. * \param trackCircle : When true enable tracking of a circle, when false the tracking of an ellipse. */ - void initTracking(const vpImage &I, const vpColVector ¶m, vpImagePoint *pt1 = NULL, - const vpImagePoint *pt2 = NULL, bool trackCircle = false); + void initTracking(const vpImage &I, const vpColVector ¶m, vpImagePoint *pt1 = nullptr, + const vpImagePoint *pt2 = nullptr, bool trackCircle = false); /*! * Print the parameters \f$ K = {K_0, ..., K_5} \f$, the coordinates of the diff --git a/modules/tracker/me/include/visp3/me/vpMeTracker.h b/modules/tracker/me/include/visp3/me/vpMeTracker.h index 743799af19..758d3cbbaf 100644 --- a/modules/tracker/me/include/visp3/me/vpMeTracker.h +++ b/modules/tracker/me/include/visp3/me/vpMeTracker.h @@ -146,7 +146,7 @@ class VISP_EXPORT vpMeTracker : public vpTracker * Test whether the pixel is inside the mask. Mask values that are set to true * are considered in the tracking. * - * \param mask: Mask image or NULL if not wanted. Mask values that are set to true + * \param mask: Mask image or nullptr if not wanted. Mask values that are set to true * are considered in the tracking. To disable a pixel, set false. * \param i : Pixel coordinate along the rows. * \param j : Pixel coordinate along the columns. 
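A minimal caller-side sketch of the mask convention documented above. Only vpImage<bool>, getValue() and the nullptr convention come from this patch; the image size and the excluded block are arbitrary.

  unsigned int height = 480, width = 640;
  vpImage<bool> mask(height, width, true);   // true: the pixel takes part in the tracking
  for (unsigned int i = 0; i < 100 && i < height; ++i) {
    for (unsigned int j = 0; j < 100 && j < width; ++j) {
      mask[i][j] = false;                    // false: exclude the top-left block
    }
  }
  // Passing nullptr instead of &mask keeps every pixel, which matches the test
  // implemented by vpMeTracker::inMask() further down in this patch:
  //   return (mask == nullptr || mask->getValue(i, j));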
diff --git a/modules/tracker/me/src/moving-edges/vpMe.cpp b/modules/tracker/me/src/moving-edges/vpMe.cpp index f58df242ab..1ad911484c 100644 --- a/modules/tracker/me/src/moving-edges/vpMe.cpp +++ b/modules/tracker/me/src/moving-edges/vpMe.cpp @@ -328,7 +328,7 @@ static void calcul_masques(vpColVector &angle, // definitions des angles theta void vpMe::initMask() { - if (m_mask != NULL) + if (m_mask != nullptr) delete[] m_mask; m_mask = new vpMatrix[m_mask_number]; @@ -364,7 +364,7 @@ void vpMe::print() vpMe::vpMe() : m_likelihood_threshold_type(OLD_THRESHOLD), m_threshold(10000), m_mu1(0.5), m_mu2(0.5), m_min_samplestep(4), m_anglestep(1), m_mask_sign(0), m_range(4), m_sample_step(10), - m_ntotal_sample(0), m_points_to_track(500), m_mask_size(5), m_mask_number(180), m_strip(2), m_mask(NULL) + m_ntotal_sample(0), m_points_to_track(500), m_mask_size(5), m_mask_number(180), m_strip(2), m_mask(nullptr) { m_anglestep = (180 / m_mask_number); @@ -374,16 +374,16 @@ vpMe::vpMe() vpMe::vpMe(const vpMe &me) : m_likelihood_threshold_type(OLD_THRESHOLD), m_threshold(10000), m_mu1(0.5), m_mu2(0.5), m_min_samplestep(4), m_anglestep(1), m_mask_sign(0), m_range(4), m_sample_step(10), - m_ntotal_sample(0), m_points_to_track(500), m_mask_size(5), m_mask_number(180), m_strip(2), m_mask(NULL) + m_ntotal_sample(0), m_points_to_track(500), m_mask_size(5), m_mask_number(180), m_strip(2), m_mask(nullptr) { *this = me; } vpMe &vpMe::operator=(const vpMe &me) { - if (m_mask != NULL) { + if (m_mask != nullptr) { delete[] m_mask; - m_mask = NULL; + m_mask = nullptr; } m_likelihood_threshold_type = me.m_likelihood_threshold_type; @@ -407,9 +407,9 @@ vpMe &vpMe::operator=(const vpMe &me) vpMe &vpMe::operator=(const vpMe &&me) { - if (m_mask != NULL) { + if (m_mask != nullptr) { delete[] m_mask; - m_mask = NULL; + m_mask = nullptr; } m_likelihood_threshold_type = std::move(me.m_likelihood_threshold_type); m_threshold = std::move(me.m_threshold); @@ -432,9 +432,9 @@ vpMe &vpMe::operator=(const vpMe &&me) vpMe::~vpMe() { - if (m_mask != NULL) { + if (m_mask != nullptr) { delete[] m_mask; - m_mask = NULL; + m_mask = nullptr; } } diff --git a/modules/tracker/me/src/moving-edges/vpMeEllipse.cpp b/modules/tracker/me/src/moving-edges/vpMeEllipse.cpp index e21a6badac..cfc355620b 100644 --- a/modules/tracker/me/src/moving-edges/vpMeEllipse.cpp +++ b/modules/tracker/me/src/moving-edges/vpMeEllipse.cpp @@ -1081,7 +1081,7 @@ void vpMeEllipse::initTracking(const vpImage &I, const vpColVecto const vpImagePoint *pt2, bool trackCircle) { m_trackCircle = trackCircle; - if (pt1 != NULL && pt2 != NULL) { + if (pt1 != nullptr && pt2 != nullptr) { m_trackArc = true; } // useful for sample(I) : uc, vc, a, b, e, Ki, alpha1, alpha2 diff --git a/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp b/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp index d6c28a541b..4d932953d7 100644 --- a/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp +++ b/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp @@ -241,10 +241,10 @@ void vpMeNurbs::sample(const vpImage &I, bool doNotTrack) list.clear(); double u = 0.0; - vpImagePoint *pt = NULL; + vpImagePoint *pt = nullptr; vpImagePoint pt_1(-rows, -cols); while (u <= 1.0) { - if (pt != NULL) + if (pt != nullptr) delete[] pt; pt = nurbs.computeCurveDersPoint(u, 1); double delta = computeDelta(pt[1].get_i(), pt[1].get_j()); @@ -261,7 +261,7 @@ void vpMeNurbs::sample(const vpImage &I, bool doNotTrack) } u = u + step; } - if (pt != NULL) + if (pt != nullptr) delete[] pt; } @@ -286,7 +286,7 @@ void 
vpMeNurbs::updateDelta() std::list::iterator it = list.begin(); vpImagePoint Cu; - vpImagePoint *der = NULL; + vpImagePoint *der = nullptr; double step = 0.01; while (u < 1 && it != list.end()) { vpMeSite s = *it; @@ -299,7 +299,7 @@ void vpMeNurbs::updateDelta() } u -= step; - if (der != NULL) + if (der != nullptr) delete[] der; der = nurbs.computeCurveDersPoint(u, 1); // vpImagePoint toto(der[0].get_i(),der[0].get_j()); @@ -311,7 +311,7 @@ void vpMeNurbs::updateDelta() d = 1e6; d_1 = 1.5e6; } - if (der != NULL) + if (der != nullptr) delete[] der; } @@ -320,8 +320,8 @@ void vpMeNurbs::seekExtremities(const vpImage &I) int rows = (int)I.getHeight(); int cols = (int)I.getWidth(); - vpImagePoint *begin = NULL; - vpImagePoint *end = NULL; + vpImagePoint *begin = nullptr; + vpImagePoint *end = nullptr; begin = nurbs.computeCurveDersPoint(0.0, 1); end = nurbs.computeCurveDersPoint(1.0, 1); @@ -421,8 +421,8 @@ void vpMeNurbs::seekExtremities(const vpImage &I) else { list.pop_front(); } - /*if(begin != NULL)*/ delete[] begin; - /*if(end != NULL) */ delete[] end; + /*if(begin != nullptr)*/ delete[] begin; + /*if(end != nullptr) */ delete[] end; } void vpMeNurbs::seekExtremitiesCanny(const vpImage &I) @@ -432,7 +432,7 @@ void vpMeNurbs::seekExtremitiesCanny(const vpImage &I) pt = list.back(); vpImagePoint lastPoint(pt.ifloat, pt.jfloat); if (beginPtFound >= 3 && farFromImageEdge(I, firstPoint)) { - vpImagePoint *begin = NULL; + vpImagePoint *begin = nullptr; begin = nurbs.computeCurveDersPoint(0.0, 1); vpImage Isub(32, 32); // Sub image. vpImagePoint topLeft(begin[0].get_i() - 15, begin[0].get_j() - 15); @@ -552,12 +552,12 @@ void vpMeNurbs::seekExtremitiesCanny(const vpImage &I) me->setRange(memory_range); } - /* if (begin != NULL) */ delete[] begin; + /* if (begin != nullptr) */ delete[] begin; beginPtFound = 0; } if (endPtFound >= 3 && farFromImageEdge(I, lastPoint)) { - vpImagePoint *end = NULL; + vpImagePoint *end = nullptr; end = nurbs.computeCurveDersPoint(1.0, 1); vpImage Isub(32, 32); // Sub image. 
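// Aside, illustrative only (report() is a made-up overload set, not part of ViSP):
// the motivation behind the NULL -> nullptr changes in this series is type safety.
// NULL is an integer constant, so it can bind to an integer overload, while nullptr
// has its own type (std::nullptr_t) and only converts to pointer types.
void report(int id) { (void)id; }
void report(const char *name) { (void)name; }
void overloadDemo()
{
  report(nullptr); // unambiguously calls report(const char *)
  // report(NULL); // with NULL defined as 0, this would silently call report(int)
}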
@@ -682,7 +682,7 @@ void vpMeNurbs::seekExtremitiesCanny(const vpImage &I) me->setRange(memory_range); } - /* if (end != NULL) */ delete[] end; + /* if (end != nullptr) */ delete[] end; endPtFound = 0; } } @@ -702,7 +702,7 @@ void vpMeNurbs::localReSample(const vpImage &I) { int rows = (int)I.getHeight(); int cols = (int)I.getWidth(); - vpImagePoint *iP = NULL; + vpImagePoint *iP = nullptr; int n = (int)numberOfSignal(); @@ -753,9 +753,9 @@ void vpMeNurbs::localReSample(const vpImage &I) while (vpImagePoint::sqrDistance(iP[0], iPend) > vpMath::sqr(me->getSampleStep()) && u < uend) { u += 0.01; - /*if (iP!=NULL)*/ { + /*if (iP!=nullptr)*/ { delete[] iP; - iP = NULL; + iP = nullptr; } iP = nurbs.computeCurveDersPoint(u, 1); if (vpImagePoint::sqrDistance(iP[0], iP_1) > vpMath::sqr(me->getSampleStep()) && @@ -771,9 +771,9 @@ void vpMeNurbs::localReSample(const vpImage &I) } } } - /*if (iP!=NULL)*/ { + /*if (iP!=nullptr)*/ { delete[] iP; - iP = NULL; + iP = nullptr; } } } diff --git a/modules/tracker/me/src/moving-edges/vpMeTracker.cpp b/modules/tracker/me/src/moving-edges/vpMeTracker.cpp index 7ff2234fe0..111a5f2e02 100644 --- a/modules/tracker/me/src/moving-edges/vpMeTracker.cpp +++ b/modules/tracker/me/src/moving-edges/vpMeTracker.cpp @@ -55,7 +55,7 @@ void vpMeTracker::init() } vpMeTracker::vpMeTracker() - : list(), me(NULL), init_range(1), nGoodElement(0), m_mask(NULL), selectDisplay(vpMeSite::NONE) + : list(), me(nullptr), init_range(1), nGoodElement(0), m_mask(nullptr), selectDisplay(vpMeSite::NONE) #ifdef VISP_BUILD_DEPRECATED_FUNCTIONS , query_range(0), display_point(false) @@ -65,7 +65,7 @@ vpMeTracker::vpMeTracker() } vpMeTracker::vpMeTracker(const vpMeTracker &meTracker) - : vpTracker(meTracker), list(), me(NULL), init_range(1), nGoodElement(0), m_mask(NULL), selectDisplay(vpMeSite::NONE) + : vpTracker(meTracker), list(), me(nullptr), init_range(1), nGoodElement(0), m_mask(nullptr), selectDisplay(vpMeSite::NONE) #ifdef VISP_BUILD_DEPRECATED_FUNCTIONS , query_range(0), display_point(false) @@ -123,7 +123,7 @@ unsigned int vpMeTracker::totalNumberOfSignal() { return (unsigned int)list.size bool vpMeTracker::inMask(const vpImage *mask, unsigned int i, unsigned int j) { try { - return (mask == NULL || mask->getValue(i, j)); + return (mask == nullptr || mask->getValue(i, j)); } catch (vpException &) { return false; diff --git a/modules/tracker/me/src/moving-edges/vpNurbs.cpp b/modules/tracker/me/src/moving-edges/vpNurbs.cpp index c253f55ea7..5a8dd1a08f 100644 --- a/modules/tracker/me/src/moving-edges/vpNurbs.cpp +++ b/modules/tracker/me/src/moving-edges/vpNurbs.cpp @@ -53,7 +53,7 @@ vpNurbs::vpNurbs(const vpNurbs &nurbs) : vpBSpline(nurbs), weights(nurbs.weights vpImagePoint vpNurbs::computeCurvePoint(double l_u, unsigned int l_i, unsigned int l_p, std::vector &l_knots, std::vector &l_controlPoints, std::vector &l_weights) { - vpBasisFunction *N = NULL; + vpBasisFunction *N = nullptr; N = computeBasisFuns(l_u, l_i, l_p, l_knots); vpImagePoint pt; @@ -69,7 +69,7 @@ vpImagePoint vpNurbs::computeCurvePoint(double l_u, unsigned int l_i, unsigned i pt.set_i(ic / wc); pt.set_j(jc / wc); - if (N != NULL) + if (N != nullptr) delete[] N; return pt; @@ -77,7 +77,7 @@ vpImagePoint vpNurbs::computeCurvePoint(double l_u, unsigned int l_i, unsigned i vpImagePoint vpNurbs::computeCurvePoint(double u) { - vpBasisFunction *N = NULL; + vpBasisFunction *N = nullptr; N = computeBasisFuns(u); vpImagePoint pt; @@ -93,7 +93,7 @@ vpImagePoint vpNurbs::computeCurvePoint(double u) pt.set_i(ic / wc); pt.set_j(jc / 
wc); - if (N != NULL) + if (N != nullptr) delete[] N; return pt; @@ -104,7 +104,7 @@ vpMatrix vpNurbs::computeCurveDers(double l_u, unsigned int l_i, unsigned int l_ std::vector &l_weights) { vpMatrix derivate(l_der + 1, 3); - vpBasisFunction **N = NULL; + vpBasisFunction **N = nullptr; N = computeDersBasisFuns(l_u, l_i, l_p, l_der, l_knots); for (unsigned int k = 0; k <= l_der; k++) { @@ -119,7 +119,7 @@ vpMatrix vpNurbs::computeCurveDers(double l_u, unsigned int l_i, unsigned int l_ } } - if (N != NULL) { + if (N != nullptr) { for (unsigned int i = 0; i <= l_der; i++) delete[] N[i]; delete[] N; @@ -130,7 +130,7 @@ vpMatrix vpNurbs::computeCurveDers(double l_u, unsigned int l_i, unsigned int l_ vpMatrix vpNurbs::computeCurveDers(double u, unsigned int der) { vpMatrix derivate(der + 1, 3); - vpBasisFunction **N = NULL; + vpBasisFunction **N = nullptr; N = computeDersBasisFuns(u, der); for (unsigned int k = 0; k <= der; k++) { @@ -144,7 +144,7 @@ vpMatrix vpNurbs::computeCurveDers(double u, unsigned int der) } } - if (N != NULL) + if (N != nullptr) delete[] N; return derivate; diff --git a/modules/tracker/me/test/testNurbs.cpp b/modules/tracker/me/test/testNurbs.cpp index 4eceb74edc..49e853206a 100644 --- a/modules/tracker/me/test/testNurbs.cpp +++ b/modules/tracker/me/test/testNurbs.cpp @@ -121,7 +121,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -134,7 +134,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; @@ -255,13 +255,13 @@ int main(int argc, const char **argv) unsigned int i = Nurbs.findSpan(5 / 2.0); std::cout << "The knot interval number for the value u = 5/2 is : " << i << std::endl; - vpBasisFunction *N = NULL; + vpBasisFunction *N = nullptr; N = Nurbs.computeBasisFuns(5 / 2.0); std::cout << "The nonvanishing basis functions N(u=5/2) are :" << std::endl; for (unsigned int j = 0; j < Nurbs.get_p() + 1; j++) std::cout << N[j].value << std::endl; - vpBasisFunction **N2 = NULL; + vpBasisFunction **N2 = nullptr; N2 = Nurbs.computeDersBasisFuns(5 / 2.0, 2); std::cout << "The first derivatives of the basis functions N'(u=5/2) are :" << std::endl; for (unsigned int j = 0; j < Nurbs.get_p() + 1; j++) @@ -345,9 +345,9 @@ int main(int argc, const char **argv) vpDisplay::getClick(I3); } - if (N != NULL) + if (N != nullptr) delete[] N; - if (N2 != NULL) { + if (N2 != nullptr) { for (int j = 0; j <= 2; j++) delete[] N2[j]; delete[] N2; diff --git a/modules/tracker/tt/include/visp3/tt/vpTemplateTracker.h b/modules/tracker/tt/include/visp3/tt/vpTemplateTracker.h index 171ccfa9d0..68e1bb1902 100644 --- a/modules/tracker/tt/include/visp3/tt/vpTemplateTracker.h +++ b/modules/tracker/tt/include/visp3/tt/vpTemplateTracker.h @@ -145,15 +145,15 @@ class VISP_EXPORT vpTemplateTracker public: //! Default constructor. 
vpTemplateTracker() - : nbLvlPyr(0), l0Pyr(0), pyrInitialised(false), ptTemplate(NULL), ptTemplatePyr(NULL), ptTemplateInit(false), - templateSize(0), templateSizePyr(NULL), ptTemplateSelect(NULL), ptTemplateSelectPyr(NULL), - ptTemplateSelectInit(false), templateSelectSize(0), ptTemplateSupp(NULL), ptTemplateSuppPyr(NULL), - ptTemplateCompo(NULL), ptTemplateCompoPyr(NULL), zoneTracked(NULL), zoneTrackedPyr(NULL), pyr_IDes(NULL), H(), - Hdesire(), HdesirePyr(NULL), HLM(), HLMdesire(), HLMdesirePyr(NULL), HLMdesireInverse(), - HLMdesireInversePyr(NULL), G(), gain(0), thresholdGradient(0), costFunctionVerification(false), blur(false), - useBrent(false), nbIterBrent(0), taillef(0), fgG(NULL), fgdG(NULL), ratioPixelIn(0), mod_i(0), mod_j(0), + : nbLvlPyr(0), l0Pyr(0), pyrInitialised(false), ptTemplate(nullptr), ptTemplatePyr(nullptr), ptTemplateInit(false), + templateSize(0), templateSizePyr(nullptr), ptTemplateSelect(nullptr), ptTemplateSelectPyr(nullptr), + ptTemplateSelectInit(false), templateSelectSize(0), ptTemplateSupp(nullptr), ptTemplateSuppPyr(nullptr), + ptTemplateCompo(nullptr), ptTemplateCompoPyr(nullptr), zoneTracked(nullptr), zoneTrackedPyr(nullptr), pyr_IDes(nullptr), H(), + Hdesire(), HdesirePyr(nullptr), HLM(), HLMdesire(), HLMdesirePyr(nullptr), HLMdesireInverse(), + HLMdesireInversePyr(nullptr), G(), gain(0), thresholdGradient(0), costFunctionVerification(false), blur(false), + useBrent(false), nbIterBrent(0), taillef(0), fgG(nullptr), fgdG(nullptr), ratioPixelIn(0), mod_i(0), mod_j(0), nbParam(), lambdaDep(0), iterationMax(0), iterationGlobale(0), diverge(false), nbIteration(0), - useCompositionnal(false), useInverse(false), Warp(NULL), p(), dp(), X1(), X2(), dW(), BI(), dIx(), dIy(), + useCompositionnal(false), useInverse(false), Warp(nullptr), p(), dp(), X1(), X2(), dW(), BI(), dIx(), dIy(), zoneRef_() { } diff --git a/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerHeader.h b/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerHeader.h index 11c9a13f9b..23ca9507b0 100644 --- a/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerHeader.h +++ b/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerHeader.h @@ -75,7 +75,7 @@ struct vpTemplateTrackerPoint { double *dW; double *HiG; - vpTemplateTrackerPoint() : x(0), y(0), dx(0), dy(0), val(0), dW(NULL), HiG(NULL) {} + vpTemplateTrackerPoint() : x(0), y(0), dx(0), dy(0), val(0), dW(nullptr), HiG(nullptr) {} }; /*! 
\struct vpTemplateTrackerPointCompo @@ -83,7 +83,7 @@ struct vpTemplateTrackerPoint { */ struct vpTemplateTrackerPointCompo { double *dW; - vpTemplateTrackerPointCompo() : dW(NULL) {} + vpTemplateTrackerPointCompo() : dW(nullptr) {} }; #ifndef DOXYGEN_SHOULD_SKIP_THIS @@ -96,7 +96,7 @@ struct vpTemplateTrackerPointSuppMIInv { double *d2W; double *d2Wx; double *d2Wy; - vpTemplateTrackerPointSuppMIInv() : et(0), ct(0), BtInit(NULL), Bt(NULL), dBt(NULL), d2W(NULL), d2Wx(NULL), d2Wy(NULL) + vpTemplateTrackerPointSuppMIInv() : et(0), ct(0), BtInit(nullptr), Bt(nullptr), dBt(nullptr), d2W(nullptr), d2Wx(nullptr), d2Wy(nullptr) { } }; diff --git a/modules/tracker/tt/src/vpTemplateTracker.cpp b/modules/tracker/tt/src/vpTemplateTracker.cpp index 6df6e7530c..7dd4e4c17c 100644 --- a/modules/tracker/tt/src/vpTemplateTracker.cpp +++ b/modules/tracker/tt/src/vpTemplateTracker.cpp @@ -41,13 +41,13 @@ #include vpTemplateTracker::vpTemplateTracker(vpTemplateTrackerWarp *_warp) - : nbLvlPyr(1), l0Pyr(0), pyrInitialised(false), evolRMS(0), x_pos(), y_pos(), evolRMS_eps(1e-4), ptTemplate(NULL), - ptTemplatePyr(NULL), ptTemplateInit(false), templateSize(0), templateSizePyr(NULL), ptTemplateSelect(NULL), - ptTemplateSelectPyr(NULL), ptTemplateSelectInit(false), templateSelectSize(0), ptTemplateSupp(NULL), - ptTemplateSuppPyr(NULL), ptTemplateCompo(NULL), ptTemplateCompoPyr(NULL), zoneTracked(NULL), zoneTrackedPyr(NULL), - pyr_IDes(NULL), H(), Hdesire(), HdesirePyr(), HLM(), HLMdesire(), HLMdesirePyr(), HLMdesireInverse(), + : nbLvlPyr(1), l0Pyr(0), pyrInitialised(false), evolRMS(0), x_pos(), y_pos(), evolRMS_eps(1e-4), ptTemplate(nullptr), + ptTemplatePyr(nullptr), ptTemplateInit(false), templateSize(0), templateSizePyr(nullptr), ptTemplateSelect(nullptr), + ptTemplateSelectPyr(nullptr), ptTemplateSelectInit(false), templateSelectSize(0), ptTemplateSupp(nullptr), + ptTemplateSuppPyr(nullptr), ptTemplateCompo(nullptr), ptTemplateCompoPyr(nullptr), zoneTracked(nullptr), zoneTrackedPyr(nullptr), + pyr_IDes(nullptr), H(), Hdesire(), HdesirePyr(), HLM(), HLMdesire(), HLMdesirePyr(), HLMdesireInverse(), HLMdesireInversePyr(), G(), gain(1.), thresholdGradient(40), costFunctionVerification(false), blur(true), - useBrent(false), nbIterBrent(3), taillef(7), fgG(NULL), fgdG(NULL), ratioPixelIn(0), mod_i(1), mod_j(1), nbParam(0), + useBrent(false), nbIterBrent(3), taillef(7), fgG(nullptr), fgdG(nullptr), ratioPixelIn(0), mod_i(1), mod_j(1), nbParam(0), lambdaDep(0.001), iterationMax(30), iterationGlobale(0), diverge(false), nbIteration(0), useCompositionnal(true), useInverse(false), Warp(_warp), p(0), dp(), X1(), X2(), dW(), BI(), dIx(), dIy(), zoneRef_() { @@ -169,7 +169,7 @@ void vpTemplateTracker::resetTracker() } } delete[] ptTemplatePyr; - ptTemplatePyr = NULL; + ptTemplatePyr = nullptr; } if (ptTemplateCompoPyr) { @@ -182,7 +182,7 @@ void vpTemplateTracker::resetTracker() } } delete[] ptTemplateCompoPyr; - ptTemplateCompoPyr = NULL; + ptTemplateCompoPyr = nullptr; } if (ptTemplateSuppPyr) { @@ -200,7 +200,7 @@ void vpTemplateTracker::resetTracker() } } delete[] ptTemplateSuppPyr; - ptTemplateSuppPyr = NULL; + ptTemplateSuppPyr = nullptr; } if (ptTemplateSelectPyr) { @@ -209,37 +209,37 @@ void vpTemplateTracker::resetTracker() delete[] ptTemplateSelectPyr[i]; } delete[] ptTemplateSelectPyr; - ptTemplateSelectPyr = NULL; + ptTemplateSelectPyr = nullptr; } if (templateSizePyr) { delete[] templateSizePyr; - templateSizePyr = NULL; + templateSizePyr = nullptr; } if (HdesirePyr) { delete[] HdesirePyr; - HdesirePyr = NULL; 
+ HdesirePyr = nullptr; } if (HLMdesirePyr) { delete[] HLMdesirePyr; - HLMdesirePyr = NULL; + HLMdesirePyr = nullptr; } if (HLMdesireInversePyr) { delete[] HLMdesireInversePyr; - HLMdesireInversePyr = NULL; + HLMdesireInversePyr = nullptr; } if (zoneTrackedPyr) { delete[] zoneTrackedPyr; - zoneTrackedPyr = NULL; + zoneTrackedPyr = nullptr; } if (pyr_IDes) { delete[] pyr_IDes; - pyr_IDes = NULL; + pyr_IDes = nullptr; } } else { if (ptTemplateInit) { @@ -248,7 +248,7 @@ void vpTemplateTracker::resetTracker() delete[] ptTemplate[point].HiG; } delete[] ptTemplate; - ptTemplate = NULL; + ptTemplate = nullptr; ptTemplateInit = false; } if (ptTemplateCompo) { @@ -256,7 +256,7 @@ void vpTemplateTracker::resetTracker() delete[] ptTemplateCompo[point].dW; } delete[] ptTemplateCompo; - ptTemplateCompo = NULL; + ptTemplateCompo = nullptr; } if (ptTemplateSupp) { for (unsigned int point = 0; point < templateSize; point++) { @@ -268,12 +268,12 @@ void vpTemplateTracker::resetTracker() delete[] ptTemplateSupp[point].d2Wy; } delete[] ptTemplateSupp; - ptTemplateSupp = NULL; + ptTemplateSupp = nullptr; } if (ptTemplateSelectInit) { if (ptTemplateSelect) { delete[] ptTemplateSelect; - ptTemplateSelect = NULL; + ptTemplateSelect = nullptr; } } } @@ -519,10 +519,10 @@ void vpTemplateTracker::initPyramidal(unsigned int nbLvl, unsigned int l0) ptTemplateSuppPyr = new vpTemplateTrackerPointSuppMIInv *[nbLvlPyr]; ptTemplateCompoPyr = new vpTemplateTrackerPointCompo *[nbLvlPyr]; for (unsigned int i = 0; i < nbLvlPyr; i++) { - ptTemplatePyr[i] = NULL; - ptTemplateSuppPyr[i] = NULL; - ptTemplateSelectPyr[i] = NULL; - ptTemplateCompoPyr[i] = NULL; + ptTemplatePyr[i] = nullptr; + ptTemplateSuppPyr[i] = nullptr; + ptTemplateSelectPyr[i] = nullptr; + ptTemplateCompoPyr[i] = nullptr; } templateSizePyr = new unsigned int[nbLvlPyr]; HdesirePyr = new vpMatrix[nbLvlPyr]; diff --git a/modules/tracker/tt_mi/include/visp3/tt_mi/vpTemplateTrackerMI.h b/modules/tracker/tt_mi/include/visp3/tt_mi/vpTemplateTrackerMI.h index 90cdc1f4f2..5bd1206e53 100644 --- a/modules/tracker/tt_mi/include/visp3/tt_mi/vpTemplateTrackerMI.h +++ b/modules/tracker/tt_mi/include/visp3/tt_mi/vpTemplateTrackerMI.h @@ -126,8 +126,8 @@ class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker // vpTemplateTrackerMI(const vpTemplateTrackerMI &) // : vpTemplateTracker(), hessianComputation(USE_HESSIEN_NORMAL), // ApproxHessian(HESSIAN_0), lambda(0), - // temp(NULL), Prt(NULL), dPrt(NULL), Pt(NULL), Pr(NULL), d2Prt(NULL), - // PrtTout(NULL), dprtemp(NULL), PrtD(NULL), dPrtD(NULL), + // temp(nullptr), Prt(nullptr), dPrt(nullptr), Pt(nullptr), Pr(nullptr), d2Prt(nullptr), + // PrtTout(nullptr), dprtemp(nullptr), PrtD(nullptr), dPrtD(nullptr), // influBspline(0), bspline(0), Nc(0), Ncb(0), d2Ix(), d2Iy(), d2Ixy(), // MI_preEstimation(0), MI_postEstimation(0), NMI_preEstimation(0), // NMI_postEstimation(0), covarianceMatrix(), computeCovariance(false) @@ -144,8 +144,8 @@ class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker public: //! Default constructor. 
vpTemplateTrackerMI() - : vpTemplateTracker(), hessianComputation(USE_HESSIEN_NORMAL), ApproxHessian(HESSIAN_0), lambda(0), temp(NULL), - Prt(NULL), dPrt(NULL), Pt(NULL), Pr(NULL), d2Prt(NULL), PrtTout(NULL), dprtemp(NULL), PrtD(NULL), dPrtD(NULL), + : vpTemplateTracker(), hessianComputation(USE_HESSIEN_NORMAL), ApproxHessian(HESSIAN_0), lambda(0), temp(nullptr), + Prt(nullptr), dPrt(nullptr), Pt(nullptr), Pr(nullptr), d2Prt(nullptr), PrtTout(nullptr), dprtemp(nullptr), PrtD(nullptr), dPrtD(nullptr), influBspline(0), bspline(0), Nc(0), Ncb(0), d2Ix(), d2Iy(), d2Ixy(), MI_preEstimation(0), MI_postEstimation(0), NMI_preEstimation(0), NMI_postEstimation(0), covarianceMatrix(), computeCovariance(false), m_du(), m_dv(), m_A(), m_dB(), m_d2u(), m_d2v(), m_dA() diff --git a/modules/tracker/tt_mi/src/mi/vpTemplateTrackerMI.cpp b/modules/tracker/tt_mi/src/mi/vpTemplateTrackerMI.cpp index dd29cc1a0c..a66ea1eb23 100644 --- a/modules/tracker/tt_mi/src/mi/vpTemplateTrackerMI.cpp +++ b/modules/tracker/tt_mi/src/mi/vpTemplateTrackerMI.cpp @@ -77,8 +77,8 @@ void vpTemplateTrackerMI::setBspline(const vpBsplineType &newbs) } vpTemplateTrackerMI::vpTemplateTrackerMI(vpTemplateTrackerWarp *_warp) - : vpTemplateTracker(_warp), hessianComputation(USE_HESSIEN_NORMAL), ApproxHessian(HESSIAN_NEW), lambda(0), temp(NULL), - Prt(NULL), dPrt(NULL), Pt(NULL), Pr(NULL), d2Prt(NULL), PrtTout(NULL), dprtemp(NULL), PrtD(NULL), dPrtD(NULL), + : vpTemplateTracker(_warp), hessianComputation(USE_HESSIEN_NORMAL), ApproxHessian(HESSIAN_NEW), lambda(0), temp(nullptr), + Prt(nullptr), dPrt(nullptr), Pt(nullptr), Pr(nullptr), d2Prt(nullptr), PrtTout(nullptr), dprtemp(nullptr), PrtD(nullptr), dPrtD(nullptr), influBspline(0), bspline(3), Nc(8), Ncb(0), d2Ix(), d2Iy(), d2Ixy(), MI_preEstimation(0), MI_postEstimation(0), NMI_preEstimation(0), NMI_postEstimation(0), covarianceMatrix(), computeCovariance(false) { diff --git a/modules/vision/include/visp3/vision/vpHomography.h b/modules/vision/include/visp3/vision/vpHomography.h index 5aa9f40b58..f7285cf638 100644 --- a/modules/vision/include/visp3/vision/vpHomography.h +++ b/modules/vision/include/visp3/vision/vpHomography.h @@ -288,7 +288,7 @@ class VISP_EXPORT vpHomography : public vpArray2D * * \return \f$\bf H^{-1}\f$ */ - vpHomography inverse(double sv_threshold = 1e-16, unsigned int *rank = NULL) const; + vpHomography inverse(double sv_threshold = 1e-16, unsigned int *rank = nullptr) const; /*! * Invert the homography. diff --git a/modules/vision/include/visp3/vision/vpKeyPoint.h b/modules/vision/include/visp3/vision/vpKeyPoint.h index e92de87162..26ffadf8c4 100644 --- a/modules/vision/include/visp3/vision/vpKeyPoint.h +++ b/modules/vision/include/visp3/vision/vpKeyPoint.h @@ -523,7 +523,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &candidates, const std::vector &polygons, const std::vector > &roisPt, - std::vector &points, cv::Mat *descriptors = NULL); + std::vector &points, cv::Mat *descriptors = nullptr); /*! * Keep only keypoints located on faces and compute for those keypoints the 3D @@ -545,7 +545,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &candidates, const std::vector &polygons, const std::vector > &roisPt, - std::vector &points, cv::Mat *descriptors = NULL); + std::vector &points, cv::Mat *descriptors = nullptr); /*! 
* Keep only keypoints located on cylinders and compute the 3D coordinates in @@ -566,7 +566,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector &candidates, const std::vector &cylinders, const std::vector > > &vectorOfCylinderRois, - std::vector &points, cv::Mat *descriptors = NULL); + std::vector &points, cv::Mat *descriptors = nullptr); /*! * Keep only vpImagePoint located on cylinders and compute the 3D coordinates @@ -587,7 +587,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector &candidates, const std::vector &cylinders, const std::vector > > &vectorOfCylinderRois, - std::vector &points, cv::Mat *descriptors = NULL); + std::vector &points, cv::Mat *descriptors = nullptr); /*! * Compute the pose using the correspondence between 2D points and 3D points @@ -604,7 +604,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint */ bool computePose(const std::vector &imagePoints, const std::vector &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector &inlierIndex, - double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL); + double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr); /*! * Compute the pose using the correspondence between 2D points and 3D points @@ -619,7 +619,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). */ bool computePose(const std::vector &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector &inliers, - double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL); + double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr); /*! * Compute the pose using the correspondence between 2D points and 3D points @@ -636,7 +636,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint */ bool computePose(const std::vector &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector &inliers, std::vector &inlierIndex, double &elapsedTime, - bool (*func)(const vpHomogeneousMatrix &) = NULL); + bool (*func)(const vpHomogeneousMatrix &) = nullptr); /*! * Initialize the size of the matching image (case with a matching side by @@ -758,7 +758,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint */ void detectExtractAffine(const vpImage &I, std::vector > &listOfKeypoints, std::vector &listOfDescriptors, - std::vector > *listOfAffineI = NULL); + std::vector > *listOfAffineI = nullptr); /*! * Display the reference and the detected keypoints in the images. @@ -879,7 +879,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I, std::vector &keyPoints, cv::Mat &descriptors, - std::vector *trainPoints = NULL); + std::vector *trainPoints = nullptr); /*! * Extract the descriptors for each keypoints of the list. @@ -892,7 +892,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I_color, std::vector &keyPoints, cv::Mat &descriptors, - std::vector *trainPoints = NULL); + std::vector *trainPoints = nullptr); /*! * Extract the descriptors for each keypoints of the list. 
@@ -905,7 +905,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * be extracted, we need to remove the corresponding 3D point. */ void extract(const cv::Mat &matImg, std::vector &keyPoints, cv::Mat &descriptors, - std::vector *trainPoints = NULL); + std::vector *trainPoints = nullptr); /*! * Extract the descriptors for each keypoints of the list. @@ -919,7 +919,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I, std::vector &keyPoints, cv::Mat &descriptors, - double &elapsedTime, std::vector *trainPoints = NULL); + double &elapsedTime, std::vector *trainPoints = nullptr); /*! * Extract the descriptors for each keypoints of the list. @@ -933,7 +933,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I_color, std::vector &keyPoints, cv::Mat &descriptors, - double &elapsedTime, std::vector *trainPoints = NULL); + double &elapsedTime, std::vector *trainPoints = nullptr); /*! * Extract the descriptors for each keypoints of the list. @@ -947,7 +947,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const cv::Mat &matImg, std::vector &keyPoints, cv::Mat &descriptors, double &elapsedTime, - std::vector *trainPoints = NULL); + std::vector *trainPoints = nullptr); /*! * Get the covariance matrix when estimating the pose using the Virtual @@ -992,7 +992,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * Get the detector pointer. * \param type : Type of the detector. * - * \return The detector or NULL if the type passed in parameter does not + * \return The detector or nullptr if the type passed in parameter does not * exist. */ inline cv::Ptr getDetector(const vpFeatureDetectorType &type) const @@ -1018,7 +1018,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * Get the detector pointer. * \param name : Name of the detector. * - * \return The detector or NULL if the name passed in parameter does not + * \return The detector or nullptr if the name passed in parameter does not * exist. */ inline cv::Ptr getDetector(const std::string &name) const @@ -1048,7 +1048,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * Get the extractor pointer. * \param type : Type of the descriptor extractor. * - * \return The descriptor extractor or NULL if the name passed in parameter + * \return The descriptor extractor or nullptr if the name passed in parameter * does not exist. */ inline cv::Ptr getExtractor(const vpFeatureDescriptorType &type) const @@ -1074,7 +1074,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * Get the extractor pointer. * \param name : Name of the descriptor extractor. * - * \return The descriptor extractor or NULL if the name passed in parameter + * \return The descriptor extractor or nullptr if the name passed in parameter * does not exist. */ inline cv::Ptr getExtractor(const std::string &name) const @@ -1381,7 +1381,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, - bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); + bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect()); /*! 
* Match keypoints detected in the image with those built in the reference @@ -1399,7 +1399,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, - double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL, + double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect()); /*! @@ -1423,9 +1423,9 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * \return True if the object is present, false otherwise. */ bool matchPointAndDetect(const vpImage &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, - const bool isPlanarObject = true, std::vector *imPts1 = NULL, - std::vector *imPts2 = NULL, double *meanDescriptorDistance = NULL, - double *detectionScore = NULL, const vpRect &rectangle = vpRect()); + const bool isPlanarObject = true, std::vector *imPts1 = nullptr, + std::vector *imPts2 = nullptr, double *meanDescriptorDistance = nullptr, + double *detectionScore = nullptr, const vpRect &rectangle = vpRect()); /*! * Match keypoints detected in the image with those built in the reference @@ -1448,7 +1448,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint */ bool matchPointAndDetect(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint ¢erOfGravity, - bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); + bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect()); /*! * Match keypoints detected in the image with those built in the reference @@ -1495,7 +1495,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, - bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); + bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect()); /*! * Match keypoints detected in the image with those built in the reference @@ -1513,7 +1513,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, - double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL, + double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect()); /*! 
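// Illustrative only: a caller-side sketch of the optional pose-validation callback
// (bool (*func)(const vpHomogeneousMatrix &)) accepted by the matchPoint()/computePose()
// overloads above, where nullptr (the default) means "no extra test". The 0.1 m / 2 m
// range is an arbitrary example, not a ViSP default; a pose for which the callback
// returns false is discarded.
bool acceptPose(const vpHomogeneousMatrix &cMo)
{
  double tz = cMo[2][3]; // translation of the object frame along the camera z axis
  return (tz > 0.1 && tz < 2.0);
}
// Usage sketch, assuming a configured vpKeyPoint named keypoint:
//   keypoint.matchPoint(I, cam, cMo, acceptPose); // filter candidate poses
//   keypoint.matchPoint(I, cam, cMo);             // func = nullptr: no additional filtering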
@@ -1735,7 +1735,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint m_useKnn = true; #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) - if (m_matcher != NULL && m_matcherName == "BruteForce") { + if (m_matcher != nullptr && m_matcherName == "BruteForce") { // if a matcher is already initialized, disable the crossCheck // because it will not work with knnMatch m_matcher->set("crossCheck", false); @@ -1746,7 +1746,7 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint m_useKnn = false; #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) - if (m_matcher != NULL && m_matcherName == "BruteForce") { + if (m_matcher != nullptr && m_matcherName == "BruteForce") { // if a matcher is already initialized, set the crossCheck mode if // necessary m_matcher->set("crossCheck", m_useBruteForceCrossCheck); @@ -1909,10 +1909,10 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint { // Only available with BruteForce and with k=1 (i.e not used with a // ratioDistanceThreshold method) - if (m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") { + if (m_matcher != nullptr && !m_useKnn && m_matcherName == "BruteForce") { m_matcher->set("crossCheck", useCrossCheck); } - else if (m_matcher != NULL && m_useKnn && m_matcherName == "BruteForce") { + else if (m_matcher != nullptr && m_useKnn && m_matcherName == "BruteForce") { std::cout << "Warning, you try to set the crossCheck parameter with a " "BruteForce matcher but knn is enabled"; std::cout << " (the filtering method uses a ratio constraint)" << std::endl; diff --git a/modules/vision/include/visp3/vision/vpPose.h b/modules/vision/include/visp3/vision/vpPose.h index abd2644b59..fbc0101b5c 100644 --- a/modules/vision/include/visp3/vision/vpPose.h +++ b/modules/vision/include/visp3/vision/vpPose.h @@ -181,7 +181,7 @@ class VISP_EXPORT vpPose * has the smallest residual. * - vpPose::RANSAC: Robust Ransac aproach (doesn't need an initialization) */ - bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneousMatrix &) = NULL); + bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneousMatrix &) = nullptr); /*! * @brief Method that first computes the pose \b cMo using the linear approaches of Dementhon and Lagrange @@ -254,7 +254,7 @@ class VISP_EXPORT vpPose * \param p_d: if different from null, it will be set to equal the d coefficient of the potential plan. * \return true if points are coplanar false otherwise. */ - bool coplanar(int &coplanar_plane_type, double *p_a = NULL, double *p_b = NULL, double *p_c = NULL, double *p_d = NULL); + bool coplanar(int &coplanar_plane_type, double *p_a = nullptr, double *p_b = nullptr, double *p_c = nullptr, double *p_d = nullptr); /*! * Display the coordinates of the points in the image plane that are used to @@ -286,14 +286,14 @@ class VISP_EXPORT vpPose * Compute the pose of a planar object using Lagrange approach. * * \param cMo : Estimated pose. No initialisation is requested to estimate cMo. - * \param p_isPlan : if different from NULL, indicates if the object is planar or not. - * \param p_a : if different from NULL, the a coefficient of the plan formed by the points. - * \param p_b : if different from NULL, the b coefficient of the plan formed by the points. - * \param p_c : if different from NULL, the c coefficient of the plan formed by the points. - * \param p_d : if different from NULL, the d coefficient of the plan formed by the points. 
+ * \param p_isPlan : if different from nullptr, indicates if the object is planar or not. + * \param p_a : if different from nullptr, the a coefficient of the plan formed by the points. + * \param p_b : if different from nullptr, the b coefficient of the plan formed by the points. + * \param p_c : if different from nullptr, the c coefficient of the plan formed by the points. + * \param p_d : if different from nullptr, the d coefficient of the plan formed by the points. */ - void poseLagrangePlan(vpHomogeneousMatrix &cMo, bool *p_isPlan = NULL, double *p_a = NULL, double *p_b = NULL, - double *p_c = NULL, double *p_d = NULL); + void poseLagrangePlan(vpHomogeneousMatrix &cMo, bool *p_isPlan = nullptr, double *p_a = nullptr, double *p_b = nullptr, + double *p_c = nullptr, double *p_d = nullptr); /*! * Compute the pose of a non planar object using Lagrange approach. @@ -324,7 +324,7 @@ class VISP_EXPORT vpPose * The number of threads used can then be set with setNbParallelRansacThreads(). * Filter flag can be used with setRansacFilterFlag(). */ - bool poseRansac(vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneousMatrix &) = NULL); + bool poseRansac(vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneousMatrix &) = nullptr); /*! * Compute the pose using virtual visual servoing approach and @@ -522,7 +522,7 @@ class VISP_EXPORT vpPose static bool computePlanarObjectPoseFromRGBD(const vpImage &depthMap, const std::vector &corners, const vpCameraParameters &colorIntrinsics, const std::vector &point3d, vpHomogeneousMatrix &cMo, - double *confidence_index = NULL); + double *confidence_index = nullptr); /*! * Compute the pose of multiple planar object from corresponding 2D-3D point coordinates and depth map. @@ -560,7 +560,7 @@ class VISP_EXPORT vpPose const std::vector > &corners, const vpCameraParameters &colorIntrinsics, const std::vector > &point3d, - vpHomogeneousMatrix &cMo, double *confidence_index = NULL, + vpHomogeneousMatrix &cMo, double *confidence_index = nullptr, bool coplanar_points = true); /*! @@ -645,7 +645,7 @@ class VISP_EXPORT vpPose const unsigned int &numberOfInlierToReachAConsensus, const double &threshold, unsigned int &ninliers, std::vector &listInliers, vpHomogeneousMatrix &cMo, const int &maxNbTrials = 10000, bool useParallelRansac = true, unsigned int nthreads = 0, - bool (*func)(const vpHomogeneousMatrix &) = NULL); + bool (*func)(const vpHomogeneousMatrix &) = nullptr); /*! 
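Since coplanar() and poseLagrangePlan() now default all their optional output pointers to nullptr, a caller can either ignore the plane entirely or retrieve the coefficients once and forward them, as the documentation above describes. The short sketch below illustrates both signatures; the surrounding vpPose set-up (points added with addPoint()/addPoints()) is assumed.

#include <visp3/vision/vpPose.h>

// Compute the pose of a planar object with the Lagrange method, reusing the plane
// coefficients returned by coplanar() so that poseLagrangePlan() does not recompute them.
void lagrangePlanarPose(vpPose &pose, vpHomogeneousMatrix &cMo)
{
  int plane_type = 0;
  double a = 0., b = 0., c = 0., d = 0.;
  bool is_planar = pose.coplanar(plane_type, &a, &b, &c, &d); // optional outputs requested here
  if (is_planar) {
    pose.poseLagrangePlan(cMo, &is_planar, &a, &b, &c, &d);   // coefficients forwarded, not recomputed
  }
}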
* Carries out the camera pose the image of a rectangle and diff --git a/modules/vision/include/visp3/vision/vpPoseFeatures.h b/modules/vision/include/visp3/vision/vpPoseFeatures.h index 5a70f55e9c..0a621cccab 100644 --- a/modules/vision/include/visp3/vision/vpPoseFeatures.h +++ b/modules/vision/include/visp3/vision/vpPoseFeatures.h @@ -573,7 +573,7 @@ class VISP_EXPORT vpPoseFeatures { FeatureType *desiredFeature; FirstParamType firstParam; - vpDuo() : desiredFeature(NULL), firstParam() { } + vpDuo() : desiredFeature(nullptr), firstParam() { } }; template struct vpTrio @@ -582,7 +582,7 @@ class VISP_EXPORT vpPoseFeatures FirstParamType firstParam; SecondParamType secondParam; - vpTrio() : desiredFeature(NULL), firstParam(), secondParam() { } + vpTrio() : desiredFeature(nullptr), firstParam(), secondParam() { } }; #endif //#ifndef DOXYGEN_SHOULD_SKIP_THIS diff --git a/modules/vision/src/homography-estimation/vpHomography.cpp b/modules/vision/src/homography-estimation/vpHomography.cpp index 0e88aae60c..ea60522fea 100644 --- a/modules/vision/src/homography-estimation/vpHomography.cpp +++ b/modules/vision/src/homography-estimation/vpHomography.cpp @@ -125,7 +125,7 @@ vpHomography vpHomography::inverse(double sv_threshold, unsigned int *rank) cons vpMatrix M = (*this).convert(); vpMatrix Minv; unsigned int r = M.pseudoInverse(Minv, sv_threshold); - if (rank != NULL) { + if (rank != nullptr) { *rank = r; } diff --git a/modules/vision/src/homography-estimation/vpHomographyRansac.cpp b/modules/vision/src/homography-estimation/vpHomographyRansac.cpp index 891e494cf2..dfb1ad5a6d 100644 --- a/modules/vision/src/homography-estimation/vpHomographyRansac.cpp +++ b/modules/vision/src/homography-estimation/vpHomographyRansac.cpp @@ -293,7 +293,7 @@ bool vpHomography::ransac(const std::vector &xb, const std::vector best_consensus; std::vector cur_consensus; diff --git a/modules/vision/src/key-point/vpKeyPoint.cpp b/modules/vision/src/key-point/vpKeyPoint.cpp index ce49956d51..994b18ec01 100644 --- a/modules/vision/src/key-point/vpKeyPoint.cpp +++ b/modules/vision/src/key-point/vpKeyPoint.cpp @@ -485,7 +485,7 @@ void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, co vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt); points.push_back(pt); - if (descriptors != NULL) { + if (descriptors != nullptr) { desc.push_back(descriptors->row((int)it2->second)); } @@ -498,7 +498,7 @@ void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, co } } - if (descriptors != NULL) { + if (descriptors != nullptr) { desc.copyTo(*descriptors); } } @@ -531,7 +531,7 @@ void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, co vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt); points.push_back(pt); - if (descriptors != NULL) { + if (descriptors != nullptr) { desc.push_back(descriptors->row((int)it2->second)); } @@ -590,7 +590,7 @@ void vpKeyPoint::compute3DForPointsOnCylinders( pt.setWorldCoordinates(point_obj); points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ())); - if (descriptors != NULL) { + if (descriptors != nullptr) { desc.push_back(descriptors->row((int)cpt_keypoint)); } @@ -601,7 +601,7 @@ void vpKeyPoint::compute3DForPointsOnCylinders( } } - if (descriptors != NULL) { + if (descriptors != nullptr) { desc.copyTo(*descriptors); } } @@ -650,7 +650,7 @@ void vpKeyPoint::compute3DForPointsOnCylinders( pt.setWorldCoordinates(point_obj); points.push_back(pt); - if (descriptors != NULL) { + if 
(descriptors != nullptr) { desc.push_back(descriptors->row((int)cpt_keypoint)); } @@ -661,7 +661,7 @@ void vpKeyPoint::compute3DForPointsOnCylinders( } } - if (descriptors != NULL) { + if (descriptors != nullptr) { desc.copyTo(*descriptors); } } @@ -738,7 +738,7 @@ bool vpKeyPoint::computePose(const std::vector &imagePoints, const vpThetaUVector thetaUVector(rvec.at(0), rvec.at(1), rvec.at(2)); cMo = vpHomogeneousMatrix(translationVec, thetaUVector); - if (func != NULL) { + if (func != nullptr) { // Check the final pose returned by solvePnPRansac to discard // solutions which do not respect the pose criterion. if (!func(cMo)) { @@ -809,7 +809,7 @@ bool vpKeyPoint::computePose(const std::vector &objectVpPoints, vpHomog return false; } - // if(func != NULL && isRansacPoseEstimationOk) { + // if(func != nullptr && isRansacPoseEstimationOk) { // //Check the final pose returned by the Ransac VVS pose estimation as // in rare some cases // //we can converge toward a final cMo that does not respect the pose @@ -1088,7 +1088,7 @@ void vpKeyPoint::displayMatching(const vpImage &IRef, vpImage queryImageKeyPoints; @@ -1115,7 +1115,7 @@ void vpKeyPoint::displayMatching(const vpImage &IRef, vpImage queryImageKeyPoints; @@ -1142,7 +1142,7 @@ void vpKeyPoint::displayMatching(const vpImage &IRef, vpImage &I unsigned int lineThickness, const vpColor &color) { bool randomColor = (color == vpColor::none); - srand((unsigned int)time(NULL)); + srand((unsigned int)time(nullptr)); vpColor currentColor = color; std::vector queryImageKeyPoints; @@ -1440,7 +1440,7 @@ void vpKeyPoint::extract(const cv::Mat &matImg, std::vector &keyPo if (first) { first = false; // Check if we have 3D object points information - if (trainPoints != NULL && !trainPoints->empty()) { + if (trainPoints != nullptr && !trainPoints->empty()) { // Copy the input list of keypoints, keypoints that cannot be computed // are removed in the function compute std::vector keyPoints_tmp = keyPoints; @@ -1499,7 +1499,7 @@ void vpKeyPoint::extract(const cv::Mat &matImg, std::vector &keyPo cv::Mat descriptors_tmp; for (std::vector::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) { if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) { - if (trainPoints != NULL && !trainPoints->empty()) { + if (trainPoints != nullptr && !trainPoints->empty()) { trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]); } @@ -1509,7 +1509,7 @@ void vpKeyPoint::extract(const cv::Mat &matImg, std::vector &keyPo } } - if (trainPoints != NULL) { + if (trainPoints != nullptr) { // Copy trainPoints_tmp to m_trainPoints *trainPoints = trainPoints_tmp; } @@ -1733,7 +1733,7 @@ void vpKeyPoint::initDetector(const std::string &detectorName) #if (VISP_HAVE_OPENCV_VERSION < 0x030000) m_detectors[detectorName] = cv::FeatureDetector::create(detectorName); - if (m_detectors[detectorName] == NULL) { + if (m_detectors[detectorName] == nullptr) { std::stringstream ss_msg; ss_msg << "Fail to initialize the detector: " << detectorName << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << "."; @@ -2249,7 +2249,7 @@ void vpKeyPoint::initMatcher(const std::string &matcherName) } #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) - if (m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") { + if (m_matcher != nullptr && !m_useKnn && matcherName == "BruteForce") { m_matcher->set("crossCheck", m_useBruteForceCrossCheck); } #endif @@ -3268,7 +3268,7 @@ 
bool vpKeyPoint::matchPointAndDetect(const vpImage &I, vpRect &bo std::vector *imPts1, std::vector *imPts2, double *meanDescriptorDistance, double *detectionScore, const vpRect &rectangle) { - if (imPts1 != NULL && imPts2 != NULL) { + if (imPts1 != nullptr && imPts2 != nullptr) { imPts1->clear(); imPts2->clear(); } @@ -3283,10 +3283,10 @@ bool vpKeyPoint::matchPointAndDetect(const vpImage &I, vpRect &bo meanDescriptorDistanceTmp /= (double)m_filteredMatches.size(); double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp; - if (meanDescriptorDistance != NULL) { + if (meanDescriptorDistance != nullptr) { *meanDescriptorDistance = meanDescriptorDistanceTmp; } - if (detectionScore != NULL) { + if (detectionScore != nullptr) { *detectionScore = score; } @@ -3323,11 +3323,11 @@ bool vpKeyPoint::matchPointAndDetect(const vpImage &I, vpRect &bo if (reprojectionError < 6.0) { inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x)); - if (imPts1 != NULL) { + if (imPts1 != nullptr) { imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x)); } - if (imPts2 != NULL) { + if (imPts2 != nullptr) { imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x)); } } @@ -3341,11 +3341,11 @@ bool vpKeyPoint::matchPointAndDetect(const vpImage &I, vpRect &bo if (fundamentalInliers.at((int)i, 0)) { inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x)); - if (imPts1 != NULL) { + if (imPts1 != nullptr) { imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x)); } - if (imPts2 != NULL) { + if (imPts2 != nullptr) { imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x)); } } @@ -3449,7 +3449,7 @@ void vpKeyPoint::detectExtractAffine(const vpImage &I, affineSkew(t, phi, timg, mask, Ai); - if (listOfAffineI != NULL) { + if (listOfAffineI != nullptr) { cv::Mat img_disp; bitwise_and(mask, timg, img_disp); vpImage tI; @@ -3502,7 +3502,7 @@ void vpKeyPoint::detectExtractAffine(const vpImage &I, listOfKeypoints.resize(listOfAffineParams.size()); listOfDescriptors.resize(listOfAffineParams.size()); - if (listOfAffineI != NULL) { + if (listOfAffineI != nullptr) { listOfAffineI->resize(listOfAffineParams.size()); } @@ -3518,7 +3518,7 @@ void vpKeyPoint::detectExtractAffine(const vpImage &I, affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai); - if (listOfAffineI != NULL) { + if (listOfAffineI != nullptr) { cv::Mat img_disp; bitwise_and(mask, timg, img_disp); vpImage tI; diff --git a/modules/vision/src/pose-estimation/vpPose.cpp b/modules/vision/src/pose-estimation/vpPose.cpp index 0d281d338b..b81f76f9bd 100644 --- a/modules/vision/src/pose-estimation/vpPose.cpp +++ b/modules/vision/src/pose-estimation/vpPose.cpp @@ -261,21 +261,21 @@ bool vpPose::coplanar(int &coplanar_plane_type, double *p_a, double *p_b, double vpDEBUG_TRACE(10, " points are coplanar "); // vpTRACE(" points are coplanar ") ; - // If the points are coplanar and the input/output parameters are different from NULL, + // If the points are coplanar and the input/output parameters are different from nullptr, // getting the values of the plan coefficient and storing in the input/output parameters - if (p_a != NULL) { + if (p_a != nullptr) { *p_a = a; } - if (p_b != NULL) { + if (p_b != nullptr) { *p_b = b; } - if (p_c != NULL) { + if (p_c != nullptr) { *p_c = c; } - if (p_d != NULL) { + if (p_d != nullptr) { *p_d = d; } diff --git 
a/modules/vision/src/pose-estimation/vpPoseLagrange.cpp b/modules/vision/src/pose-estimation/vpPoseLagrange.cpp index 33dbe8eea7..56f5bbd847 100644 --- a/modules/vision/src/pose-estimation/vpPoseLagrange.cpp +++ b/modules/vision/src/pose-estimation/vpPoseLagrange.cpp @@ -252,9 +252,9 @@ void vpPose::poseLagrangePlan(vpHomogeneousMatrix &cMo, bool *p_isPlan, double * double a, b, c, d; // Checking if coplanar has already been called and if the plan coefficients have been given - if ((p_isPlan != NULL) && (p_a != NULL) && (p_b != NULL) && (p_c != NULL) && (p_d != NULL)) { + if ((p_isPlan != nullptr) && (p_a != nullptr) && (p_b != nullptr) && (p_c != nullptr) && (p_d != nullptr)) { if (*p_isPlan) { - // All the pointers towards the plan coefficients are different from NULL => using them in the rest of the method + // All the pointers towards the plan coefficients are different from nullptr => using them in the rest of the method a = *p_a; b = *p_b; c = *p_c; @@ -266,7 +266,7 @@ void vpPose::poseLagrangePlan(vpHomogeneousMatrix &cMo, bool *p_isPlan, double * } } else { - // At least one of the coefficient is a NULL pointer => calling coplanar by ourselves + // At least one of the coefficient is a nullptr pointer => calling coplanar by ourselves int coplanarType; bool areCoplanar = coplanar(coplanarType, &a, &b, &c, &d); if (!areCoplanar) { diff --git a/modules/vision/src/pose-estimation/vpPoseRGBD.cpp b/modules/vision/src/pose-estimation/vpPoseRGBD.cpp index 56d813fead..3dbf651bf0 100644 --- a/modules/vision/src/pose-estimation/vpPoseRGBD.cpp +++ b/modules/vision/src/pose-estimation/vpPoseRGBD.cpp @@ -155,7 +155,7 @@ bool vpPose::computePlanarObjectPoseFromRGBD(const vpImage &depthMap, con point3d.size(), corners.size())); } std::vector pose_points; - if (confidence_index != NULL) { + if (confidence_index != nullptr) { *confidence_index = 0.0; } @@ -223,7 +223,7 @@ bool vpPose::computePlanarObjectPoseFromRGBD(const vpImage &depthMap, con vpPose pose; pose.addPoints(pose_points); if (pose.computePose(vpPose::VIRTUAL_VS, cMo)) { - if (confidence_index != NULL) { + if (confidence_index != nullptr) { *confidence_index = std::min(1.0, normalized_weights * static_cast(nb_points_3d) / polygon.getArea()); } return true; @@ -247,7 +247,7 @@ bool vpPose::computePlanarObjectPoseFromRGBD(const vpImage &depthMap, point3d.size(), corners.size())); } std::vector pose_points; - if (confidence_index != NULL) { + if (confidence_index != nullptr) { *confidence_index = 0.0; } @@ -418,7 +418,7 @@ bool vpPose::computePlanarObjectPoseFromRGBD(const vpImage &depthMap, vpPose pose; pose.addPoints(pose_points); if (pose.computePose(vpPose::VIRTUAL_VS, cMo)) { - if (confidence_index != NULL) { + if (confidence_index != nullptr) { *confidence_index = std::min(1.0, normalized_weights * static_cast(nb_points_3d) / totalArea); } return true; diff --git a/modules/vision/src/pose-estimation/vpPoseRansac.cpp b/modules/vision/src/pose-estimation/vpPoseRansac.cpp index cd470e4ec4..7008fe4832 100644 --- a/modules/vision/src/pose-estimation/vpPoseRansac.cpp +++ b/modules/vision/src/pose-estimation/vpPoseRansac.cpp @@ -220,7 +220,7 @@ bool vpPose::vpRansacFunctor::poseRansacImpl() // Filter the pose using some criterion (orientation angles, // translations, etc.) 
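The m_func pointer tested in the hunk above is the user-supplied pose criterion forwarded from computePose()/poseRansac(); with the default now nullptr, the filter is simply skipped when no criterion is given. A possible criterion has the signature sketched below; the depth bounds are purely illustrative, not values used anywhere in ViSP.

#include <visp3/vision/vpPose.h>

// Example pose criterion: accept only poses whose translation along the optical axis
// is positive and below 2 m. Returning false makes RANSAC discard the candidate pose.
bool poseInFrontOfCamera(const vpHomogeneousMatrix &cMo)
{
  double tz = cMo[2][3];
  return (tz > 0.0) && (tz < 2.0);
}

// Typical use, assuming a vpPose object already filled with addPoint()/addPoints():
//   vpHomogeneousMatrix cMo;
//   pose.computePose(vpPose::RANSAC, cMo, &poseInFrontOfCamera);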
bool isPoseValid = true; - if (m_func != NULL) { + if (m_func != nullptr) { isPoseValid = m_func(cMo_tmp); if (isPoseValid) { m_cMo = cMo_tmp; @@ -378,7 +378,7 @@ bool vpPose::poseRansac(vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneo int splitTrials = ransacMaxTrials / nbThreads; for (size_t i = 0; i < (size_t)nbThreads; i++) { - unsigned int initial_seed = (unsigned int)i; //((unsigned int) time(NULL) ^ i); + unsigned int initial_seed = (unsigned int)i; //((unsigned int) time(nullptr) ^ i); if (i < (size_t)nbThreads - 1) { ransacWorkers.emplace_back(cMo, ransacNbInlierConsensus, splitTrials, ransacThreshold, initial_seed, checkDegeneratePoints, listOfUniquePoints, func); @@ -476,7 +476,7 @@ bool vpPose::poseRansac(vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneo // In some rare cases, the final pose could not respect the pose // criterion even if the 4 minimal points picked respect the pose // criterion. - if (func != NULL && !func(cMo)) { + if (func != nullptr && !func(cMo)) { return false; } diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint-2.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint-2.cpp index 4234bd2f76..8c0391e565 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint-2.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint-2.cpp @@ -120,7 +120,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, use_parallel_ransac = true; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -133,7 +133,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint-3.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint-3.cpp index 1587b3c40f..1040f85c2f 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint-3.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint-3.cpp @@ -114,7 +114,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -127,7 +127,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint-4.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint-4.cpp index 012ed3ed94..55282dc3f3 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint-4.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint-4.cpp @@ -116,7 +116,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -129,7 +129,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; 
return false; diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp index 2a8297ce4a..d896830b4e 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint-5.cpp @@ -117,7 +117,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -130,7 +130,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint-6.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint-6.cpp index 3fe0ee74cb..d8fd45b188 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint-6.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint-6.cpp @@ -110,7 +110,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -123,7 +123,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint-7.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint-7.cpp index 611e1f5b15..e1143220c8 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint-7.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint-7.cpp @@ -108,7 +108,7 @@ bool getOptions(int argc, const char **argv, std::string &opath, const std::stri opath = optarg_; break; case 'h': - usage(argv[0], NULL, opath, user); + usage(argv[0], nullptr, opath, user); return false; break; @@ -123,7 +123,7 @@ bool getOptions(int argc, const char **argv, std::string &opath, const std::stri if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL, opath, user); + usage(argv[0], nullptr, opath, user); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/vision/test/keypoint-with-dataset/testKeyPoint.cpp b/modules/vision/test/keypoint-with-dataset/testKeyPoint.cpp index 3771d0c945..7240c1e375 100644 --- a/modules/vision/test/keypoint-with-dataset/testKeyPoint.cpp +++ b/modules/vision/test/keypoint-with-dataset/testKeyPoint.cpp @@ -113,7 +113,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) display = false; break; case 'h': - usage(argv[0], NULL); + usage(argv[0], nullptr); return false; break; @@ -126,7 +126,7 @@ bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display) if ((c == 1) || (c == -1)) { // standalone param or error - usage(argv[0], NULL); + usage(argv[0], nullptr); std::cerr << "ERROR: " << std::endl; std::cerr << " Bad argument " << optarg_ << std::endl << std::endl; return false; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h 
b/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h index 2517506534..0881c74a3e 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h @@ -140,7 +140,7 @@ class vpMoment; * //more than a shortcut to the vpMomentObject) * bm.linkTo(mdb); //add basic moment to moment database * - * vpFeatureMomentBasic fmb(mdb,0,0,1,NULL); + * vpFeatureMomentBasic fmb(mdb,0,0,1,nullptr); * * //update and compute the vpMoment BEFORE doing any operations with vpFeatureMoment * bm.update(obj); @@ -169,8 +169,8 @@ class VISP_EXPORT vpFeatureMoment : public vpBasicFeature // private: //#ifndef DOXYGEN_SHOULD_SKIP_THIS // vpFeatureMoment(const vpFeatureMoment &fm) - // : vpBasicFeature(), moment(NULL), moments(fm.moments), - // featureMomentsDataBase(NULL), + // : vpBasicFeature(), moment(nullptr), moments(fm.moments), + // featureMomentsDataBase(nullptr), // interaction_matrices(), A(0), B(0), C(0) // { // throw vpException(vpException::functionNotImplementedError, "Not @@ -198,8 +198,8 @@ class VISP_EXPORT vpFeatureMoment : public vpBasicFeature * number of lines by this parameter. */ vpFeatureMoment(vpMomentDatabase &data_base, double A_ = 0.0, double B_ = 0.0, double C_ = 0.0, - vpFeatureMomentDatabase *featureMoments = NULL, unsigned int nbmatrices = 1) - : vpBasicFeature(), moment(NULL), moments(data_base), featureMomentsDataBase(featureMoments), + vpFeatureMomentDatabase *featureMoments = nullptr, unsigned int nbmatrices = 1) + : vpBasicFeature(), moment(nullptr), moments(data_base), featureMomentsDataBase(featureMoments), interaction_matrices(nbmatrices), A(A_), B(B_), C(C_), _name() { } @@ -265,12 +265,12 @@ class VISP_EXPORT vpMomentGenericFeature : public vpFeatureMoment /*! * No specific moment name. */ - const char *momentName() const { return NULL; } + const char *momentName() const { return nullptr; } /*! * No specific feature name. */ - virtual const char *name() const { return NULL; } + virtual const char *name() const { return nullptr; } }; #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h index f8214c084d..0c5966cf5d 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h @@ -110,7 +110,7 @@ class VISP_EXPORT vpFeatureMomentAlpha : public vpFeatureMoment * \param featureMoments : Feature database. */ vpFeatureMomentAlpha(vpMomentDatabase &data_base, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) { } diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h index 08d16431f9..cee3586df5 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h @@ -65,7 +65,7 @@ class VISP_EXPORT vpFeatureMomentArea : public vpFeatureMoment * \param featureMoments : Feature database. 
*/ vpFeatureMomentArea(vpMomentDatabase &data_base, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) { } diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h index 4cc159960b..dfaf6fc045 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h @@ -91,7 +91,7 @@ class VISP_EXPORT vpFeatureMomentAreaNormalized : public vpFeatureMoment * \param featureMoments : Feature database. */ vpFeatureMomentAreaNormalized(vpMomentDatabase &database, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 1) { } void compute_interaction() override; @@ -186,7 +186,7 @@ class VISP_EXPORT vpFeatureMomentAreaNormalized : public vpFeatureMoment * \param featureMoments : Feature database. */ vpFeatureMomentAreaNormalized(vpMomentDatabase &data_base, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) { } void compute_interaction() override; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h index 9a8855bb27..02195d8ea5 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h @@ -80,7 +80,7 @@ class VISP_EXPORT vpFeatureMomentBasic : public vpFeatureMoment public: vpFeatureMomentBasic(vpMomentDatabase &moments, double A, double B, double C, - vpFeatureMomentDatabase *featureMoments = NULL); + vpFeatureMomentDatabase *featureMoments = nullptr); void compute_interaction() override; #ifndef DOXYGEN_SHOULD_SKIP_THIS diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h index 466fd87c58..0f5b5da2de 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h @@ -100,7 +100,7 @@ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment * \param featureMoments : Feature database. */ vpFeatureMomentCInvariant(vpMomentDatabase &moments, double A, double B, double C, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(moments, A, B, C, featureMoments, 16) { } void compute_interaction() override; @@ -238,7 +238,7 @@ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment * \param featureMoments : Feature database. 
*/ vpFeatureMomentCInvariant(vpMomentDatabase &data_base, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 16), LI(16) { } diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h index d48712d45d..8762be8960 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h @@ -84,7 +84,7 @@ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment public: vpFeatureMomentCentered(vpMomentDatabase &moments, double A, double B, double C, - vpFeatureMomentDatabase *featureMoments = NULL); + vpFeatureMomentDatabase *featureMoments = nullptr); void compute_interaction() override; #ifndef DOXYGEN_SHOULD_SKIP_THIS diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h index c4856154eb..858f5b1a93 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h @@ -155,7 +155,7 @@ class VISP_EXPORT vpFeatureMomentDatabase struct vpCmpStr_t { bool operator()(const char *a, const char *b) const { return std::strcmp(a, b) < 0; } - char *operator=(const char *) { return NULL; } // Only to avoid a warning under Visual with /Wall flag + char *operator=(const char *) { return nullptr; } // Only to avoid a warning under Visual with /Wall flag }; std::map featureMomentsDataBase; void add(vpFeatureMoment &featureMoment, char *name); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h index 28c148890a..8958915f68 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h @@ -157,7 +157,7 @@ class VISP_EXPORT vpFeatureMomentGravityCenter : public vpFeatureMoment * \param featureMoments : Feature database. */ vpFeatureMomentGravityCenter(vpMomentDatabase &database, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 2) { } @@ -229,7 +229,7 @@ class VISP_EXPORT vpFeatureMomentGravityCenter : public vpFeatureMoment * \param featureMoments : Feature database. 
*/ vpFeatureMomentGravityCenter(vpMomentDatabase &data_base, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 2) { } diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h index eb408e2b97..c75dc0d401 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h @@ -98,7 +98,7 @@ class VISP_EXPORT vpFeatureMomentGravityCenterNormalized : public vpFeatureMomen * \param featureMoments : Feature database. */ vpFeatureMomentGravityCenterNormalized(vpMomentDatabase &database, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 2) { } void compute_interaction() override; @@ -252,7 +252,7 @@ class VISP_EXPORT vpFeatureMomentGravityCenterNormalized : public vpFeatureMomen * \param featureMoments : Feature database. */ vpFeatureMomentGravityCenterNormalized(vpMomentDatabase &data_base, double A_, double B_, double C_, - vpFeatureMomentDatabase *featureMoments = NULL) + vpFeatureMomentDatabase *featureMoments = nullptr) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 2) { } void compute_interaction() override; diff --git a/modules/visual_features/src/visual-feature/vpBasicFeature.cpp b/modules/visual_features/src/visual-feature/vpBasicFeature.cpp index cb3b1a6be3..1f219c43b9 100644 --- a/modules/visual_features/src/visual-feature/vpBasicFeature.cpp +++ b/modules/visual_features/src/visual-feature/vpBasicFeature.cpp @@ -55,16 +55,16 @@ const unsigned int vpBasicFeature::FEATURE_LINE[32] = { /*! Default constructor. */ -vpBasicFeature::vpBasicFeature() : s(), dim_s(0), flags(NULL), nbParameters(0), deallocate(vpBasicFeature::user) {} +vpBasicFeature::vpBasicFeature() : s(), dim_s(0), flags(nullptr), nbParameters(0), deallocate(vpBasicFeature::user) {} /*! Destructor that free allocated memory. */ vpBasicFeature::~vpBasicFeature() { - if (flags != NULL) { + if (flags != nullptr) { delete[] flags; - flags = NULL; + flags = nullptr; } } @@ -72,7 +72,7 @@ vpBasicFeature::~vpBasicFeature() Copy constructor. */ vpBasicFeature::vpBasicFeature(const vpBasicFeature &f) - : s(), dim_s(0), flags(NULL), nbParameters(0), deallocate(vpBasicFeature::user) + : s(), dim_s(0), flags(nullptr), nbParameters(0), deallocate(vpBasicFeature::user) { *this = f; } @@ -128,7 +128,7 @@ vpColVector vpBasicFeature::get_s(unsigned int select) const void vpBasicFeature::resetFlags() { - if (flags != NULL) { + if (flags != nullptr) { for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; } @@ -138,7 +138,7 @@ void vpBasicFeature::resetFlags() //! interaction matrix without having updated the feature. 
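Two language points are worth recalling for the vpBasicFeature destructor and flag-reset code above: deleting a null pointer is already a no-op, so the guard around delete[] is kept only to preserve the existing control flow, and nullptr has its own type (std::nullptr_t), which removes the integer/pointer ambiguity that NULL can introduce in overload resolution. A self-contained sketch, independent of ViSP:

#include <cstddef>

void report(int) {}    // (1) integer overload
void report(bool *) {} // (2) pointer overload

int main()
{
  bool *flags = nullptr;
  delete[] flags;    // well-defined: deleting a null pointer does nothing
  report(nullptr);   // unambiguously calls (2); report(NULL) may pick (1) or be
                     // ambiguous, depending on how the implementation defines NULL
  return 0;
}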
void vpBasicFeature::setFlags() { - if (flags != NULL) { + if (flags != nullptr) { for (unsigned int i = 0; i < nbParameters; i++) flags[i] = true; } diff --git a/modules/visual_features/src/visual-feature/vpFeatureDepth.cpp b/modules/visual_features/src/visual-feature/vpFeatureDepth.cpp index 5cb0cb5a05..bc0c494552 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureDepth.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureDepth.cpp @@ -77,7 +77,7 @@ void vpFeatureDepth::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeatureEllipse.cpp b/modules/visual_features/src/visual-feature/vpFeatureEllipse.cpp index fe758ff51b..025110b466 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureEllipse.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureEllipse.cpp @@ -68,7 +68,7 @@ void vpFeatureEllipse::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeatureLine.cpp b/modules/visual_features/src/visual-feature/vpFeatureLine.cpp index f225cbf4ed..f7d30b06d3 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureLine.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureLine.cpp @@ -81,7 +81,7 @@ void vpFeatureLine::init() // s[0] = rho // s[1] = theta s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeatureLuminance.cpp b/modules/visual_features/src/visual-feature/vpFeatureLuminance.cpp index c801bf45be..7c48ad5000 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureLuminance.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureLuminance.cpp @@ -55,7 +55,7 @@ */ void vpFeatureLuminance::init() { - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; @@ -84,7 +84,7 @@ void vpFeatureLuminance::init(unsigned int _nbr, unsigned int _nbc, double _Z) s.resize(dim_s); - if (pixInfo != NULL) + if (pixInfo != nullptr) delete[] pixInfo; pixInfo = new vpLuminance[dim_s]; @@ -95,11 +95,11 @@ void vpFeatureLuminance::init(unsigned int _nbr, unsigned int _nbc, double _Z) /*! Default constructor that build a visual feature. */ -vpFeatureLuminance::vpFeatureLuminance() : Z(1), nbr(0), nbc(0), bord(10), pixInfo(NULL), firstTimeIn(0), cam() +vpFeatureLuminance::vpFeatureLuminance() : Z(1), nbr(0), nbc(0), bord(10), pixInfo(nullptr), firstTimeIn(0), cam() { nbParameters = 1; dim_s = 0; - flags = NULL; + flags = nullptr; init(); } @@ -108,7 +108,7 @@ vpFeatureLuminance::vpFeatureLuminance() : Z(1), nbr(0), nbc(0), bord(10), pixIn Copy constructor. 
*/ vpFeatureLuminance::vpFeatureLuminance(const vpFeatureLuminance &f) - : vpBasicFeature(f), Z(1), nbr(0), nbc(0), bord(10), pixInfo(NULL), firstTimeIn(0), cam() + : vpBasicFeature(f), Z(1), nbr(0), nbc(0), bord(10), pixInfo(nullptr), firstTimeIn(0), cam() { *this = f; } @@ -137,7 +137,7 @@ vpFeatureLuminance &vpFeatureLuminance::operator=(const vpFeatureLuminance &f) */ vpFeatureLuminance::~vpFeatureLuminance() { - if (pixInfo != NULL) + if (pixInfo != nullptr) delete[] pixInfo; } diff --git a/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp b/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp index 5f7e9515af..490f4cc90f 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp @@ -56,7 +56,7 @@ void vpFeatureMoment::init() * vpMoment associated to it. This partly explains why vpFeatureMomentBasic * cannot be used directly as a visual feature. */ - if (this->moment != NULL) + if (this->moment != nullptr) dim_s = (unsigned int)this->moment->get().size(); else dim_s = 0; @@ -68,7 +68,7 @@ void vpFeatureMoment::init() for (unsigned int i = 0; i < dim_s; i++) s[i] = 0; - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; @@ -150,14 +150,14 @@ void vpFeatureMoment::update(double A_, double B_, double C_) this->B = B_; this->C = C_; - if (moment == NULL) { + if (moment == nullptr) { bool found; this->moment = &(moments.get(momentName(), found)); if (!found) throw vpException(vpException::notInitialized, "Moment not found for feature"); } nbParameters = 1; - if (this->moment != NULL) { + if (this->moment != nullptr) { dim_s = (unsigned int)this->moment->get().size(); s.resize(dim_s); @@ -165,7 +165,7 @@ void vpFeatureMoment::update(double A_, double B_, double C_) for (unsigned int i = 0; i < dim_s; i++) s[i] = this->moment->get()[i]; - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeaturePoint.cpp b/modules/visual_features/src/visual-feature/vpFeaturePoint.cpp index d22d11a691..2ccaf8150f 100644 --- a/modules/visual_features/src/visual-feature/vpFeaturePoint.cpp +++ b/modules/visual_features/src/visual-feature/vpFeaturePoint.cpp @@ -71,7 +71,7 @@ void vpFeaturePoint::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeaturePoint3D.cpp b/modules/visual_features/src/visual-feature/vpFeaturePoint3D.cpp index 5eceae66b5..5b667d40f5 100644 --- a/modules/visual_features/src/visual-feature/vpFeaturePoint3D.cpp +++ b/modules/visual_features/src/visual-feature/vpFeaturePoint3D.cpp @@ -65,7 +65,7 @@ void vpFeaturePoint3D::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeaturePointPolar.cpp b/modules/visual_features/src/visual-feature/vpFeaturePointPolar.cpp index c5f924c844..57534d3ec1 100644 --- a/modules/visual_features/src/visual-feature/vpFeaturePointPolar.cpp +++ b/modules/visual_features/src/visual-feature/vpFeaturePointPolar.cpp @@ -77,7 +77,7 @@ void 
vpFeaturePointPolar::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp b/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp index 7a724ab5a5..d0b3f76ec9 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp @@ -61,7 +61,7 @@ void vpFeatureSegment::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeatureThetaU.cpp b/modules/visual_features/src/visual-feature/vpFeatureThetaU.cpp index a11dfc298a..7c3c348fec 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureThetaU.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureThetaU.cpp @@ -68,7 +68,7 @@ void vpFeatureThetaU::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/visual_features/src/visual-feature/vpFeatureTranslation.cpp b/modules/visual_features/src/visual-feature/vpFeatureTranslation.cpp index afb2451846..1a1063913f 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureTranslation.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureTranslation.cpp @@ -69,7 +69,7 @@ void vpFeatureTranslation::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; @@ -587,7 +587,7 @@ void vpFeatureTranslation::print(unsigned int select) const */ vpFeatureTranslation *vpFeatureTranslation::duplicate() const { - vpFeatureTranslation *feature = NULL; + vpFeatureTranslation *feature = nullptr; if (translation == cdMc) feature = new vpFeatureTranslation(cdMc); if (translation == cMo) diff --git a/modules/visual_features/src/visual-feature/vpFeatureVanishingPoint.cpp b/modules/visual_features/src/visual-feature/vpFeatureVanishingPoint.cpp index e682e8b29c..3875e5677a 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureVanishingPoint.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureVanishingPoint.cpp @@ -67,7 +67,7 @@ void vpFeatureVanishingPoint::init() // memory allocation s.resize(dim_s); - if (flags == NULL) + if (flags == nullptr) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; diff --git a/modules/vs/src/vpServo.cpp b/modules/vs/src/vpServo.cpp index 50e67df55a..ddd180e656 100644 --- a/modules/vs/src/vpServo.cpp +++ b/modules/vs/src/vpServo.cpp @@ -112,14 +112,14 @@ void vpServo::kill() for (std::list::iterator it = featureList.begin(); it != featureList.end(); ++it) { if ((*it)->getDeallocate() == vpBasicFeature::vpServo) { delete (*it); - (*it) = NULL; + (*it) = nullptr; } } // desired list for (std::list::iterator it = desiredFeatureList.begin(); it != desiredFeatureList.end(); ++it) { if ((*it)->getDeallocate() == vpBasicFeature::vpServo) { delete (*it); - (*it) = NULL; + (*it) = nullptr; } } diff --git a/modules/vs/test/visual-feature/testFeatureSegment.cpp b/modules/vs/test/visual-feature/testFeatureSegment.cpp index 
5d5bd4935f..b5b260547c 100644 --- a/modules/vs/test/visual-feature/testFeatureSegment.cpp +++ b/modules/vs/test/visual-feature/testFeatureSegment.cpp @@ -79,10 +79,10 @@ int main(int argc, const char **argv) #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) {"-d", vpParseArgv::ARGV_CONSTANT_INT, 0, (char *)&opt_no_display, "Disable display and graphics viewer."}, #endif - {"-normalized", vpParseArgv::ARGV_INT, (char *)NULL, (char *)&opt_normalized, + {"-normalized", vpParseArgv::ARGV_INT, (char *)nullptr, (char *)&opt_normalized, "1 to use normalized features, 0 for non normalized."}, - {"-h", vpParseArgv::ARGV_HELP, (char *)NULL, (char *)NULL, "Print the help."}, - {(char *)NULL, vpParseArgv::ARGV_END, (char *)NULL, (char *)NULL, (char *)NULL} + {"-h", vpParseArgv::ARGV_HELP, (char *)nullptr, (char *)nullptr, "Print the help."}, + {(char *)nullptr, vpParseArgv::ARGV_END, (char *)nullptr, (char *)nullptr, (char *)nullptr} }; // Read the command line options @@ -103,7 +103,7 @@ int main(int argc, const char **argv) vpCameraParameters cam(640., 480., 320., 240.); #if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) - vpDisplay *display = NULL; + vpDisplay *display = nullptr; if (!opt_no_display) { #if defined(VISP_HAVE_X11) display = new vpDisplayX; @@ -178,7 +178,7 @@ int main(int argc, const char **argv) #endif #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) - vpPlot *graph = NULL; + vpPlot *graph = nullptr; if (opt_curves) { // Create a window (700 by 700) at position (100, 200) with two graphics graph = new vpPlot(2, 500, 500, 700, 10, "Curves..."); @@ -239,11 +239,11 @@ int main(int argc, const char **argv) } while ((task.getError()).sumSquare() > 0.0005); #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) - if (graph != NULL) + if (graph != nullptr) delete graph; #endif #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) - if (!opt_no_display && display != NULL) + if (!opt_no_display && display != nullptr) delete display; #endif diff --git a/tutorial/computer-vision/tutorial-pose-from-points-realsense-T265.cpp b/tutorial/computer-vision/tutorial-pose-from-points-realsense-T265.cpp index e7e088ad36..c0165739a7 100644 --- a/tutorial/computer-vision/tutorial-pose-from-points-realsense-T265.cpp +++ b/tutorial/computer-vision/tutorial-pose-from-points-realsense-T265.cpp @@ -45,9 +45,9 @@ int main(int argc, char **argv) g.open(config); if (opt_camera_index == 1) // Left camera - g.acquire(&I, NULL, NULL); + g.acquire(&I, nullptr, nullptr); else - g.acquire(NULL, &I, NULL); + g.acquire(nullptr, &I, nullptr); std::cout << "Read camera parameters from Realsense device" << std::endl; // Parameters of our camera @@ -86,9 +86,9 @@ int main(int argc, char **argv) double t_begin = vpTime::measureTimeMs(); if (opt_camera_index == 1) - g.acquire(&I, NULL, NULL); + g.acquire(&I, nullptr, nullptr); else - g.acquire(NULL, &I, NULL); + g.acquire(nullptr, &I, nullptr); vpDisplay::display(I); if (apply_cv) { diff --git a/tutorial/detection/barcode/tutorial-barcode-detector-live.cpp b/tutorial/detection/barcode/tutorial-barcode-detector-live.cpp index a4fea0a893..34f788d806 100644 --- a/tutorial/detection/barcode/tutorial-barcode-detector-live.cpp +++ b/tutorial/detection/barcode/tutorial-barcode-detector-live.cpp @@ -67,7 +67,7 @@ int main(int argc, const char **argv) #endif vpDisplay::setTitle(I, "ViSP viewer"); - vpDetectorBase *detector = NULL; + vpDetectorBase *detector = nullptr; #if (defined(VISP_HAVE_ZBAR) && defined(VISP_HAVE_DMTX)) if (opt_barcode == 0) detector = new 
vpDetectorQRCode; diff --git a/tutorial/detection/barcode/tutorial-barcode-detector.cpp b/tutorial/detection/barcode/tutorial-barcode-detector.cpp index ed75fa2f53..f00d6c0216 100644 --- a/tutorial/detection/barcode/tutorial-barcode-detector.cpp +++ b/tutorial/detection/barcode/tutorial-barcode-detector.cpp @@ -27,7 +27,7 @@ int main(int argc, const char **argv) #endif //! [Create base detector] - vpDetectorBase *detector = NULL; + vpDetectorBase *detector = nullptr; //! [Create base detector] #if (defined(VISP_HAVE_ZBAR) && defined(VISP_HAVE_DMTX)) diff --git a/tutorial/detection/dnn/tutorial-dnn-tensorrt-live.cpp b/tutorial/detection/dnn/tutorial-dnn-tensorrt-live.cpp index 3d2ae7e1b2..945935689f 100644 --- a/tutorial/detection/dnn/tutorial-dnn-tensorrt-live.cpp +++ b/tutorial/detection/dnn/tutorial-dnn-tensorrt-live.cpp @@ -193,7 +193,7 @@ bool parseOnnxModel(const std::string &model_path, TRTUniquePtr infer { nvinfer1::createInferRuntime(gLogger) }; - engine.reset(infer->deserializeCudaEngine(engineStream, engineSize, NULL)); + engine.reset(infer->deserializeCudaEngine(engineStream, engineSize, nullptr)); context.reset(engine->createExecutionContext()); return true; @@ -281,7 +281,7 @@ bool parseOnnxModel(const std::string &model_path, TRTUniquePtr I_undist(height, width); g.open(config); - g.acquire(&I_left, NULL, NULL); + g.acquire(&I_left, nullptr, nullptr); std::cout << "Read camera parameters from Realsense device" << std::endl; vpCameraParameters cam_left, cam_undistort; @@ -111,8 +111,8 @@ int main(int argc, const char **argv) std::cout << "nThreads : " << nThreads << std::endl; std::cout << "Z aligned: " << align_frame << std::endl; - vpDisplay *display_left = NULL; - vpDisplay *display_undistort = NULL; + vpDisplay *display_left = nullptr; + vpDisplay *display_undistort = nullptr; if (!display_off) { #ifdef VISP_HAVE_X11 display_left = new vpDisplayX(I_left, 100, 30, "Left image"); @@ -150,7 +150,7 @@ int main(int argc, const char **argv) double t = vpTime::measureTimeMs(); //! [Acquisition] - g.acquire(&I_left, NULL, NULL); + g.acquire(&I_left, nullptr, nullptr); //! [Undistorting image] vpImageTools::undistort(I_left, mapU, mapV, mapDu, mapDv, I_undist); diff --git a/tutorial/detection/tag/tutorial-apriltag-detector-live-rgbd-realsense.cpp b/tutorial/detection/tag/tutorial-apriltag-detector-live-rgbd-realsense.cpp index 4331e930c3..d721296063 100644 --- a/tutorial/detection/tag/tutorial-apriltag-detector-live-rgbd-realsense.cpp +++ b/tutorial/detection/tag/tutorial-apriltag-detector-live-rgbd-realsense.cpp @@ -99,7 +99,7 @@ int main(int argc, const char **argv) rs2::align align_to_color = RS2_STREAM_COLOR; g.acquire(reinterpret_cast(I_color.bitmap), reinterpret_cast(I_depth_raw.bitmap), - NULL, NULL, &align_to_color); + nullptr, nullptr, &align_to_color); std::cout << "Read camera parameters from Realsense device" << std::endl; vpCameraParameters cam; @@ -116,9 +116,9 @@ int main(int argc, const char **argv) vpImage depthMap; vpImageConvert::createDepthHistogram(I_depth_raw, I_depth); - vpDisplay *d1 = NULL; - vpDisplay *d2 = NULL; - vpDisplay *d3 = NULL; + vpDisplay *d1 = nullptr; + vpDisplay *d2 = nullptr; + vpDisplay *d3 = nullptr; if (!display_off) { #ifdef VISP_HAVE_X11 d1 = new vpDisplayX(I_color, 100, 30, "Pose from Homography"); @@ -152,7 +152,7 @@ int main(int argc, const char **argv) //! 
[Acquisition] g.acquire(reinterpret_cast(I_color.bitmap), - reinterpret_cast(I_depth_raw.bitmap), NULL, NULL, &align_to_color); + reinterpret_cast(I_depth_raw.bitmap), nullptr, nullptr, &align_to_color); //! [Acquisition] I_color2 = I_color; diff --git a/tutorial/detection/tag/tutorial-apriltag-detector-live-rgbd-structure-core.cpp b/tutorial/detection/tag/tutorial-apriltag-detector-live-rgbd-structure-core.cpp index 91002963ca..54a4c158db 100644 --- a/tutorial/detection/tag/tutorial-apriltag-detector-live-rgbd-structure-core.cpp +++ b/tutorial/detection/tag/tutorial-apriltag-detector-live-rgbd-structure-core.cpp @@ -114,9 +114,9 @@ int main(int argc, const char **argv) vpImage depthMap; vpImageConvert::createDepthHistogram(I_depth_raw, I_depth); - vpDisplay *d1 = NULL; - vpDisplay *d2 = NULL; - vpDisplay *d3 = NULL; + vpDisplay *d1 = nullptr; + vpDisplay *d2 = nullptr; + vpDisplay *d3 = nullptr; if (!display_off) { #ifdef VISP_HAVE_X11 d1 = new vpDisplayX(I_color, 100, 30, "Pose from Homography"); diff --git a/tutorial/detection/tag/tutorial-apriltag-detector-live.cpp b/tutorial/detection/tag/tutorial-apriltag-detector-live.cpp index 1aeb9c2c59..f855805155 100644 --- a/tutorial/detection/tag/tutorial-apriltag-detector-live.cpp +++ b/tutorial/detection/tag/tutorial-apriltag-detector-live.cpp @@ -183,7 +183,7 @@ int main(int argc, const char **argv) std::cout << "nThreads : " << nThreads << std::endl; std::cout << "Z aligned: " << align_frame << std::endl; - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (!display_off) { #ifdef VISP_HAVE_X11 d = new vpDisplayX(I); diff --git a/tutorial/grabber/tutorial-grabber-1394.cpp b/tutorial/grabber/tutorial-grabber-1394.cpp index fe7bfddf5c..8b5794cf30 100644 --- a/tutorial/grabber/tutorial-grabber-1394.cpp +++ b/tutorial/grabber/tutorial-grabber-1394.cpp @@ -130,7 +130,7 @@ int main(int argc, const char *argv[]) std::cout << "Image size : " << I.getWidth() << " " << I.getHeight() << std::endl; - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." << std::endl; diff --git a/tutorial/grabber/tutorial-grabber-basler-pylon.cpp b/tutorial/grabber/tutorial-grabber-basler-pylon.cpp index d663548c24..4b267054ba 100644 --- a/tutorial/grabber/tutorial-grabber-basler-pylon.cpp +++ b/tutorial/grabber/tutorial-grabber-basler-pylon.cpp @@ -151,7 +151,7 @@ int main(int argc, const char *argv[]) std::cout << "Image size : " << I.getWidth() << " " << I.getHeight() << std::endl; - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." << std::endl; diff --git a/tutorial/grabber/tutorial-grabber-bebop2.cpp b/tutorial/grabber/tutorial-grabber-bebop2.cpp index 7d5af10bd6..f88bd769c3 100644 --- a/tutorial/grabber/tutorial-grabber-bebop2.cpp +++ b/tutorial/grabber/tutorial-grabber-bebop2.cpp @@ -144,7 +144,7 @@ int main(int argc, const char **argv) std::cout << "Image size : " << I.getWidth() << " " << I.getHeight() << std::endl; - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." 
<< std::endl; diff --git a/tutorial/grabber/tutorial-grabber-flycapture.cpp b/tutorial/grabber/tutorial-grabber-flycapture.cpp index d9b65ea13d..fcf4b75c12 100644 --- a/tutorial/grabber/tutorial-grabber-flycapture.cpp +++ b/tutorial/grabber/tutorial-grabber-flycapture.cpp @@ -132,7 +132,7 @@ int main(int argc, const char *argv[]) std::cout << "Image size : " << I.getWidth() << " " << I.getHeight() << std::endl; - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." << std::endl; diff --git a/tutorial/grabber/tutorial-grabber-ids-ueye.cpp b/tutorial/grabber/tutorial-grabber-ids-ueye.cpp index 7c12ec12c2..b52ba492ce 100644 --- a/tutorial/grabber/tutorial-grabber-ids-ueye.cpp +++ b/tutorial/grabber/tutorial-grabber-ids-ueye.cpp @@ -297,7 +297,7 @@ int main(int argc, const char *argv[]) std::cout << "Config file : " << (opt_config_file.empty() ? "empty" : opt_config_file) << std::endl; std::cout << "Image size : " << I.getWidth() << " " << I.getHeight() << std::endl; - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." << std::endl; diff --git a/tutorial/grabber/tutorial-grabber-multiple-realsense.cpp b/tutorial/grabber/tutorial-grabber-multiple-realsense.cpp index 7df532fc8f..7f7fd8bcd3 100644 --- a/tutorial/grabber/tutorial-grabber-multiple-realsense.cpp +++ b/tutorial/grabber/tutorial-grabber-multiple-realsense.cpp @@ -83,7 +83,7 @@ int main(int argc, char **argv) for (size_t i = 0; i < type_serial_nb.size(); i++) { if (cam_found[i]) { if (type_serial_nb[i].first == "T265") { // T265. 
- g[i].acquire(&I[i], NULL, NULL); + g[i].acquire(&I[i], nullptr, nullptr); #if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV) if (!d[i].isInitialised()) { diff --git a/tutorial/grabber/tutorial-grabber-opencv-threaded.cpp b/tutorial/grabber/tutorial-grabber-opencv-threaded.cpp index 1c378ea21c..b42cc9f944 100644 --- a/tutorial/grabber/tutorial-grabber-opencv-threaded.cpp +++ b/tutorial/grabber/tutorial-grabber-opencv-threaded.cpp @@ -73,9 +73,9 @@ vpThread::Return displayFunction(vpThread::Args args) t_CaptureState capture_state_; bool display_initialized_ = false; #if defined(VISP_HAVE_X11) - vpDisplayX *d_ = NULL; + vpDisplayX *d_ = nullptr; #elif defined(VISP_HAVE_GDI) - vpDisplayGDI *d_ = NULL; + vpDisplayGDI *d_ = nullptr; #endif do { diff --git a/tutorial/grabber/tutorial-grabber-opencv.cpp b/tutorial/grabber/tutorial-grabber-opencv.cpp index 7978d6e770..5113934a3b 100644 --- a/tutorial/grabber/tutorial-grabber-opencv.cpp +++ b/tutorial/grabber/tutorial-grabber-opencv.cpp @@ -139,7 +139,7 @@ int main(int argc, const char *argv[]) #endif vpImageConvert::convert(frame, I); - vpDisplayOpenCV *d = NULL; + vpDisplayOpenCV *d = nullptr; if (opt_display) { d = new vpDisplayOpenCV(I); } diff --git a/tutorial/grabber/tutorial-grabber-realsense-T265.cpp b/tutorial/grabber/tutorial-grabber-realsense-T265.cpp index a592372027..193075629d 100644 --- a/tutorial/grabber/tutorial-grabber-realsense-T265.cpp +++ b/tutorial/grabber/tutorial-grabber-realsense-T265.cpp @@ -111,7 +111,7 @@ int main(int argc, const char *argv[]) std::cout << "Image size : " << I_left.getWidth() << " " << I_right.getHeight() << std::endl; - vpDisplay *display_left = NULL, *display_right = NULL; + vpDisplay *display_left = nullptr, *display_right = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." << std::endl; @@ -148,7 +148,7 @@ int main(int argc, const char *argv[]) vpDisplay::display(I_right); quit = image_queue_left.record(I_left); - quit |= image_queue_right.record(I_right, NULL, image_queue_left.getRecordingTrigger(), true); + quit |= image_queue_right.record(I_right, nullptr, image_queue_left.getRecordingTrigger(), true); std::stringstream ss; ss << "Acquisition time: " << std::setprecision(3) << vpTime::measureTimeMs() - t << " ms"; diff --git a/tutorial/grabber/tutorial-grabber-realsense.cpp b/tutorial/grabber/tutorial-grabber-realsense.cpp index 6c39b2a632..a4644a250b 100644 --- a/tutorial/grabber/tutorial-grabber-realsense.cpp +++ b/tutorial/grabber/tutorial-grabber-realsense.cpp @@ -157,7 +157,7 @@ int main(int argc, const char *argv[]) std::cout << "Image size : " << I.getWidth() << " " << I.getHeight() << std::endl; - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." << std::endl; diff --git a/tutorial/grabber/tutorial-grabber-rgbd-D435-structurecore.cpp b/tutorial/grabber/tutorial-grabber-rgbd-D435-structurecore.cpp index 0108f2002a..971a599138 100644 --- a/tutorial/grabber/tutorial-grabber-rgbd-D435-structurecore.cpp +++ b/tutorial/grabber/tutorial-grabber-rgbd-D435-structurecore.cpp @@ -47,7 +47,7 @@ int main(int argc, char **argv) // Acquiring images. 
for (;;) { rs.acquire(reinterpret_cast(I_color_rs.bitmap), - reinterpret_cast(rs_I_depth_raw.bitmap), NULL, NULL, NULL, NULL, NULL); + reinterpret_cast(rs_I_depth_raw.bitmap), nullptr, nullptr, nullptr, nullptr, nullptr); sc.acquire(reinterpret_cast(I_color_sc.bitmap), reinterpret_cast(sc_I_depth_raw.bitmap)); diff --git a/tutorial/grabber/tutorial-grabber-structure-core.cpp b/tutorial/grabber/tutorial-grabber-structure-core.cpp index b668684ce8..2bfd2ecd0b 100644 --- a/tutorial/grabber/tutorial-grabber-structure-core.cpp +++ b/tutorial/grabber/tutorial-grabber-structure-core.cpp @@ -158,7 +158,7 @@ int main(int argc, const char *argv[]) I_depth = vpImage(g.getHeight(vpOccipitalStructure::depth), g.getWidth(vpOccipitalStructure::depth)); I_depth_raw = vpImage(g.getHeight(vpOccipitalStructure::depth), g.getWidth(vpOccipitalStructure::depth)); - vpDisplay *display_visible = NULL, *display_depth = NULL; + vpDisplay *display_visible = nullptr, *display_depth = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." << std::endl; @@ -200,7 +200,7 @@ int main(int argc, const char *argv[]) vpDisplay::display(I_depth); quit = image_queue_visible.record(I_color); - quit |= image_queue_depth.record(I_depth, NULL, image_queue_visible.getRecordingTrigger(), true); + quit |= image_queue_depth.record(I_depth, nullptr, image_queue_visible.getRecordingTrigger(), true); std::stringstream ss; ss << "Acquisition time: " << std::setprecision(3) << vpTime::measureTimeMs() - t << " ms"; diff --git a/tutorial/grabber/tutorial-grabber-v4l2-threaded.cpp b/tutorial/grabber/tutorial-grabber-v4l2-threaded.cpp index 07880bbfb1..5a0c538a0a 100644 --- a/tutorial/grabber/tutorial-grabber-v4l2-threaded.cpp +++ b/tutorial/grabber/tutorial-grabber-v4l2-threaded.cpp @@ -61,7 +61,7 @@ vpThread::Return displayFunction(vpThread::Args args) t_CaptureState capture_state_; bool display_initialized_ = false; #if defined(VISP_HAVE_X11) - vpDisplayX *d_ = NULL; + vpDisplayX *d_ = nullptr; #endif do { diff --git a/tutorial/grabber/tutorial-grabber-v4l2.cpp b/tutorial/grabber/tutorial-grabber-v4l2.cpp index 245f04a2fb..64ecb33ad2 100644 --- a/tutorial/grabber/tutorial-grabber-v4l2.cpp +++ b/tutorial/grabber/tutorial-grabber-v4l2.cpp @@ -145,7 +145,7 @@ int main(int argc, const char *argv[]) std::cout << "Image size : " << I.getWidth() << " " << I.getHeight() << std::endl; - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (opt_display) { #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_OPENCV)) std::cout << "No image viewer is available..." 
<< std::endl; diff --git a/tutorial/ios/AprilTagLiveCamera/AprilTagLiveCamera.xcodeproj/project.pbxproj b/tutorial/ios/AprilTagLiveCamera/AprilTagLiveCamera.xcodeproj/project.pbxproj index 2b35534f7a..4b1d52b41a 100644 --- a/tutorial/ios/AprilTagLiveCamera/AprilTagLiveCamera.xcodeproj/project.pbxproj +++ b/tutorial/ios/AprilTagLiveCamera/AprilTagLiveCamera.xcodeproj/project.pbxproj @@ -223,7 +223,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; CLANG_CXX_LIBRARY = "libc++"; @@ -241,7 +241,7 @@ CLANG_WARN_ENUM_CONVERSION = YES; CLANG_WARN_INFINITE_RECURSION = YES; CLANG_WARN_INT_CONVERSION = YES; CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; @@ -284,7 +284,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; CLANG_CXX_LIBRARY = "libc++"; @@ -302,7 +302,7 @@ CLANG_WARN_ENUM_CONVERSION = YES; CLANG_WARN_INFINITE_RECURSION = YES; CLANG_WARN_INT_CONVERSION = YES; CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; diff --git a/tutorial/ios/AprilTagLiveCamera/VispHelper/ImageConversion.mm b/tutorial/ios/AprilTagLiveCamera/VispHelper/ImageConversion.mm index 6075d1baca..3eab39980f 100755 --- a/tutorial/ios/AprilTagLiveCamera/VispHelper/ImageConversion.mm +++ b/tutorial/ios/AprilTagLiveCamera/VispHelper/ImageConversion.mm @@ -156,7 +156,7 @@ + (UIImage *)UIImageFromVpImageColor:(const vpImage &)I colorSpace, // colorspace kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info provider, // CGDataProviderRef - NULL, // decode + nullptr, // decode false, // should interpolate kCGRenderingIntentDefault // intent ); @@ -192,7 +192,7 @@ + (UIImage *)UIImageFromVpImageGray:(const vpImage &)I colorSpace, // colorspace kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info provider, // CGDataProviderRef - NULL, // decode + nullptr, // decode false, // should interpolate kCGRenderingIntentDefault // intent ); @@ -211,4 +211,3 @@ + (UIImage *)UIImageFromVpImageGray:(const vpImage &)I @end #endif - diff --git a/tutorial/ios/AprilTagLiveCamera/VispHelper/ImageDisplayWithContext.h b/tutorial/ios/AprilTagLiveCamera/VispHelper/ImageDisplayWithContext.h index 34fd2eafcd..1b41a85db4 100644 --- a/tutorial/ios/AprilTagLiveCamera/VispHelper/ImageDisplayWithContext.h +++ b/tutorial/ios/AprilTagLiveCamera/VispHelper/ImageDisplayWithContext.h @@ -37,7 +37,7 @@ #import "ImageDisplay.h" NS_ASSUME_NONNULL_BEGIN @interface ImageDisplay (withContext) @@ -49,4 +49,4 @@ NS_ASSUME_NONNULL_BEGIN @end NS_ASSUME_NONNULL_END diff --git a/tutorial/ios/GettingStarted/GettingStarted.xcodeproj/project.pbxproj b/tutorial/ios/GettingStarted/GettingStarted.xcodeproj/project.pbxproj index 6bcf630100..4f223c7077 100644 --- a/tutorial/ios/GettingStarted/GettingStarted.xcodeproj/project.pbxproj +++
b/tutorial/ios/GettingStarted/GettingStarted.xcodeproj/project.pbxproj @@ -182,7 +182,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; CLANG_CXX_LIBRARY = "libc++"; @@ -231,7 +231,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; CLANG_CXX_LIBRARY = "libc++"; diff --git a/tutorial/ios/StartedAprilTag/StartedAprilTag.xcodeproj/project.pbxproj b/tutorial/ios/StartedAprilTag/StartedAprilTag.xcodeproj/project.pbxproj index c46dc4a9e7..4576317c9d 100644 --- a/tutorial/ios/StartedAprilTag/StartedAprilTag.xcodeproj/project.pbxproj +++ b/tutorial/ios/StartedAprilTag/StartedAprilTag.xcodeproj/project.pbxproj @@ -198,7 +198,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; CLANG_CXX_LIBRARY = "libc++"; @@ -247,7 +247,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; CLANG_CXX_LIBRARY = "libc++"; diff --git a/tutorial/ios/StartedAprilTag/StartedAprilTag/ImageConversion.mm b/tutorial/ios/StartedAprilTag/StartedAprilTag/ImageConversion.mm index 5666d8d6ec..156710a531 100644 --- a/tutorial/ios/StartedAprilTag/StartedAprilTag/ImageConversion.mm +++ b/tutorial/ios/StartedAprilTag/StartedAprilTag/ImageConversion.mm @@ -156,7 +156,7 @@ + (UIImage *)UIImageFromVpImageColor:(const vpImage &)I colorSpace, // colorspace kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info provider, // CGDataProviderRef - NULL, // decode + nullptr, // decode false, // should interpolate kCGRenderingIntentDefault // intent ); @@ -192,7 +192,7 @@ + (UIImage *)UIImageFromVpImageGray:(const vpImage &)I colorSpace, // colorspace kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info provider, // CGDataProviderRef - NULL, // decode + nullptr, // decode false, // should interpolate kCGRenderingIntentDefault // intent ); @@ -211,4 +211,3 @@ + (UIImage *)UIImageFromVpImageGray:(const vpImage &)I @end #endif - diff --git a/tutorial/ios/StartedImageProc/StartedImageProc.xcodeproj/project.pbxproj b/tutorial/ios/StartedImageProc/StartedImageProc.xcodeproj/project.pbxproj index f6e5edad88..a8bbd2bc00 100644 --- a/tutorial/ios/StartedImageProc/StartedImageProc.xcodeproj/project.pbxproj +++ b/tutorial/ios/StartedImageProc/StartedImageProc.xcodeproj/project.pbxproj @@ -192,7 +192,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; CLANG_CXX_LIBRARY = "libc++"; CLANG_ENABLE_MODULES = YES; @@ -240,7 +240,7 @@ isa = XCBuildConfiguration; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; CLANG_CXX_LIBRARY = "libc++"; CLANG_ENABLE_MODULES = YES; diff --git a/tutorial/ios/StartedImageProc/StartedImageProc/ImageConversion.mm
b/tutorial/ios/StartedImageProc/StartedImageProc/ImageConversion.mm index 5666d8d6ec..156710a531 100644 --- a/tutorial/ios/StartedImageProc/StartedImageProc/ImageConversion.mm +++ b/tutorial/ios/StartedImageProc/StartedImageProc/ImageConversion.mm @@ -156,7 +156,7 @@ + (UIImage *)UIImageFromVpImageColor:(const vpImage &)I colorSpace, // colorspace kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info provider, // CGDataProviderRef - NULL, // decode + nullptr, // decode false, // should interpolate kCGRenderingIntentDefault // intent ); @@ -192,7 +192,7 @@ + (UIImage *)UIImageFromVpImageGray:(const vpImage &)I colorSpace, // colorspace kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info provider, // CGDataProviderRef - NULL, // decode + nullptr, // decode false, // should interpolate kCGRenderingIntentDefault // intent ); @@ -211,4 +211,3 @@ + (UIImage *)UIImageFromVpImageGray:(const vpImage &)I @end #endif - diff --git a/tutorial/matlab/tutorial-matlab.cpp b/tutorial/matlab/tutorial-matlab.cpp index f365026e6e..5bc97c1785 100644 --- a/tutorial/matlab/tutorial-matlab.cpp +++ b/tutorial/matlab/tutorial-matlab.cpp @@ -38,7 +38,7 @@ int main() mxArray *T = mxCreateDoubleMatrix(xRows, xCols, mxREAL); // MATLAB array to store output data from MATLAB - mxArray *D = NULL; + mxArray *D = nullptr; //! [MATLABVariables] // Temporary variable to hold Output data diff --git a/tutorial/robot/mbot/arduino/mbot-serial-controller/mbot-serial-controller.ino b/tutorial/robot/mbot/arduino/mbot-serial-controller/mbot-serial-controller.ino index 093fdcd6dd..4e1ed09875 100644 --- a/tutorial/robot/mbot/arduino/mbot-serial-controller/mbot-serial-controller.ino +++ b/tutorial/robot/mbot/arduino/mbot-serial-controller/mbot-serial-controller.ino @@ -13,7 +13,7 @@ MeEncoderOnBoard Encoder_1(SLOT1); MeEncoderOnBoard Encoder_2(SLOT2); // Me Auriga hardware serial port to dial with Raspberry -MeSerial mySerial(PORT_5); +MeSerial mySerial(PORT_5); // LED ring MeRGBLed rgbled(0, 12); @@ -85,7 +85,7 @@ String getValue(String string, String key, String separator) char * tmp; char * str; str = strtok_r((char*)string.c_str(), separator.c_str(), &tmp); - if(str!=NULL && strcmp(str,key.c_str())==0) { + if(str!=nullptr && strcmp(str,key.c_str())==0) { val = String(tmp); } } @@ -95,15 +95,15 @@ String getValue(String string, String key, String separator) void setup() { // Serial connexion with Raspberry mySerial.begin(115200); // Opens serial port at 115200 bps - + // Serial connexion with console used for debug info Serial.begin(115200); // Opens serial port at 9600 bps - + // Led ring rgbled.setpin(44); rgbled.setColor(0, 0, 0, 0); // Turn all LED off rgbled.show(); - + // Motors - Set Pwm 8KHz TCCR1A = _BV(WGM10); TCCR1B = _BV(CS11) | _BV(WGM12); @@ -183,7 +183,7 @@ void loop() { } } rgb[3] = atoi(string_rgb.c_str()); - + rgbled.setColor(rgb[0], rgb[1], rgb[2], rgb[3]); rgbled.show(); } @@ -201,6 +201,6 @@ void loop() { else { rgbled.setColor(6, 0, 1, 0); // Left LED turn GREEN rgbled.setColor(12, 0, 1, 0); // Right LED turn GREEN - rgbled.show(); + rgbled.show(); } -} +} diff --git a/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-2D-half-vs.cpp b/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-2D-half-vs.cpp index 6c81ee38ba..03884fcf4c 100644 --- a/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-2D-half-vs.cpp +++ b/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-2D-half-vs.cpp @@ -79,7 +79,7 @@ int main(int argc, const char **argv) // if motor left: led 3 blue // if motor right: led 4 blue - vpSerial *serial 
= NULL; + vpSerial *serial = nullptr; if (!serial_off) { serial = new vpSerial("/dev/ttyAMA0", 115200); @@ -97,7 +97,7 @@ int main(int argc, const char **argv) g.setScale(1); g.acquire(I); - vpDisplay *d = NULL; + vpDisplay *d = nullptr; vpImage O; #ifdef VISP_HAVE_X11 if (display_on) { diff --git a/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-ibvs.cpp b/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-ibvs.cpp index 94ac5f992d..9a5ce986db 100644 --- a/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-ibvs.cpp +++ b/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-ibvs.cpp @@ -83,7 +83,7 @@ int main(int argc, const char **argv) // if motor left: led 3 blue // if motor right: led 4 blue - vpSerial *serial = NULL; + vpSerial *serial = nullptr; if (!serial_off) { serial = new vpSerial("/dev/ttyAMA0", 115200); @@ -101,7 +101,7 @@ int main(int argc, const char **argv) g.setScale(1); g.acquire(I); - vpDisplay *d = NULL; + vpDisplay *d = nullptr; vpImage O; #ifdef VISP_HAVE_X11 if (display_on) { diff --git a/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-pbvs.cpp b/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-pbvs.cpp index e32164a65f..d9726cf305 100644 --- a/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-pbvs.cpp +++ b/tutorial/robot/mbot/raspberry/visp/mbot-apriltag-pbvs.cpp @@ -73,7 +73,7 @@ int main(int argc, const char **argv) // if motor left: led 3 blue // if motor right: led 4 blue - vpSerial *serial = NULL; + vpSerial *serial = nullptr; if (!serial_off) { serial = new vpSerial("/dev/ttyAMA0", 115200); @@ -91,7 +91,7 @@ int main(int argc, const char **argv) g.setScale(1); g.acquire(I); - vpDisplay *d = NULL; + vpDisplay *d = nullptr; vpImage O; #ifdef VISP_HAVE_X11 if (display_on) { diff --git a/tutorial/tracking/model-based/generic-apriltag/tutorial-mb-generic-tracker-apriltag-rs2.cpp b/tutorial/tracking/model-based/generic-apriltag/tutorial-mb-generic-tracker-apriltag-rs2.cpp index 08a514324d..0a15fd9487 100644 --- a/tutorial/tracking/model-based/generic-apriltag/tutorial-mb-generic-tracker-apriltag-rs2.cpp +++ b/tutorial/tracking/model-based/generic-apriltag/tutorial-mb-generic-tracker-apriltag-rs2.cpp @@ -277,8 +277,8 @@ int main(int argc, const char **argv) std::cout << " Projection error: " << opt_projection_error_threshold << std::endl; // Construct display - vpDisplay *d_gray = NULL; - vpDisplay *d_depth = NULL; + vpDisplay *d_gray = nullptr; + vpDisplay *d_depth = nullptr; if (!display_off) { #ifdef VISP_HAVE_X11 @@ -370,10 +370,10 @@ int main(int argc, const char **argv) while (state != state_quit) { if (opt_use_depth) { #ifdef VISP_HAVE_PCL - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, pointcloud, NULL); + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, pointcloud, nullptr); #else - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, NULL, - NULL); + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, nullptr, + nullptr); #endif vpImageConvert::convert(I_color, I_gray); vpImageConvert::createDepthHistogram(I_depth_raw, I_depth); diff --git a/tutorial/tracking/model-based/generic-apriltag/tutorial-mb-generic-tracker-apriltag-webcam.cpp b/tutorial/tracking/model-based/generic-apriltag/tutorial-mb-generic-tracker-apriltag-webcam.cpp index db19ce6318..4f25ea7001 100644 --- a/tutorial/tracking/model-based/generic-apriltag/tutorial-mb-generic-tracker-apriltag-webcam.cpp +++ 
b/tutorial/tracking/model-based/generic-apriltag/tutorial-mb-generic-tracker-apriltag-webcam.cpp @@ -244,7 +244,7 @@ int main(int argc, const char **argv) std::cout << " Projection error: " << opt_projection_error_threshold << std::endl; // Construct display - vpDisplay *d = NULL; + vpDisplay *d = nullptr; if (!display_off) { #ifdef VISP_HAVE_X11 d = new vpDisplayX(I); diff --git a/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-realsense-json-settings.cpp b/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-realsense-json-settings.cpp index 9813864be5..bf3ad1c696 100644 --- a/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-realsense-json-settings.cpp +++ b/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-realsense-json-settings.cpp @@ -182,7 +182,7 @@ int main(int argc, char *argv []) d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream"); while (true) { - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, NULL); + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr); if (use_color) { vpImageConvert::convert(I_color, I_gray); @@ -223,7 +223,7 @@ int main(int argc, char *argv []) bool tracking_failed = false; // Acquire images and update tracker input data - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, NULL, NULL); + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, nullptr, nullptr); if (use_color) { vpImageConvert::convert(I_color, I_gray); diff --git a/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-realsense.cpp b/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-realsense.cpp index ea6eb1db86..d9997ada0c 100644 --- a/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-realsense.cpp +++ b/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-realsense.cpp @@ -181,7 +181,7 @@ int main(int argc, char *argv[]) d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream"); while (true) { - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, NULL); + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr); if (use_edges || use_klt) { vpImageConvert::convert(I_color, I_gray); @@ -319,7 +319,7 @@ int main(int argc, char *argv[]) bool tracking_failed = false; // Acquire images and update tracker input data - realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, NULL, NULL); + realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, nullptr, nullptr); if (use_edges || use_klt || run_auto_init) { vpImageConvert::convert(I_color, I_gray); diff --git a/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-structure-core.cpp b/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-structure-core.cpp index 0888b859ec..623f510f33 100644 --- a/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-structure-core.cpp +++ b/tutorial/tracking/model-based/generic-rgbd/tutorial-mb-generic-tracker-rgbd-structure-core.cpp @@ -178,7 +178,7 @@ int main(int argc, char *argv[]) d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth 
stream"); while (true) { - sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, NULL, NULL); + sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr, nullptr); if (use_edges || use_klt) { vpImageConvert::convert(I_color, I_gray); diff --git a/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-full.cpp b/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-full.cpp index a201e10225..02e666324a 100644 --- a/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-full.cpp +++ b/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-full.cpp @@ -26,9 +26,9 @@ int main(int argc, char **argv) unsigned int thickness = 2; vpImage I; - vpDisplay *display = NULL; - vpPlot *plot = NULL; - vpVideoWriter *writer = NULL; + vpDisplay *display = nullptr; + vpPlot *plot = nullptr; + vpVideoWriter *writer = nullptr; try { for (int i = 0; i < argc; i++) { diff --git a/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-live.cpp b/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-live.cpp index b8853ed417..5520d37ab7 100644 --- a/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-live.cpp +++ b/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-live.cpp @@ -206,7 +206,7 @@ int main(int argc, char **argv) #endif //! [Grabber] - vpDisplay *display = NULL; + vpDisplay *display = nullptr; #if defined(VISP_HAVE_X11) display = new vpDisplayX; #elif defined(VISP_HAVE_GDI) diff --git a/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker.cpp b/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker.cpp index c687d1d01e..1dcd1d4c35 100644 --- a/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker.cpp +++ b/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker.cpp @@ -54,7 +54,7 @@ int main(int argc, char **argv) g.setFileName(opt_videoname); g.open(I); - vpDisplay *display = NULL; + vpDisplay *display = nullptr; #if defined(VISP_HAVE_X11) display = new vpDisplayX; #elif defined(VISP_HAVE_GDI) diff --git a/tutorial/tracking/model-based/old/generic/tutorial-mb-tracker-full.cpp b/tutorial/tracking/model-based/old/generic/tutorial-mb-tracker-full.cpp index ad19b67f52..a0ec20dbea 100644 --- a/tutorial/tracking/model-based/old/generic/tutorial-mb-tracker-full.cpp +++ b/tutorial/tracking/model-based/old/generic/tutorial-mb-tracker-full.cpp @@ -58,7 +58,7 @@ int main(int argc, char **argv) g.setFileName(opt_videoname); g.open(I); - vpDisplay *display = NULL; + vpDisplay *display = nullptr; #if defined(VISP_HAVE_X11) display = new vpDisplayX; #elif defined(VISP_HAVE_GDI) diff --git a/tutorial/tracking/model-based/old/generic/tutorial-mb-tracker.cpp b/tutorial/tracking/model-based/old/generic/tutorial-mb-tracker.cpp index a164189975..18dcb8108e 100644 --- a/tutorial/tracking/model-based/old/generic/tutorial-mb-tracker.cpp +++ b/tutorial/tracking/model-based/old/generic/tutorial-mb-tracker.cpp @@ -56,7 +56,7 @@ int main(int argc, char **argv) g.setFileName(opt_videoname); g.open(I); - vpDisplay *display = NULL; + vpDisplay *display = nullptr; #if defined(VISP_HAVE_X11) display = new vpDisplayX; #elif defined(VISP_HAVE_GDI) From 32bf0ed2030c18557e37a7dde5ade3704ad5160e Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Sat, 28 Oct 2023 17:40:20 +0200 Subject: [PATCH 36/43] Fix typo comming -> coming --- doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox | 2 +- 
example/coin-simulator/simulateCircle2DCamVelocity.cpp | 2 +- .../coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp | 2 +- example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp | 2 +- example/device/display/displayD3D.cpp | 2 +- example/device/display/displayGDI.cpp | 2 +- example/device/display/displayGTK.cpp | 2 +- example/device/display/displayOpenCV.cpp | 2 +- example/device/display/displaySequence.cpp | 2 +- example/device/display/displayX.cpp | 2 +- example/device/display/displayXMulti.cpp | 2 +- example/device/framegrabber/grab1394Two.cpp | 2 +- example/device/framegrabber/grabDisk.cpp | 2 +- example/direct-visual-servoing/photometricVisualServoing.cpp | 2 +- .../photometricVisualServoingWithoutVpServo.cpp | 2 +- example/image/imageDiskRW.cpp | 2 +- example/ogre-simulator/AROgre.cpp | 2 +- example/ogre-simulator/AROgreBasic.cpp | 2 +- example/pose-estimation/poseVirtualVS.cpp | 2 +- example/tools/histogram.cpp | 2 +- example/tracking/trackDot.cpp | 2 +- example/tracking/trackDot2.cpp | 2 +- example/tracking/trackDot2WithAutoDetection.cpp | 2 +- example/tracking/trackKltOpencv.cpp | 2 +- example/tracking/trackMeCircle.cpp | 2 +- example/tracking/trackMeEllipse.cpp | 2 +- example/tracking/trackMeLine.cpp | 2 +- example/tracking/trackMeNurbs.cpp | 2 +- example/video/imageSequenceReader.cpp | 2 +- example/video/videoReader.cpp | 2 +- modules/core/test/image-with-dataset/testConversion.cpp | 2 +- modules/core/test/image-with-dataset/testCrop.cpp | 2 +- modules/core/test/image-with-dataset/testCropAdvanced.cpp | 2 +- modules/core/test/image-with-dataset/testImageComparison.cpp | 2 +- modules/core/test/image-with-dataset/testImageFilter.cpp | 2 +- .../test/image-with-dataset/testImageNormalizedCorrelation.cpp | 2 +- .../core/test/image-with-dataset/testImageTemplateMatching.cpp | 2 +- modules/core/test/image-with-dataset/testIoPGM.cpp | 2 +- modules/core/test/image-with-dataset/testIoPPM.cpp | 2 +- modules/core/test/image-with-dataset/testPerformanceLUT.cpp | 2 +- modules/core/test/image-with-dataset/testReadImage.cpp | 2 +- modules/core/test/image-with-dataset/testUndistortImage.cpp | 2 +- .../core/test/tools/histogram-with-dataset/testHistogram.cpp | 2 +- modules/gui/test/display-with-dataset/testClick.cpp | 2 +- modules/gui/test/display-with-dataset/testMouseEvent.cpp | 2 +- modules/gui/test/display-with-dataset/testVideoDevice.cpp | 2 +- modules/imgproc/test/with-dataset/testAutoThreshold.cpp | 2 +- modules/imgproc/test/with-dataset/testConnectedComponents.cpp | 2 +- modules/imgproc/test/with-dataset/testContours.cpp | 2 +- modules/imgproc/test/with-dataset/testFloodFill.cpp | 2 +- modules/imgproc/test/with-dataset/testImgproc.cpp | 2 +- modules/robot/src/light/vpRingLight.cpp | 2 +- modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp | 2 +- .../tracker/blob/test/tracking-with-dataset/testTrackDot.cpp | 2 +- 54 files changed, 54 insertions(+), 54 deletions(-) diff --git a/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox b/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox index fc67ffae45..064fb4d963 100644 --- a/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox +++ b/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox @@ -201,7 +201,7 @@ $ mavproxy.py --master=/dev/ttyACM0 --out=udpout:192.168.30.111:14550 --out=udpo \subsection pixhawk_stream_mocap Stream MoCap to Pixhawk -The code sendMocapToPixhawk.cpp allows you to send the MoCap stream comming from a Qualisys or Vicon motion capture system +The code sendMocapToPixhawk.cpp allows you to send the MoCap stream 
coming from a Qualisys or Vicon motion capture system to the Pixhawk using MavLink. This code is based on vpMocapQualisys and vpMocapVicon classes that allows to get the vehicle FLU body frame in the MoCAP ENU reference frame. Then we use vpRobotMavsdk class and more precisely vpRobotMavsdk::sendMocapData() that internally send the FRD body frame pose with respect to the NED reference frame as a MavLink message to the Pixhawk. diff --git a/example/coin-simulator/simulateCircle2DCamVelocity.cpp b/example/coin-simulator/simulateCircle2DCamVelocity.cpp index 060f9be702..093fdb7af2 100644 --- a/example/coin-simulator/simulateCircle2DCamVelocity.cpp +++ b/example/coin-simulator/simulateCircle2DCamVelocity.cpp @@ -313,7 +313,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp b/example/coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp index 7c5e547753..fa8694487f 100644 --- a/example/coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp +++ b/example/coin-simulator/simulateFourPoints2DCartesianCamVelocity.cpp @@ -324,7 +324,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp b/example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp index aeea07dd54..e6b98431cc 100644 --- a/example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp +++ b/example/coin-simulator/simulateFourPoints2DPolarCamVelocity.cpp @@ -346,7 +346,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/device/display/displayD3D.cpp b/example/device/display/displayD3D.cpp index 0967e93bd0..6d892470a2 100644 --- a/example/device/display/displayD3D.cpp +++ b/example/device/display/displayD3D.cpp @@ -241,7 +241,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/device/display/displayGDI.cpp b/example/device/display/displayGDI.cpp index c3daf5eca2..92728dec92 100644 --- a/example/device/display/displayGDI.cpp +++ b/example/device/display/displayGDI.cpp @@ -241,7 +241,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/device/display/displayGTK.cpp b/example/device/display/displayGTK.cpp index f14e4094bf..5ed0623fe8 100644 --- a/example/device/display/displayGTK.cpp +++ b/example/device/display/displayGTK.cpp @@ -248,7 +248,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/device/display/displayOpenCV.cpp b/example/device/display/displayOpenCV.cpp index f3dfed335f..9a3a9aa54a 100644 --- a/example/device/display/displayOpenCV.cpp +++ b/example/device/display/displayOpenCV.cpp @@ -249,7 +249,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/device/display/displaySequence.cpp b/example/device/display/displaySequence.cpp index 4f8cd6d4ad..b8647b5b0d 100644 --- a/example/device/display/displaySequence.cpp +++ b/example/device/display/displaySequence.cpp @@ -264,7 +264,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/device/display/displayX.cpp b/example/device/display/displayX.cpp index bed058729b..47a52eae04 100644 --- a/example/device/display/displayX.cpp +++ b/example/device/display/displayX.cpp @@ -247,7 +247,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/device/display/displayXMulti.cpp b/example/device/display/displayXMulti.cpp index d1ada273a4..4666e71377 100644 --- a/example/device/display/displayXMulti.cpp +++ b/example/device/display/displayXMulti.cpp @@ -249,7 +249,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/device/framegrabber/grab1394Two.cpp b/example/device/framegrabber/grab1394Two.cpp index 79d12ba46c..4e0634dc52 100644 --- a/example/device/framegrabber/grab1394Two.cpp +++ b/example/device/framegrabber/grab1394Two.cpp @@ -423,7 +423,7 @@ int main(int argc, const char **argv) ncameras = 1; // acquisition from only one camera } // Offset is used to set the correspondancy between and image and the - // camera. For example, images comming from camera (i+offset) are + // camera. For example, images coming from camera (i+offset) are // available in I[i] offset = camera; diff --git a/example/device/framegrabber/grabDisk.cpp b/example/device/framegrabber/grabDisk.cpp index cb4ff2dc05..c3c3424fef 100644 --- a/example/device/framegrabber/grabDisk.cpp +++ b/example/device/framegrabber/grabDisk.cpp @@ -253,7 +253,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/direct-visual-servoing/photometricVisualServoing.cpp b/example/direct-visual-servoing/photometricVisualServoing.cpp index 3d64ef22be..b5e2c42a9a 100644 --- a/example/direct-visual-servoing/photometricVisualServoing.cpp +++ b/example/direct-visual-servoing/photometricVisualServoing.cpp @@ -199,7 +199,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/direct-visual-servoing/photometricVisualServoingWithoutVpServo.cpp b/example/direct-visual-servoing/photometricVisualServoingWithoutVpServo.cpp index 19ead6ef91..8efc71e088 100644 --- a/example/direct-visual-servoing/photometricVisualServoingWithoutVpServo.cpp +++ b/example/direct-visual-servoing/photometricVisualServoingWithoutVpServo.cpp @@ -200,7 +200,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/image/imageDiskRW.cpp b/example/image/imageDiskRW.cpp index c4f39f762a..a26addbfc6 100644 --- a/example/image/imageDiskRW.cpp +++ b/example/image/imageDiskRW.cpp @@ -224,7 +224,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/ogre-simulator/AROgre.cpp b/example/ogre-simulator/AROgre.cpp index 05b4b36f24..9e6699830b 100644 --- a/example/ogre-simulator/AROgre.cpp +++ b/example/ogre-simulator/AROgre.cpp @@ -560,7 +560,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/ogre-simulator/AROgreBasic.cpp b/example/ogre-simulator/AROgreBasic.cpp index 7f740ac9c0..fd6869b368 100644 --- a/example/ogre-simulator/AROgreBasic.cpp +++ b/example/ogre-simulator/AROgreBasic.cpp @@ -424,7 +424,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/pose-estimation/poseVirtualVS.cpp b/example/pose-estimation/poseVirtualVS.cpp index 74060d07fc..d7cc269439 100644 --- a/example/pose-estimation/poseVirtualVS.cpp +++ b/example/pose-estimation/poseVirtualVS.cpp @@ -275,7 +275,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (opt_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tools/histogram.cpp b/example/tools/histogram.cpp index 22b54ee5ba..df3923794a 100644 --- a/example/tools/histogram.cpp +++ b/example/tools/histogram.cpp @@ -217,7 +217,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (opt_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tracking/trackDot.cpp b/example/tracking/trackDot.cpp index 3e5d59141b..ee24e66073 100644 --- a/example/tracking/trackDot.cpp +++ b/example/tracking/trackDot.cpp @@ -250,7 +250,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tracking/trackDot2.cpp b/example/tracking/trackDot2.cpp index d5172f9b09..a3e0bd9f12 100644 --- a/example/tracking/trackDot2.cpp +++ b/example/tracking/trackDot2.cpp @@ -249,7 +249,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tracking/trackDot2WithAutoDetection.cpp b/example/tracking/trackDot2WithAutoDetection.cpp index a36f81e180..4372509e7d 100644 --- a/example/tracking/trackDot2WithAutoDetection.cpp +++ b/example/tracking/trackDot2WithAutoDetection.cpp @@ -297,7 +297,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tracking/trackKltOpencv.cpp b/example/tracking/trackKltOpencv.cpp index 1047b4d5bc..73df296ded 100644 --- a/example/tracking/trackKltOpencv.cpp +++ b/example/tracking/trackKltOpencv.cpp @@ -257,7 +257,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tracking/trackMeCircle.cpp b/example/tracking/trackMeCircle.cpp index 26e787f8ca..7baf042c21 100644 --- a/example/tracking/trackMeCircle.cpp +++ b/example/tracking/trackMeCircle.cpp @@ -208,7 +208,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tracking/trackMeEllipse.cpp b/example/tracking/trackMeEllipse.cpp index 3df632b198..b9ddd05ae0 100644 --- a/example/tracking/trackMeEllipse.cpp +++ b/example/tracking/trackMeEllipse.cpp @@ -336,7 +336,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tracking/trackMeLine.cpp b/example/tracking/trackMeLine.cpp index e578375201..2057390cf4 100644 --- a/example/tracking/trackMeLine.cpp +++ b/example/tracking/trackMeLine.cpp @@ -256,7 +256,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/tracking/trackMeNurbs.cpp b/example/tracking/trackMeNurbs.cpp index 9b0efefe0b..82a7d3c3e6 100644 --- a/example/tracking/trackMeNurbs.cpp +++ b/example/tracking/trackMeNurbs.cpp @@ -209,7 +209,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/video/imageSequenceReader.cpp b/example/video/imageSequenceReader.cpp index c697273777..a8e841bbfa 100644 --- a/example/video/imageSequenceReader.cpp +++ b/example/video/imageSequenceReader.cpp @@ -227,7 +227,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/example/video/videoReader.cpp b/example/video/videoReader.cpp index 9b156b9983..57992dd3bc 100644 --- a/example/video/videoReader.cpp +++ b/example/video/videoReader.cpp @@ -207,7 +207,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testConversion.cpp b/modules/core/test/image-with-dataset/testConversion.cpp index 3fc667e281..c164315390 100644 --- a/modules/core/test/image-with-dataset/testConversion.cpp +++ b/modules/core/test/image-with-dataset/testConversion.cpp @@ -224,7 +224,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (opt_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testCrop.cpp b/modules/core/test/image-with-dataset/testCrop.cpp index 75333030ae..ef420ee246 100644 --- a/modules/core/test/image-with-dataset/testCrop.cpp +++ b/modules/core/test/image-with-dataset/testCrop.cpp @@ -214,7 +214,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (opt_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testCropAdvanced.cpp b/modules/core/test/image-with-dataset/testCropAdvanced.cpp index 7e748ead18..06c662b6f4 100644 --- a/modules/core/test/image-with-dataset/testCropAdvanced.cpp +++ b/modules/core/test/image-with-dataset/testCropAdvanced.cpp @@ -205,7 +205,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (opt_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testImageComparison.cpp b/modules/core/test/image-with-dataset/testImageComparison.cpp index e5782b9651..19821c0726 100644 --- a/modules/core/test/image-with-dataset/testImageComparison.cpp +++ b/modules/core/test/image-with-dataset/testImageComparison.cpp @@ -165,7 +165,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testImageFilter.cpp b/modules/core/test/image-with-dataset/testImageFilter.cpp index 51315e8b0e..d05719987c 100644 --- a/modules/core/test/image-with-dataset/testImageFilter.cpp +++ b/modules/core/test/image-with-dataset/testImageFilter.cpp @@ -219,7 +219,7 @@ int main(int argc, const char *argv[]) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testImageNormalizedCorrelation.cpp b/modules/core/test/image-with-dataset/testImageNormalizedCorrelation.cpp index 7b49cb53db..6819c9bde6 100644 --- a/modules/core/test/image-with-dataset/testImageNormalizedCorrelation.cpp +++ b/modules/core/test/image-with-dataset/testImageNormalizedCorrelation.cpp @@ -169,7 +169,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testImageTemplateMatching.cpp b/modules/core/test/image-with-dataset/testImageTemplateMatching.cpp index ffab07fb18..e3f589c703 100644 --- a/modules/core/test/image-with-dataset/testImageTemplateMatching.cpp +++ b/modules/core/test/image-with-dataset/testImageTemplateMatching.cpp @@ -230,7 +230,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testIoPGM.cpp b/modules/core/test/image-with-dataset/testIoPGM.cpp index b6dcb78ee8..2c5d507367 100644 --- a/modules/core/test/image-with-dataset/testIoPGM.cpp +++ b/modules/core/test/image-with-dataset/testIoPGM.cpp @@ -208,7 +208,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testIoPPM.cpp b/modules/core/test/image-with-dataset/testIoPPM.cpp index 363e480708..5319ce63ac 100644 --- a/modules/core/test/image-with-dataset/testIoPPM.cpp +++ b/modules/core/test/image-with-dataset/testIoPPM.cpp @@ -210,7 +210,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testPerformanceLUT.cpp b/modules/core/test/image-with-dataset/testPerformanceLUT.cpp index 0705fbebf4..9d8f95db45 100644 --- a/modules/core/test/image-with-dataset/testPerformanceLUT.cpp +++ b/modules/core/test/image-with-dataset/testPerformanceLUT.cpp @@ -299,7 +299,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testReadImage.cpp b/modules/core/test/image-with-dataset/testReadImage.cpp index 13fecb8b98..7d63c4faa4 100644 --- a/modules/core/test/image-with-dataset/testReadImage.cpp +++ b/modules/core/test/image-with-dataset/testReadImage.cpp @@ -174,7 +174,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/image-with-dataset/testUndistortImage.cpp b/modules/core/test/image-with-dataset/testUndistortImage.cpp index 9f63d5687f..cbe07c8476 100644 --- a/modules/core/test/image-with-dataset/testUndistortImage.cpp +++ b/modules/core/test/image-with-dataset/testUndistortImage.cpp @@ -239,7 +239,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (opt_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/core/test/tools/histogram-with-dataset/testHistogram.cpp b/modules/core/test/tools/histogram-with-dataset/testHistogram.cpp index 6dfb62ce80..e3e2ec5100 100644 --- a/modules/core/test/tools/histogram-with-dataset/testHistogram.cpp +++ b/modules/core/test/tools/histogram-with-dataset/testHistogram.cpp @@ -226,7 +226,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/gui/test/display-with-dataset/testClick.cpp b/modules/gui/test/display-with-dataset/testClick.cpp index 3d2602b086..4417c1cd1f 100644 --- a/modules/gui/test/display-with-dataset/testClick.cpp +++ b/modules/gui/test/display-with-dataset/testClick.cpp @@ -299,7 +299,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/gui/test/display-with-dataset/testMouseEvent.cpp b/modules/gui/test/display-with-dataset/testMouseEvent.cpp index 0b208897b3..863ab1c87d 100644 --- a/modules/gui/test/display-with-dataset/testMouseEvent.cpp +++ b/modules/gui/test/display-with-dataset/testMouseEvent.cpp @@ -373,7 +373,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/gui/test/display-with-dataset/testVideoDevice.cpp b/modules/gui/test/display-with-dataset/testVideoDevice.cpp index d73a428e71..e5ed80be5c 100644 --- a/modules/gui/test/display-with-dataset/testVideoDevice.cpp +++ b/modules/gui/test/display-with-dataset/testVideoDevice.cpp @@ -297,7 +297,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/imgproc/test/with-dataset/testAutoThreshold.cpp b/modules/imgproc/test/with-dataset/testAutoThreshold.cpp index b4eca72f85..2de7a84f55 100644 --- a/modules/imgproc/test/with-dataset/testAutoThreshold.cpp +++ b/modules/imgproc/test/with-dataset/testAutoThreshold.cpp @@ -213,7 +213,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. 
If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/imgproc/test/with-dataset/testConnectedComponents.cpp b/modules/imgproc/test/with-dataset/testConnectedComponents.cpp index 970002f264..de888663c9 100644 --- a/modules/imgproc/test/with-dataset/testConnectedComponents.cpp +++ b/modules/imgproc/test/with-dataset/testConnectedComponents.cpp @@ -252,7 +252,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/imgproc/test/with-dataset/testContours.cpp b/modules/imgproc/test/with-dataset/testContours.cpp index 029e2d8ece..b1c7a9b9a3 100644 --- a/modules/imgproc/test/with-dataset/testContours.cpp +++ b/modules/imgproc/test/with-dataset/testContours.cpp @@ -239,7 +239,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/imgproc/test/with-dataset/testFloodFill.cpp b/modules/imgproc/test/with-dataset/testFloodFill.cpp index e03293ac7a..dfe48dcd43 100644 --- a/modules/imgproc/test/with-dataset/testFloodFill.cpp +++ b/modules/imgproc/test/with-dataset/testFloodFill.cpp @@ -231,7 +231,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/imgproc/test/with-dataset/testImgproc.cpp b/modules/imgproc/test/with-dataset/testImgproc.cpp index c6b6a8a27d..50b60a772d 100644 --- a/modules/imgproc/test/with-dataset/testImgproc.cpp +++ b/modules/imgproc/test/with-dataset/testImgproc.cpp @@ -210,7 +210,7 @@ int main(int argc, const char **argv) } // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; diff --git a/modules/robot/src/light/vpRingLight.cpp b/modules/robot/src/light/vpRingLight.cpp index 207c6711ab..193e882168 100644 --- a/modules/robot/src/light/vpRingLight.cpp +++ b/modules/robot/src/light/vpRingLight.cpp @@ -134,7 +134,7 @@ void vpRingLight::pulse(double time) // Data set by the parallel port: // - D1: a pulse with duration fixed by time // - D2: 0 } - // - D3: 1 } To control the light directly throw the pulse comming from D1 + // - D3: 1 } To control the light directly throw the pulse coming from D1 // D2 and D3 are used to select the multiplexer output. 
// Light must be connected to output 1+,1- diff --git a/modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp b/modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp index 45d99cb07c..431172b633 100644 --- a/modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp +++ b/modules/robot/src/real-robot/bebop2/vpRobotBebop2.cpp @@ -1468,7 +1468,7 @@ void vpRobotBebop2::computeFrame(ARCONTROLLER_Frame_t *frame) av_frame_unref(m_picture); } - // Decoding frame comming from the drone + // Decoding frame coming from the drone m_packet.data = frame->data; m_packet.size = static_cast(frame->used); diff --git a/modules/tracker/blob/test/tracking-with-dataset/testTrackDot.cpp b/modules/tracker/blob/test/tracking-with-dataset/testTrackDot.cpp index 284e33d8d6..ff04770ebb 100644 --- a/modules/tracker/blob/test/tracking-with-dataset/testTrackDot.cpp +++ b/modules/tracker/blob/test/tracking-with-dataset/testTrackDot.cpp @@ -200,7 +200,7 @@ int main(int argc, const char **argv) ipath = opt_ipath; // Compare ipath and env_ipath. If they differ, we take into account - // the input path comming from the command line option + // the input path coming from the command line option if (!opt_ipath.empty() && !env_ipath.empty()) { if (ipath != env_ipath) { std::cout << std::endl << "WARNING: " << std::endl; From daaf0544b47e870e93eb7094ac004e775af41d50 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Sat, 28 Oct 2023 18:20:23 +0200 Subject: [PATCH 37/43] Fix vpCircleHoughTransform when nlohman_jsn not available --- modules/imgproc/src/vpCircleHoughTransform.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index de36e4f398..f1be206cd4 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -98,6 +98,8 @@ vpCircleHoughTransform::saveConfigurationInJSON(const std::string &jsonPath) con m_algoParams.saveConfigurationInJSON(jsonPath); } +#endif + void vpCircleHoughTransform::initGaussianFilters() { @@ -665,5 +667,3 @@ std::ostream &operator<<(std::ostream &os, const vpCircleHoughTransform &detecto os << detector.toString(); return os; } - -#endif From 2b8de5a82249d8ff3b20081b6f1b4e99161fce49 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Mon, 30 Oct 2023 09:21:33 +0100 Subject: [PATCH 38/43] [FIX] Fix problems of roundings in the computeAngularCoverageInRoI method due to the use of M_PIf --- .../core/include/visp3/core/vpImageCircle.h | 6 +++-- modules/core/src/image/vpImageCircle.cpp | 24 +++++++++++++------ 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/modules/core/include/visp3/core/vpImageCircle.h b/modules/core/include/visp3/core/vpImageCircle.h index 17fefa8b03..04a454fea8 100644 --- a/modules/core/include/visp3/core/vpImageCircle.h +++ b/modules/core/include/visp3/core/vpImageCircle.h @@ -76,19 +76,21 @@ class VISP_EXPORT vpImageCircle * Compute the angular coverage, in terms of radians, that is contained in the Region of Interest (RoI). * \sa computeArcLengthInRoI(), computeArcLengthInRoI(const vpRect &roi) * \param[in] roi The rectangular RoI in which we want to know the number of pixels of the circle that are contained. + * \param[in] roundingTolerance The tolerance on the angle when the angle is close to a negative multiple of 2 * M_PIf. * \return Returns angular coverage of a circle in a ROI as an angle value in radians. 
* More precisely, it returns 2.f * M_PI for a circle that is fully visible in the RoI, or the sum of the angles * of the arc(s) that is(are) visible in the RoI. */ - float computeAngularCoverageInRoI(const vpRect &roi) const; + float computeAngularCoverageInRoI(const vpRect &roi, const float &roundingTolerance = 0.001f) const; /*! * Compute the arc length, in terms of number of pixels, that is contained in the Region of Interest (RoI). * \sa computeAngularCoverageInRoI(), computeAngularCoverageInRoI(const vpRect &roi) * \param[in] roi The rectangular RoI in which we want to know the number of pixels of the circle that are contained. + * \param[in] roundingTolerance The tolerance on the angle when the angle is close to 2.f * M_PIf . * \return The number of pixels of the circle that are contained in the RoI. */ - float computeArcLengthInRoI(const vpRect &roi) const; + float computeArcLengthInRoI(const vpRect &roi, const float &roundingTolerance = 0.001f) const; /*! * Get the center of the image (2D) circle diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index f6b30c3bac..6c749b4205 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -894,7 +894,7 @@ void computeIntersectionsAllAxes(const float &u_c, const float &v_c, const float delta_theta += (theta_v_max_right - theta_u_max_bottom) + (theta_u_min_bottom - theta_v_max_left); } -float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const +float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi, const float &roundingTolerance) const { float delta_theta = 0.f; vpImagePoint center = m_center; @@ -989,16 +989,26 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const std::cerr << "vmin_roi = " << vmin_roi << "\tvmax_roi = " << vmax_roi << std::endl << std::flush; throw(vpException(vpException::fatalError, "This case should never happen. 
Please contact Inria to make fix the problem")); } - return delta_theta; -} -float vpImageCircle::computeArcLengthInRoI(const vpRect &roi) const -{ - float delta_theta = computeAngularCoverageInRoI(roi); if (delta_theta < 0) { // Needed since M_PIf is used - delta_theta += 4 * M_PIf; + float quotient = std::floor(delta_theta / (2.f * M_PIf)); + float rest = delta_theta - quotient * 2.f * M_PIf; + if (rest < roundingTolerance && delta_theta < -M_PIf) { + // If the angle is a negative multiple of 2.f * M_PIf we consider it to be 2.f * M_PIf + delta_theta = 2.f * M_PIf; + } + else { + //Otherwise, it corresponds to delta_theta modulo 2.f * M_PIf + delta_theta = rest; + } } + return delta_theta; +} + +float vpImageCircle::computeArcLengthInRoI(const vpRect &roi, const float &roundingTolerance) const +{ + float delta_theta = computeAngularCoverageInRoI(roi, roundingTolerance); return delta_theta * m_radius; } From 0124b27809a9616de623820437ca7fca3031c5e1 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Mon, 30 Oct 2023 09:32:12 +0100 Subject: [PATCH 39/43] [FIX] Ensure a result between 0 and 2 Pi() --- modules/core/src/image/vpImageCircle.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index 6c749b4205..25dfdd12b2 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -990,10 +990,10 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi, const float throw(vpException(vpException::fatalError, "This case should never happen. Please contact Inria to make fix the problem")); } - if (delta_theta < 0) { // Needed since M_PIf is used + if (delta_theta < 0 || delta_theta > 2.f * M_PIf) { // Needed since M_PIf is used float quotient = std::floor(delta_theta / (2.f * M_PIf)); float rest = delta_theta - quotient * 2.f * M_PIf; - if (rest < roundingTolerance && delta_theta < -M_PIf) { + if (rest < roundingTolerance && (delta_theta < -M_PIf || delta_theta > M_PIf)) { // If the angle is a negative multiple of 2.f * M_PIf we consider it to be 2.f * M_PIf delta_theta = 2.f * M_PIf; } From 3d3cc3094ef35b254886eb19e4329f18eee67629 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Mon, 30 Oct 2023 10:00:12 +0100 Subject: [PATCH 40/43] [FIX] Fixed a typo in the computation of the angles of intersection --- modules/core/src/image/vpImageCircle.cpp | 40 +++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index 25dfdd12b2..4a7c4c6bcc 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -481,7 +481,7 @@ void computeIntersectionsTopLeftBottom(const float &u_c, const float &v_c, const std::pair crossing_theta_u_min, crossing_theta_u_max; std::pair crossing_theta_v_min, crossing_theta_v_max; float crossing_u_top = vmin_roi; // We cross the u-axis of the top axis of the RoI at the minimum v-coordinate of the RoI - float crossing_v = vmin_roi; // We cross the v-axis of the RoI at the minimum u-coordinate of the RoI + float crossing_v = umin_roi; // We cross the v-axis of the RoI at the minimum u-coordinate of the RoI computePerpendicularAxesIntersections(u_c, v_c, radius, crossing_u_top, crossing_v, crossing_theta_u_min, crossing_theta_u_max, crossing_theta_v_min, crossing_theta_v_max); @@ -501,25 +501,63 @@ void computeIntersectionsTopLeftBottom(const float &u_c, const float &v_c, const 
float theta_u_max_bottom = crossing_theta_u_max.first; float u_umin_bottom = crossing_theta_u_min.second; float u_umax_bottom = crossing_theta_u_max.second; + int cas = -1; if (u_umin_top >= umin_roi && u_umin_bottom >= umin_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { // case intersection top + left + bottom twice delta_theta = (theta_v_min - theta_u_min_top) + (theta_u_max_top - theta_u_max_bottom) + (theta_u_min_bottom - theta_v_max); + cas = 0; } else if (u_umin_top <= umin_roi && v_vmin <= vmin_roi && u_umin_bottom <= umin_roi && v_vmax >= vmax_roi) { // case intersection top and bottom delta_theta = (theta_u_max_top - theta_u_max_bottom); + cas = 1; } else if (u_umax_top <= umin_roi && u_umax_bottom <= umin_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { // case left only computeIntersectionsLeftBorderOnly(u_c, umin_roi, radius, delta_theta); + cas = 2; } else if (u_umax_bottom > umin_roi && v_vmin >= vmin_roi) { // case bottom/left corner computeIntersectionsBottomLeft(u_c, v_c, umin_roi, vmax_roi, radius, delta_theta); + cas = 3; } else if (u_umax_top > umin_roi && v_vmax <= vmax_roi) { // case top/left corner computeIntersectionsTopLeft(u_c, v_c, umin_roi, vmin_roi, radius, delta_theta); + cas = 4; + } + + if (delta_theta < 0.f) { + std::cout << "--- computeIntersectionsTopLeftBottom with negative result ---" << std::endl; + std::cout << "\tu_umin_top = " << u_umin_top << "\tu_umax_top = " << u_umax_top << std::endl; + std::cout << "\tu_umin_bot = " << u_umin_bottom << "\tu_umax_bot = " << u_umax_bottom << std::endl; + std::cout << "\tv_vmin = " << v_vmin << "\tv_vmax = " << v_vmax << std::endl; + std::cout << "\ttheta_u_min_top = " << theta_u_min_top << "\ttheta_u_max_top = " << theta_u_max_top << std::endl; + std::cout << "\ttheta_u_min_bot = " << theta_u_min_bottom << "\ttheta_u_max_bot = " << theta_u_max_bottom << std::endl; + std::cout << "\ttheta_v_min = " << theta_v_min << "\ttheta_v_max = " << theta_v_max << std::endl; + std::cout << "\tcas = "; + std::string nameCase; + switch (cas) { + case 0: + nameCase = "top + left + bottom twice"; + break; + case 1: + nameCase = "top and bottom"; + break; + case 2: + nameCase = "left only"; + break; + case 3: + nameCase = "bottom/left corner"; + break; + case 4: + nameCase = "top/left corner"; + break; + default: + throw (vpException(vpException::fatalError, "Uncorrect case")); + } + std::cout << nameCase << std::endl; } } From 0762907ce2b36607b8614ed7af2b09bbcdb794c6 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Mon, 30 Oct 2023 11:09:56 +0100 Subject: [PATCH 41/43] [CORPS] Added a 'modulo' operation for float and double in vpMath + [FIX] Corrected visibility check for single axis crossing --- modules/core/include/visp3/core/vpMath.h | 48 +++++++-- modules/core/src/image/vpImageCircle.cpp | 53 +++------ .../test/tools/geometry/testImageCircle.cpp | 101 ++++++++++++++++++ 3 files changed, 153 insertions(+), 49 deletions(-) diff --git a/modules/core/include/visp3/core/vpMath.h b/modules/core/include/visp3/core/vpMath.h index db8bf2e9c7..17bbbe6235 100644 --- a/modules/core/include/visp3/core/vpMath.h +++ b/modules/core/include/visp3/core/vpMath.h @@ -140,7 +140,7 @@ class VISP_EXPORT vpMath if (theta1 > M_PIf) { theta1 -= 2.0f * M_PIf; } - else if (theta1 < -M_PIf) { + else if (theta1 <= -M_PIf) { theta1 += 2.0f * M_PIf; } return theta1; @@ -164,6 +164,36 @@ class VISP_EXPORT vpMath return theta1; } + /** + * \brief Gives the rest of \b value divided by \b modulo when + * the quotient can only be an integer. 
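+   *
+   * The quotient is computed with std::floor, so the rest lies in
+   * [0, modulo) even for a negative \b value. For instance the rest of
+   * -0.5f * M_PIf divided by 2.f * M_PIf is 1.5f * M_PIf.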
+ * + * \param[in] value The value we want to know the rest in the "modulo" operation. + * \param[in] modulo The divider. + * \return float The rest as in a modulo operation. + */ + static float moduloFloat(const float &value, const float &modulo) + { + float quotient = std::floor(value / modulo); + float rest = value - quotient * modulo; + return rest; + } + + /** + * \brief Gives the rest of \b value divided by \b modulo when + * the quotient can only be an integer. + * + * \param[in] value The value we want to know the rest in the "modulo" operation. + * \param[in] modulo The divider. + * \return double The rest as in a modulo operation. + */ + static double moduloDouble(const double &value, const double &modulo) + { + double quotient = std::floor(value / modulo); + double rest = value - quotient * modulo; + return rest; + } + /*! Compute x square value. \return Square value \f$ x^2 \f$. @@ -193,9 +223,9 @@ class VISP_EXPORT vpMath } return (v < lower) ? lower : (upper < v) ? upper : v; #endif - } + } - // round x to the nearest integer + // round x to the nearest integer static inline int round(double x); // return the sign of x (+-1) @@ -330,14 +360,14 @@ class VISP_EXPORT vpMath private: static const double ang_min_sinc; static const double ang_min_mc; - }; +}; - // Begining of the inline functions definition +// Begining of the inline functions definition - /*! - Computes and returns x! - \param x : parameter of factorial function. - */ +/*! + Computes and returns x! + \param x : parameter of factorial function. +*/ double vpMath::fact(unsigned int x) { if ((x == 1) || (x == 0)) diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index 4a7c4c6bcc..fd9ed329ec 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -77,6 +77,9 @@ void computeIntersectionsLeftBorderOnly(const float &u_c, const float &umin_roi, float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); delta_theta = theta_max - theta_min; + if (u_c < umin_roi && std::abs(delta_theta - 2 * M_PIf) < 2.f * std::numeric_limits::epsilon()) { + delta_theta = 0.f; + } } /*! @@ -99,6 +102,9 @@ void computeIntersectionsRightBorderOnly(const float &u_c, const float &umax_roi float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); delta_theta = 2.f * M_PIf - (theta_max - theta_min); + if (u_c > umax_roi && std::abs(delta_theta - 2 * M_PIf) < 2.f * std::numeric_limits::epsilon()) { + delta_theta = 0.f; + } } /*! @@ -138,6 +144,9 @@ void computeIntersectionsTopBorderOnly(const float &v_c, const float &vmin_roi, else { delta_theta = theta_max - theta_min; } + if (v_c < vmin_roi && std::abs(delta_theta - 2 * M_PIf) < 2.f * std::numeric_limits::epsilon()) { + delta_theta = 0.f; + } } /*! @@ -177,6 +186,9 @@ void computeIntersectionsBottomBorderOnly(const float &v_c, const float &vmax_ro else { delta_theta = 2.f * M_PIf - (theta_max - theta_min); } + if (v_c > vmax_roi && std::abs(delta_theta - 2 * M_PIf) < 2.f * std::numeric_limits::epsilon()) { + delta_theta = 0.f; + } } /*! 
@@ -501,63 +513,25 @@ void computeIntersectionsTopLeftBottom(const float &u_c, const float &v_c, const float theta_u_max_bottom = crossing_theta_u_max.first; float u_umin_bottom = crossing_theta_u_min.second; float u_umax_bottom = crossing_theta_u_max.second; - int cas = -1; if (u_umin_top >= umin_roi && u_umin_bottom >= umin_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { // case intersection top + left + bottom twice delta_theta = (theta_v_min - theta_u_min_top) + (theta_u_max_top - theta_u_max_bottom) + (theta_u_min_bottom - theta_v_max); - cas = 0; } else if (u_umin_top <= umin_roi && v_vmin <= vmin_roi && u_umin_bottom <= umin_roi && v_vmax >= vmax_roi) { // case intersection top and bottom delta_theta = (theta_u_max_top - theta_u_max_bottom); - cas = 1; } else if (u_umax_top <= umin_roi && u_umax_bottom <= umin_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { // case left only computeIntersectionsLeftBorderOnly(u_c, umin_roi, radius, delta_theta); - cas = 2; } else if (u_umax_bottom > umin_roi && v_vmin >= vmin_roi) { // case bottom/left corner computeIntersectionsBottomLeft(u_c, v_c, umin_roi, vmax_roi, radius, delta_theta); - cas = 3; } else if (u_umax_top > umin_roi && v_vmax <= vmax_roi) { // case top/left corner computeIntersectionsTopLeft(u_c, v_c, umin_roi, vmin_roi, radius, delta_theta); - cas = 4; - } - - if (delta_theta < 0.f) { - std::cout << "--- computeIntersectionsTopLeftBottom with negative result ---" << std::endl; - std::cout << "\tu_umin_top = " << u_umin_top << "\tu_umax_top = " << u_umax_top << std::endl; - std::cout << "\tu_umin_bot = " << u_umin_bottom << "\tu_umax_bot = " << u_umax_bottom << std::endl; - std::cout << "\tv_vmin = " << v_vmin << "\tv_vmax = " << v_vmax << std::endl; - std::cout << "\ttheta_u_min_top = " << theta_u_min_top << "\ttheta_u_max_top = " << theta_u_max_top << std::endl; - std::cout << "\ttheta_u_min_bot = " << theta_u_min_bottom << "\ttheta_u_max_bot = " << theta_u_max_bottom << std::endl; - std::cout << "\ttheta_v_min = " << theta_v_min << "\ttheta_v_max = " << theta_v_max << std::endl; - std::cout << "\tcas = "; - std::string nameCase; - switch (cas) { - case 0: - nameCase = "top + left + bottom twice"; - break; - case 1: - nameCase = "top and bottom"; - break; - case 2: - nameCase = "left only"; - break; - case 3: - nameCase = "bottom/left corner"; - break; - case 4: - nameCase = "top/left corner"; - break; - default: - throw (vpException(vpException::fatalError, "Uncorrect case")); - } - std::cout << nameCase << std::endl; } } @@ -1029,8 +1003,7 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi, const float } if (delta_theta < 0 || delta_theta > 2.f * M_PIf) { // Needed since M_PIf is used - float quotient = std::floor(delta_theta / (2.f * M_PIf)); - float rest = delta_theta - quotient * 2.f * M_PIf; + float rest = vpMath::moduloFloat(delta_theta, 2.f * M_PIf); if (rest < roundingTolerance && (delta_theta < -M_PIf || delta_theta > M_PIf)) { // If the angle is a negative multiple of 2.f * M_PIf we consider it to be 2.f * M_PIf delta_theta = 2.f * M_PIf; diff --git a/modules/core/test/tools/geometry/testImageCircle.cpp b/modules/core/test/tools/geometry/testImageCircle.cpp index bc4cd432b1..8b78ae4ae3 100644 --- a/modules/core/test/tools/geometry/testImageCircle.cpp +++ b/modules/core/test/tools/geometry/testImageCircle.cpp @@ -173,6 +173,31 @@ int main() hasSucceeded &= isValueOK; } + // Test with circle touching the left border, all the circle is hidden + { + // Formula: uc = OFFSET - RADIUS * cos(theta) + 
// theta := PI + float uc = OFFSET - RADIUS; + float vc = OFFSET - 100.f; + vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); + float arcLengthCircle = circle.computeArcLengthInRoI(roi); + float theoreticalValue = 0.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); + std::string statusTest; + if (isValueOK) { + statusTest = "SUCCESS"; + } + else { + statusTest = "FAILED"; + } + std::cout << "Test with circle touching the left border, all the circle is hidden." << std::endl; + std::cout << "\tarc length =" << arcLengthCircle << std::endl; + std::cout << "\ttheoretical length =" << theoreticalValue << std::endl; + std::cout << "\ttest status = " << statusTest << std::endl; + + hasSucceeded &= isValueOK; + } + // Test with intersections with the right border, more than half a circle visible { // Formula: uc = OFFSET + WIDTH - RADIUS * cos(theta) @@ -248,6 +273,31 @@ int main() hasSucceeded &= isValueOK; } + // Test with circle touching the right border, all the circle is hidden + { + // Formula: uc = OFFSET + WIDTH - RADIUS * cos(theta) + // theta := 0 + float uc = OFFSET + WIDTH + RADIUS; + float vc = OFFSET + 100.f; + vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); + float arcLengthCircle = circle.computeArcLengthInRoI(roi); + float theoreticalValue = 0.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); + std::string statusTest; + if (isValueOK) { + statusTest = "SUCCESS"; + } + else { + statusTest = "FAILED"; + } + std::cout << "Test with circle touching the right border, all the circle is hidden." << std::endl; + std::cout << "\tarc length =" << arcLengthCircle << std::endl; + std::cout << "\ttheoretical length =" << theoreticalValue << std::endl; + std::cout << "\ttest status = " << statusTest << std::endl; + + hasSucceeded &= isValueOK; + } + // Test with intersections with the top border, more than half a circle visible { // v = vc - r sin(theta) @@ -326,6 +376,32 @@ int main() hasSucceeded &= isValueOK; } + // Test with circle touching the top border, all the circle is hidden + { + // v = vc - r sin(theta) + // Formula: vc = OFFSET + RADIUS * sin(theta) + float theta = -M_PI_2f; + float uc = OFFSET + 100.f; + float vc = OFFSET + RADIUS * sin(theta); + vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); + float arcLengthCircle = circle.computeArcLengthInRoI(roi); + float theoreticalValue = 0.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); + std::string statusTest; + if (isValueOK) { + statusTest = "SUCCESS"; + } + else { + statusTest = "FAILED"; + } + std::cout << "Test with circle touching the top border, all the circle is hidden." 
<< std::endl; + std::cout << "\tarc length =" << arcLengthCircle << std::endl; + std::cout << "\ttheoretical length =" << theoreticalValue << std::endl; + std::cout << "\ttest status = " << statusTest << std::endl; + + hasSucceeded &= isValueOK; + } + // Test with intersections with the bottom border, more than half a circle visible { // v = vc - r sin(theta) @@ -402,6 +478,30 @@ int main() hasSucceeded &= isValueOK; } + // Test with circle touching the bottom border, all the circle is hidden + { + // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) + float uc = OFFSET + 100.f; + float vc = OFFSET + HEIGHT + RADIUS; + vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); + float arcLengthCircle = circle.computeArcLengthInRoI(roi); + float theoreticalValue = 0.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); + std::string statusTest; + if (isValueOK) { + statusTest = "SUCCESS"; + } + else { + statusTest = "FAILED"; + } + std::cout << "Test with circle touching the bottom border, all the circle is hidden." << std::endl; + std::cout << "\tarc length =" << arcLengthCircle << std::endl; + std::cout << "\ttheoretical length =" << theoreticalValue << std::endl; + std::cout << "\ttest status = " << statusTest << std::endl; + + hasSucceeded &= isValueOK; + } + // Test with intersections with the top and the left border, crossing each axis once in the RoI { // Formula: u_cross_top_max = uc + r cos (theta_u_top_max) >= umin ; vmin = vc - r sin(theta_u_top_max) @@ -2000,6 +2100,7 @@ int main() std::cout << "testImageCircle overall result: SUCCESS" << std::endl; return EXIT_SUCCESS; } + std::cout << "testImageCircle overall result: FAILED" << std::endl; return EXIT_FAILURE; } From e03dfe392b988020109decbb67032c65667924e7 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Tue, 31 Oct 2023 09:45:59 +0100 Subject: [PATCH 42/43] Renamed the moduloFloat and moduloDouble methods into modulo --- modules/core/include/visp3/core/vpMath.h | 4 ++-- modules/core/src/image/vpImageCircle.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/core/include/visp3/core/vpMath.h b/modules/core/include/visp3/core/vpMath.h index 17bbbe6235..c321b18266 100644 --- a/modules/core/include/visp3/core/vpMath.h +++ b/modules/core/include/visp3/core/vpMath.h @@ -172,7 +172,7 @@ class VISP_EXPORT vpMath * \param[in] modulo The divider. * \return float The rest as in a modulo operation. */ - static float moduloFloat(const float &value, const float &modulo) + static float modulo(const float &value, const float &modulo) { float quotient = std::floor(value / modulo); float rest = value - quotient * modulo; @@ -187,7 +187,7 @@ class VISP_EXPORT vpMath * \param[in] modulo The divider. * \return double The rest as in a modulo operation. 
*/ - static double moduloDouble(const double &value, const double &modulo) + static double modulo(const double &value, const double &modulo) { double quotient = std::floor(value / modulo); double rest = value - quotient * modulo; diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index fd9ed329ec..dfd8b6d7c3 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -1003,7 +1003,7 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi, const float } if (delta_theta < 0 || delta_theta > 2.f * M_PIf) { // Needed since M_PIf is used - float rest = vpMath::moduloFloat(delta_theta, 2.f * M_PIf); + float rest = vpMath::modulo(delta_theta, 2.f * M_PIf); if (rest < roundingTolerance && (delta_theta < -M_PIf || delta_theta > M_PIf)) { // If the angle is a negative multiple of 2.f * M_PIf we consider it to be 2.f * M_PIf delta_theta = 2.f * M_PIf; From 1a93ee07bbac9b1e30eaec73efa530380d2ee6f0 Mon Sep 17 00:00:00 2001 From: rlagneau Date: Tue, 31 Oct 2023 11:20:07 +0100 Subject: [PATCH 43/43] [CORPS] Added 8-connexity N x N dilatation and erosion methods in vpImageMorphology + corresponding unitary tests --- .../include/visp3/core/vpImageMorphology.h | 143 +++++++++++++++++- .../core/test/image/testImageMorphology.cpp | 84 +++++++++- 2 files changed, 218 insertions(+), 9 deletions(-) diff --git a/modules/core/include/visp3/core/vpImageMorphology.h b/modules/core/include/visp3/core/vpImageMorphology.h index 2fd2b6408b..cc83b2e251 100644 --- a/modules/core/include/visp3/core/vpImageMorphology.h +++ b/modules/core/include/visp3/core/vpImageMorphology.h @@ -76,19 +76,31 @@ class VISP_EXPORT vpImageMorphology private: /** - * @brief Modify the image by applying the \b operation on each of its elements on a 3x3 + * \brief Modify the image by applying the \b operation on each of its elements on a 3x3 * grid. * - * @param T Either a class such as vpRGBa or a type such as double, unsigned char ... - * @param I The image we want to modify. - * @param null_value The value that is padded to the input image to manage the borders. - * @param operation The operation to apply to its elements on a 3x3 grid. - * @param connexity Either a 4-connexity, if we want to take into account only the horizontal + * \tparam T Either a class such as vpRGBa or a type such as double, unsigned char ... + * \param[out] I The image we want to modify. + * \param[in] null_value The value that is padded to the input image to manage the borders. + * \param[in] operation The operation to apply to its elements on a 3x3 grid. + * \param[in] connexity Either a 4-connexity, if we want to take into account only the horizontal * and vertical neighbors, or a 8-connexity, if we want to also take into account the diagonal neighbors. */ template static void imageOperation(vpImage &I, const T &null_value, const T &(*operation)(const T &, const T &), const vpConnexityType &connexity = CONNEXITY_4); + /** + * \brief Modify the image by applying the \b operation on each of its elements on a \b size x \b size + * grid. The connexity that is used is a 8-connexity. + * + * \tparam T Any type such as double, unsigned char ... + * \param[out] I The image we want to modify. + * \param[in] size The size of the window with which we want to work. + * \param[in] operation The operation to apply to its elements. 
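+   * Near the image borders the \b size x \b size window is clamped to the
+   * image limits, so fewer neighbors are taken into account there.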
+ */ + template + static void imageOperation(vpImage &I, const int &size, const T &(*operation)(const T &, const T &)); + public: template static void erosion(vpImage &I, Type value, Type value_out, vpConnexityType connexity = CONNEXITY_4); @@ -102,6 +114,12 @@ class VISP_EXPORT vpImageMorphology template static void dilatation(vpImage &I, const vpConnexityType &connexity = CONNEXITY_4); + template + static void erosion(vpImage &I, const int &size); + + template + static void dilatation(vpImage &I, const int &size); + #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) /*! @name Deprecated functions @@ -351,7 +369,7 @@ void vpImageMorphology::imageOperation(vpImage &I, const T &null_value, const \param I : Image to process. \param connexity : Type of connexity: 4 or 8. - \sa dilatation(vpImage &, const vpConnexityType &) + \sa dilatation(vpImage &, const vpConnexityType &) */ template void vpImageMorphology::erosion(vpImage &I, const vpConnexityType &connexity) @@ -381,7 +399,7 @@ void vpImageMorphology::erosion(vpImage &I, const vpConnexityType &connexity) \param I : Image to process. \param connexity : Type of connexity: 4 or 8. - \sa erosion(vpImage &, const vpConnexityType &) + \sa erosion(vpImage &, const vpConnexityType &) */ template void vpImageMorphology::dilatation(vpImage &I, const vpConnexityType &connexity) @@ -389,6 +407,115 @@ void vpImageMorphology::dilatation(vpImage &I, const vpConnexityType &connexi const T &(*operation)(const T & a, const T & b) = std::max; vpImageMorphology::imageOperation(I, std::numeric_limits::min(), operation, connexity); } + +/** + * \brief Dilatation of \b size >=3 with 8-connectivity. + * + * \tparam T Any type of image, except vpRGBa . + * \param[out] I The image to which the dilatation must be applied, where the dilatation corresponds + * to a max operator on a window of size \b size. + * \param[in] size The size of the window on which is performed the max operator for each pixel. + */ +template +void vpImageMorphology::imageOperation(vpImage &I, const int &size, const T &(*operation)(const T &, const T &)) +{ + if (size % 2 != 1) { + throw(vpException(vpException::badValue, "Dilatation kernel must be odd.")); + } + + const int width_in = I.getWidth(); + const int height_in = I.getHeight(); + int halfKernelSize = size / 2; + vpImage J = I; + + for (int r = 0; r < height_in; r++) { + // Computing the rows we can explore without going outside the limits of the image + int r_iterator_start = -halfKernelSize, r_iterator_stop = halfKernelSize + 1; + if (r - halfKernelSize < 0) { + r_iterator_start = -r; + } + else if (r + halfKernelSize >= height_in) { + r_iterator_stop = height_in - r; + } + for (int c = 0; c < width_in; c++) { + T value = I[r][c]; + // Computing the columns we can explore without going outside the limits of the image + int c_iterator_start = -halfKernelSize, c_iterator_stop = halfKernelSize + 1; + if (c - halfKernelSize < 0) { + c_iterator_start = -c; + } + else if (c + halfKernelSize >= width_in) { + c_iterator_stop = width_in - c; + } + for (int r_iterator = r_iterator_start; r_iterator < r_iterator_stop; r_iterator++) { + for (int c_iterator = c_iterator_start; c_iterator < c_iterator_stop; c_iterator++) { + value = operation(value, J[r + r_iterator][c + c_iterator]); + } + } + I[r][c] = value; + } + } +} + +/*! + Erode an image using the given structuring element. 
+ + The erosion of \f$ A \left( x, y \right) \f$ by \f$ B \left (x, y + \right) \f$ is defined as: \f[ \left ( A \ominus B \right ) \left( x,y + \right) = \textbf{min} \left \{ A \left ( x+x', y+y' \right ) - B \left ( + x', y'\right ) | \left ( x', y'\right ) \subseteq D_B \right \} \f] where + \f$ D_B \f$ is the domain of the structuring element \f$ B \f$ and \f$ A + \left( x,y \right) \f$ is assumed to be \f$ + \infty \f$ outside the domain + of the image. + + In our case, the erosion is performed with a flat structuring element + \f$ \left( B \left( x,y \right) = 0 \right) \f$. The erosion using + such a structuring element is equivalent to a local-minimum operator: \f[ + \left ( A \ominus B \right ) \left( x,y \right) = \textbf{min} \left \{ A + \left ( x+x', y+y' \right ) | \left ( x', y'\right ) \subseteq D_B \right \} + \f] + + \param I : Image to process. + \param size : The size of the kernel + + \sa dilatation(vpImage &, const int &) +*/ +template +void vpImageMorphology::erosion(vpImage &I, const int &size) +{ + const T &(*operation)(const T & a, const T & b) = std::min; + vpImageMorphology::imageOperation(I, size, operation); +} + +/*! + Dilate an image using the given structuring element. + + The dilatation of \f$ A \left( x, y \right) \f$ by \f$ B \left + (x, y \right) \f$ is defined as: \f[ \left ( A \oplus B \right ) \left( x,y + \right) = \textbf{max} \left \{ A \left ( x-x', y-y' \right ) + B \left ( + x', y'\right ) | \left ( x', y'\right ) \subseteq D_B \right \} \f] where + \f$ D_B \f$ is the domain of the structuring element \f$ B \f$ and \f$ A + \left( x,y \right) \f$ is assumed to be \f$ - \infty \f$ outside the domain + of the image. + + In our case, the dilatation is performed with a flat structuring element + \f$ \left( B \left( x,y \right) = 0 \right) \f$. The dilatation using + such a structuring element is equivalent to a local-maximum operator: \f[ + \left ( A \oplus B \right ) \left( x,y \right) = \textbf{max} \left \{ A + \left ( x-x', y-y' \right ) | \left ( x', y'\right ) \subseteq D_B \right \} + \f] + + \param I : Image to process. + \param size : The size of the kernel. 
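+
+  For instance, with \b I a vpImage<unsigned char>, calling
+  vpImageMorphology::dilatation(I, 5) replaces each pixel by the maximum
+  value found in its 5x5 neighborhood (clamped at the image borders).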
+ + \sa erosion(vpImage &, const int &) +*/ +template +void vpImageMorphology::dilatation(vpImage &I, const int &size) +{ + const T &(*operation)(const T & a, const T & b) = std::max; + vpImageMorphology::imageOperation(I, size, operation); +} #endif /* diff --git a/modules/core/test/image/testImageMorphology.cpp b/modules/core/test/image/testImageMorphology.cpp index 6be8c128a2..8e051b8330 100644 --- a/modules/core/test/image/testImageMorphology.cpp +++ b/modules/core/test/image/testImageMorphology.cpp @@ -124,6 +124,26 @@ TEST_CASE("Binary image morphology", "[image_morphology]") CHECK((I_morpho_ref == I_morpho_tpl)); CHECK((I_morpho_ref == I_morpho)); } + + SECTION("8-connexity-size5") + { + vpImage I_dilatation_ref(8, 16, 1); + I_dilatation_ref[0][0] = 0; + I_dilatation_ref[0][1] = 0; + I_dilatation_ref[0][2] = 0; + I_dilatation_ref[6][12] = 0; + I_dilatation_ref[7][12] = 0; + vpImage I_dilatation = I; + vpImage I_erosion_ref(8, 16, 0); + vpImage I_erosion = I; + + const int size = 5; + vpImageMorphology::dilatation(I_dilatation, size); + vpImageMorphology::erosion(I_erosion, size); + + CHECK((I_dilatation_ref == I_dilatation)); + CHECK((I_erosion_ref == I_erosion)); + } } SECTION("Matlab reference") @@ -211,6 +231,37 @@ TEST_CASE("Gray image morphology", "[image_morphology]") CHECK((I_morpho_ref == I_morpho)); } + + SECTION("8-connexity-size5") + { + const int size = 5; + vpImage I_morpho(12, 12); + unsigned char count = 1; + for (int r = 0; r < 12; r++) { + for (int c = 0; c < 12; c++) { + I_morpho[r][c] = count; + count++; + } + } + unsigned char image_data_dilatation[12 * 12] = { + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 36, 36, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 48, 48, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 60, 60, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 72, 72, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 84, 84, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 96, 96, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 108, 108, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 120, 120, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 132, 132, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 144, 144, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 144, 144, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 144, 144 }; + vpImage I_dilatation_ref(image_data_dilatation, 12, 12, true); + + vpImageMorphology::dilatation(I_morpho, size); + + CHECK((I_dilatation_ref == I_morpho)); + } } SECTION("Erosion") @@ -238,6 +289,37 @@ TEST_CASE("Gray image morphology", "[image_morphology]") CHECK((I_morpho_ref == I_morpho)); } + + SECTION("8-connexity-size5") + { + const int size = 5; + vpImage I_morpho(12, 12); + unsigned char count = 1; + for (int r = 0; r < 12; r++) { + for (int c = 0; c < 12; c++) { + I_morpho[r][c] = count; + count++; + } + } + unsigned char image_data_erosion[12 * 12] = { + 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 13, 13, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 25, 25, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 37, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 49, 49, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 61, 61, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 73, 73, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 85, 85, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 97, 97, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 109, 109, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118 }; + vpImage I_erosion_ref(image_data_erosion, 12, 12, true); + + vpImageMorphology::erosion(I_morpho, size); 
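+      // Intensities increase from left to right and from top to bottom, so
+      // the minimum over the clamped 5x5 window is the value at
+      // (max(0, r - 2), max(0, c - 2)), which is what image_data_erosion encodes.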
+ + CHECK((I_erosion_ref == I_morpho)); + } } SECTION("Matlab reference") @@ -336,7 +418,7 @@ TEST_CASE("Gray image morphology", "[image_morphology]") } } -int main(int argc, char *argv []) +int main(int argc, char *argv[]) { Catch::Session session; // There must be exactly one instance